@genkit-ai/compat-oai 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +203 -0
- package/README.md +201 -0
- package/lib/audio.d.mts +77 -0
- package/lib/audio.d.ts +77 -0
- package/lib/audio.js +185 -0
- package/lib/audio.js.map +1 -0
- package/lib/audio.mjs +159 -0
- package/lib/audio.mjs.map +1 -0
- package/lib/deepseek/deepseek.d.mts +96 -0
- package/lib/deepseek/deepseek.d.ts +96 -0
- package/lib/deepseek/deepseek.js +62 -0
- package/lib/deepseek/deepseek.js.map +1 -0
- package/lib/deepseek/deepseek.mjs +38 -0
- package/lib/deepseek/deepseek.mjs.map +1 -0
- package/lib/deepseek/index.d.mts +67 -0
- package/lib/deepseek/index.d.ts +67 -0
- package/lib/deepseek/index.js +93 -0
- package/lib/deepseek/index.js.map +1 -0
- package/lib/deepseek/index.mjs +64 -0
- package/lib/deepseek/index.mjs.map +1 -0
- package/lib/embedder.d.mts +41 -0
- package/lib/embedder.d.ts +41 -0
- package/lib/embedder.js +51 -0
- package/lib/embedder.js.map +1 -0
- package/lib/embedder.mjs +27 -0
- package/lib/embedder.mjs.map +1 -0
- package/lib/image.d.mts +68 -0
- package/lib/image.d.ts +68 -0
- package/lib/image.js +105 -0
- package/lib/image.js.map +1 -0
- package/lib/image.mjs +79 -0
- package/lib/image.mjs.map +1 -0
- package/lib/index.d.mts +66 -0
- package/lib/index.d.ts +66 -0
- package/lib/index.js +56 -0
- package/lib/index.js.map +1 -0
- package/lib/index.mjs +32 -0
- package/lib/index.mjs.map +1 -0
- package/lib/model.d.mts +144 -0
- package/lib/model.d.ts +144 -0
- package/lib/model.js +349 -0
- package/lib/model.js.map +1 -0
- package/lib/model.mjs +315 -0
- package/lib/model.mjs.map +1 -0
- package/lib/openai/dalle.d.mts +51 -0
- package/lib/openai/dalle.d.ts +51 -0
- package/lib/openai/dalle.js +43 -0
- package/lib/openai/dalle.js.map +1 -0
- package/lib/openai/dalle.mjs +21 -0
- package/lib/openai/dalle.mjs.map +1 -0
- package/lib/openai/embedder.d.mts +79 -0
- package/lib/openai/embedder.d.ts +79 -0
- package/lib/openai/embedder.js +82 -0
- package/lib/openai/embedder.js.map +1 -0
- package/lib/openai/embedder.mjs +53 -0
- package/lib/openai/embedder.mjs.map +1 -0
- package/lib/openai/gpt.d.mts +1207 -0
- package/lib/openai/gpt.d.ts +1207 -0
- package/lib/openai/gpt.js +326 -0
- package/lib/openai/gpt.js.map +1 -0
- package/lib/openai/gpt.mjs +286 -0
- package/lib/openai/gpt.mjs.map +1 -0
- package/lib/openai/index.d.mts +77 -0
- package/lib/openai/index.d.ts +77 -0
- package/lib/openai/index.js +195 -0
- package/lib/openai/index.js.map +1 -0
- package/lib/openai/index.mjs +182 -0
- package/lib/openai/index.mjs.map +1 -0
- package/lib/openai/tts.d.mts +96 -0
- package/lib/openai/tts.d.ts +96 -0
- package/lib/openai/tts.js +83 -0
- package/lib/openai/tts.js.map +1 -0
- package/lib/openai/tts.mjs +54 -0
- package/lib/openai/tts.mjs.map +1 -0
- package/lib/openai/whisper.d.mts +441 -0
- package/lib/openai/whisper.d.ts +441 -0
- package/lib/openai/whisper.js +83 -0
- package/lib/openai/whisper.js.map +1 -0
- package/lib/openai/whisper.mjs +55 -0
- package/lib/openai/whisper.mjs.map +1 -0
- package/lib/xai/grok-image.d.mts +66 -0
- package/lib/xai/grok-image.d.ts +66 -0
- package/lib/xai/grok-image.js +43 -0
- package/lib/xai/grok-image.js.map +1 -0
- package/lib/xai/grok-image.mjs +21 -0
- package/lib/xai/grok-image.mjs.map +1 -0
- package/lib/xai/grok.d.mts +192 -0
- package/lib/xai/grok.d.ts +192 -0
- package/lib/xai/grok.js +61 -0
- package/lib/xai/grok.js.map +1 -0
- package/lib/xai/grok.mjs +37 -0
- package/lib/xai/grok.mjs.map +1 -0
- package/lib/xai/index.d.mts +70 -0
- package/lib/xai/index.d.ts +70 -0
- package/lib/xai/index.js +118 -0
- package/lib/xai/index.js.map +1 -0
- package/lib/xai/index.mjs +93 -0
- package/lib/xai/index.mjs.map +1 -0
- package/package.json +93 -0
package/lib/model.d.mts
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
import { z, Role, Part, MessageData, ToolRequestPart, GenerateResponseData, GenerateRequest, StreamingCallback, Genkit, ModelReference } from 'genkit';
|
|
2
|
+
import { ToolDefinition, GenerateResponseChunkData, ModelAction } from 'genkit/model';
|
|
3
|
+
import OpenAI from 'openai';
|
|
4
|
+
import { ChatCompletionRole, ChatCompletionTool, ChatCompletionContentPart, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionChunk, ChatCompletion } from 'openai/resources/index.mjs';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Copyright 2024 The Fire Company
|
|
8
|
+
* Copyright 2024 Google LLC
|
|
9
|
+
*
|
|
10
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
11
|
+
* you may not use this file except in compliance with the License.
|
|
12
|
+
* You may obtain a copy of the License at
|
|
13
|
+
*
|
|
14
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
15
|
+
*
|
|
16
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
17
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
18
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
19
|
+
* See the License for the specific language governing permissions and
|
|
20
|
+
* limitations under the License.
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
declare const VisualDetailLevelSchema: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
|
|
24
|
+
type VisualDetailLevel = z.infer<typeof VisualDetailLevelSchema>;
|
|
25
|
+
declare const ChatCompletionCommonConfigSchema: z.ZodObject<{
|
|
26
|
+
version: z.ZodOptional<z.ZodString>;
|
|
27
|
+
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
|
|
28
|
+
topK: z.ZodOptional<z.ZodNumber>;
|
|
29
|
+
topP: z.ZodOptional<z.ZodNumber>;
|
|
30
|
+
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
31
|
+
} & {
|
|
32
|
+
temperature: z.ZodOptional<z.ZodNumber>;
|
|
33
|
+
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
|
|
34
|
+
logProbs: z.ZodOptional<z.ZodBoolean>;
|
|
35
|
+
presencePenalty: z.ZodOptional<z.ZodNumber>;
|
|
36
|
+
topLogProbs: z.ZodOptional<z.ZodNumber>;
|
|
37
|
+
}, "passthrough", z.ZodTypeAny, z.objectOutputType<{
|
|
38
|
+
version: z.ZodOptional<z.ZodString>;
|
|
39
|
+
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
|
|
40
|
+
topK: z.ZodOptional<z.ZodNumber>;
|
|
41
|
+
topP: z.ZodOptional<z.ZodNumber>;
|
|
42
|
+
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
43
|
+
} & {
|
|
44
|
+
temperature: z.ZodOptional<z.ZodNumber>;
|
|
45
|
+
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
|
|
46
|
+
logProbs: z.ZodOptional<z.ZodBoolean>;
|
|
47
|
+
presencePenalty: z.ZodOptional<z.ZodNumber>;
|
|
48
|
+
topLogProbs: z.ZodOptional<z.ZodNumber>;
|
|
49
|
+
}, z.ZodTypeAny, "passthrough">, z.objectInputType<{
|
|
50
|
+
version: z.ZodOptional<z.ZodString>;
|
|
51
|
+
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
|
|
52
|
+
topK: z.ZodOptional<z.ZodNumber>;
|
|
53
|
+
topP: z.ZodOptional<z.ZodNumber>;
|
|
54
|
+
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
55
|
+
} & {
|
|
56
|
+
temperature: z.ZodOptional<z.ZodNumber>;
|
|
57
|
+
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
|
|
58
|
+
logProbs: z.ZodOptional<z.ZodBoolean>;
|
|
59
|
+
presencePenalty: z.ZodOptional<z.ZodNumber>;
|
|
60
|
+
topLogProbs: z.ZodOptional<z.ZodNumber>;
|
|
61
|
+
}, z.ZodTypeAny, "passthrough">>;
|
|
62
|
+
declare function toOpenAIRole(role: Role): ChatCompletionRole;
|
|
63
|
+
/**
|
|
64
|
+
* Converts a Genkit ToolDefinition to an OpenAI ChatCompletionTool object.
|
|
65
|
+
* @param tool The Genkit ToolDefinition to convert.
|
|
66
|
+
* @returns The converted OpenAI ChatCompletionTool object.
|
|
67
|
+
*/
|
|
68
|
+
declare function toOpenAITool(tool: ToolDefinition): ChatCompletionTool;
|
|
69
|
+
/**
|
|
70
|
+
* Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart.
|
|
71
|
+
* @param part The Genkit Part to convert.
|
|
72
|
+
* @param visualDetailLevel The visual detail level to use for media parts.
|
|
73
|
+
* @returns The corresponding OpenAI ChatCompletionContentPart.
|
|
74
|
+
* @throws Error if the part contains unsupported fields for the current message role.
|
|
75
|
+
*/
|
|
76
|
+
declare function toOpenAITextAndMedia(part: Part, visualDetailLevel: VisualDetailLevel): ChatCompletionContentPart;
|
|
77
|
+
/**
|
|
78
|
+
* Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array.
|
|
79
|
+
* @param messages The Genkit MessageData array to convert.
|
|
80
|
+
* @param visualDetailLevel The visual detail level to use for media parts.
|
|
81
|
+
* @returns The converted OpenAI ChatCompletionMessageParam array.
|
|
82
|
+
*/
|
|
83
|
+
declare function toOpenAIMessages(messages: MessageData[], visualDetailLevel?: VisualDetailLevel): ChatCompletionMessageParam[];
|
|
84
|
+
/**
|
|
85
|
+
* Converts an OpenAI tool call to a Genkit ToolRequestPart.
|
|
86
|
+
* @param toolCall The OpenAI tool call to convert.
|
|
87
|
+
* @returns The converted Genkit ToolRequestPart.
|
|
88
|
+
*/
|
|
89
|
+
declare function fromOpenAIToolCall(toolCall: ChatCompletionMessageToolCall | ChatCompletionChunk.Choice.Delta.ToolCall, choice: ChatCompletion.Choice | ChatCompletionChunk.Choice): ToolRequestPart;
|
|
90
|
+
/**
|
|
91
|
+
* Converts an OpenAI message event to a Genkit GenerateResponseData object.
|
|
92
|
+
* @param choice The OpenAI message event to convert.
|
|
93
|
+
* @param jsonMode Whether the event is a JSON response.
|
|
94
|
+
* @returns The converted Genkit GenerateResponseData object.
|
|
95
|
+
*/
|
|
96
|
+
declare function fromOpenAIChoice(choice: ChatCompletion.Choice, jsonMode?: boolean): GenerateResponseData;
|
|
97
|
+
/**
|
|
98
|
+
* Converts an OpenAI message stream event to a Genkit GenerateResponseData
|
|
99
|
+
* object.
|
|
100
|
+
* @param choice The OpenAI message stream event to convert.
|
|
101
|
+
* @param jsonMode Whether the event is a JSON response.
|
|
102
|
+
* @returns The converted Genkit GenerateResponseData object.
|
|
103
|
+
*/
|
|
104
|
+
declare function fromOpenAIChunkChoice(choice: ChatCompletionChunk.Choice, jsonMode?: boolean): GenerateResponseData;
|
|
105
|
+
/**
|
|
106
|
+
* Converts an OpenAI request to an OpenAI API request body.
|
|
107
|
+
* @param modelName The name of the OpenAI model to use.
|
|
108
|
+
* @param request The Genkit GenerateRequest to convert.
|
|
109
|
+
* @returns The converted OpenAI API request body.
|
|
110
|
+
* @throws An error if the specified model is not supported or if an unsupported output format is requested.
|
|
111
|
+
*/
|
|
112
|
+
declare function toOpenAIRequestBody(modelName: string, request: GenerateRequest): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming;
|
|
113
|
+
/**
|
|
114
|
+
* Creates the runner used by Genkit to interact with an OpenAI compatible
|
|
115
|
+
* model.
|
|
116
|
+
* @param name The name of the GPT model.
|
|
117
|
+
* @param client The OpenAI client instance.
|
|
118
|
+
* @returns The runner that Genkit will call when the model is invoked.
|
|
119
|
+
*/
|
|
120
|
+
declare function openAIModelRunner(name: string, client: OpenAI): (request: GenerateRequest, streamingCallback?: StreamingCallback<GenerateResponseChunkData>) => Promise<GenerateResponseData>;
|
|
121
|
+
/**
|
|
122
|
+
* Method to define a new Genkit Model that is compatible with Open AI
|
|
123
|
+
* Chat Completions API.
|
|
124
|
+
*
|
|
125
|
+
* These models are to be used to chat with a large language model.
|
|
126
|
+
*
|
|
127
|
+
* @param params An object containing parameters for defining the OpenAI
|
|
128
|
+
* Chat model.
|
|
129
|
+
* @param params.ai The Genkit AI instance.
|
|
130
|
+
* @param params.name The name of the model.
|
|
131
|
+
* @param params.client The OpenAI client instance.
|
|
132
|
+
* @param params.modelRef Optional reference to the model's configuration and
|
|
133
|
+
* custom options.
|
|
134
|
+
|
|
135
|
+
* @returns the created {@link ModelAction}
|
|
136
|
+
*/
|
|
137
|
+
declare function defineCompatOpenAIModel<CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(params: {
|
|
138
|
+
ai: Genkit;
|
|
139
|
+
name: string;
|
|
140
|
+
client: OpenAI;
|
|
141
|
+
modelRef?: ModelReference<CustomOptions>;
|
|
142
|
+
}): ModelAction;
|
|
143
|
+
|
|
144
|
+
export { ChatCompletionCommonConfigSchema, defineCompatOpenAIModel, fromOpenAIChoice, fromOpenAIChunkChoice, fromOpenAIToolCall, openAIModelRunner, toOpenAIMessages, toOpenAIRequestBody, toOpenAIRole, toOpenAITextAndMedia, toOpenAITool };
|
package/lib/model.d.ts
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
import { z, Role, Part, MessageData, ToolRequestPart, GenerateResponseData, GenerateRequest, StreamingCallback, Genkit, ModelReference } from 'genkit';
|
|
2
|
+
import { ToolDefinition, GenerateResponseChunkData, ModelAction } from 'genkit/model';
|
|
3
|
+
import OpenAI from 'openai';
|
|
4
|
+
import { ChatCompletionRole, ChatCompletionTool, ChatCompletionContentPart, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionChunk, ChatCompletion } from 'openai/resources/index.mjs';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Copyright 2024 The Fire Company
|
|
8
|
+
* Copyright 2024 Google LLC
|
|
9
|
+
*
|
|
10
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
11
|
+
* you may not use this file except in compliance with the License.
|
|
12
|
+
* You may obtain a copy of the License at
|
|
13
|
+
*
|
|
14
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
15
|
+
*
|
|
16
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
17
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
18
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
19
|
+
* See the License for the specific language governing permissions and
|
|
20
|
+
* limitations under the License.
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
declare const VisualDetailLevelSchema: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
|
|
24
|
+
type VisualDetailLevel = z.infer<typeof VisualDetailLevelSchema>;
|
|
25
|
+
declare const ChatCompletionCommonConfigSchema: z.ZodObject<{
|
|
26
|
+
version: z.ZodOptional<z.ZodString>;
|
|
27
|
+
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
|
|
28
|
+
topK: z.ZodOptional<z.ZodNumber>;
|
|
29
|
+
topP: z.ZodOptional<z.ZodNumber>;
|
|
30
|
+
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
31
|
+
} & {
|
|
32
|
+
temperature: z.ZodOptional<z.ZodNumber>;
|
|
33
|
+
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
|
|
34
|
+
logProbs: z.ZodOptional<z.ZodBoolean>;
|
|
35
|
+
presencePenalty: z.ZodOptional<z.ZodNumber>;
|
|
36
|
+
topLogProbs: z.ZodOptional<z.ZodNumber>;
|
|
37
|
+
}, "passthrough", z.ZodTypeAny, z.objectOutputType<{
|
|
38
|
+
version: z.ZodOptional<z.ZodString>;
|
|
39
|
+
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
|
|
40
|
+
topK: z.ZodOptional<z.ZodNumber>;
|
|
41
|
+
topP: z.ZodOptional<z.ZodNumber>;
|
|
42
|
+
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
43
|
+
} & {
|
|
44
|
+
temperature: z.ZodOptional<z.ZodNumber>;
|
|
45
|
+
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
|
|
46
|
+
logProbs: z.ZodOptional<z.ZodBoolean>;
|
|
47
|
+
presencePenalty: z.ZodOptional<z.ZodNumber>;
|
|
48
|
+
topLogProbs: z.ZodOptional<z.ZodNumber>;
|
|
49
|
+
}, z.ZodTypeAny, "passthrough">, z.objectInputType<{
|
|
50
|
+
version: z.ZodOptional<z.ZodString>;
|
|
51
|
+
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
|
|
52
|
+
topK: z.ZodOptional<z.ZodNumber>;
|
|
53
|
+
topP: z.ZodOptional<z.ZodNumber>;
|
|
54
|
+
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
|
|
55
|
+
} & {
|
|
56
|
+
temperature: z.ZodOptional<z.ZodNumber>;
|
|
57
|
+
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
|
|
58
|
+
logProbs: z.ZodOptional<z.ZodBoolean>;
|
|
59
|
+
presencePenalty: z.ZodOptional<z.ZodNumber>;
|
|
60
|
+
topLogProbs: z.ZodOptional<z.ZodNumber>;
|
|
61
|
+
}, z.ZodTypeAny, "passthrough">>;
|
|
62
|
+
declare function toOpenAIRole(role: Role): ChatCompletionRole;
|
|
63
|
+
/**
|
|
64
|
+
* Converts a Genkit ToolDefinition to an OpenAI ChatCompletionTool object.
|
|
65
|
+
* @param tool The Genkit ToolDefinition to convert.
|
|
66
|
+
* @returns The converted OpenAI ChatCompletionTool object.
|
|
67
|
+
*/
|
|
68
|
+
declare function toOpenAITool(tool: ToolDefinition): ChatCompletionTool;
|
|
69
|
+
/**
|
|
70
|
+
* Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart.
|
|
71
|
+
* @param part The Genkit Part to convert.
|
|
72
|
+
* @param visualDetailLevel The visual detail level to use for media parts.
|
|
73
|
+
* @returns The corresponding OpenAI ChatCompletionContentPart.
|
|
74
|
+
* @throws Error if the part contains unsupported fields for the current message role.
|
|
75
|
+
*/
|
|
76
|
+
declare function toOpenAITextAndMedia(part: Part, visualDetailLevel: VisualDetailLevel): ChatCompletionContentPart;
|
|
77
|
+
/**
|
|
78
|
+
* Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array.
|
|
79
|
+
* @param messages The Genkit MessageData array to convert.
|
|
80
|
+
* @param visualDetailLevel The visual detail level to use for media parts.
|
|
81
|
+
* @returns The converted OpenAI ChatCompletionMessageParam array.
|
|
82
|
+
*/
|
|
83
|
+
declare function toOpenAIMessages(messages: MessageData[], visualDetailLevel?: VisualDetailLevel): ChatCompletionMessageParam[];
|
|
84
|
+
/**
|
|
85
|
+
* Converts an OpenAI tool call to a Genkit ToolRequestPart.
|
|
86
|
+
* @param toolCall The OpenAI tool call to convert.
|
|
87
|
+
* @returns The converted Genkit ToolRequestPart.
|
|
88
|
+
*/
|
|
89
|
+
declare function fromOpenAIToolCall(toolCall: ChatCompletionMessageToolCall | ChatCompletionChunk.Choice.Delta.ToolCall, choice: ChatCompletion.Choice | ChatCompletionChunk.Choice): ToolRequestPart;
|
|
90
|
+
/**
|
|
91
|
+
* Converts an OpenAI message event to a Genkit GenerateResponseData object.
|
|
92
|
+
* @param choice The OpenAI message event to convert.
|
|
93
|
+
* @param jsonMode Whether the event is a JSON response.
|
|
94
|
+
* @returns The converted Genkit GenerateResponseData object.
|
|
95
|
+
*/
|
|
96
|
+
declare function fromOpenAIChoice(choice: ChatCompletion.Choice, jsonMode?: boolean): GenerateResponseData;
|
|
97
|
+
/**
|
|
98
|
+
* Converts an OpenAI message stream event to a Genkit GenerateResponseData
|
|
99
|
+
* object.
|
|
100
|
+
* @param choice The OpenAI message stream event to convert.
|
|
101
|
+
* @param jsonMode Whether the event is a JSON response.
|
|
102
|
+
* @returns The converted Genkit GenerateResponseData object.
|
|
103
|
+
*/
|
|
104
|
+
declare function fromOpenAIChunkChoice(choice: ChatCompletionChunk.Choice, jsonMode?: boolean): GenerateResponseData;
|
|
105
|
+
/**
|
|
106
|
+
* Converts an OpenAI request to an OpenAI API request body.
|
|
107
|
+
* @param modelName The name of the OpenAI model to use.
|
|
108
|
+
* @param request The Genkit GenerateRequest to convert.
|
|
109
|
+
* @returns The converted OpenAI API request body.
|
|
110
|
+
* @throws An error if the specified model is not supported or if an unsupported output format is requested.
|
|
111
|
+
*/
|
|
112
|
+
declare function toOpenAIRequestBody(modelName: string, request: GenerateRequest): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming;
|
|
113
|
+
/**
|
|
114
|
+
* Creates the runner used by Genkit to interact with an OpenAI compatible
|
|
115
|
+
* model.
|
|
116
|
+
* @param name The name of the GPT model.
|
|
117
|
+
* @param client The OpenAI client instance.
|
|
118
|
+
* @returns The runner that Genkit will call when the model is invoked.
|
|
119
|
+
*/
|
|
120
|
+
declare function openAIModelRunner(name: string, client: OpenAI): (request: GenerateRequest, streamingCallback?: StreamingCallback<GenerateResponseChunkData>) => Promise<GenerateResponseData>;
|
|
121
|
+
/**
|
|
122
|
+
* Method to define a new Genkit Model that is compatible with Open AI
|
|
123
|
+
* Chat Completions API.
|
|
124
|
+
*
|
|
125
|
+
* These models are to be used to chat with a large language model.
|
|
126
|
+
*
|
|
127
|
+
* @param params An object containing parameters for defining the OpenAI
|
|
128
|
+
* Chat model.
|
|
129
|
+
* @param params.ai The Genkit AI instance.
|
|
130
|
+
* @param params.name The name of the model.
|
|
131
|
+
* @param params.client The OpenAI client instance.
|
|
132
|
+
* @param params.modelRef Optional reference to the model's configuration and
|
|
133
|
+
* custom options.
|
|
134
|
+
|
|
135
|
+
* @returns the created {@link ModelAction}
|
|
136
|
+
*/
|
|
137
|
+
declare function defineCompatOpenAIModel<CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(params: {
|
|
138
|
+
ai: Genkit;
|
|
139
|
+
name: string;
|
|
140
|
+
client: OpenAI;
|
|
141
|
+
modelRef?: ModelReference<CustomOptions>;
|
|
142
|
+
}): ModelAction;
|
|
143
|
+
|
|
144
|
+
export { ChatCompletionCommonConfigSchema, defineCompatOpenAIModel, fromOpenAIChoice, fromOpenAIChunkChoice, fromOpenAIToolCall, openAIModelRunner, toOpenAIMessages, toOpenAIRequestBody, toOpenAIRole, toOpenAITextAndMedia, toOpenAITool };
|
package/lib/model.js
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
var model_exports = {};
|
|
20
|
+
__export(model_exports, {
|
|
21
|
+
ChatCompletionCommonConfigSchema: () => ChatCompletionCommonConfigSchema,
|
|
22
|
+
defineCompatOpenAIModel: () => defineCompatOpenAIModel,
|
|
23
|
+
fromOpenAIChoice: () => fromOpenAIChoice,
|
|
24
|
+
fromOpenAIChunkChoice: () => fromOpenAIChunkChoice,
|
|
25
|
+
fromOpenAIToolCall: () => fromOpenAIToolCall,
|
|
26
|
+
openAIModelRunner: () => openAIModelRunner,
|
|
27
|
+
toOpenAIMessages: () => toOpenAIMessages,
|
|
28
|
+
toOpenAIRequestBody: () => toOpenAIRequestBody,
|
|
29
|
+
toOpenAIRole: () => toOpenAIRole,
|
|
30
|
+
toOpenAITextAndMedia: () => toOpenAITextAndMedia,
|
|
31
|
+
toOpenAITool: () => toOpenAITool
|
|
32
|
+
});
|
|
33
|
+
module.exports = __toCommonJS(model_exports);
|
|
34
|
+
var import_genkit = require("genkit");
|
|
35
|
+
// Detail level for image inputs, mirroring OpenAI's `image_url.detail`
// options ("auto" | "low" | "high"); optional, defaults handled by callers.
const VisualDetailLevelSchema = import_genkit.z.enum(["auto", "low", "high"]).optional();
// Extends Genkit's shared generation config with OpenAI-specific knobs.
// Ranges follow the OpenAI Chat Completions API: temperature in [0, 2],
// frequency/presence penalties in [-2, 2], topLogProbs an int in [0, 20].
const ChatCompletionCommonConfigSchema = import_genkit.GenerationCommonConfigSchema.extend({
  temperature: import_genkit.z.number().min(0).max(2).optional(),
  frequencyPenalty: import_genkit.z.number().min(-2).max(2).optional(),
  logProbs: import_genkit.z.boolean().optional(),
  presencePenalty: import_genkit.z.number().min(-2).max(2).optional(),
  topLogProbs: import_genkit.z.number().int().min(0).max(20).optional()
});
|
|
43
|
+
/**
 * Maps a Genkit message role onto the OpenAI chat-completion role vocabulary.
 * The only name that differs between the two is "model" -> "assistant".
 * Throws for any role with no OpenAI counterpart.
 */
function toOpenAIRole(role) {
  const roleMap = {
    user: "user",
    model: "assistant",
    system: "system",
    tool: "tool"
  };
  const openAIRole = roleMap[role];
  if (openAIRole === undefined) {
    throw new Error(`role ${role} doesn't map to an OpenAI role.`);
  }
  return openAIRole;
}
|
|
57
|
+
/**
 * Converts a Genkit ToolDefinition into an OpenAI ChatCompletionTool.
 * A null input schema is normalized to undefined because the OpenAI API
 * expects the "parameters" field to be absent rather than null.
 */
function toOpenAITool(tool) {
  const parameters = tool.inputSchema ?? void 0;
  return {
    type: "function",
    function: {
      name: tool.name,
      parameters
    }
  };
}
|
|
66
|
+
/**
 * Converts one Genkit Part into the matching OpenAI content part.
 * Text parts win over media if both are somehow present; media parts
 * become image_url entries carrying the requested visual detail level.
 * Throws for parts that carry neither text nor media.
 */
function toOpenAITextAndMedia(part, visualDetailLevel) {
  const { text, media } = part;
  if (text) {
    return { type: "text", text };
  }
  if (media) {
    return {
      type: "image_url",
      image_url: {
        url: media.url,
        detail: visualDetailLevel
      }
    };
  }
  throw Error(
    `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(part)}.`
  );
}
|
|
85
|
+
/**
 * Converts a Genkit MessageData array into OpenAI ChatCompletionMessageParam
 * objects.
 *
 * Fixes over the previous revision (behavior unchanged):
 * - The local previously named `onlyTextContent` was actually true when the
 *   content was NOT only text; renamed to `hasNonTextContent`.
 * - The "user" case declared consts without a case block, leaking bindings
 *   across switch cases; now braced like the other cases.
 * - `.map` used purely for side effects replaced with `.forEach`.
 *
 * @param messages Genkit messages to convert.
 * @param visualDetailLevel Detail level applied to media parts (default "auto").
 * @returns The OpenAI-shaped message array.
 */
function toOpenAIMessages(messages, visualDetailLevel = "auto") {
  const apiMessages = [];
  for (const message of messages) {
    const msg = new import_genkit.Message(message);
    const role = toOpenAIRole(message.role);
    switch (role) {
      case "user": {
        const content = msg.content.map(
          (part) => toOpenAITextAndMedia(part, visualDetailLevel)
        );
        // Mixed (media-bearing) content must be sent as a content-part
        // array; pure text is flattened into one message per text part.
        const hasNonTextContent = content.some((item) => item.type !== "text");
        if (hasNonTextContent) {
          apiMessages.push({
            role,
            content
          });
        } else {
          content.forEach((item) => {
            if (item.type === "text") {
              apiMessages.push({
                role,
                content: item.text
              });
            }
          });
        }
        break;
      }
      case "system":
        apiMessages.push({
          role,
          content: msg.text
        });
        break;
      case "assistant": {
        // Tool requests become tool_calls; otherwise plain text content.
        const toolCalls = msg.content.filter(
          (part) => Boolean(part.toolRequest)
        ).map((part) => ({
          id: part.toolRequest.ref ?? "",
          type: "function",
          function: {
            name: part.toolRequest.name,
            arguments: JSON.stringify(part.toolRequest.input)
          }
        }));
        if (toolCalls.length > 0) {
          apiMessages.push({
            role,
            tool_calls: toolCalls
          });
        } else {
          apiMessages.push({
            role,
            content: msg.text
          });
        }
        break;
      }
      case "tool": {
        // Each tool response becomes its own "tool" message keyed by ref.
        const toolResponseParts = msg.toolResponseParts();
        toolResponseParts.forEach((part) => {
          apiMessages.push({
            role,
            tool_call_id: part.toolResponse.ref ?? "",
            content: typeof part.toolResponse.output === "string" ? part.toolResponse.output : JSON.stringify(part.toolResponse.output)
          });
        });
        break;
      }
    }
  }
  return apiMessages;
}
|
|
157
|
+
// Maps OpenAI finish_reason values onto Genkit finishReason values.
// tool_calls is treated as a normal "stop"; values not present here fall
// back to "other" (or "unknown" for unfinished stream chunks) at call sites.
const finishReasonMap = {
  length: "length",
  stop: "stop",
  tool_calls: "stop",
  content_filter: "blocked"
};
|
|
163
|
+
/**
 * Converts an OpenAI tool call (full message or streaming delta) into a
 * Genkit ToolRequestPart. The call's arguments are only parsed once the
 * owning choice finished with "tool_calls"; partial streaming chunks are
 * emitted with an empty input.
 */
function fromOpenAIToolCall(toolCall, choice) {
  const fn = toolCall.function;
  if (!fn) {
    throw Error(
      `Unexpected openAI chunk choice. tool_calls was provided but one or more tool_calls is missing.`
    );
  }
  const isComplete = choice.finish_reason === "tool_calls";
  const input = isComplete ? fn.arguments ? JSON.parse(fn.arguments) : fn.arguments : "";
  return {
    toolRequest: {
      name: fn.name,
      ref: toolCall.id,
      input
    }
  };
}
|
|
188
|
+
/**
 * Converts a completed OpenAI choice into Genkit GenerateResponseData.
 * Tool calls take precedence over text; in jsonMode the message text is
 * parsed into a structured data part instead of a text part.
 */
function fromOpenAIChoice(choice, jsonMode = false) {
  const toolRequestParts = choice.message.tool_calls?.map(
    (toolCall) => fromOpenAIToolCall(toolCall, choice)
  );
  let content;
  if (toolRequestParts) {
    // Upstream note: the cast keeps TS from inferring toolRequest as
    // 'undefined' when provided.
    content = toolRequestParts;
  } else if (jsonMode) {
    content = [{ data: JSON.parse(choice.message.content) }];
  } else {
    content = [{ text: choice.message.content }];
  }
  return {
    finishReason: finishReasonMap[choice.finish_reason] || "other",
    message: {
      role: "model",
      content
    }
  };
}
|
|
206
|
+
/**
 * Converts one OpenAI streaming chunk choice into Genkit
 * GenerateResponseData. Chunks without a finish_reason report "unknown";
 * tool-call deltas take precedence over text/data content.
 */
function fromOpenAIChunkChoice(choice, jsonMode = false) {
  const delta = choice.delta;
  const toolRequestParts = delta.tool_calls?.map(
    (toolCall) => fromOpenAIToolCall(toolCall, choice)
  );
  const textOrData = jsonMode ? { data: JSON.parse(delta.content) } : { text: delta.content };
  const finishReason = choice.finish_reason ? finishReasonMap[choice.finish_reason] || "other" : "unknown";
  return {
    finishReason,
    message: {
      role: "model",
      content: toolRequestParts ?? [textOrData]
    }
  };
}
|
|
224
|
+
/**
 * Builds the OpenAI Chat Completions request body from a Genkit
 * GenerateRequest.
 *
 * Fix: the cleanup pass previously deleted every falsy field
 * (`if (!body[key])`), which silently dropped legitimate values such as
 * temperature: 0, frequency_penalty: 0, or logprobs: false. It now removes
 * only fields that are undefined/null or empty arrays.
 *
 * @param modelName Fallback model id used when config.version is unset.
 * @param request The Genkit GenerateRequest to convert.
 * @returns The OpenAI API request body.
 */
function toOpenAIRequestBody(modelName, request) {
  const messages = toOpenAIMessages(
    request.messages,
    request.config?.visualDetailLevel
  );
  const {
    temperature,
    maxOutputTokens,
    // unused — TODO confirm whether this should map to max_tokens upstream
    topK,
    // unused — the Chat Completions API has no top-k parameter
    topP: top_p,
    frequencyPenalty: frequency_penalty,
    logProbs: logprobs,
    presencePenalty: presence_penalty,
    topLogProbs: top_logprobs,
    stopSequences: stop,
    version: modelVersion,
    tools: toolsFromConfig,
    ...restOfConfig
  } = request.config ?? {};
  // Tools declared on the request are merged with any passed via config.
  const tools = request.tools?.map(toOpenAITool) ?? [];
  if (toolsFromConfig) {
    tools.push(...toolsFromConfig);
  }
  const body = {
    model: modelVersion ?? modelName,
    messages,
    tools: tools.length > 0 ? tools : void 0,
    temperature,
    top_p,
    stop,
    frequency_penalty,
    presence_penalty,
    top_logprobs,
    logprobs,
    ...restOfConfig
    // passthrough for other config
  };
  const response_format = request.output?.format;
  if (response_format === "json") {
    body.response_format = {
      type: "json_object"
    };
  } else if (response_format === "text") {
    body.response_format = {
      type: "text"
    };
  }
  // Strip unset options, keeping meaningful falsy values (0, false).
  for (const key in body) {
    const value = body[key];
    if (value === void 0 || value === null || Array.isArray(value) && value.length === 0) {
      delete body[key];
    }
  }
  return body;
}
|
|
279
|
+
/**
 * Creates the async handler Genkit invokes for an OpenAI-compatible model.
 *
 * @param name The model id sent to the API (used when config.version is unset).
 * @param client The OpenAI client instance.
 * @returns An async (request, streamingCallback?) handler producing
 *   GenerateResponseData.
 */
function openAIModelRunner(name, client) {
  return async (request, streamingCallback) => {
    let response;
    const body = toOpenAIRequestBody(name, request);
    if (streamingCallback) {
      // Streaming path: the beta streaming helper both yields chunks and
      // can aggregate them into a final completion afterwards.
      const stream = client.beta.chat.completions.stream({
        ...body,
        stream: true,
        stream_options: {
          include_usage: true
        }
      });
      for await (const chunk of stream) {
        // choices may be absent on usage-only chunks (include_usage: true).
        chunk.choices?.forEach((chunk2) => {
          const c = fromOpenAIChunkChoice(chunk2);
          streamingCallback({
            index: chunk2.index,
            content: c.message?.content ?? []
          });
        });
      }
      // Aggregated completion assembled from all streamed chunks.
      response = await stream.finalChatCompletion();
    } else {
      response = await client.chat.completions.create(body);
    }
    // Usage and raw payload are attached regardless of choice content.
    const standardResponse = {
      usage: {
        inputTokens: response.usage?.prompt_tokens,
        outputTokens: response.usage?.completion_tokens,
        totalTokens: response.usage?.total_tokens
      },
      raw: response
    };
    if (response.choices.length === 0) {
      return standardResponse;
    } else {
      // Only the first choice is surfaced to Genkit.
      const choice = response.choices[0];
      return {
        ...fromOpenAIChoice(choice, request.output?.format === "json"),
        ...standardResponse
      };
    }
  };
}
|
|
323
|
+
/**
 * Registers a Genkit model backed by an OpenAI-compatible Chat Completions
 * endpoint.
 *
 * @param params.ai       Genkit instance used to define the model.
 * @param params.name     Fully qualified model name (e.g. "provider/model-id").
 * @param params.client   OpenAI SDK client instance.
 * @param params.modelRef Optional model reference supplying info/configSchema.
 * @returns The defined Genkit model action.
 */
function defineCompatOpenAIModel(params) {
  const { ai, name, client, modelRef } = params;
  // The bare model id is the last path segment of the qualified name.
  const modelId = name.split("/").pop();
  const definition = {
    name,
    ...modelRef?.info,
    configSchema: modelRef?.configSchema
  };
  return ai.defineModel(definition, openAIModelRunner(modelId, client));
}
|
|
335
|
+
// Annotate the CommonJS export names for ESM import in node:
// NOTE: generated bundler annotation — the `0 &&` makes this dead code at
// runtime while still letting static analysis of the CJS output discover the
// named exports. Do not remove or "simplify" it.
0 && (module.exports = {
  ChatCompletionCommonConfigSchema,
  defineCompatOpenAIModel,
  fromOpenAIChoice,
  fromOpenAIChunkChoice,
  fromOpenAIToolCall,
  openAIModelRunner,
  toOpenAIMessages,
  toOpenAIRequestBody,
  toOpenAIRole,
  toOpenAITextAndMedia,
  toOpenAITool
});
//# sourceMappingURL=model.js.map
|
package/lib/model.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/model.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport type {\n GenerateRequest,\n GenerateResponseData,\n Genkit,\n MessageData,\n ModelReference,\n Part,\n Role,\n StreamingCallback,\n ToolRequestPart,\n} from 'genkit';\nimport { GenerationCommonConfigSchema, Message, z } from 'genkit';\nimport type {\n GenerateResponseChunkData,\n ModelAction,\n ToolDefinition,\n} from 'genkit/model';\nimport type OpenAI from 'openai';\nimport type {\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionContentPart,\n ChatCompletionCreateParamsNonStreaming,\n ChatCompletionMessageParam,\n ChatCompletionMessageToolCall,\n ChatCompletionRole,\n ChatCompletionTool,\n CompletionChoice,\n} from 'openai/resources/index.mjs';\n\nconst VisualDetailLevelSchema = z.enum(['auto', 'low', 'high']).optional();\n\ntype VisualDetailLevel = z.infer<typeof VisualDetailLevelSchema>;\n\nexport const ChatCompletionCommonConfigSchema =\n GenerationCommonConfigSchema.extend({\n temperature: z.number().min(0).max(2).optional(),\n frequencyPenalty: z.number().min(-2).max(2).optional(),\n logProbs: z.boolean().optional(),\n presencePenalty: z.number().min(-2).max(2).optional(),\n topLogProbs: z.number().int().min(0).max(20).optional(),\n });\n\nexport function toOpenAIRole(role: Role): ChatCompletionRole {\n switch (role) {\n case 'user':\n return 
'user';\n case 'model':\n return 'assistant';\n case 'system':\n return 'system';\n case 'tool':\n return 'tool';\n default:\n throw new Error(`role ${role} doesn't map to an OpenAI role.`);\n }\n}\n\n/**\n * Converts a Genkit ToolDefinition to an OpenAI ChatCompletionTool object.\n * @param tool The Genkit ToolDefinition to convert.\n * @returns The converted OpenAI ChatCompletionTool object.\n */\nexport function toOpenAITool(tool: ToolDefinition): ChatCompletionTool {\n return {\n type: 'function',\n function: {\n name: tool.name,\n parameters: tool.inputSchema !== null ? tool.inputSchema : undefined,\n },\n };\n}\n\n/**\n * Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart.\n * @param part The Genkit Part to convert.\n * @param visualDetailLevel The visual detail level to use for media parts.\n * @returns The corresponding OpenAI ChatCompletionContentPart.\n * @throws Error if the part contains unsupported fields for the current message role.\n */\nexport function toOpenAITextAndMedia(\n part: Part,\n visualDetailLevel: VisualDetailLevel\n): ChatCompletionContentPart {\n if (part.text) {\n return {\n type: 'text',\n text: part.text,\n };\n } else if (part.media) {\n return {\n type: 'image_url',\n image_url: {\n url: part.media.url,\n detail: visualDetailLevel,\n },\n };\n }\n throw Error(\n `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(part)}.`\n );\n}\n\n/**\n * Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array.\n * @param messages The Genkit MessageData array to convert.\n * @param visualDetailLevel The visual detail level to use for media parts.\n * @returns The converted OpenAI ChatCompletionMessageParam array.\n */\nexport function toOpenAIMessages(\n messages: MessageData[],\n visualDetailLevel: VisualDetailLevel = 'auto'\n): ChatCompletionMessageParam[] {\n const apiMessages: ChatCompletionMessageParam[] = [];\n for (const message of messages) {\n const 
msg = new Message(message);\n const role = toOpenAIRole(message.role);\n switch (role) {\n case 'user':\n const content = msg.content.map((part) =>\n toOpenAITextAndMedia(part, visualDetailLevel)\n );\n // Check if we have only text content\n const onlyTextContent = content.some((item) => item.type !== 'text');\n\n // If all items are strings, just add them as text\n if (!onlyTextContent) {\n content.forEach((item) => {\n if (item.type === 'text') {\n apiMessages.push({\n role: role,\n content: item.text,\n });\n }\n });\n } else {\n apiMessages.push({\n role: role,\n content: content,\n });\n }\n break;\n case 'system':\n apiMessages.push({\n role: role,\n content: msg.text,\n });\n break;\n case 'assistant': {\n const toolCalls: ChatCompletionMessageToolCall[] = msg.content\n .filter(\n (\n part\n ): part is Part & {\n toolRequest: NonNullable<Part['toolRequest']>;\n } => Boolean(part.toolRequest)\n )\n .map((part) => ({\n id: part.toolRequest.ref ?? '',\n type: 'function',\n function: {\n name: part.toolRequest.name,\n arguments: JSON.stringify(part.toolRequest.input),\n },\n }));\n if (toolCalls.length > 0) {\n apiMessages.push({\n role: role,\n tool_calls: toolCalls,\n });\n } else {\n apiMessages.push({\n role: role,\n content: msg.text,\n });\n }\n break;\n }\n case 'tool': {\n const toolResponseParts = msg.toolResponseParts();\n toolResponseParts.map((part) => {\n apiMessages.push({\n role: role,\n tool_call_id: part.toolResponse.ref ?? '',\n content:\n typeof part.toolResponse.output === 'string'\n ? 
part.toolResponse.output\n : JSON.stringify(part.toolResponse.output),\n });\n });\n break;\n }\n }\n }\n return apiMessages;\n}\n\nconst finishReasonMap: Record<\n // OpenAI Node SDK doesn't support tool_call in the enum, but it is returned from the API\n CompletionChoice['finish_reason'] | 'tool_calls',\n GenerateResponseData['finishReason']\n> = {\n length: 'length',\n stop: 'stop',\n tool_calls: 'stop',\n content_filter: 'blocked',\n};\n\n/**\n * Converts an OpenAI tool call to a Genkit ToolRequestPart.\n * @param toolCall The OpenAI tool call to convert.\n * @returns The converted Genkit ToolRequestPart.\n */\nexport function fromOpenAIToolCall(\n toolCall:\n | ChatCompletionMessageToolCall\n | ChatCompletionChunk.Choice.Delta.ToolCall,\n choice: ChatCompletion.Choice | ChatCompletionChunk.Choice\n): ToolRequestPart {\n if (!toolCall.function) {\n throw Error(\n `Unexpected openAI chunk choice. tool_calls was provided but one or more tool_calls is missing.`\n );\n }\n const f = toolCall.function;\n\n // Only parse arugments when it is a JSON object and the finish reason is tool_calls to avoid parsing errors\n if (choice.finish_reason === 'tool_calls') {\n return {\n toolRequest: {\n name: f.name!,\n ref: toolCall.id,\n input: f.arguments ? 
JSON.parse(f.arguments) : f.arguments,\n },\n };\n } else {\n return {\n toolRequest: {\n name: f.name!,\n ref: toolCall.id,\n input: '',\n },\n };\n }\n}\n\n/**\n * Converts an OpenAI message event to a Genkit GenerateResponseData object.\n * @param choice The OpenAI message event to convert.\n * @param jsonMode Whether the event is a JSON response.\n * @returns The converted Genkit GenerateResponseData object.\n */\nexport function fromOpenAIChoice(\n choice: ChatCompletion.Choice,\n jsonMode = false\n): GenerateResponseData {\n const toolRequestParts = choice.message.tool_calls?.map((toolCall) =>\n fromOpenAIToolCall(toolCall, choice)\n );\n return {\n finishReason: finishReasonMap[choice.finish_reason] || 'other',\n message: {\n role: 'model',\n content: toolRequestParts\n ? // Note: Not sure why I have to cast here exactly.\n // Otherwise it thinks toolRequest must be 'undefined' if provided\n (toolRequestParts as ToolRequestPart[])\n : [\n jsonMode\n ? { data: JSON.parse(choice.message.content!) }\n : { text: choice.message.content! },\n ],\n },\n };\n}\n\n/**\n * Converts an OpenAI message stream event to a Genkit GenerateResponseData\n * object.\n * @param choice The OpenAI message stream event to convert.\n * @param jsonMode Whether the event is a JSON response.\n * @returns The converted Genkit GenerateResponseData object.\n */\nexport function fromOpenAIChunkChoice(\n choice: ChatCompletionChunk.Choice,\n jsonMode = false\n): GenerateResponseData {\n const toolRequestParts = choice.delta.tool_calls?.map((toolCall) =>\n fromOpenAIToolCall(toolCall, choice)\n );\n return {\n finishReason: choice.finish_reason\n ? finishReasonMap[choice.finish_reason] || 'other'\n : 'unknown',\n message: {\n role: 'model',\n content: toolRequestParts\n ? // Note: Not sure why I have to cast here exactly.\n // Otherwise it thinks toolRequest must be 'undefined' if provided\n (toolRequestParts as ToolRequestPart[])\n : [\n jsonMode\n ? 
{ data: JSON.parse(choice.delta.content!) }\n : { text: choice.delta.content! },\n ],\n },\n };\n}\n\n/**\n * Converts an OpenAI request to an OpenAI API request body.\n * @param modelName The name of the OpenAI model to use.\n * @param request The Genkit GenerateRequest to convert.\n * @returns The converted OpenAI API request body.\n * @throws An error if the specified model is not supported or if an unsupported output format is requested.\n */\nexport function toOpenAIRequestBody(\n modelName: string,\n request: GenerateRequest\n) {\n const messages = toOpenAIMessages(\n request.messages,\n request.config?.visualDetailLevel\n );\n const {\n temperature,\n maxOutputTokens, // unused\n topK, // unused\n topP: top_p,\n frequencyPenalty: frequency_penalty,\n logProbs: logprobs,\n presencePenalty: presence_penalty,\n topLogProbs: top_logprobs,\n stopSequences: stop,\n version: modelVersion,\n tools: toolsFromConfig,\n ...restOfConfig\n } = request.config ?? {};\n\n const tools: ChatCompletionTool[] = request.tools?.map(toOpenAITool) ?? [];\n if (toolsFromConfig) {\n tools.push(...(toolsFromConfig as any[]));\n }\n const body = {\n model: modelVersion ?? modelName,\n messages,\n tools: tools.length > 0 ? 
tools : undefined,\n temperature,\n top_p,\n stop,\n frequency_penalty,\n presence_penalty,\n top_logprobs,\n logprobs,\n ...restOfConfig, // passthrough for other config\n } as ChatCompletionCreateParamsNonStreaming;\n\n const response_format = request.output?.format;\n if (response_format === 'json') {\n body.response_format = {\n type: 'json_object',\n };\n } else if (response_format === 'text') {\n body.response_format = {\n type: 'text',\n };\n }\n for (const key in body) {\n if (!body[key] || (Array.isArray(body[key]) && !body[key].length))\n delete body[key];\n }\n return body;\n}\n\n/**\n * Creates the runner used by Genkit to interact with an OpenAI compatible\n * model.\n * @param name The name of the GPT model.\n * @param client The OpenAI client instance.\n * @returns The runner that Genkit will call when the model is invoked.\n */\nexport function openAIModelRunner(name: string, client: OpenAI) {\n return async (\n request: GenerateRequest,\n streamingCallback?: StreamingCallback<GenerateResponseChunkData>\n ): Promise<GenerateResponseData> => {\n let response: ChatCompletion;\n const body = toOpenAIRequestBody(name, request);\n if (streamingCallback) {\n const stream = client.beta.chat.completions.stream({\n ...body,\n stream: true,\n stream_options: {\n include_usage: true,\n },\n });\n for await (const chunk of stream) {\n chunk.choices?.forEach((chunk) => {\n const c = fromOpenAIChunkChoice(chunk);\n streamingCallback({\n index: chunk.index,\n content: c.message?.content ?? 
[],\n });\n });\n }\n response = await stream.finalChatCompletion();\n } else {\n response = await client.chat.completions.create(body);\n }\n const standardResponse: GenerateResponseData = {\n usage: {\n inputTokens: response.usage?.prompt_tokens,\n outputTokens: response.usage?.completion_tokens,\n totalTokens: response.usage?.total_tokens,\n },\n raw: response,\n };\n if (response.choices.length === 0) {\n return standardResponse;\n } else {\n const choice = response.choices[0];\n return {\n ...fromOpenAIChoice(choice, request.output?.format === 'json'),\n ...standardResponse,\n };\n }\n };\n}\n\n/**\n * Method to define a new Genkit Model that is compatible with Open AI\n * Chat Completions API. \n *\n * These models are to be used to chat with a large language model.\n *\n * @param params An object containing parameters for defining the OpenAI\n * Chat model.\n * @param params.ai The Genkit AI instance.\n * @param params.name The name of the model.\n * @param params.client The OpenAI client instance.\n * @param params.modelRef Optional reference to the model's configuration and\n * custom options.\n\n * @returns the created {@link ModelAction}\n */\nexport function defineCompatOpenAIModel<\n CustomOptions extends z.ZodTypeAny = z.ZodTypeAny,\n>(params: {\n ai: Genkit;\n name: string;\n client: OpenAI;\n modelRef?: ModelReference<CustomOptions>;\n}): ModelAction {\n const { ai, name, client, modelRef } = params;\n const model = name.split('/').pop();\n\n return ai.defineModel(\n {\n name,\n ...modelRef?.info,\n configSchema: modelRef?.configSchema,\n },\n openAIModelRunner(model!, client)\n 
);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA4BA,oBAAyD;AAmBzD,MAAM,0BAA0B,gBAAE,KAAK,CAAC,QAAQ,OAAO,MAAM,CAAC,EAAE,SAAS;AAIlE,MAAM,mCACX,2CAA6B,OAAO;AAAA,EAClC,aAAa,gBAAE,OAAO,EAAE,IAAI,CAAC,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EAC/C,kBAAkB,gBAAE,OAAO,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EACrD,UAAU,gBAAE,QAAQ,EAAE,SAAS;AAAA,EAC/B,iBAAiB,gBAAE,OAAO,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EACpD,aAAa,gBAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,IAAI,EAAE,EAAE,SAAS;AACxD,CAAC;AAEI,SAAS,aAAa,MAAgC;AAC3D,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT;AACE,YAAM,IAAI,MAAM,QAAQ,IAAI,iCAAiC;AAAA,EACjE;AACF;AAOO,SAAS,aAAa,MAA0C;AACrE,SAAO;AAAA,IACL,MAAM;AAAA,IACN,UAAU;AAAA,MACR,MAAM,KAAK;AAAA,MACX,YAAY,KAAK,gBAAgB,OAAO,KAAK,cAAc;AAAA,IAC7D;AAAA,EACF;AACF;AASO,SAAS,qBACd,MACA,mBAC2B;AAC3B,MAAI,KAAK,MAAM;AACb,WAAO;AAAA,MACL,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,IACb;AAAA,EACF,WAAW,KAAK,OAAO;AACrB,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,QACT,KAAK,KAAK,MAAM;AAAA,QAChB,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,EACF;AACA,QAAM;AAAA,IACJ,wEAAwE,KAAK,UAAU,IAAI,CAAC;AAAA,EAC9F;AACF;AAQO,SAAS,iBACd,UACA,oBAAuC,QACT;AAC9B,QAAM,cAA4C,CAAC;AACnD,aAAW,WAAW,UAAU;AAC9B,UAAM,MAAM,IAAI,sBAAQ,OAAO;AAC/B,UAAM,OAAO,aAAa,QAAQ,IAAI;AACtC,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH,cAAM,UAAU,IAAI,QAAQ;AAAA,UAAI,CAAC,SAC/B,qBAAqB,MAAM,iBAAiB;AAAA,QAC9C;AAEA,cAAM,kBAAkB,QAAQ,KAAK,CAAC,SAAS,KAAK,SAAS,MAAM;AAGnE,YAAI,CAAC,iBAAiB;AACpB,kBAAQ,QAAQ,CAAC,SAAS;AACxB,gBAAI,KAAK,SAAS,QAAQ;AACxB,0BAAY,KAAK;AAAA,gBACf;AAAA,gBACA,SAAS,KAAK;AAAA,cAChB,CAAC;AAAA,YACH;AAAA,UACF,CAAC;AAAA,QACH,OAAO;AACL,sBAAY,KAAK;AAAA,YACf;AAAA,YACA;AAAA,UACF,CAAC;AAAA,QACH;AACA;AAAA,MACF,KAAK;AACH,oBAAY,KAAK;AAAA,UACf;AAAA,UACA,SAAS,IAAI;AAAA,QACf,CAAC;AACD;AAAA,MACF,KAAK,aAAa;AAChB,cAAM,YAA6C,IAAI,QACpD;AAAA,UACC,CACE,SAGG,QAAQ,KAAK,WAAW;AAAA,QAC/B,EACC,IAAI,CAAC,UAAU;AAAA,UACd,IAAI,KAAK,YAAY,OAAO;AAAA,UAC5B,MAAM;AAAA,UA
CN,UAAU;AAAA,YACR,MAAM,KAAK,YAAY;AAAA,YACvB,WAAW,KAAK,UAAU,KAAK,YAAY,KAAK;AAAA,UAClD;AAAA,QACF,EAAE;AACJ,YAAI,UAAU,SAAS,GAAG;AACxB,sBAAY,KAAK;AAAA,YACf;AAAA,YACA,YAAY;AAAA,UACd,CAAC;AAAA,QACH,OAAO;AACL,sBAAY,KAAK;AAAA,YACf;AAAA,YACA,SAAS,IAAI;AAAA,UACf,CAAC;AAAA,QACH;AACA;AAAA,MACF;AAAA,MACA,KAAK,QAAQ;AACX,cAAM,oBAAoB,IAAI,kBAAkB;AAChD,0BAAkB,IAAI,CAAC,SAAS;AAC9B,sBAAY,KAAK;AAAA,YACf;AAAA,YACA,cAAc,KAAK,aAAa,OAAO;AAAA,YACvC,SACE,OAAO,KAAK,aAAa,WAAW,WAChC,KAAK,aAAa,SAClB,KAAK,UAAU,KAAK,aAAa,MAAM;AAAA,UAC/C,CAAC;AAAA,QACH,CAAC;AACD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO;AACT;AAEA,MAAM,kBAIF;AAAA,EACF,QAAQ;AAAA,EACR,MAAM;AAAA,EACN,YAAY;AAAA,EACZ,gBAAgB;AAClB;AAOO,SAAS,mBACd,UAGA,QACiB;AACjB,MAAI,CAAC,SAAS,UAAU;AACtB,UAAM;AAAA,MACJ;AAAA,IACF;AAAA,EACF;AACA,QAAM,IAAI,SAAS;AAGnB,MAAI,OAAO,kBAAkB,cAAc;AACzC,WAAO;AAAA,MACL,aAAa;AAAA,QACX,MAAM,EAAE;AAAA,QACR,KAAK,SAAS;AAAA,QACd,OAAO,EAAE,YAAY,KAAK,MAAM,EAAE,SAAS,IAAI,EAAE;AAAA,MACnD;AAAA,IACF;AAAA,EACF,OAAO;AACL,WAAO;AAAA,MACL,aAAa;AAAA,QACX,MAAM,EAAE;AAAA,QACR,KAAK,SAAS;AAAA,QACd,OAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACF;AAQO,SAAS,iBACd,QACA,WAAW,OACW;AACtB,QAAM,mBAAmB,OAAO,QAAQ,YAAY;AAAA,IAAI,CAAC,aACvD,mBAAmB,UAAU,MAAM;AAAA,EACrC;AACA,SAAO;AAAA,IACL,cAAc,gBAAgB,OAAO,aAAa,KAAK;AAAA,IACvD,SAAS;AAAA,MACP,MAAM;AAAA,MACN,SAAS;AAAA;AAAA;AAAA,QAGJ;AAAA,UACD;AAAA,QACE,WACI,EAAE,MAAM,KAAK,MAAM,OAAO,QAAQ,OAAQ,EAAE,IAC5C,EAAE,MAAM,OAAO,QAAQ,QAAS;AAAA,MACtC;AAAA,IACN;AAAA,EACF;AACF;AASO,SAAS,sBACd,QACA,WAAW,OACW;AACtB,QAAM,mBAAmB,OAAO,MAAM,YAAY;AAAA,IAAI,CAAC,aACrD,mBAAmB,UAAU,MAAM;AAAA,EACrC;AACA,SAAO;AAAA,IACL,cAAc,OAAO,gBACjB,gBAAgB,OAAO,aAAa,KAAK,UACzC;AAAA,IACJ,SAAS;AAAA,MACP,MAAM;AAAA,MACN,SAAS;AAAA;AAAA;AAAA,QAGJ;AAAA,UACD;AAAA,QACE,WACI,EAAE,MAAM,KAAK,MAAM,OAAO,MAAM,OAAQ,EAAE,IAC1C,EAAE,MAAM,OAAO,MAAM,QAAS;AAAA,MACpC;AAAA,IACN;AAAA,EACF;AACF;AASO,SAAS,oBACd,WACA,SACA;AACA,QAAM,WAAW;AAAA,IACf,QAAQ;AAAA,IACR,QAAQ,QAAQ;AAAA,EAClB;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA;AAAA,IACA;AAAA;AAAA,IACA,MAAM;AAAA,IACN,kBAAkB;AAAA,IAClB,UAAU;AAA
A,IACV,iBAAiB;AAAA,IACjB,aAAa;AAAA,IACb,eAAe;AAAA,IACf,SAAS;AAAA,IACT,OAAO;AAAA,IACP,GAAG;AAAA,EACL,IAAI,QAAQ,UAAU,CAAC;AAEvB,QAAM,QAA8B,QAAQ,OAAO,IAAI,YAAY,KAAK,CAAC;AACzE,MAAI,iBAAiB;AACnB,UAAM,KAAK,GAAI,eAAyB;AAAA,EAC1C;AACA,QAAM,OAAO;AAAA,IACX,OAAO,gBAAgB;AAAA,IACvB;AAAA,IACA,OAAO,MAAM,SAAS,IAAI,QAAQ;AAAA,IAClC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,GAAG;AAAA;AAAA,EACL;AAEA,QAAM,kBAAkB,QAAQ,QAAQ;AACxC,MAAI,oBAAoB,QAAQ;AAC9B,SAAK,kBAAkB;AAAA,MACrB,MAAM;AAAA,IACR;AAAA,EACF,WAAW,oBAAoB,QAAQ;AACrC,SAAK,kBAAkB;AAAA,MACrB,MAAM;AAAA,IACR;AAAA,EACF;AACA,aAAW,OAAO,MAAM;AACtB,QAAI,CAAC,KAAK,GAAG,KAAM,MAAM,QAAQ,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,GAAG,EAAE;AACxD,aAAO,KAAK,GAAG;AAAA,EACnB;AACA,SAAO;AACT;AASO,SAAS,kBAAkB,MAAc,QAAgB;AAC9D,SAAO,OACL,SACA,sBACkC;AAClC,QAAI;AACJ,UAAM,OAAO,oBAAoB,MAAM,OAAO;AAC9C,QAAI,mBAAmB;AACrB,YAAM,SAAS,OAAO,KAAK,KAAK,YAAY,OAAO;AAAA,QACjD,GAAG;AAAA,QACH,QAAQ;AAAA,QACR,gBAAgB;AAAA,UACd,eAAe;AAAA,QACjB;AAAA,MACF,CAAC;AACD,uBAAiB,SAAS,QAAQ;AAChC,cAAM,SAAS,QAAQ,CAACA,WAAU;AAChC,gBAAM,IAAI,sBAAsBA,MAAK;AACrC,4BAAkB;AAAA,YAChB,OAAOA,OAAM;AAAA,YACb,SAAS,EAAE,SAAS,WAAW,CAAC;AAAA,UAClC,CAAC;AAAA,QACH,CAAC;AAAA,MACH;AACA,iBAAW,MAAM,OAAO,oBAAoB;AAAA,IAC9C,OAAO;AACL,iBAAW,MAAM,OAAO,KAAK,YAAY,OAAO,IAAI;AAAA,IACtD;AACA,UAAM,mBAAyC;AAAA,MAC7C,OAAO;AAAA,QACL,aAAa,SAAS,OAAO;AAAA,QAC7B,cAAc,SAAS,OAAO;AAAA,QAC9B,aAAa,SAAS,OAAO;AAAA,MAC/B;AAAA,MACA,KAAK;AAAA,IACP;AACA,QAAI,SAAS,QAAQ,WAAW,GAAG;AACjC,aAAO;AAAA,IACT,OAAO;AACL,YAAM,SAAS,SAAS,QAAQ,CAAC;AACjC,aAAO;AAAA,QACL,GAAG,iBAAiB,QAAQ,QAAQ,QAAQ,WAAW,MAAM;AAAA,QAC7D,GAAG;AAAA,MACL;AAAA,IACF;AAAA,EACF;AACF;AAkBO,SAAS,wBAEd,QAKc;AACd,QAAM,EAAE,IAAI,MAAM,QAAQ,SAAS,IAAI;AACvC,QAAM,QAAQ,KAAK,MAAM,GAAG,EAAE,IAAI;AAElC,SAAO,GAAG;AAAA,IACR;AAAA,MACE;AAAA,MACA,GAAG,UAAU;AAAA,MACb,cAAc,UAAU;AAAA,IAC1B;AAAA,IACA,kBAAkB,OAAQ,MAAM;AAAA,EAClC;AACF;","names":["chunk"]}
|