@ai-sdk/openai 3.0.14 → 3.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/package.json +6 -5
- package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
- package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
- package/src/chat/convert-openai-chat-usage.ts +57 -0
- package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
- package/src/chat/convert-to-openai-chat-messages.ts +225 -0
- package/src/chat/get-response-metadata.ts +15 -0
- package/src/chat/map-openai-finish-reason.ts +19 -0
- package/src/chat/openai-chat-api.ts +198 -0
- package/src/chat/openai-chat-language-model.test.ts +3496 -0
- package/src/chat/openai-chat-language-model.ts +700 -0
- package/src/chat/openai-chat-options.ts +186 -0
- package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
- package/src/chat/openai-chat-prepare-tools.ts +84 -0
- package/src/chat/openai-chat-prompt.ts +70 -0
- package/src/completion/convert-openai-completion-usage.ts +46 -0
- package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
- package/src/completion/get-response-metadata.ts +15 -0
- package/src/completion/map-openai-finish-reason.ts +19 -0
- package/src/completion/openai-completion-api.ts +81 -0
- package/src/completion/openai-completion-language-model.test.ts +752 -0
- package/src/completion/openai-completion-language-model.ts +336 -0
- package/src/completion/openai-completion-options.ts +58 -0
- package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
- package/src/embedding/openai-embedding-api.ts +13 -0
- package/src/embedding/openai-embedding-model.test.ts +146 -0
- package/src/embedding/openai-embedding-model.ts +95 -0
- package/src/embedding/openai-embedding-options.ts +30 -0
- package/src/image/openai-image-api.ts +35 -0
- package/src/image/openai-image-model.test.ts +722 -0
- package/src/image/openai-image-model.ts +305 -0
- package/src/image/openai-image-options.ts +28 -0
- package/src/index.ts +9 -0
- package/src/internal/index.ts +19 -0
- package/src/openai-config.ts +18 -0
- package/src/openai-error.test.ts +34 -0
- package/src/openai-error.ts +22 -0
- package/src/openai-language-model-capabilities.test.ts +93 -0
- package/src/openai-language-model-capabilities.ts +54 -0
- package/src/openai-provider.test.ts +98 -0
- package/src/openai-provider.ts +270 -0
- package/src/openai-tools.ts +114 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
- package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
- package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
- package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
- package/src/responses/__fixtures__/openai-error.1.json +8 -0
- package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
- package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
- package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
- package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
- package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
- package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
- package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
- package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
- package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
- package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
- package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
- package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
- package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
- package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
- package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
- package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
- package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
- package/src/responses/convert-openai-responses-usage.ts +53 -0
- package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
- package/src/responses/convert-to-openai-responses-input.ts +578 -0
- package/src/responses/map-openai-responses-finish-reason.ts +22 -0
- package/src/responses/openai-responses-api.test.ts +89 -0
- package/src/responses/openai-responses-api.ts +1086 -0
- package/src/responses/openai-responses-language-model.test.ts +6927 -0
- package/src/responses/openai-responses-language-model.ts +1932 -0
- package/src/responses/openai-responses-options.ts +312 -0
- package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
- package/src/responses/openai-responses-prepare-tools.ts +264 -0
- package/src/responses/openai-responses-provider-metadata.ts +39 -0
- package/src/speech/openai-speech-api.ts +38 -0
- package/src/speech/openai-speech-model.test.ts +202 -0
- package/src/speech/openai-speech-model.ts +137 -0
- package/src/speech/openai-speech-options.ts +22 -0
- package/src/tool/apply-patch.ts +141 -0
- package/src/tool/code-interpreter.ts +104 -0
- package/src/tool/file-search.ts +145 -0
- package/src/tool/image-generation.ts +126 -0
- package/src/tool/local-shell.test-d.ts +20 -0
- package/src/tool/local-shell.ts +72 -0
- package/src/tool/mcp.ts +125 -0
- package/src/tool/shell.ts +85 -0
- package/src/tool/web-search-preview.ts +139 -0
- package/src/tool/web-search.test-d.ts +13 -0
- package/src/tool/web-search.ts +179 -0
- package/src/transcription/openai-transcription-api.ts +37 -0
- package/src/transcription/openai-transcription-model.test.ts +507 -0
- package/src/transcription/openai-transcription-model.ts +232 -0
- package/src/transcription/openai-transcription-options.ts +50 -0
- package/src/transcription/transcription-test.mp3 +0 -0
- package/src/version.ts +6 -0
package/src/tool/apply-patch.ts
@@ -0,0 +1,141 @@
import {
  createProviderToolFactoryWithOutputSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

/**
 * Schema for the apply_patch input - what the model sends.
 *
 * Refer the official spec here: https://platform.openai.com/docs/api-reference/responses/create#responses_create-input-input_item_list-item-apply_patch_tool_call
 *
 */
export const applyPatchInputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      callId: z.string(),
      operation: z.discriminatedUnion('type', [
        z.object({
          type: z.literal('create_file'),
          path: z.string(),
          diff: z.string(),
        }),
        z.object({
          type: z.literal('delete_file'),
          path: z.string(),
        }),
        z.object({
          type: z.literal('update_file'),
          path: z.string(),
          diff: z.string(),
        }),
      ]),
    }),
  ),
);

/**
 * Schema for the apply_patch output - what we send back.
 */
export const applyPatchOutputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      status: z.enum(['completed', 'failed']),
      output: z.string().optional(),
    }),
  ),
);

/**
 * Schema for tool arguments (configuration options).
 * The apply_patch tool doesn't require any configuration options.
 */
export const applyPatchArgsSchema = lazySchema(() => zodSchema(z.object({})));

/**
 * Type definitions for the apply_patch operations.
 */
export type ApplyPatchOperation =
  | {
      type: 'create_file';
      /**
       * Path of the file to create relative to the workspace root.
       */
      path: string;
      /**
       * Unified diff content to apply when creating the file.
       */
      diff: string;
    }
  | {
      type: 'delete_file';
      /**
       * Path of the file to delete relative to the workspace root.
       */
      path: string;
    }
  | {
      type: 'update_file';
      /**
       * Path of the file to update relative to the workspace root.
       */
      path: string;
      /**
       * Unified diff content to apply to the existing file.
       */
      diff: string;
    };

/**
 * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
 * codebase using structured diffs. Instead of just suggesting edits, the model
 * emits patch operations that your application applies and then reports back on,
 * enabling iterative, multi-step code editing workflows.
 *
 * The tool factory creates a provider-defined tool that:
 * - Receives patch operations from the model (create_file, update_file, delete_file)
 * - Returns the status of applying those patches (completed or failed)
 *
 */
export const applyPatchToolFactory = createProviderToolFactoryWithOutputSchema<
  {
    /**
     * The unique ID of the apply patch tool call generated by the model.
     */
    callId: string;

    /**
     * The specific create, delete, or update instruction for the apply_patch tool call.
     */
    operation: ApplyPatchOperation;
  },
  {
    /**
     * The status of the apply patch tool call output.
     * - 'completed': The patch was applied successfully.
     * - 'failed': The patch failed to apply.
     */
    status: 'completed' | 'failed';

    /**
     * Optional human-readable log text from the apply patch tool
     * (e.g., patch results or errors).
     */
    output?: string;
  },
  // No configuration options for apply_patch
  {}
>({
  id: 'openai.apply_patch',
  inputSchema: applyPatchInputSchema,
  outputSchema: applyPatchOutputSchema,
});

/**
 * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
 * codebase using structured diffs. Instead of just suggesting edits, the model
 * emits patch operations that your application applies and then reports back on,
 * enabling iterative, multi-step code editing workflows.
 */
export const applyPatch = applyPatchToolFactory;
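
The hunk above only adds the schemas and the factory; nothing in this diff shows how the tool is consumed. As a rough orientation, here is a minimal usage sketch. It assumes the factory is exposed to applications as `openai.tools.applyPatch` (the diff also adds `package/src/openai-tools.ts`, whose contents are not shown here) and that the host app uses `generateText` from the `ai` package; the model id and prompt are placeholders.

// --- usage sketch, not part of the package diff ---
// Assumes the applyPatch factory is re-exported as openai.tools.applyPatch.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-5.1'), // placeholder; apply_patch is described above as a GPT-5.1 tool
  tools: {
    // The args schema is an empty object, so no configuration is passed.
    apply_patch: openai.tools.applyPatch({}),
  },
  prompt: 'Rename the exported helper in src/index.ts to createClient.',
});

// The model emits { callId, operation } inputs where operation is one of
// create_file | update_file | delete_file. The application applies the patch
// itself and reports { status: 'completed' | 'failed', output? } back as the
// tool result in the next step.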

package/src/tool/code-interpreter.ts
@@ -0,0 +1,104 @@
import {
  createProviderToolFactoryWithOutputSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

export const codeInterpreterInputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      code: z.string().nullish(),
      containerId: z.string(),
    }),
  ),
);

export const codeInterpreterOutputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      outputs: z
        .array(
          z.discriminatedUnion('type', [
            z.object({ type: z.literal('logs'), logs: z.string() }),
            z.object({ type: z.literal('image'), url: z.string() }),
          ]),
        )
        .nullish(),
    }),
  ),
);

export const codeInterpreterArgsSchema = lazySchema(() =>
  zodSchema(
    z.object({
      container: z
        .union([
          z.string(),
          z.object({
            fileIds: z.array(z.string()).optional(),
          }),
        ])
        .optional(),
    }),
  ),
);

type CodeInterpreterArgs = {
  /**
   * The code interpreter container.
   * Can be a container ID
   * or an object that specifies uploaded file IDs to make available to your code.
   */
  container?: string | { fileIds?: string[] };
};

export const codeInterpreterToolFactory =
  createProviderToolFactoryWithOutputSchema<
    {
      /**
       * The code to run, or null if not available.
       */
      code?: string | null;

      /**
       * The ID of the container used to run the code.
       */
      containerId: string;
    },
    {
      /**
       * The outputs generated by the code interpreter, such as logs or images.
       * Can be null if no outputs are available.
       */
      outputs?: Array<
        | {
            type: 'logs';

            /**
             * The logs output from the code interpreter.
             */
            logs: string;
          }
        | {
            type: 'image';

            /**
             * The URL of the image output from the code interpreter.
             */
            url: string;
          }
      > | null;
    },
    CodeInterpreterArgs
  >({
    id: 'openai.code_interpreter',
    inputSchema: codeInterpreterInputSchema,
    outputSchema: codeInterpreterOutputSchema,
  });

export const codeInterpreter = (
  args: CodeInterpreterArgs = {}, // default
) => {
  return codeInterpreterToolFactory(args);
};
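
For comparison with the factory above, a minimal call sketch follows. It assumes the factory is exposed as `openai.tools.codeInterpreter` and used with `generateText` from the `ai` package; the model id and prompt are placeholders.

// --- usage sketch, not part of the package diff ---
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('gpt-5.1'), // placeholder model id
  tools: {
    // container is optional: pass a container id string or { fileIds: [...] }.
    code_interpreter: openai.tools.codeInterpreter({}),
  },
  prompt: 'Compute the standard deviation of [3, 7, 7, 19] and show your work.',
});

// Tool inputs arrive as { code, containerId }; outputs are an optional array of
// { type: 'logs', logs } or { type: 'image', url } entries, matching the schemas above.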

package/src/tool/file-search.ts
@@ -0,0 +1,145 @@
import {
  createProviderToolFactoryWithOutputSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import {
  OpenAIResponsesFileSearchToolComparisonFilter,
  OpenAIResponsesFileSearchToolCompoundFilter,
} from '../responses/openai-responses-api';

const comparisonFilterSchema = z.object({
  key: z.string(),
  type: z.enum(['eq', 'ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin']),
  value: z.union([z.string(), z.number(), z.boolean(), z.array(z.string())]),
});

const compoundFilterSchema: z.ZodType<any> = z.object({
  type: z.enum(['and', 'or']),
  filters: z.array(
    z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)]),
  ),
});

export const fileSearchArgsSchema = lazySchema(() =>
  zodSchema(
    z.object({
      vectorStoreIds: z.array(z.string()),
      maxNumResults: z.number().optional(),
      ranking: z
        .object({
          ranker: z.string().optional(),
          scoreThreshold: z.number().optional(),
        })
        .optional(),
      filters: z
        .union([comparisonFilterSchema, compoundFilterSchema])
        .optional(),
    }),
  ),
);

export const fileSearchOutputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      queries: z.array(z.string()),
      results: z
        .array(
          z.object({
            attributes: z.record(z.string(), z.unknown()),
            fileId: z.string(),
            filename: z.string(),
            score: z.number(),
            text: z.string(),
          }),
        )
        .nullable(),
    }),
  ),
);

export const fileSearch = createProviderToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The search query to execute.
     */
    queries: string[];

    /**
     * The results of the file search tool call.
     */
    results:
      | null
      | {
          /**
           * Set of 16 key-value pairs that can be attached to an object.
           * This can be useful for storing additional information about the object
           * in a structured format, and querying for objects via API or the dashboard.
           * Keys are strings with a maximum length of 64 characters.
           * Values are strings with a maximum length of 512 characters, booleans, or numbers.
           */
          attributes: Record<string, unknown>;

          /**
           * The unique ID of the file.
           */
          fileId: string;

          /**
           * The name of the file.
           */
          filename: string;

          /**
           * The relevance score of the file - a value between 0 and 1.
           */
          score: number;

          /**
           * The text that was retrieved from the file.
           */
          text: string;
        }[];
  },
  {
    /**
     * List of vector store IDs to search through.
     */
    vectorStoreIds: string[];

    /**
     * Maximum number of search results to return. Defaults to 10.
     */
    maxNumResults?: number;

    /**
     * Ranking options for the search.
     */
    ranking?: {
      /**
       * The ranker to use for the file search.
       */
      ranker?: string;

      /**
       * The score threshold for the file search, a number between 0 and 1.
       * Numbers closer to 1 will attempt to return only the most relevant results,
       * but may return fewer results.
       */
      scoreThreshold?: number;
    };

    /**
     * A filter to apply.
     */
    filters?:
      | OpenAIResponsesFileSearchToolComparisonFilter
      | OpenAIResponsesFileSearchToolCompoundFilter;
  }
>({
  id: 'openai.file_search',
  inputSchema: z.object({}),
  outputSchema: fileSearchOutputSchema,
});
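
The args type above maps directly onto the configuration an application would pass to the tool. A hedged sketch follows, assuming the factory is exposed as `openai.tools.fileSearch`; the vector store id, filter values, model id, and prompt are placeholders.

// --- usage sketch, not part of the package diff ---
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('gpt-5.1'), // placeholder model id
  tools: {
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ['vs_example_123'], // placeholder vector store id
      maxNumResults: 5,
      // A comparison filter; compound filters nest these under { type: 'and' | 'or', filters: [...] }.
      filters: { key: 'category', type: 'eq', value: 'handbook' },
    }),
  },
  prompt: 'What does the handbook say about requesting time off?',
});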

package/src/tool/image-generation.ts
@@ -0,0 +1,126 @@
import {
  createProviderToolFactoryWithOutputSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

export const imageGenerationArgsSchema = lazySchema(() =>
  zodSchema(
    z
      .object({
        background: z.enum(['auto', 'opaque', 'transparent']).optional(),
        inputFidelity: z.enum(['low', 'high']).optional(),
        inputImageMask: z
          .object({
            fileId: z.string().optional(),
            imageUrl: z.string().optional(),
          })
          .optional(),
        model: z.string().optional(),
        moderation: z.enum(['auto']).optional(),
        outputCompression: z.number().int().min(0).max(100).optional(),
        outputFormat: z.enum(['png', 'jpeg', 'webp']).optional(),
        partialImages: z.number().int().min(0).max(3).optional(),
        quality: z.enum(['auto', 'low', 'medium', 'high']).optional(),
        size: z
          .enum(['1024x1024', '1024x1536', '1536x1024', 'auto'])
          .optional(),
      })
      .strict(),
  ),
);

const imageGenerationInputSchema = lazySchema(() => zodSchema(z.object({})));

export const imageGenerationOutputSchema = lazySchema(() =>
  zodSchema(z.object({ result: z.string() })),
);

type ImageGenerationArgs = {
  /**
   * Background type for the generated image. Default is 'auto'.
   */
  background?: 'auto' | 'opaque' | 'transparent';

  /**
   * Input fidelity for the generated image. Default is 'low'.
   */
  inputFidelity?: 'low' | 'high';

  /**
   * Optional mask for inpainting.
   * Contains image_url (string, optional) and file_id (string, optional).
   */
  inputImageMask?: {
    /**
     * File ID for the mask image.
     */
    fileId?: string;

    /**
     * Base64-encoded mask image.
     */
    imageUrl?: string;
  };

  /**
   * The image generation model to use. Default: gpt-image-1.
   */
  model?: string;

  /**
   * Moderation level for the generated image. Default: auto.
   */
  moderation?: 'auto';

  /**
   * Compression level for the output image. Default: 100.
   */
  outputCompression?: number;

  /**
   * The output format of the generated image. One of png, webp, or jpeg.
   * Default: png
   */
  outputFormat?: 'png' | 'jpeg' | 'webp';

  /**
   * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
   */
  partialImages?: number;

  /**
   * The quality of the generated image.
   * One of low, medium, high, or auto. Default: auto.
   */
  quality?: 'auto' | 'low' | 'medium' | 'high';

  /**
   * The size of the generated image.
   * One of 1024x1024, 1024x1536, 1536x1024, or auto.
   * Default: auto.
   */
  size?: 'auto' | '1024x1024' | '1024x1536' | '1536x1024';
};

const imageGenerationToolFactory = createProviderToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The generated image encoded in base64.
     */
    result: string;
  },
  ImageGenerationArgs
>({
  id: 'openai.image_generation',
  inputSchema: imageGenerationInputSchema,
  outputSchema: imageGenerationOutputSchema,
});

export const imageGeneration = (
  args: ImageGenerationArgs = {}, // default
) => {
  return imageGenerationToolFactory(args);
};
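
Again for orientation only, a sketch of how these args might be passed, assuming the factory is exposed as `openai.tools.imageGeneration`; model id and prompt are placeholders.

// --- usage sketch, not part of the package diff ---
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-5.1'), // placeholder model id
  tools: {
    image_generation: openai.tools.imageGeneration({
      outputFormat: 'webp',
      quality: 'low',
      size: '1024x1024',
    }),
  },
  prompt: 'Generate a pixel-art icon of a rocket.',
});

// The tool output is { result: string }, the generated image encoded in base64.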

package/src/tool/local-shell.test-d.ts
@@ -0,0 +1,20 @@
import { InferSchema, Tool } from '@ai-sdk/provider-utils';
import { describe, expectTypeOf, it } from 'vitest';
import {
  localShell,
  localShellInputSchema,
  localShellOutputSchema,
} from './local-shell';

describe('local-shell tool type', () => {
  it('should have Tool type', () => {
    const localShellTool = localShell({});

    expectTypeOf(localShellTool).toEqualTypeOf<
      Tool<
        InferSchema<typeof localShellInputSchema>,
        InferSchema<typeof localShellOutputSchema>
      >
    >();
  });
});

package/src/tool/local-shell.ts
@@ -0,0 +1,72 @@
import {
  createProviderToolFactoryWithOutputSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

export const localShellInputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      action: z.object({
        type: z.literal('exec'),
        command: z.array(z.string()),
        timeoutMs: z.number().optional(),
        user: z.string().optional(),
        workingDirectory: z.string().optional(),
        env: z.record(z.string(), z.string()).optional(),
      }),
    }),
  ),
);

export const localShellOutputSchema = lazySchema(() =>
  zodSchema(z.object({ output: z.string() })),
);

export const localShell = createProviderToolFactoryWithOutputSchema<
  {
    /**
     * Execute a shell command on the server.
     */
    action: {
      type: 'exec';

      /**
       * The command to run.
       */
      command: string[];

      /**
       * Optional timeout in milliseconds for the command.
       */
      timeoutMs?: number;

      /**
       * Optional user to run the command as.
       */
      user?: string;

      /**
       * Optional working directory to run the command in.
       */
      workingDirectory?: string;

      /**
       * Environment variables to set for the command.
       */
      env?: Record<string, string>;
    };
  },
  {
    /**
     * The output of local shell tool call.
     */
    output: string;
  },
  {}
>({
  id: 'openai.local_shell',
  inputSchema: localShellInputSchema,
  outputSchema: localShellOutputSchema,
});
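
Since local_shell is executed by the application rather than by OpenAI, a host would typically supply an execute handler and return `{ output }` as declared by `localShellOutputSchema`. The sketch below assumes the factory is exposed as `openai.tools.localShell` and that it accepts an `execute` callback like other AI SDK tools; neither is confirmed by this diff, and the command handling is deliberately naive.

// --- usage sketch, not part of the package diff ---
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import { exec } from 'node:child_process';
import { promisify } from 'node:util';

const run = promisify(exec);

await generateText({
  model: openai('gpt-5.1'), // placeholder model id
  tools: {
    local_shell: openai.tools.localShell({
      // Input matches localShellInputSchema: { action: { type: 'exec', command, ... } }.
      execute: async ({ action }) => {
        const { stdout, stderr } = await run(action.command.join(' '), {
          cwd: action.workingDirectory,
          timeout: action.timeoutMs,
          env: { ...process.env, ...action.env },
        });
        // Output matches localShellOutputSchema.
        return { output: stdout || stderr };
      },
    }),
  },
  prompt: 'List the files in the current working directory.',
});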