@wellze/integration-ai 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/generate.d.mts +190 -0
- package/dist/generate.mjs +172 -0
- package/dist/index.d.mts +140 -0
- package/dist/index.mjs +5 -0
- package/dist/schemas-Dd9H2hSI.mjs +187 -0
- package/dist/stream.d.mts +104 -0
- package/dist/stream.mjs +95 -0
- package/package.json +60 -0
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import * as _wellze_workflow_core0 from "@wellze/workflow-core";
|
|
3
|
+
|
|
4
|
+
//#region src/generate.d.ts
|
|
5
|
+
declare const generateTextOpenAI: _wellze_workflow_core0.StepWithMetadata<{
|
|
6
|
+
model?: string | undefined;
|
|
7
|
+
system?: string | undefined;
|
|
8
|
+
prompt?: string | undefined;
|
|
9
|
+
messages?: {
|
|
10
|
+
role: "system" | "user" | "assistant";
|
|
11
|
+
content: string;
|
|
12
|
+
}[] | undefined;
|
|
13
|
+
maxOutputTokens?: number | undefined;
|
|
14
|
+
temperature?: number | undefined;
|
|
15
|
+
topP?: number | undefined;
|
|
16
|
+
stopSequences?: string[] | undefined;
|
|
17
|
+
}, {
|
|
18
|
+
text: string;
|
|
19
|
+
finishReason: string;
|
|
20
|
+
usage: {
|
|
21
|
+
promptTokens: number;
|
|
22
|
+
completionTokens: number;
|
|
23
|
+
totalTokens: number;
|
|
24
|
+
};
|
|
25
|
+
}, [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
26
|
+
OPENAI_API_KEY: z.ZodString;
|
|
27
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
28
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
29
|
+
}, z.core.$strip>>>]>, generateTextOpenAITool: _wellze_workflow_core0.ToolWithMetadata<{
|
|
30
|
+
model?: string | undefined;
|
|
31
|
+
system?: string | undefined;
|
|
32
|
+
prompt?: string | undefined;
|
|
33
|
+
messages?: {
|
|
34
|
+
role: "system" | "user" | "assistant";
|
|
35
|
+
content: string;
|
|
36
|
+
}[] | undefined;
|
|
37
|
+
maxOutputTokens?: number | undefined;
|
|
38
|
+
temperature?: number | undefined;
|
|
39
|
+
topP?: number | undefined;
|
|
40
|
+
stopSequences?: string[] | undefined;
|
|
41
|
+
}, {
|
|
42
|
+
text: string;
|
|
43
|
+
finishReason: string;
|
|
44
|
+
usage: {
|
|
45
|
+
promptTokens: number;
|
|
46
|
+
completionTokens: number;
|
|
47
|
+
totalTokens: number;
|
|
48
|
+
};
|
|
49
|
+
}, readonly [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
50
|
+
OPENAI_API_KEY: z.ZodString;
|
|
51
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
52
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
53
|
+
}, z.core.$strip>>>]>;
|
|
54
|
+
declare const generateTextAnthropic: _wellze_workflow_core0.StepWithMetadata<{
|
|
55
|
+
model?: string | undefined;
|
|
56
|
+
system?: string | undefined;
|
|
57
|
+
prompt?: string | undefined;
|
|
58
|
+
messages?: {
|
|
59
|
+
role: "system" | "user" | "assistant";
|
|
60
|
+
content: string;
|
|
61
|
+
}[] | undefined;
|
|
62
|
+
maxOutputTokens?: number | undefined;
|
|
63
|
+
temperature?: number | undefined;
|
|
64
|
+
topP?: number | undefined;
|
|
65
|
+
stopSequences?: string[] | undefined;
|
|
66
|
+
}, {
|
|
67
|
+
text: string;
|
|
68
|
+
finishReason: string;
|
|
69
|
+
usage: {
|
|
70
|
+
promptTokens: number;
|
|
71
|
+
completionTokens: number;
|
|
72
|
+
totalTokens: number;
|
|
73
|
+
};
|
|
74
|
+
}, [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
75
|
+
OPENAI_API_KEY: z.ZodString;
|
|
76
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
77
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
78
|
+
}, z.core.$strip>>>]>, generateTextAnthropicTool: _wellze_workflow_core0.ToolWithMetadata<{
|
|
79
|
+
model?: string | undefined;
|
|
80
|
+
system?: string | undefined;
|
|
81
|
+
prompt?: string | undefined;
|
|
82
|
+
messages?: {
|
|
83
|
+
role: "system" | "user" | "assistant";
|
|
84
|
+
content: string;
|
|
85
|
+
}[] | undefined;
|
|
86
|
+
maxOutputTokens?: number | undefined;
|
|
87
|
+
temperature?: number | undefined;
|
|
88
|
+
topP?: number | undefined;
|
|
89
|
+
stopSequences?: string[] | undefined;
|
|
90
|
+
}, {
|
|
91
|
+
text: string;
|
|
92
|
+
finishReason: string;
|
|
93
|
+
usage: {
|
|
94
|
+
promptTokens: number;
|
|
95
|
+
completionTokens: number;
|
|
96
|
+
totalTokens: number;
|
|
97
|
+
};
|
|
98
|
+
}, readonly [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
99
|
+
OPENAI_API_KEY: z.ZodString;
|
|
100
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
101
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
102
|
+
}, z.core.$strip>>>]>;
|
|
103
|
+
declare const generateObjectOpenAI: _wellze_workflow_core0.StepWithMetadata<{
|
|
104
|
+
prompt: string;
|
|
105
|
+
schema: Record<string, unknown>;
|
|
106
|
+
model?: string | undefined;
|
|
107
|
+
system?: string | undefined;
|
|
108
|
+
schemaName?: string | undefined;
|
|
109
|
+
schemaDescription?: string | undefined;
|
|
110
|
+
maxOutputTokens?: number | undefined;
|
|
111
|
+
temperature?: number | undefined;
|
|
112
|
+
}, {
|
|
113
|
+
object: Record<string, unknown>;
|
|
114
|
+
finishReason: string;
|
|
115
|
+
usage: {
|
|
116
|
+
promptTokens: number;
|
|
117
|
+
completionTokens: number;
|
|
118
|
+
totalTokens: number;
|
|
119
|
+
};
|
|
120
|
+
}, [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
121
|
+
OPENAI_API_KEY: z.ZodString;
|
|
122
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
123
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
124
|
+
}, z.core.$strip>>>]>, generateObjectOpenAITool: _wellze_workflow_core0.ToolWithMetadata<{
|
|
125
|
+
prompt: string;
|
|
126
|
+
schema: Record<string, unknown>;
|
|
127
|
+
model?: string | undefined;
|
|
128
|
+
system?: string | undefined;
|
|
129
|
+
schemaName?: string | undefined;
|
|
130
|
+
schemaDescription?: string | undefined;
|
|
131
|
+
maxOutputTokens?: number | undefined;
|
|
132
|
+
temperature?: number | undefined;
|
|
133
|
+
}, {
|
|
134
|
+
object: Record<string, unknown>;
|
|
135
|
+
finishReason: string;
|
|
136
|
+
usage: {
|
|
137
|
+
promptTokens: number;
|
|
138
|
+
completionTokens: number;
|
|
139
|
+
totalTokens: number;
|
|
140
|
+
};
|
|
141
|
+
}, readonly [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
142
|
+
OPENAI_API_KEY: z.ZodString;
|
|
143
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
144
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
145
|
+
}, z.core.$strip>>>]>;
|
|
146
|
+
declare const generateObjectAnthropic: _wellze_workflow_core0.StepWithMetadata<{
|
|
147
|
+
prompt: string;
|
|
148
|
+
schema: Record<string, unknown>;
|
|
149
|
+
model?: string | undefined;
|
|
150
|
+
system?: string | undefined;
|
|
151
|
+
schemaName?: string | undefined;
|
|
152
|
+
schemaDescription?: string | undefined;
|
|
153
|
+
maxOutputTokens?: number | undefined;
|
|
154
|
+
temperature?: number | undefined;
|
|
155
|
+
}, {
|
|
156
|
+
object: Record<string, unknown>;
|
|
157
|
+
finishReason: string;
|
|
158
|
+
usage: {
|
|
159
|
+
promptTokens: number;
|
|
160
|
+
completionTokens: number;
|
|
161
|
+
totalTokens: number;
|
|
162
|
+
};
|
|
163
|
+
}, [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
164
|
+
OPENAI_API_KEY: z.ZodString;
|
|
165
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
166
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
167
|
+
}, z.core.$strip>>>]>, generateObjectAnthropicTool: _wellze_workflow_core0.ToolWithMetadata<{
|
|
168
|
+
prompt: string;
|
|
169
|
+
schema: Record<string, unknown>;
|
|
170
|
+
model?: string | undefined;
|
|
171
|
+
system?: string | undefined;
|
|
172
|
+
schemaName?: string | undefined;
|
|
173
|
+
schemaDescription?: string | undefined;
|
|
174
|
+
maxOutputTokens?: number | undefined;
|
|
175
|
+
temperature?: number | undefined;
|
|
176
|
+
}, {
|
|
177
|
+
object: Record<string, unknown>;
|
|
178
|
+
finishReason: string;
|
|
179
|
+
usage: {
|
|
180
|
+
promptTokens: number;
|
|
181
|
+
completionTokens: number;
|
|
182
|
+
totalTokens: number;
|
|
183
|
+
};
|
|
184
|
+
}, readonly [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
185
|
+
OPENAI_API_KEY: z.ZodString;
|
|
186
|
+
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
187
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
188
|
+
}, z.core.$strip>>>]>;
|
|
189
|
+
//#endregion
|
|
190
|
+
export { generateObjectAnthropic, generateObjectAnthropicTool, generateObjectOpenAI, generateObjectOpenAITool, generateTextAnthropic, generateTextAnthropicTool, generateTextOpenAI, generateTextOpenAITool };
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
import { a as generateTextResponseSchema, d as defineAIOperation, f as createAnthropicProvider, l as anthropic, p as createOpenAIProvider, t as aiMessageSchema, u as openai } from "./schemas-Dd9H2hSI.mjs";
|
|
2
|
+
import { generateObject, generateText } from "ai";
|
|
3
|
+
import { z } from "zod";
|
|
4
|
+
|
|
5
|
+
//#region src/generate.ts
|
|
6
|
+
/**
|
|
7
|
+
* ai/generate.ts
|
|
8
|
+
*
|
|
9
|
+
* Steps for text generation and structured object generation using the AI SDK.
|
|
10
|
+
*
|
|
11
|
+
* Provides steps for both OpenAI and Anthropic providers. Each step wraps
|
|
12
|
+
* the AI SDK's `generateText` function with type-safe credential injection.
|
|
13
|
+
*/
|
|
14
|
+
/**
 * Map AI SDK usage counters onto the package's token-usage shape.
 * Counters that are absent (undefined/null) are treated as zero.
 */
function mapUsage(usage) {
	const promptTokens = usage.inputTokens ?? 0;
	const completionTokens = usage.outputTokens ?? 0;
	return {
		promptTokens,
		completionTokens,
		totalTokens: promptTokens + completionTokens
	};
}
|
|
24
|
+
/**
 * Step + tool pair: text generation with an OpenAI model via the AI SDK.
 *
 * Either `prompt` or `messages` may be supplied; when `messages` is present
 * it takes precedence and `prompt` is ignored. When neither is given, an
 * empty prompt is sent. The model defaults to "gpt-4o".
 */
const { step: generateTextOpenAI, tool: generateTextOpenAITool } = defineAIOperation({
	name: "Generate Text (OpenAI)",
	description: "Generate text using an OpenAI model via the AI SDK",
	integration: openai,
	input: z.object({
		model: z.string().optional(),
		system: z.string().optional(),
		prompt: z.string().optional(),
		messages: z.array(aiMessageSchema).optional(),
		maxOutputTokens: z.number().optional(),
		temperature: z.number().optional(),
		topP: z.number().optional(),
		stopSequences: z.array(z.string()).optional()
	}),
	output: generateTextResponseSchema,
	run: async (input, credentials) => {
		const provider = createOpenAIProvider(credentials);
		// `messages` wins over `prompt`; fall back to an empty prompt.
		const conversation = input.messages ? { messages: input.messages } : { prompt: input.prompt ?? "" };
		const { text, finishReason, usage } = await generateText({
			model: provider(input.model ?? "gpt-4o"),
			system: input.system,
			...conversation,
			maxOutputTokens: input.maxOutputTokens,
			temperature: input.temperature,
			topP: input.topP,
			stopSequences: input.stopSequences
		});
		return { text, finishReason, usage: mapUsage(usage) };
	}
});
|
|
56
|
+
/**
 * Step + tool pair: text generation with an Anthropic Claude model via the
 * AI SDK.
 *
 * Either `prompt` or `messages` may be supplied; `messages` takes
 * precedence. The model defaults to "claude-sonnet-4-20250514" and the
 * output-token budget defaults to 4096 when unspecified.
 */
const { step: generateTextAnthropic, tool: generateTextAnthropicTool } = defineAIOperation({
	name: "Generate Text (Anthropic)",
	description: "Generate text using an Anthropic Claude model via the AI SDK",
	integration: anthropic,
	input: z.object({
		model: z.string().optional(),
		system: z.string().optional(),
		prompt: z.string().optional(),
		messages: z.array(aiMessageSchema).optional(),
		maxOutputTokens: z.number().optional(),
		temperature: z.number().optional(),
		topP: z.number().optional(),
		stopSequences: z.array(z.string()).optional()
	}),
	output: generateTextResponseSchema,
	run: async (input, credentials) => {
		const provider = createAnthropicProvider(credentials);
		// `messages` wins over `prompt`; fall back to an empty prompt.
		const conversation = input.messages ? { messages: input.messages } : { prompt: input.prompt ?? "" };
		const { text, finishReason, usage } = await generateText({
			model: provider(input.model ?? "claude-sonnet-4-20250514"),
			system: input.system,
			...conversation,
			maxOutputTokens: input.maxOutputTokens ?? 4096,
			temperature: input.temperature,
			topP: input.topP,
			stopSequences: input.stopSequences
		});
		return { text, finishReason, usage: mapUsage(usage) };
	}
});
|
|
88
|
+
/**
 * Step + tool pair: structured object generation with an OpenAI model via
 * the AI SDK. The model defaults to "gpt-4o".
 */
const { step: generateObjectOpenAI, tool: generateObjectOpenAITool } = defineAIOperation({
	name: "Generate Object (OpenAI)",
	description: "Generate a structured JSON object using an OpenAI model via the AI SDK",
	integration: openai,
	input: z.object({
		model: z.string().optional(),
		system: z.string().optional(),
		prompt: z.string(),
		schema: z.record(z.string(), z.unknown()),
		schemaName: z.string().optional(),
		schemaDescription: z.string().optional(),
		maxOutputTokens: z.number().optional(),
		temperature: z.number().optional()
	}),
	output: z.object({
		object: z.record(z.string(), z.unknown()),
		finishReason: z.string(),
		usage: z.object({
			promptTokens: z.number(),
			completionTokens: z.number(),
			totalTokens: z.number()
		})
	}),
	run: async (input, credentials) => {
		// NOTE(review): only the top-level keys of `input.schema` are kept —
		// every value validates as `unknown`, so per-field types/constraints
		// in the caller's schema are not enforced. Confirm whether the AI
		// SDK's `jsonSchema()` helper should be used here instead.
		const keySchema = z.object(Object.fromEntries(Object.keys(input.schema).map((key) => [key, z.unknown()])));
		const { object, finishReason, usage } = await generateObject({
			model: createOpenAIProvider(credentials)(input.model ?? "gpt-4o"),
			system: input.system,
			prompt: input.prompt,
			schema: keySchema,
			schemaName: input.schemaName,
			schemaDescription: input.schemaDescription,
			maxOutputTokens: input.maxOutputTokens,
			temperature: input.temperature
		});
		return { object, finishReason, usage: mapUsage(usage) };
	}
});
|
|
129
|
+
/**
 * Step + tool pair: structured object generation with an Anthropic model
 * via the AI SDK. The model defaults to "claude-sonnet-4-20250514" and the
 * output-token budget defaults to 4096 when unspecified.
 */
const { step: generateObjectAnthropic, tool: generateObjectAnthropicTool } = defineAIOperation({
	name: "Generate Object (Anthropic)",
	description: "Generate a structured JSON object using an Anthropic model via the AI SDK",
	integration: anthropic,
	input: z.object({
		model: z.string().optional(),
		system: z.string().optional(),
		prompt: z.string(),
		schema: z.record(z.string(), z.unknown()),
		schemaName: z.string().optional(),
		schemaDescription: z.string().optional(),
		maxOutputTokens: z.number().optional(),
		temperature: z.number().optional()
	}),
	output: z.object({
		object: z.record(z.string(), z.unknown()),
		finishReason: z.string(),
		usage: z.object({
			promptTokens: z.number(),
			completionTokens: z.number(),
			totalTokens: z.number()
		})
	}),
	run: async (input, credentials) => {
		// NOTE(review): only the top-level keys of `input.schema` are kept —
		// every value validates as `unknown`, so per-field types/constraints
		// in the caller's schema are not enforced. Confirm whether the AI
		// SDK's `jsonSchema()` helper should be used here instead.
		const keySchema = z.object(Object.fromEntries(Object.keys(input.schema).map((key) => [key, z.unknown()])));
		const { object, finishReason, usage } = await generateObject({
			model: createAnthropicProvider(credentials)(input.model ?? "claude-sonnet-4-20250514"),
			system: input.system,
			prompt: input.prompt,
			schema: keySchema,
			schemaName: input.schemaName,
			schemaDescription: input.schemaDescription,
			maxOutputTokens: input.maxOutputTokens ?? 4096,
			temperature: input.temperature
		});
		return { object, finishReason, usage: mapUsage(usage) };
	}
});
|
|
170
|
+
|
|
171
|
+
//#endregion
|
|
172
|
+
export { generateObjectAnthropic, generateObjectAnthropicTool, generateObjectOpenAI, generateObjectOpenAITool, generateTextAnthropic, generateTextAnthropicTool, generateTextOpenAI, generateTextOpenAITool };
|
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
import { generateObjectAnthropic, generateObjectAnthropicTool, generateObjectOpenAI, generateObjectOpenAITool, generateTextAnthropic, generateTextAnthropicTool, generateTextOpenAI, generateTextOpenAITool } from "./generate.mjs";
|
|
2
|
+
import { streamTextAnthropic, streamTextAnthropicTool, streamTextOpenAI, streamTextOpenAITool } from "./stream.mjs";
|
|
3
|
+
import * as _ai_sdk_anthropic0 from "@ai-sdk/anthropic";
|
|
4
|
+
import * as _ai_sdk_openai0 from "@ai-sdk/openai";
|
|
5
|
+
import { z } from "zod";
|
|
6
|
+
import * as _wellze_workflow_core0 from "@wellze/workflow-core";
|
|
7
|
+
import { CredentialsOf } from "@wellze/workflow-core";
|
|
8
|
+
|
|
9
|
+
//#region src/integration.d.ts
|
|
10
|
+
/**
|
|
11
|
+
* OpenAI integration — passthrough auth via API key.
|
|
12
|
+
*/
|
|
13
|
+
declare const openai: _wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
|
|
14
|
+
OPENAI_API_KEY: z.ZodString;
|
|
15
|
+
}, z.core.$strip>>;
|
|
16
|
+
/**
|
|
17
|
+
* Anthropic integration — passthrough auth via API key.
|
|
18
|
+
*/
|
|
19
|
+
declare const anthropic: _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
|
|
20
|
+
ANTHROPIC_API_KEY: z.ZodString;
|
|
21
|
+
}, z.core.$strip>>;
|
|
22
|
+
/**
|
|
23
|
+
* Credentials injected into steps after resolution.
|
|
24
|
+
*
|
|
25
|
+
* Derived from the integration definitions — not hardcoded. If the integration
|
|
26
|
+
* auth output schemas change, these types update automatically.
|
|
27
|
+
*/
|
|
28
|
+
type OpenAICredentials = CredentialsOf<typeof openai>;
|
|
29
|
+
type AnthropicCredentials = CredentialsOf<typeof anthropic>;
|
|
30
|
+
//#endregion
|
|
31
|
+
//#region src/client.d.ts
|
|
32
|
+
/**
|
|
33
|
+
* Create an OpenAI provider from resolved credentials.
|
|
34
|
+
*
|
|
35
|
+
* @param credentials - Resolved credentials containing the API key
|
|
36
|
+
* @returns A configured `@ai-sdk/openai` provider instance
|
|
37
|
+
*/
|
|
38
|
+
declare function createOpenAIProvider(credentials: OpenAICredentials): _ai_sdk_openai0.OpenAIProvider;
|
|
39
|
+
/**
|
|
40
|
+
* Create an Anthropic provider from resolved credentials.
|
|
41
|
+
*
|
|
42
|
+
* @param credentials - Resolved credentials containing the API key
|
|
43
|
+
* @returns A configured `@ai-sdk/anthropic` provider instance
|
|
44
|
+
*/
|
|
45
|
+
declare function createAnthropicProvider(credentials: AnthropicCredentials): _ai_sdk_anthropic0.AnthropicProvider;
|
|
46
|
+
/**
|
|
47
|
+
* Types of the configured provider instances.
|
|
48
|
+
*/
|
|
49
|
+
type OpenAIProvider = ReturnType<typeof createOpenAIProvider>;
|
|
50
|
+
type AnthropicProvider = ReturnType<typeof createAnthropicProvider>;
|
|
51
|
+
//#endregion
|
|
52
|
+
//#region src/schemas.d.ts
|
|
53
|
+
/**
|
|
54
|
+
* Token usage information returned by the AI SDK.
|
|
55
|
+
*/
|
|
56
|
+
declare const tokenUsageSchema: z.ZodObject<{
|
|
57
|
+
promptTokens: z.ZodNumber;
|
|
58
|
+
completionTokens: z.ZodNumber;
|
|
59
|
+
totalTokens: z.ZodNumber;
|
|
60
|
+
}, z.core.$strip>;
|
|
61
|
+
type TokenUsage = z.infer<typeof tokenUsageSchema>;
|
|
62
|
+
/**
|
|
63
|
+
* Schema for generateText response.
|
|
64
|
+
*/
|
|
65
|
+
declare const generateTextResponseSchema: z.ZodObject<{
|
|
66
|
+
text: z.ZodString;
|
|
67
|
+
finishReason: z.ZodString;
|
|
68
|
+
usage: z.ZodObject<{
|
|
69
|
+
promptTokens: z.ZodNumber;
|
|
70
|
+
completionTokens: z.ZodNumber;
|
|
71
|
+
totalTokens: z.ZodNumber;
|
|
72
|
+
}, z.core.$strip>;
|
|
73
|
+
}, z.core.$strip>;
|
|
74
|
+
type GenerateTextResponse = z.infer<typeof generateTextResponseSchema>;
|
|
75
|
+
/**
|
|
76
|
+
* Schema factory for generateObject responses.
|
|
77
|
+
* The object shape is determined by the caller's schema.
|
|
78
|
+
*/
|
|
79
|
+
declare function generateObjectResponseSchema<T extends z.ZodTypeAny>(objectSchema: T): z.ZodObject<{
|
|
80
|
+
object: T;
|
|
81
|
+
finishReason: z.ZodString;
|
|
82
|
+
usage: z.ZodObject<{
|
|
83
|
+
promptTokens: z.ZodNumber;
|
|
84
|
+
completionTokens: z.ZodNumber;
|
|
85
|
+
totalTokens: z.ZodNumber;
|
|
86
|
+
}, z.core.$strip>;
|
|
87
|
+
}, z.core.$strip>;
|
|
88
|
+
/**
|
|
89
|
+
* Schema for streamText collected response (after stream completes).
|
|
90
|
+
*/
|
|
91
|
+
declare const streamTextResponseSchema: z.ZodObject<{
|
|
92
|
+
text: z.ZodString;
|
|
93
|
+
finishReason: z.ZodString;
|
|
94
|
+
usage: z.ZodObject<{
|
|
95
|
+
promptTokens: z.ZodNumber;
|
|
96
|
+
completionTokens: z.ZodNumber;
|
|
97
|
+
totalTokens: z.ZodNumber;
|
|
98
|
+
}, z.core.$strip>;
|
|
99
|
+
}, z.core.$strip>;
|
|
100
|
+
type StreamTextResponse = z.infer<typeof streamTextResponseSchema>;
|
|
101
|
+
/**
|
|
102
|
+
* Supported provider names.
|
|
103
|
+
*/
|
|
104
|
+
declare const aiProviderName: z.ZodEnum<{
|
|
105
|
+
openai: "openai";
|
|
106
|
+
anthropic: "anthropic";
|
|
107
|
+
}>;
|
|
108
|
+
type AIProviderName = z.infer<typeof aiProviderName>;
|
|
109
|
+
/**
|
|
110
|
+
* Common model identifiers for convenience.
|
|
111
|
+
*/
|
|
112
|
+
declare const openaiModels: z.ZodEnum<{
|
|
113
|
+
"gpt-4-turbo": "gpt-4-turbo";
|
|
114
|
+
"gpt-4o-mini": "gpt-4o-mini";
|
|
115
|
+
"gpt-4o": "gpt-4o";
|
|
116
|
+
o1: "o1";
|
|
117
|
+
"o3-mini": "o3-mini";
|
|
118
|
+
"o1-mini": "o1-mini";
|
|
119
|
+
}>;
|
|
120
|
+
type OpenAIModel = z.infer<typeof openaiModels>;
|
|
121
|
+
declare const anthropicModels: z.ZodEnum<{
|
|
122
|
+
"claude-opus-4-5-20251101": "claude-opus-4-5-20251101";
|
|
123
|
+
"claude-sonnet-4-20250514": "claude-sonnet-4-20250514";
|
|
124
|
+
"claude-haiku-3-5-20241022": "claude-haiku-3-5-20241022";
|
|
125
|
+
}>;
|
|
126
|
+
type AnthropicModel = z.infer<typeof anthropicModels>;
|
|
127
|
+
/**
|
|
128
|
+
* Schema for a message in a conversation.
|
|
129
|
+
*/
|
|
130
|
+
declare const aiMessageSchema: z.ZodObject<{
|
|
131
|
+
role: z.ZodEnum<{
|
|
132
|
+
system: "system";
|
|
133
|
+
user: "user";
|
|
134
|
+
assistant: "assistant";
|
|
135
|
+
}>;
|
|
136
|
+
content: z.ZodString;
|
|
137
|
+
}, z.core.$strip>;
|
|
138
|
+
type AIMessage = z.infer<typeof aiMessageSchema>;
|
|
139
|
+
//#endregion
|
|
140
|
+
export { type AIMessage, type AIProviderName, type AnthropicCredentials, type AnthropicModel, type AnthropicProvider, type GenerateTextResponse, type OpenAICredentials, type OpenAIModel, type OpenAIProvider, type StreamTextResponse, type TokenUsage, aiMessageSchema, aiProviderName, anthropic, anthropicModels, createAnthropicProvider, createOpenAIProvider, generateObjectAnthropic, generateObjectAnthropicTool, generateObjectOpenAI, generateObjectOpenAITool, generateObjectResponseSchema, generateTextAnthropic, generateTextAnthropicTool, generateTextOpenAI, generateTextOpenAITool, generateTextResponseSchema, openai, openaiModels, streamTextAnthropic, streamTextAnthropicTool, streamTextOpenAI, streamTextOpenAITool, streamTextResponseSchema, tokenUsageSchema };
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
import { a as generateTextResponseSchema, c as tokenUsageSchema, f as createAnthropicProvider, i as generateObjectResponseSchema, l as anthropic, n as aiProviderName, o as openaiModels, p as createOpenAIProvider, r as anthropicModels, s as streamTextResponseSchema, t as aiMessageSchema, u as openai } from "./schemas-Dd9H2hSI.mjs";
|
|
2
|
+
import { generateObjectAnthropic, generateObjectAnthropicTool, generateObjectOpenAI, generateObjectOpenAITool, generateTextAnthropic, generateTextAnthropicTool, generateTextOpenAI, generateTextOpenAITool } from "./generate.mjs";
|
|
3
|
+
import { streamTextAnthropic, streamTextAnthropicTool, streamTextOpenAI, streamTextOpenAITool } from "./stream.mjs";
|
|
4
|
+
|
|
5
|
+
export { aiMessageSchema, aiProviderName, anthropic, anthropicModels, createAnthropicProvider, createOpenAIProvider, generateObjectAnthropic, generateObjectAnthropicTool, generateObjectOpenAI, generateObjectOpenAITool, generateObjectResponseSchema, generateTextAnthropic, generateTextAnthropicTool, generateTextOpenAI, generateTextOpenAITool, generateTextResponseSchema, openai, openaiModels, streamTextAnthropic, streamTextAnthropicTool, streamTextOpenAI, streamTextOpenAITool, streamTextResponseSchema, tokenUsageSchema };
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
2
|
+
import { createOpenAI } from "@ai-sdk/openai";
|
|
3
|
+
import { z } from "zod";
|
|
4
|
+
import { defineIntegration, defineStep } from "@wellze/workflow-core";
|
|
5
|
+
import { defineTool } from "@wellze/workflow-core/agent/tool";
|
|
6
|
+
|
|
7
|
+
//#region src/client.ts
|
|
8
|
+
/**
|
|
9
|
+
* ai/client.ts
|
|
10
|
+
*
|
|
11
|
+
* Provider factory functions that create AI SDK provider instances
|
|
12
|
+
* from resolved credentials.
|
|
13
|
+
*
|
|
14
|
+
* Keeps SDK instantiation in one place so steps don't need to import
|
|
15
|
+
* provider packages directly.
|
|
16
|
+
*/
|
|
17
|
+
/**
 * Build a configured `@ai-sdk/openai` provider from resolved credentials.
 *
 * @param credentials - Resolved credentials containing `OPENAI_API_KEY`
 * @returns A provider instance bound to that API key
 */
function createOpenAIProvider(credentials) {
	const { OPENAI_API_KEY: apiKey } = credentials;
	return createOpenAI({ apiKey });
}
|
|
26
|
+
/**
 * Build a configured `@ai-sdk/anthropic` provider from resolved credentials.
 *
 * @param credentials - Resolved credentials containing `ANTHROPIC_API_KEY`
 * @returns A provider instance bound to that API key
 */
function createAnthropicProvider(credentials) {
	const { ANTHROPIC_API_KEY: apiKey } = credentials;
	return createAnthropic({ apiKey });
}
|
|
35
|
+
|
|
36
|
+
//#endregion
|
|
37
|
+
//#region src/factory.ts
|
|
38
|
+
/**
 * Factory: create both a workflow step and an agent tool from one AI
 * operation definition. The integration is passed in so the same factory
 * serves both the OpenAI and Anthropic operations; the binding it produces
 * is scope-agnostic (no scope is set at this level).
 */
function defineAIOperation(config) {
	const { name, description, integration, input, output, run, needsApproval } = config;
	const binding = { integration };
	// Everything except `run`/`needsApproval` is identical for step and tool.
	const shared = {
		name,
		description,
		integrations: [binding],
		input,
		output
	};
	return {
		step: defineStep({
			...shared,
			run: async (stepInput, credentials) => run(stepInput, credentials)
		}),
		tool: defineTool({
			...shared,
			needsApproval,
			run: async (toolInput, credentials) => run(toolInput, credentials)
		})
	};
}
|
|
70
|
+
|
|
71
|
+
//#endregion
|
|
72
|
+
//#region src/integration.ts
|
|
73
|
+
/**
|
|
74
|
+
* ai/integration.ts
|
|
75
|
+
*
|
|
76
|
+
* AI provider integration definitions — passthrough auth via API keys.
|
|
77
|
+
*
|
|
78
|
+
* Defines separate integrations for each supported LLM provider.
|
|
79
|
+
* Credentials are API keys that get injected into steps for
|
|
80
|
+
* provider instantiation.
|
|
81
|
+
*/
|
|
82
|
+
/**
 * OpenAI integration — passthrough auth: the stored API key is returned
 * unchanged as the resolved credential.
 */
const openai = defineIntegration({
	name: "openai",
	namespace: "wellze",
	description: "OpenAI — GPT models for text generation, structured output, and streaming",
	auth: {
		credentials: z.object({ OPENAI_API_KEY: z.string() }),
		output: z.object({ OPENAI_API_KEY: z.string() }),
		resolve: async ({ OPENAI_API_KEY }) => ({ OPENAI_API_KEY })
	}
});
|
|
95
|
+
/**
 * Anthropic integration — passthrough auth: the stored API key is returned
 * unchanged as the resolved credential.
 */
const anthropic = defineIntegration({
	name: "anthropic",
	namespace: "wellze",
	description: "Anthropic — Claude models for text generation, structured output, and streaming",
	auth: {
		credentials: z.object({ ANTHROPIC_API_KEY: z.string() }),
		output: z.object({ ANTHROPIC_API_KEY: z.string() }),
		resolve: async ({ ANTHROPIC_API_KEY }) => ({ ANTHROPIC_API_KEY })
	}
});
|
|
108
|
+
|
|
109
|
+
//#endregion
|
|
110
|
+
//#region src/schemas.ts
|
|
111
|
+
/**
|
|
112
|
+
* ai/schemas.ts
|
|
113
|
+
*
|
|
114
|
+
* Zod schemas for AI SDK response shapes.
|
|
115
|
+
*
|
|
116
|
+
* These schemas define the output types for generate and stream steps.
|
|
117
|
+
* Types are inferred from schemas — never define types separately.
|
|
118
|
+
*/
|
|
119
|
+
/** Token usage counters reported with every generation result. */
const tokenUsageSchema = z.object({
	promptTokens: z.number(),
	completionTokens: z.number(),
	totalTokens: z.number()
});
/** Result shape of a completed `generateText` call. */
const generateTextResponseSchema = z.object({
	text: z.string(),
	finishReason: z.string(),
	usage: tokenUsageSchema
});
/**
 * Build the response schema for `generateObject`, parameterized by the
 * caller-supplied object schema.
 */
function generateObjectResponseSchema(objectSchema) {
	return z.object({
		object: objectSchema,
		finishReason: z.string(),
		usage: tokenUsageSchema
	});
}
/** Result shape once a `streamText` stream has been fully collected. */
const streamTextResponseSchema = z.object({
	text: z.string(),
	finishReason: z.string(),
	usage: tokenUsageSchema
});
/** Names of the supported LLM providers. */
const aiProviderName = z.enum(["openai", "anthropic"]);
/** Convenience enums of commonly used model identifiers. */
const openaiModels = z.enum(["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "o1", "o1-mini", "o3-mini"]);
const anthropicModels = z.enum(["claude-opus-4-5-20251101", "claude-sonnet-4-20250514", "claude-haiku-3-5-20241022"]);
/** One conversation message: a role plus plain-text content. */
const aiMessageSchema = z.object({
	role: z.enum(["system", "user", "assistant"]),
	content: z.string()
});
|
|
185
|
+
|
|
186
|
+
//#endregion
|
|
187
|
+
export { generateTextResponseSchema as a, tokenUsageSchema as c, defineAIOperation as d, createAnthropicProvider as f, generateObjectResponseSchema as i, anthropic as l, aiProviderName as n, openaiModels as o, createOpenAIProvider as p, anthropicModels as r, streamTextResponseSchema as s, aiMessageSchema as t, openai as u };
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import * as _wellze_workflow_core0 from "@wellze/workflow-core";
|
|
3
|
+
|
|
4
|
+
//#region src/stream.d.ts
|
|
5
|
+
/**
 * Declarations for the "Stream Text (OpenAI)" workflow step and its tool
 * wrapper (bundler-emitted from src/stream.ts — see the //#region marker).
 * The input type mirrors the AI SDK streamText options (all optional);
 * the output is the collected stream result ({ text, finishReason, usage }).
 */
declare const streamTextOpenAI: _wellze_workflow_core0.StepWithMetadata<{
  model?: string | undefined;
  system?: string | undefined;
  prompt?: string | undefined;
  messages?: {
    role: "system" | "user" | "assistant";
    content: string;
  }[] | undefined;
  maxOutputTokens?: number | undefined;
  temperature?: number | undefined;
  topP?: number | undefined;
  stopSequences?: string[] | undefined;
}, {
  text: string;
  finishReason: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}, [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
  OPENAI_API_KEY: z.ZodString;
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
  ANTHROPIC_API_KEY: z.ZodString;
}, z.core.$strip>>>]>, streamTextOpenAITool: _wellze_workflow_core0.ToolWithMetadata<{
  model?: string | undefined;
  system?: string | undefined;
  prompt?: string | undefined;
  messages?: {
    role: "system" | "user" | "assistant";
    content: string;
  }[] | undefined;
  maxOutputTokens?: number | undefined;
  temperature?: number | undefined;
  topP?: number | undefined;
  stopSequences?: string[] | undefined;
}, {
  text: string;
  finishReason: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}, readonly [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
  OPENAI_API_KEY: z.ZodString;
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
  ANTHROPIC_API_KEY: z.ZodString;
}, z.core.$strip>>>]>;
|
|
54
|
+
/**
 * Declarations for the "Stream Text (Anthropic)" workflow step and its tool
 * wrapper (bundler-emitted from src/stream.ts). Identical in shape to the
 * OpenAI variant: AI SDK streamText options in, collected stream result
 * ({ text, finishReason, usage }) out.
 */
declare const streamTextAnthropic: _wellze_workflow_core0.StepWithMetadata<{
  model?: string | undefined;
  system?: string | undefined;
  prompt?: string | undefined;
  messages?: {
    role: "system" | "user" | "assistant";
    content: string;
  }[] | undefined;
  maxOutputTokens?: number | undefined;
  temperature?: number | undefined;
  topP?: number | undefined;
  stopSequences?: string[] | undefined;
}, {
  text: string;
  finishReason: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}, [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
  OPENAI_API_KEY: z.ZodString;
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
  ANTHROPIC_API_KEY: z.ZodString;
}, z.core.$strip>>>]>, streamTextAnthropicTool: _wellze_workflow_core0.ToolWithMetadata<{
  model?: string | undefined;
  system?: string | undefined;
  prompt?: string | undefined;
  messages?: {
    role: "system" | "user" | "assistant";
    content: string;
  }[] | undefined;
  maxOutputTokens?: number | undefined;
  temperature?: number | undefined;
  topP?: number | undefined;
  stopSequences?: string[] | undefined;
}, {
  text: string;
  finishReason: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}, readonly [_wellze_workflow_core0.IntegrationBinding<_wellze_workflow_core0.IntegrationDefinition<"openai", z.ZodObject<{
  OPENAI_API_KEY: z.ZodString;
}, z.core.$strip>> | _wellze_workflow_core0.IntegrationDefinition<"anthropic", z.ZodObject<{
  ANTHROPIC_API_KEY: z.ZodString;
}, z.core.$strip>>>]>;
|
|
103
|
+
//#endregion
|
|
104
|
+
export { streamTextAnthropic, streamTextAnthropicTool, streamTextOpenAI, streamTextOpenAITool };
|
package/dist/stream.mjs
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import { d as defineAIOperation, f as createAnthropicProvider, l as anthropic, p as createOpenAIProvider, s as streamTextResponseSchema, t as aiMessageSchema, u as openai } from "./schemas-Dd9H2hSI.mjs";
|
|
2
|
+
import { streamText } from "ai";
|
|
3
|
+
import { z } from "zod";
|
|
4
|
+
|
|
5
|
+
//#region src/stream.ts
|
|
6
|
+
/**
|
|
7
|
+
* ai/stream.ts
|
|
8
|
+
*
|
|
9
|
+
* Steps for streaming text generation using the AI SDK.
|
|
10
|
+
*
|
|
11
|
+
* These steps collect the full stream and return the completed result.
|
|
12
|
+
* For true streaming in HTTP responses, consumers should use the AI SDK
|
|
13
|
+
* directly with `streamText().toTextStreamResponse()`.
|
|
14
|
+
*/
|
|
15
|
+
/**
 * Stream Text (OpenAI) step/tool.
 *
 * Runs the AI SDK's `streamText` against an OpenAI model and collects the
 * finished stream into { text, finishReason, usage }. For true incremental
 * streaming (HTTP responses) use the AI SDK directly.
 *
 * All inputs are optional; a non-empty `messages` array takes precedence
 * over `prompt`.
 */
const { step: streamTextOpenAI, tool: streamTextOpenAITool } = defineAIOperation({
  name: "Stream Text (OpenAI)",
  description: "Stream text generation using an OpenAI model, returning the collected result",
  integration: openai,
  input: z.object({
    model: z.string().optional(),
    system: z.string().optional(),
    prompt: z.string().optional(),
    messages: z.array(aiMessageSchema).optional(),
    maxOutputTokens: z.number().optional(),
    temperature: z.number().optional(),
    topP: z.number().optional(),
    stopSequences: z.array(z.string()).optional()
  }),
  output: streamTextResponseSchema,
  run: async (input, credentials) => {
    const result = streamText({
      model: createOpenAIProvider(credentials)(input.model ?? "gpt-4o"),
      system: input.system,
      // Fix: the previous `input.messages ? …` check treated an *empty*
      // array as a conversation (arrays are always truthy), dropping the
      // prompt and sending an invalid empty message list. Require at
      // least one message before preferring `messages`.
      ...(input.messages && input.messages.length > 0
        ? { messages: input.messages }
        : { prompt: input.prompt ?? "" }),
      maxOutputTokens: input.maxOutputTokens,
      temperature: input.temperature,
      topP: input.topP,
      stopSequences: input.stopSequences
    });
    // These promises resolve once the underlying stream has finished.
    const text = await result.text;
    const finishReason = await result.finishReason;
    const usage = await result.usage;
    return {
      text,
      finishReason,
      usage: {
        promptTokens: usage.inputTokens ?? 0,
        completionTokens: usage.outputTokens ?? 0,
        // Prefer the SDK-reported total (it can include tokens beyond
        // input+output, e.g. reasoning tokens); fall back to the sum.
        totalTokens: usage.totalTokens ?? (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0)
      }
    };
  }
});
|
|
54
|
+
/**
 * Stream Text (Anthropic) step/tool.
 *
 * Runs the AI SDK's `streamText` against an Anthropic model and collects
 * the finished stream into { text, finishReason, usage }. For true
 * incremental streaming (HTTP responses) use the AI SDK directly.
 *
 * All inputs are optional; a non-empty `messages` array takes precedence
 * over `prompt`. `maxOutputTokens` defaults to 4096 (kept from the
 * original; Anthropic requests need an output-token cap).
 */
const { step: streamTextAnthropic, tool: streamTextAnthropicTool } = defineAIOperation({
  name: "Stream Text (Anthropic)",
  description: "Stream text generation using an Anthropic model, returning the collected result",
  integration: anthropic,
  input: z.object({
    model: z.string().optional(),
    system: z.string().optional(),
    prompt: z.string().optional(),
    messages: z.array(aiMessageSchema).optional(),
    maxOutputTokens: z.number().optional(),
    temperature: z.number().optional(),
    topP: z.number().optional(),
    stopSequences: z.array(z.string()).optional()
  }),
  output: streamTextResponseSchema,
  run: async (input, credentials) => {
    const result = streamText({
      model: createAnthropicProvider(credentials)(input.model ?? "claude-sonnet-4-20250514"),
      system: input.system,
      // Fix: the previous `input.messages ? …` check treated an *empty*
      // array as a conversation (arrays are always truthy), dropping the
      // prompt and sending an invalid empty message list. Require at
      // least one message before preferring `messages`.
      ...(input.messages && input.messages.length > 0
        ? { messages: input.messages }
        : { prompt: input.prompt ?? "" }),
      maxOutputTokens: input.maxOutputTokens ?? 4096,
      temperature: input.temperature,
      topP: input.topP,
      stopSequences: input.stopSequences
    });
    // These promises resolve once the underlying stream has finished.
    const text = await result.text;
    const finishReason = await result.finishReason;
    const usage = await result.usage;
    return {
      text,
      finishReason,
      usage: {
        promptTokens: usage.inputTokens ?? 0,
        completionTokens: usage.outputTokens ?? 0,
        // Prefer the SDK-reported total (it can include tokens beyond
        // input+output, e.g. reasoning tokens); fall back to the sum.
        totalTokens: usage.totalTokens ?? (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0)
      }
    };
  }
});
|
|
93
|
+
|
|
94
|
+
//#endregion
|
|
95
|
+
export { streamTextAnthropic, streamTextAnthropicTool, streamTextOpenAI, streamTextOpenAITool };
|
package/package.json
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@wellze/integration-ai",
|
|
3
|
+
"version": "0.0.0",
|
|
4
|
+
"private": false,
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"types": "./dist/index.d.mts",
|
|
9
|
+
"default": "./dist/index.mjs"
|
|
10
|
+
},
|
|
11
|
+
"./generate": {
|
|
12
|
+
"types": "./dist/generate.d.mts",
|
|
13
|
+
"default": "./dist/generate.mjs"
|
|
14
|
+
},
|
|
15
|
+
"./stream": {
|
|
16
|
+
"types": "./dist/stream.d.mts",
|
|
17
|
+
"default": "./dist/stream.mjs"
|
|
18
|
+
}
|
|
19
|
+
},
|
|
20
|
+
"files": [
|
|
21
|
+
"dist",
|
|
22
|
+
"README.md",
|
|
23
|
+
"LICENSE"
|
|
24
|
+
],
|
|
25
|
+
"dependencies": {
|
|
26
|
+
"@ai-sdk/anthropic": "^3.0.45",
|
|
27
|
+
"@ai-sdk/openai": "^3.0.30",
|
|
28
|
+
"ai": "^6.0.91",
|
|
29
|
+
"zod": "^4.3.6",
|
|
30
|
+
"@wellze/workflow-core": "0.0.0"
|
|
31
|
+
},
|
|
32
|
+
"devDependencies": {
|
|
33
|
+
"@types/node": "^22.19.11",
|
|
34
|
+
"tsdown": "^0.20.3",
|
|
35
|
+
"typescript": "^5.9.3",
|
|
36
|
+
"vitest": "^4.0.18",
|
|
37
|
+
"@wellze/test-utils": "0.0.0",
|
|
38
|
+
"@wellze/typescript-config": "0.0.0"
|
|
39
|
+
},
|
|
40
|
+
"keywords": [
|
|
41
|
+
"ai",
|
|
42
|
+
"llm",
|
|
43
|
+
"wellze",
|
|
44
|
+
"integration",
|
|
45
|
+
"openai",
|
|
46
|
+
"anthropic"
|
|
47
|
+
],
|
|
48
|
+
"repository": {
|
|
49
|
+
"type": "git",
|
|
50
|
+
"url": "git+https://github.com/your-org/wellze.git",
|
|
51
|
+
"directory": "packages/integration-ai"
|
|
52
|
+
},
|
|
53
|
+
"license": "MIT",
|
|
54
|
+
"scripts": {
|
|
55
|
+
"typecheck": "tsgo --build",
|
|
56
|
+
"build": "tsdown",
|
|
57
|
+
"lint": "biome check --write .",
|
|
58
|
+
"test:unit": "vitest run --passWithNoTests"
|
|
59
|
+
}
|
|
60
|
+
}
|