@aigne/core 1.14.0 → 1.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. package/CHANGELOG.md +27 -0
  2. package/README.md +9 -7
  3. package/README.zh.md +9 -7
  4. package/lib/cjs/agents/agent.d.ts +129 -6
  5. package/lib/cjs/agents/agent.js +112 -20
  6. package/lib/cjs/agents/ai-agent.d.ts +3 -2
  7. package/lib/cjs/agents/ai-agent.js +12 -9
  8. package/lib/{esm/models → cjs/agents}/chat-model.d.ts +24 -13
  9. package/lib/cjs/{models → agents}/chat-model.js +48 -7
  10. package/lib/cjs/agents/guide-rail-agent.d.ts +62 -0
  11. package/lib/cjs/agents/guide-rail-agent.js +14 -0
  12. package/lib/cjs/agents/mcp-agent.js +9 -9
  13. package/lib/cjs/agents/team-agent.js +1 -1
  14. package/lib/cjs/aigne/aigne.d.ts +3 -2
  15. package/lib/cjs/aigne/aigne.js +2 -2
  16. package/lib/cjs/aigne/context.d.ts +2 -1
  17. package/lib/cjs/aigne/context.js +8 -1
  18. package/lib/cjs/index.d.ts +1 -1
  19. package/lib/cjs/index.js +1 -1
  20. package/lib/cjs/loader/agent-yaml.d.ts +1 -1
  21. package/lib/cjs/loader/index.d.ts +18 -11
  22. package/lib/cjs/loader/index.js +8 -27
  23. package/lib/cjs/memory/retriever.d.ts +2 -2
  24. package/lib/cjs/prompt/prompt-builder.d.ts +3 -3
  25. package/lib/cjs/prompt/template.d.ts +3 -3
  26. package/lib/cjs/prompt/template.js +1 -1
  27. package/lib/cjs/utils/json-schema.js +1 -1
  28. package/lib/cjs/utils/logger.d.ts +33 -8
  29. package/lib/cjs/utils/logger.js +63 -5
  30. package/lib/cjs/utils/model-utils.d.ts +1 -1
  31. package/lib/cjs/utils/stream-utils.d.ts +3 -2
  32. package/lib/cjs/utils/stream-utils.js +50 -26
  33. package/lib/cjs/utils/type-utils.d.ts +5 -0
  34. package/lib/dts/agents/agent.d.ts +129 -6
  35. package/lib/dts/agents/ai-agent.d.ts +3 -2
  36. package/lib/{cjs/models → dts/agents}/chat-model.d.ts +24 -13
  37. package/lib/dts/agents/guide-rail-agent.d.ts +62 -0
  38. package/lib/dts/aigne/aigne.d.ts +3 -2
  39. package/lib/dts/aigne/context.d.ts +2 -1
  40. package/lib/dts/index.d.ts +1 -1
  41. package/lib/dts/loader/agent-yaml.d.ts +1 -1
  42. package/lib/dts/loader/index.d.ts +18 -11
  43. package/lib/dts/memory/retriever.d.ts +2 -2
  44. package/lib/dts/prompt/prompt-builder.d.ts +3 -3
  45. package/lib/dts/prompt/template.d.ts +3 -3
  46. package/lib/dts/utils/logger.d.ts +33 -8
  47. package/lib/dts/utils/model-utils.d.ts +1 -1
  48. package/lib/dts/utils/stream-utils.d.ts +3 -2
  49. package/lib/dts/utils/type-utils.d.ts +5 -0
  50. package/lib/esm/agents/agent.d.ts +129 -6
  51. package/lib/esm/agents/agent.js +112 -20
  52. package/lib/esm/agents/ai-agent.d.ts +3 -2
  53. package/lib/esm/agents/ai-agent.js +12 -9
  54. package/lib/{dts/models → esm/agents}/chat-model.d.ts +24 -13
  55. package/lib/esm/{models → agents}/chat-model.js +48 -7
  56. package/lib/esm/agents/guide-rail-agent.d.ts +62 -0
  57. package/lib/esm/agents/guide-rail-agent.js +11 -0
  58. package/lib/esm/agents/mcp-agent.js +9 -9
  59. package/lib/esm/agents/team-agent.js +2 -2
  60. package/lib/esm/aigne/aigne.d.ts +3 -2
  61. package/lib/esm/aigne/aigne.js +2 -2
  62. package/lib/esm/aigne/context.d.ts +2 -1
  63. package/lib/esm/aigne/context.js +9 -2
  64. package/lib/esm/index.d.ts +1 -1
  65. package/lib/esm/index.js +1 -1
  66. package/lib/esm/loader/agent-yaml.d.ts +1 -1
  67. package/lib/esm/loader/index.d.ts +18 -11
  68. package/lib/esm/loader/index.js +8 -27
  69. package/lib/esm/memory/retriever.d.ts +2 -2
  70. package/lib/esm/prompt/prompt-builder.d.ts +3 -3
  71. package/lib/esm/prompt/template.d.ts +3 -3
  72. package/lib/esm/prompt/template.js +1 -1
  73. package/lib/esm/utils/json-schema.js +1 -1
  74. package/lib/esm/utils/logger.d.ts +33 -8
  75. package/lib/esm/utils/logger.js +61 -4
  76. package/lib/esm/utils/model-utils.d.ts +1 -1
  77. package/lib/esm/utils/stream-utils.d.ts +3 -2
  78. package/lib/esm/utils/stream-utils.js +48 -25
  79. package/lib/esm/utils/type-utils.d.ts +5 -0
  80. package/package.json +1 -20
  81. package/lib/cjs/client/client.d.ts +0 -97
  82. package/lib/cjs/client/client.js +0 -87
  83. package/lib/cjs/client/index.d.ts +0 -1
  84. package/lib/cjs/client/index.js +0 -17
  85. package/lib/cjs/models/bedrock-chat-model.d.ts +0 -79
  86. package/lib/cjs/models/bedrock-chat-model.js +0 -303
  87. package/lib/cjs/models/claude-chat-model.d.ts +0 -114
  88. package/lib/cjs/models/claude-chat-model.js +0 -317
  89. package/lib/cjs/models/deepseek-chat-model.d.ts +0 -23
  90. package/lib/cjs/models/deepseek-chat-model.js +0 -35
  91. package/lib/cjs/models/gemini-chat-model.d.ts +0 -23
  92. package/lib/cjs/models/gemini-chat-model.js +0 -35
  93. package/lib/cjs/models/ollama-chat-model.d.ts +0 -22
  94. package/lib/cjs/models/ollama-chat-model.js +0 -34
  95. package/lib/cjs/models/open-router-chat-model.d.ts +0 -22
  96. package/lib/cjs/models/open-router-chat-model.js +0 -34
  97. package/lib/cjs/models/openai-chat-model.d.ts +0 -166
  98. package/lib/cjs/models/openai-chat-model.js +0 -415
  99. package/lib/cjs/models/xai-chat-model.d.ts +0 -21
  100. package/lib/cjs/models/xai-chat-model.js +0 -33
  101. package/lib/cjs/server/error.d.ts +0 -15
  102. package/lib/cjs/server/error.js +0 -22
  103. package/lib/cjs/server/index.d.ts +0 -2
  104. package/lib/cjs/server/index.js +0 -18
  105. package/lib/cjs/server/server.d.ts +0 -135
  106. package/lib/cjs/server/server.js +0 -188
  107. package/lib/dts/client/client.d.ts +0 -97
  108. package/lib/dts/client/index.d.ts +0 -1
  109. package/lib/dts/models/bedrock-chat-model.d.ts +0 -79
  110. package/lib/dts/models/claude-chat-model.d.ts +0 -114
  111. package/lib/dts/models/deepseek-chat-model.d.ts +0 -23
  112. package/lib/dts/models/gemini-chat-model.d.ts +0 -23
  113. package/lib/dts/models/ollama-chat-model.d.ts +0 -22
  114. package/lib/dts/models/open-router-chat-model.d.ts +0 -22
  115. package/lib/dts/models/openai-chat-model.d.ts +0 -166
  116. package/lib/dts/models/xai-chat-model.d.ts +0 -21
  117. package/lib/dts/server/error.d.ts +0 -15
  118. package/lib/dts/server/index.d.ts +0 -2
  119. package/lib/dts/server/server.d.ts +0 -135
  120. package/lib/esm/client/client.d.ts +0 -97
  121. package/lib/esm/client/client.js +0 -83
  122. package/lib/esm/client/index.d.ts +0 -1
  123. package/lib/esm/client/index.js +0 -1
  124. package/lib/esm/models/bedrock-chat-model.d.ts +0 -79
  125. package/lib/esm/models/bedrock-chat-model.js +0 -298
  126. package/lib/esm/models/claude-chat-model.d.ts +0 -114
  127. package/lib/esm/models/claude-chat-model.js +0 -310
  128. package/lib/esm/models/deepseek-chat-model.d.ts +0 -23
  129. package/lib/esm/models/deepseek-chat-model.js +0 -31
  130. package/lib/esm/models/gemini-chat-model.d.ts +0 -23
  131. package/lib/esm/models/gemini-chat-model.js +0 -31
  132. package/lib/esm/models/ollama-chat-model.d.ts +0 -22
  133. package/lib/esm/models/ollama-chat-model.js +0 -30
  134. package/lib/esm/models/open-router-chat-model.d.ts +0 -22
  135. package/lib/esm/models/open-router-chat-model.js +0 -30
  136. package/lib/esm/models/openai-chat-model.d.ts +0 -166
  137. package/lib/esm/models/openai-chat-model.js +0 -405
  138. package/lib/esm/models/xai-chat-model.d.ts +0 -21
  139. package/lib/esm/models/xai-chat-model.js +0 -29
  140. package/lib/esm/server/error.d.ts +0 -15
  141. package/lib/esm/server/error.js +0 -18
  142. package/lib/esm/server/index.d.ts +0 -2
  143. package/lib/esm/server/index.js +0 -2
  144. package/lib/esm/server/server.d.ts +0 -135
  145. package/lib/esm/server/server.js +0 -181
@@ -1,310 +0,0 @@
1
- import Anthropic from "@anthropic-ai/sdk";
2
- import { z } from "zod";
3
- import { parseJSON } from "../utils/json-schema.js";
4
- import { mergeUsage } from "../utils/model-utils.js";
5
- import { agentResponseStreamToObject } from "../utils/stream-utils.js";
6
- import { checkArguments, isEmpty, isNonNullable, } from "../utils/type-utils.js";
7
- import { ChatModel, } from "./chat-model.js";
8
- const CHAT_MODEL_CLAUDE_DEFAULT_MODEL = "claude-3-7-sonnet-latest";
9
- /**
10
- * @hidden
11
- */
12
- export const claudeChatModelOptionsSchema = z.object({
13
- apiKey: z.string().optional(),
14
- model: z.string().optional(),
15
- modelOptions: z
16
- .object({
17
- model: z.string().optional(),
18
- temperature: z.number().optional(),
19
- topP: z.number().optional(),
20
- frequencyPenalty: z.number().optional(),
21
- presencePenalty: z.number().optional(),
22
- parallelToolCalls: z.boolean().optional().default(true),
23
- })
24
- .optional(),
25
- });
26
- /**
27
- * Implementation of the ChatModel interface for Anthropic's Claude API
28
- *
29
- * This model provides access to Claude's capabilities including:
30
- * - Text generation
31
- * - Tool use
32
- * - JSON structured output
33
- *
34
- * Default model: 'claude-3-7-sonnet-latest'
35
- *
36
- * @example
37
- * Here's how to create and use a Claude chat model:
38
- * {@includeCode ../../test/models/claude-chat-model.test.ts#example-claude-chat-model}
39
- *
40
- * @example
41
- * Here's an example with streaming response:
42
- * {@includeCode ../../test/models/claude-chat-model.test.ts#example-claude-chat-model-streaming-async-generator}
43
- */
44
- export class ClaudeChatModel extends ChatModel {
45
- options;
46
- constructor(options) {
47
- if (options)
48
- checkArguments("ClaudeChatModel", claudeChatModelOptionsSchema, options);
49
- super();
50
- this.options = options;
51
- }
52
- /**
53
- * @hidden
54
- */
55
- _client;
56
- get client() {
57
- const apiKey = this.options?.apiKey || process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_API_KEY;
58
- if (!apiKey)
59
- throw new Error("Api Key is required for ClaudeChatModel");
60
- this._client ??= new Anthropic({ apiKey });
61
- return this._client;
62
- }
63
- get modelOptions() {
64
- return this.options?.modelOptions;
65
- }
66
- /**
67
- * Process the input using Claude's chat model
68
- * @param input - The input to process
69
- * @returns The processed output from the model
70
- */
71
- process(input) {
72
- return this._process(input);
73
- }
74
- async _process(input) {
75
- const model = this.options?.model || CHAT_MODEL_CLAUDE_DEFAULT_MODEL;
76
- const disableParallelToolUse = input.modelOptions?.parallelToolCalls === false ||
77
- this.modelOptions?.parallelToolCalls === false;
78
- const body = {
79
- model,
80
- temperature: input.modelOptions?.temperature ?? this.modelOptions?.temperature,
81
- top_p: input.modelOptions?.topP ?? this.modelOptions?.topP,
82
- // TODO: make dynamic based on model https://docs.anthropic.com/en/docs/about-claude/models/all-models
83
- max_tokens: /claude-3-[5|7]/.test(model) ? 8192 : 4096,
84
- ...convertMessages(input),
85
- ...convertTools({ ...input, disableParallelToolUse }),
86
- };
87
- const stream = this.client.messages.stream({
88
- ...body,
89
- stream: true,
90
- });
91
- if (input.responseFormat?.type !== "json_schema") {
92
- return this.extractResultFromClaudeStream(stream, true);
93
- }
94
- const result = await this.extractResultFromClaudeStream(stream);
95
- // Claude doesn't support json_schema response and tool calls in the same request,
96
- // so we need to make a separate request for json_schema response when the tool calls is empty
97
- if (!result.toolCalls?.length && input.responseFormat?.type === "json_schema") {
98
- const output = await this.requestStructuredOutput(body, input.responseFormat);
99
- return {
100
- ...output,
101
- // merge usage from both requests
102
- usage: mergeUsage(result.usage, output.usage),
103
- };
104
- }
105
- return result;
106
- }
107
- async extractResultFromClaudeStream(stream, streaming) {
108
- const logs = [];
109
- const result = new ReadableStream({
110
- async start(controller) {
111
- try {
112
- const toolCalls = [];
113
- let usage;
114
- let model;
115
- for await (const chunk of stream) {
116
- if (chunk.type === "message_start") {
117
- if (!model) {
118
- model = chunk.message.model;
119
- controller.enqueue({ delta: { json: { model } } });
120
- }
121
- const { input_tokens, output_tokens } = chunk.message.usage;
122
- usage = {
123
- inputTokens: input_tokens,
124
- outputTokens: output_tokens,
125
- };
126
- }
127
- if (chunk.type === "message_delta" && usage) {
128
- usage.outputTokens = chunk.usage.output_tokens;
129
- }
130
- logs.push(JSON.stringify(chunk));
131
- // handle streaming text
132
- if (chunk.type === "content_block_delta" && chunk.delta.type === "text_delta") {
133
- controller.enqueue({ delta: { text: { text: chunk.delta.text } } });
134
- }
135
- if (chunk.type === "content_block_start" && chunk.content_block.type === "tool_use") {
136
- toolCalls[chunk.index] = {
137
- type: "function",
138
- id: chunk.content_block.id,
139
- function: {
140
- name: chunk.content_block.name,
141
- arguments: {},
142
- },
143
- args: "",
144
- };
145
- }
146
- if (chunk.type === "content_block_delta" && chunk.delta.type === "input_json_delta") {
147
- const call = toolCalls[chunk.index];
148
- if (!call)
149
- throw new Error("Tool call not found");
150
- call.args += chunk.delta.partial_json;
151
- }
152
- }
153
- controller.enqueue({ delta: { json: { usage } } });
154
- if (toolCalls.length) {
155
- controller.enqueue({
156
- delta: {
157
- json: {
158
- toolCalls: toolCalls
159
- .map(({ args, ...c }) => ({
160
- ...c,
161
- function: {
162
- ...c.function,
163
- // NOTE: claude may return a blank string for empty object (the tool's input schema is a empty object)
164
- arguments: args.trim() ? parseJSON(args) : {},
165
- },
166
- }))
167
- .filter(isNonNullable),
168
- },
169
- },
170
- });
171
- }
172
- controller.close();
173
- }
174
- catch (error) {
175
- controller.error(error);
176
- }
177
- },
178
- });
179
- return streaming ? result : await agentResponseStreamToObject(result);
180
- }
181
- async requestStructuredOutput(body, responseFormat) {
182
- if (responseFormat?.type !== "json_schema") {
183
- throw new Error("Expected json_schema response format");
184
- }
185
- const result = await this.client.messages.create({
186
- ...body,
187
- tools: [
188
- {
189
- name: "generate_json",
190
- description: "Generate a json result by given context",
191
- input_schema: responseFormat.jsonSchema.schema,
192
- },
193
- ],
194
- tool_choice: {
195
- type: "tool",
196
- name: "generate_json",
197
- disable_parallel_tool_use: true,
198
- },
199
- stream: false,
200
- });
201
- const jsonTool = result.content.find((i) => i.type === "tool_use" && i.name === "generate_json");
202
- if (!jsonTool)
203
- throw new Error("Json tool not found");
204
- return {
205
- json: jsonTool.input,
206
- model: result.model,
207
- usage: {
208
- inputTokens: result.usage.input_tokens,
209
- outputTokens: result.usage.output_tokens,
210
- },
211
- };
212
- }
213
- }
214
- function convertMessages({ messages, responseFormat }) {
215
- const systemMessages = [];
216
- const msgs = [];
217
- for (const msg of messages) {
218
- if (msg.role === "system") {
219
- if (typeof msg.content !== "string")
220
- throw new Error("System message must have content");
221
- systemMessages.push(msg.content);
222
- }
223
- else if (msg.role === "tool") {
224
- if (!msg.toolCallId)
225
- throw new Error("Tool message must have toolCallId");
226
- if (typeof msg.content !== "string")
227
- throw new Error("Tool message must have string content");
228
- msgs.push({
229
- role: "user",
230
- content: [{ type: "tool_result", tool_use_id: msg.toolCallId, content: msg.content }],
231
- });
232
- }
233
- else if (msg.role === "user") {
234
- if (!msg.content)
235
- throw new Error("User message must have content");
236
- msgs.push({ role: "user", content: convertContent(msg.content) });
237
- }
238
- else if (msg.role === "agent") {
239
- if (msg.toolCalls?.length) {
240
- msgs.push({
241
- role: "assistant",
242
- content: msg.toolCalls.map((i) => ({
243
- type: "tool_use",
244
- id: i.id,
245
- name: i.function.name,
246
- input: i.function.arguments,
247
- })),
248
- });
249
- }
250
- else if (msg.content) {
251
- msgs.push({ role: "assistant", content: convertContent(msg.content) });
252
- }
253
- else {
254
- throw new Error("Agent message must have content or toolCalls");
255
- }
256
- }
257
- }
258
- if (responseFormat?.type === "json_schema") {
259
- systemMessages.push(`You should provide a json response with schema: ${JSON.stringify(responseFormat.jsonSchema.schema)}`);
260
- }
261
- const system = systemMessages.join("\n").trim() || undefined;
262
- // Claude requires at least one message, so we add a system message if there are no messages
263
- if (msgs.length === 0) {
264
- if (!system)
265
- throw new Error("No messages provided");
266
- return { messages: [{ role: "user", content: system }] };
267
- }
268
- return { messages: msgs, system };
269
- }
270
- function convertContent(content) {
271
- if (typeof content === "string")
272
- return content;
273
- if (Array.isArray(content)) {
274
- return content.map((item) => item.type === "image_url"
275
- ? { type: "image", source: { type: "url", url: item.url } }
276
- : { type: "text", text: item.text });
277
- }
278
- throw new Error("Invalid chat message content");
279
- }
280
- function convertTools({ tools, toolChoice, disableParallelToolUse, }) {
281
- let choice;
282
- if (typeof toolChoice === "object" && "type" in toolChoice && toolChoice.type === "function") {
283
- choice = {
284
- type: "tool",
285
- name: toolChoice.function.name,
286
- disable_parallel_tool_use: disableParallelToolUse,
287
- };
288
- }
289
- else if (toolChoice === "required") {
290
- choice = { type: "any", disable_parallel_tool_use: disableParallelToolUse };
291
- }
292
- else if (toolChoice === "auto") {
293
- choice = { type: "auto", disable_parallel_tool_use: disableParallelToolUse };
294
- }
295
- else if (toolChoice === "none") {
296
- choice = { type: "none" };
297
- }
298
- return {
299
- tools: tools?.length
300
- ? tools.map((i) => ({
301
- name: i.function.name,
302
- description: i.function.description,
303
- input_schema: isEmpty(i.function.parameters)
304
- ? { type: "object" }
305
- : i.function.parameters,
306
- }))
307
- : undefined,
308
- tool_choice: choice,
309
- };
310
- }
@@ -1,23 +0,0 @@
1
- import { OpenAIChatModel, type OpenAIChatModelOptions } from "./openai-chat-model.js";
2
- /**
3
- * Implementation of the ChatModel interface for DeepSeek's API
4
- *
5
- * This model uses OpenAI-compatible API format to interact with DeepSeek's models,
6
- * but with specific configuration and capabilities for DeepSeek.
7
- *
8
- * Default model: 'deepseek-chat'
9
- *
10
- * @example
11
- * Here's how to create and use a DeepSeek chat model:
12
- * {@includeCode ../../test/models/deepseek-chat-model.test.ts#example-deepseek-chat-model}
13
- *
14
- * @example
15
- * Here's an example with streaming response:
16
- * {@includeCode ../../test/models/deepseek-chat-model.test.ts#example-deepseek-chat-model-streaming}
17
- */
18
- export declare class DeepSeekChatModel extends OpenAIChatModel {
19
- constructor(options?: OpenAIChatModelOptions);
20
- protected apiKeyEnvName: string;
21
- protected supportsNativeStructuredOutputs: boolean;
22
- protected supportsToolsEmptyParameters: boolean;
23
- }
@@ -1,31 +0,0 @@
1
- import { OpenAIChatModel } from "./openai-chat-model.js";
2
- const DEEPSEEK_DEFAULT_CHAT_MODEL = "deepseek-chat";
3
- const DEEPSEEK_BASE_URL = "https://api.deepseek.com";
4
- /**
5
- * Implementation of the ChatModel interface for DeepSeek's API
6
- *
7
- * This model uses OpenAI-compatible API format to interact with DeepSeek's models,
8
- * but with specific configuration and capabilities for DeepSeek.
9
- *
10
- * Default model: 'deepseek-chat'
11
- *
12
- * @example
13
- * Here's how to create and use a DeepSeek chat model:
14
- * {@includeCode ../../test/models/deepseek-chat-model.test.ts#example-deepseek-chat-model}
15
- *
16
- * @example
17
- * Here's an example with streaming response:
18
- * {@includeCode ../../test/models/deepseek-chat-model.test.ts#example-deepseek-chat-model-streaming}
19
- */
20
- export class DeepSeekChatModel extends OpenAIChatModel {
21
- constructor(options) {
22
- super({
23
- ...options,
24
- model: options?.model || DEEPSEEK_DEFAULT_CHAT_MODEL,
25
- baseURL: options?.baseURL || DEEPSEEK_BASE_URL,
26
- });
27
- }
28
- apiKeyEnvName = "DEEPSEEK_API_KEY";
29
- supportsNativeStructuredOutputs = false;
30
- supportsToolsEmptyParameters = false;
31
- }
@@ -1,23 +0,0 @@
1
- import { OpenAIChatModel, type OpenAIChatModelOptions } from "./openai-chat-model.js";
2
- /**
3
- * Implementation of the ChatModel interface for Google's Gemini API
4
- *
5
- * This model uses OpenAI-compatible API format to interact with Google's Gemini models,
6
- * providing access to models like Gemini 1.5 and Gemini 2.0.
7
- *
8
- * @example
9
- * Here's how to create and use a Gemini chat model:
10
- * {@includeCode ../../test/models/gemini-chat-model.test.ts#example-gemini-chat-model}
11
- *
12
- * @example
13
- * Here's an example with streaming response:
14
- * {@includeCode ../../test/models/gemini-chat-model.test.ts#example-gemini-chat-model-streaming}
15
- */
16
- export declare class GeminiChatModel extends OpenAIChatModel {
17
- constructor(options?: OpenAIChatModelOptions);
18
- protected apiKeyEnvName: string;
19
- protected supportsEndWithSystemMessage: boolean;
20
- protected supportsToolsUseWithJsonSchema: boolean;
21
- protected supportsParallelToolCalls: boolean;
22
- protected supportsToolStreaming: boolean;
23
- }
@@ -1,31 +0,0 @@
1
- import { OpenAIChatModel } from "./openai-chat-model.js";
2
- const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai";
3
- const GEMINI_DEFAULT_CHAT_MODEL = "gemini-2.0-flash";
4
- /**
5
- * Implementation of the ChatModel interface for Google's Gemini API
6
- *
7
- * This model uses OpenAI-compatible API format to interact with Google's Gemini models,
8
- * providing access to models like Gemini 1.5 and Gemini 2.0.
9
- *
10
- * @example
11
- * Here's how to create and use a Gemini chat model:
12
- * {@includeCode ../../test/models/gemini-chat-model.test.ts#example-gemini-chat-model}
13
- *
14
- * @example
15
- * Here's an example with streaming response:
16
- * {@includeCode ../../test/models/gemini-chat-model.test.ts#example-gemini-chat-model-streaming}
17
- */
18
- export class GeminiChatModel extends OpenAIChatModel {
19
- constructor(options) {
20
- super({
21
- ...options,
22
- model: options?.model || GEMINI_DEFAULT_CHAT_MODEL,
23
- baseURL: options?.baseURL || GEMINI_BASE_URL,
24
- });
25
- }
26
- apiKeyEnvName = "GEMINI_API_KEY";
27
- supportsEndWithSystemMessage = false;
28
- supportsToolsUseWithJsonSchema = false;
29
- supportsParallelToolCalls = false;
30
- supportsToolStreaming = false;
31
- }
@@ -1,22 +0,0 @@
1
- import { OpenAIChatModel, type OpenAIChatModelOptions } from "./openai-chat-model.js";
2
- /**
3
- * Implementation of the ChatModel interface for Ollama
4
- *
5
- * This model allows you to run open-source LLMs locally using Ollama,
6
- * with an OpenAI-compatible API interface.
7
- *
8
- * Default model: 'llama3.2'
9
- *
10
- * @example
11
- * Here's how to create and use an Ollama chat model:
12
- * {@includeCode ../../test/models/ollama-chat-model.test.ts#example-ollama-chat-model}
13
- *
14
- * @example
15
- * Here's an example with streaming response:
16
- * {@includeCode ../../test/models/ollama-chat-model.test.ts#example-ollama-chat-model-streaming}
17
- */
18
- export declare class OllamaChatModel extends OpenAIChatModel {
19
- constructor(options?: OpenAIChatModelOptions);
20
- protected apiKeyEnvName: string;
21
- protected apiKeyDefault: string;
22
- }
@@ -1,30 +0,0 @@
1
- import { OpenAIChatModel } from "./openai-chat-model.js";
2
- const OLLAMA_DEFAULT_BASE_URL = "http://localhost:11434/v1";
3
- const OLLAMA_DEFAULT_CHAT_MODEL = "llama3.2";
4
- /**
5
- * Implementation of the ChatModel interface for Ollama
6
- *
7
- * This model allows you to run open-source LLMs locally using Ollama,
8
- * with an OpenAI-compatible API interface.
9
- *
10
- * Default model: 'llama3.2'
11
- *
12
- * @example
13
- * Here's how to create and use an Ollama chat model:
14
- * {@includeCode ../../test/models/ollama-chat-model.test.ts#example-ollama-chat-model}
15
- *
16
- * @example
17
- * Here's an example with streaming response:
18
- * {@includeCode ../../test/models/ollama-chat-model.test.ts#example-ollama-chat-model-streaming}
19
- */
20
- export class OllamaChatModel extends OpenAIChatModel {
21
- constructor(options) {
22
- super({
23
- ...options,
24
- model: options?.model || OLLAMA_DEFAULT_CHAT_MODEL,
25
- baseURL: options?.baseURL || process.env.OLLAMA_BASE_URL || OLLAMA_DEFAULT_BASE_URL,
26
- });
27
- }
28
- apiKeyEnvName = "OLLAMA_API_KEY";
29
- apiKeyDefault = "ollama";
30
- }
@@ -1,22 +0,0 @@
1
- import { OpenAIChatModel, type OpenAIChatModelOptions } from "./openai-chat-model.js";
2
- /**
3
- * Implementation of the ChatModel interface for OpenRouter service
4
- *
5
- * OpenRouter provides access to a variety of large language models through a unified API.
6
- * This implementation uses the OpenAI-compatible interface to connect to OpenRouter's service.
7
- *
8
- * Default model: 'openai/gpt-4o'
9
- *
10
- * @example
11
- * Here's how to create and use an OpenRouter chat model:
12
- * {@includeCode ../../test/models/open-router-chat-model.test.ts#example-openrouter-chat-model}
13
- *
14
- * @example
15
- * Here's an example with streaming response:
16
- * {@includeCode ../../test/models/open-router-chat-model.test.ts#example-openrouter-chat-model-streaming}
17
- */
18
- export declare class OpenRouterChatModel extends OpenAIChatModel {
19
- constructor(options?: OpenAIChatModelOptions);
20
- protected apiKeyEnvName: string;
21
- protected supportsParallelToolCalls: boolean;
22
- }
@@ -1,30 +0,0 @@
1
- import { OpenAIChatModel } from "./openai-chat-model.js";
2
- const OPEN_ROUTER_DEFAULT_CHAT_MODEL = "openai/gpt-4o";
3
- const OPEN_ROUTER_BASE_URL = "https://openrouter.ai/api/v1";
4
- /**
5
- * Implementation of the ChatModel interface for OpenRouter service
6
- *
7
- * OpenRouter provides access to a variety of large language models through a unified API.
8
- * This implementation uses the OpenAI-compatible interface to connect to OpenRouter's service.
9
- *
10
- * Default model: 'openai/gpt-4o'
11
- *
12
- * @example
13
- * Here's how to create and use an OpenRouter chat model:
14
- * {@includeCode ../../test/models/open-router-chat-model.test.ts#example-openrouter-chat-model}
15
- *
16
- * @example
17
- * Here's an example with streaming response:
18
- * {@includeCode ../../test/models/open-router-chat-model.test.ts#example-openrouter-chat-model-streaming}
19
- */
20
- export class OpenRouterChatModel extends OpenAIChatModel {
21
- constructor(options) {
22
- super({
23
- ...options,
24
- model: options?.model || OPEN_ROUTER_DEFAULT_CHAT_MODEL,
25
- baseURL: options?.baseURL || OPEN_ROUTER_BASE_URL,
26
- });
27
- }
28
- apiKeyEnvName = "OPEN_ROUTER_API_KEY";
29
- supportsParallelToolCalls = false;
30
- }