@operor/llm 0.1.0
- package/dist/index.d.ts +95 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +227 -0
- package/dist/index.js.map +1 -0
- package/package.json +24 -0
- package/src/AIProvider.ts +238 -0
- package/src/LLMGateway.ts +25 -0
- package/src/index.ts +11 -0
- package/src/types.ts +52 -0
- package/tsconfig.json +9 -0
- package/tsdown.config.ts +10 -0
package/dist/index.d.ts
ADDED
@@ -0,0 +1,95 @@
import { LanguageModelV1 } from "ai";

//#region src/types.d.ts
interface SimpleTool {
  name: string;
  description: string;
  parameters: Record<string, any>;
}
interface LLMConfig {
  provider: 'openai' | 'anthropic' | 'google' | 'groq' | 'ollama';
  apiKey?: string;
  model?: string;
  temperature?: number;
  maxTokens?: number;
  baseURL?: string;
}
interface LLMMessage {
  role: 'system' | 'user' | 'assistant' | 'function';
  content: string;
  name?: string;
}
interface LLMToolCall {
  id: string;
  name: string;
  arguments: Record<string, any>;
}
interface LLMResponse {
  text: string;
  toolCalls?: LLMToolCall[];
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
  cost?: number;
  finishReason?: string;
}
interface LLMProvider {
  complete(messages: LLMMessage[], options?: {
    tools?: SimpleTool[];
    temperature?: number;
    maxTokens?: number;
  }): Promise<LLMResponse>;
}
//#endregion
//#region src/LLMGateway.d.ts
declare class LLMGateway {
  private provider;
  constructor(config: LLMConfig);
  getProvider(): LLMProvider;
}
//#endregion
//#region src/AIProvider.d.ts
interface AIProviderConfig {
  provider: LLMConfig['provider'];
  apiKey?: string;
  model?: string;
  temperature?: number;
  maxTokens?: number;
  baseURL?: string;
}
/** Known models per provider — used by /model list */
declare const MODEL_CATALOG: Record<string, string[]>;
/** Default model per provider */
declare const DEFAULT_MODELS: Record<string, string>;
declare class AIProvider implements LLMProvider {
  private config;
  /** Extra API keys stored at runtime (provider → key) */
  private apiKeys;
  constructor(config: AIProviderConfig);
  /** Update provider/model/apiKey at runtime */
  setConfig(patch: Partial<Pick<AIProviderConfig, 'provider' | 'model' | 'apiKey'>>): void;
  /** Store an API key for a provider */
  setApiKey(provider: string, key: string): void;
  /** Get API key for a provider (stored keys take precedence) */
  getApiKey(provider?: string): string | undefined;
  getConfig(): Readonly<AIProviderConfig>;
  /** Return the model catalog and defaults for /model list */
  getModelCatalog(): {
    catalog: Record<string, string[]>;
    defaults: Record<string, string>;
  };
  getModel(): LanguageModelV1;
  private convertTools;
  complete(messages: LLMMessage[], options?: {
    tools?: SimpleTool[];
    temperature?: number;
    maxTokens?: number;
  }): Promise<LLMResponse>;
  getModelName(): string;
  getProviderName(): string;
}
//#endregion
export { AIProvider, type AIProviderConfig, DEFAULT_MODELS, type LLMConfig, LLMGateway, type LLMMessage, type LLMProvider, type LLMResponse, type LLMToolCall, MODEL_CATALOG, type SimpleTool };
//# sourceMappingURL=index.d.ts.map
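Taken together, the declarations describe a small surface: construct an LLMGateway, fetch its LLMProvider, and call complete(). A minimal consumer sketch against these typings (the import specifier and environment variable are assumptions for illustration, not part of the diff):

import { LLMGateway } from '@operor/llm';

// Hypothetical consumer code; assumes OPENAI_API_KEY is set in the environment.
const gateway = new LLMGateway({
  provider: 'openai',
  apiKey: process.env.OPENAI_API_KEY,
  model: 'gpt-5-mini',
});

const res = await gateway.getProvider().complete(
  [
    { role: 'system', content: 'Answer in one sentence.' },
    { role: 'user', content: 'What does this package do?' },
  ],
  { maxTokens: 200 }
);

console.log(res.text, res.usage?.totalTokens);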
package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","names":[],"sources":["../src/types.ts","../src/LLMGateway.ts","../src/AIProvider.ts"],"mappings":"(generated VLQ mappings omitted)"}
package/dist/index.js
ADDED
@@ -0,0 +1,227 @@
import { generateText, jsonSchema, tool } from "ai";
import { createOpenAI } from "@ai-sdk/openai";
import { createAnthropic } from "@ai-sdk/anthropic";
import { createGoogleGenerativeAI } from "@ai-sdk/google";

//#region src/AIProvider.ts
/** Known models per provider — used by /model list */
const MODEL_CATALOG = {
  openai: [
    "gpt-5-mini",
    "gpt-5.1",
    "gpt-5.2",
    "gpt-5.3-codex",
    "o3-mini"
  ],
  anthropic: [
    "claude-sonnet-4-6-20250218",
    "claude-opus-4-6-20250205",
    "claude-haiku-4-5-20251001"
  ],
  google: [
    "gemini-2.5-flash",
    "gemini-2.5-pro",
    "gemini-2.0-flash",
    "gemini-3-flash"
  ],
  groq: [
    "llama-3.3-70b-versatile",
    "llama-3.1-8b-instant",
    "meta-llama/llama-4-scout-17b-16e-instruct",
    "mistral-saba-24b",
    "qwen/qwen3-32b"
  ],
  ollama: [
    "llama3.2",
    "llama3.1",
    "mistral",
    "gemma2"
  ]
};
/** Default model per provider */
const DEFAULT_MODELS = {
  openai: "gpt-5-mini",
  anthropic: "claude-sonnet-4-5-20250929",
  google: "gemini-2.0-flash",
  groq: "llama-3.3-70b-versatile",
  ollama: "llama3.2"
};
var AIProvider = class {
  config;
  /** Extra API keys stored at runtime (provider → key) */
  apiKeys = /* @__PURE__ */ new Map();
  constructor(config) {
    this.config = {
      temperature: .7,
      maxTokens: 1e3,
      ...config
    };
  }
  /** Update provider/model/apiKey at runtime */
  setConfig(patch) {
    if (patch.provider) this.config.provider = patch.provider;
    if (patch.model) this.config.model = patch.model;
    if (patch.apiKey) this.config.apiKey = patch.apiKey;
  }
  /** Store an API key for a provider */
  setApiKey(provider, key) {
    this.apiKeys.set(provider, key);
  }
  /** Get API key for a provider (stored keys take precedence) */
  getApiKey(provider) {
    const p = provider || this.config.provider;
    return this.apiKeys.get(p) || (p === this.config.provider ? this.config.apiKey : void 0);
  }
  getConfig() {
    return this.config;
  }
  /** Return the model catalog and defaults for /model list */
  getModelCatalog() {
    return {
      catalog: MODEL_CATALOG,
      defaults: DEFAULT_MODELS
    };
  }
  getModel() {
    const { provider, apiKey, baseURL, model } = this.config;
    switch (provider) {
      case "openai": return createOpenAI({
        apiKey,
        baseURL
      })(model || "gpt-5-mini");
      case "anthropic": return createAnthropic({
        apiKey,
        baseURL
      })(model || "claude-sonnet-4-5-20250929");
      case "google": return createGoogleGenerativeAI({
        apiKey,
        baseURL
      })(model || "gemini-2.0-flash");
      case "groq": return createOpenAI({
        apiKey,
        baseURL: baseURL || "https://api.groq.com/openai/v1"
      })(model || "llama-3.3-70b-versatile");
      case "ollama": return createOpenAI({
        apiKey: apiKey || "ollama",
        baseURL: baseURL || "http://localhost:11434/v1"
      })(model || "llama3.2");
      default: throw new Error(`Unknown provider: ${provider}`);
    }
  }
  convertTools(tools) {
    const result = {};
    for (const t of tools) {
      const params = t.parameters;
      if (params && typeof params === "object" && params.type === "object" && params.properties) {
        result[t.name] = tool({
          description: t.description,
          inputSchema: jsonSchema(params)
        });
        continue;
      }
      const properties = {};
      const required = [];
      if (params && typeof params === "object") for (const [key, value] of Object.entries(params)) {
        if (!value || typeof value !== "object") continue;
        const prop = { description: value.description || "" };
        if (value.enum) {
          prop.type = "string";
          prop.enum = value.enum;
        } else switch (value.type) {
          case "number":
          case "integer":
            prop.type = value.type;
            break;
          case "boolean":
            prop.type = "boolean";
            break;
          case "array":
            prop.type = "array";
            break;
          default: prop.type = "string";
        }
        properties[key] = prop;
        if (value.required) required.push(key);
      }
      result[t.name] = tool({
        description: t.description,
        inputSchema: jsonSchema({
          type: "object",
          properties,
          required: required.length > 0 ? required : void 0,
          additionalProperties: false
        })
      });
    }
    return result;
  }
  async complete(messages, options = {}) {
    const maxTokens = options.maxTokens ?? this.config.maxTokens;
    const modelName = this.config.model || "";
    const temperature = /^(gpt-5|o[1-4])/.test(modelName) ? void 0 : options.temperature ?? this.config.temperature;
    const sdkMessages = messages.map((msg) => {
      if (msg.role === "function") return {
        role: "user",
        content: `[Function result: ${msg.content}]`
      };
      return {
        role: msg.role,
        content: msg.content
      };
    });
    const tools = options.tools && options.tools.length > 0 ? this.convertTools(options.tools) : void 0;
    const result = await generateText({
      model: this.getModel(),
      messages: sdkMessages,
      temperature,
      maxTokens,
      tools,
      maxSteps: 1
    });
    const toolCalls = result.toolCalls?.length ? result.toolCalls.map((tc) => ({
      id: tc.toolCallId,
      name: tc.toolName,
      arguments: tc.input ?? tc.args
    })) : void 0;
    return {
      text: result.text || "",
      toolCalls,
      usage: {
        promptTokens: result.usage?.promptTokens || 0,
        completionTokens: result.usage?.completionTokens || 0,
        totalTokens: (result.usage?.promptTokens || 0) + (result.usage?.completionTokens || 0)
      },
      finishReason: result.finishReason
    };
  }
  getModelName() {
    return this.config.model || "default";
  }
  getProviderName() {
    return this.config.provider;
  }
};

//#endregion
//#region src/LLMGateway.ts
var LLMGateway = class {
  provider;
  constructor(config) {
    if (!config.apiKey && config.provider !== "ollama") throw new Error(`API key required for provider: ${config.provider}`);
    this.provider = new AIProvider({
      provider: config.provider,
      apiKey: config.apiKey,
      model: config.model,
      temperature: config.temperature,
      maxTokens: config.maxTokens,
      baseURL: config.baseURL
    });
  }
  getProvider() {
    return this.provider;
  }
};

//#endregion
export { AIProvider, DEFAULT_MODELS, LLMGateway, MODEL_CATALOG };
//# sourceMappingURL=index.js.map
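Note how convertTools accepts two parameter shapes: a full JSON Schema object (detected by type: 'object' plus properties, passed straight through to jsonSchema()), and a flat per-key format that is folded into a synthesized object schema with additionalProperties: false. A sketch of the two equivalent spellings (the weather tool and its fields are illustrative, and the import path is an assumption):

import type { SimpleTool } from '@operor/llm';

// Full JSON Schema: passed through to jsonSchema() unchanged.
const jsonSchemaTool: SimpleTool = {
  name: 'get_weather',
  description: 'Look up current weather',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string', description: 'City name' } },
    required: ['city'],
  },
};

// Flat Operor format: each key is a parameter; `required` lives on the value.
const flatTool: SimpleTool = {
  name: 'get_weather',
  description: 'Look up current weather',
  parameters: {
    city: { type: 'string', description: 'City name', required: true },
  },
};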
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.js","names":["aiTool"],"sources":["../src/AIProvider.ts","../src/LLMGateway.ts"], ...} (the sourcesContent field embeds both source files verbatim, shown under package/src below; generated VLQ mappings omitted)
package/package.json
ADDED
@@ -0,0 +1,24 @@
{
  "name": "@operor/llm",
  "version": "0.1.0",
  "description": "LLM providers for Agent OS",
  "type": "module",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "dependencies": {
    "ai": "^6.0.0",
    "@ai-sdk/openai": "^3.0.0",
    "@ai-sdk/anthropic": "^3.0.0",
    "@ai-sdk/google": "^3.0.0",
    "zod": "^4.0.0",
    "@operor/core": "0.1.0"
  },
  "devDependencies": {
    "tsdown": "^0.20.3",
    "typescript": "^5.7.0"
  },
  "scripts": {
    "build": "tsdown",
    "dev": "tsdown --watch"
  }
}
package/src/AIProvider.ts
ADDED
@@ -0,0 +1,238 @@
import { generateText, tool as aiTool, jsonSchema, type LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import type { LLMProvider, LLMMessage, LLMResponse, LLMToolCall, SimpleTool, LLMConfig } from './types.js';

export interface AIProviderConfig {
  provider: LLMConfig['provider'];
  apiKey?: string;
  model?: string;
  temperature?: number;
  maxTokens?: number;
  baseURL?: string;
}

/** Known models per provider — used by /model list */
export const MODEL_CATALOG: Record<string, string[]> = {
  openai: ['gpt-5-mini', 'gpt-5.1', 'gpt-5.2', 'gpt-5.3-codex', 'o3-mini'],
  anthropic: ['claude-sonnet-4-6-20250218', 'claude-opus-4-6-20250205', 'claude-haiku-4-5-20251001'],
  google: ['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash', 'gemini-3-flash'],
  groq: ['llama-3.3-70b-versatile', 'llama-3.1-8b-instant', 'meta-llama/llama-4-scout-17b-16e-instruct', 'mistral-saba-24b', 'qwen/qwen3-32b'],
  ollama: ['llama3.2', 'llama3.1', 'mistral', 'gemma2'],
};

/** Default model per provider */
export const DEFAULT_MODELS: Record<string, string> = {
  openai: 'gpt-5-mini',
  anthropic: 'claude-sonnet-4-5-20250929',
  google: 'gemini-2.0-flash',
  groq: 'llama-3.3-70b-versatile',
  ollama: 'llama3.2',
};

export class AIProvider implements LLMProvider {
  private config: AIProviderConfig & { temperature: number; maxTokens: number };
  /** Extra API keys stored at runtime (provider → key) */
  private apiKeys: Map<string, string> = new Map();

  constructor(config: AIProviderConfig) {
    this.config = {
      temperature: 0.7,
      maxTokens: 1000,
      ...config,
    };
  }

  /** Update provider/model/apiKey at runtime */
  setConfig(patch: Partial<Pick<AIProviderConfig, 'provider' | 'model' | 'apiKey'>>) {
    if (patch.provider) this.config.provider = patch.provider;
    if (patch.model) this.config.model = patch.model;
    if (patch.apiKey) this.config.apiKey = patch.apiKey;
  }

  /** Store an API key for a provider */
  setApiKey(provider: string, key: string) {
    this.apiKeys.set(provider, key);
  }

  /** Get API key for a provider (stored keys take precedence) */
  getApiKey(provider?: string): string | undefined {
    const p = provider || this.config.provider;
    return this.apiKeys.get(p) || (p === this.config.provider ? this.config.apiKey : undefined);
  }

  getConfig(): Readonly<AIProviderConfig> {
    return this.config;
  }

  /** Return the model catalog and defaults for /model list */
  getModelCatalog(): { catalog: Record<string, string[]>; defaults: Record<string, string> } {
    return { catalog: MODEL_CATALOG, defaults: DEFAULT_MODELS };
  }

  getModel(): LanguageModelV1 {
    const { provider, apiKey, baseURL, model } = this.config;

    switch (provider) {
      case 'openai': {
        const openai = createOpenAI({ apiKey, baseURL });
        return openai(model || 'gpt-5-mini');
      }
      case 'anthropic': {
        const anthropic = createAnthropic({ apiKey, baseURL });
        return anthropic(model || 'claude-sonnet-4-5-20250929');
      }
      case 'google': {
        const google = createGoogleGenerativeAI({ apiKey, baseURL });
        return google(model || 'gemini-2.0-flash');
      }
      case 'groq': {
        const groq = createOpenAI({
          apiKey,
          baseURL: baseURL || 'https://api.groq.com/openai/v1',
        });
        return groq(model || 'llama-3.3-70b-versatile');
      }
      case 'ollama': {
        const ollama = createOpenAI({
          apiKey: apiKey || 'ollama',
          baseURL: baseURL || 'http://localhost:11434/v1',
        });
        return ollama(model || 'llama3.2');
      }
      default:
        throw new Error(`Unknown provider: ${provider}`);
    }
  }

  private convertTools(tools: SimpleTool[]): Record<string, ReturnType<typeof aiTool>> {
    const result: Record<string, ReturnType<typeof aiTool>> = {};

    for (const t of tools) {
      const params = t.parameters;

      // JSON Schema passthrough: if parameters already look like a JSON Schema object
      // (has type: 'object' and properties), pass directly to jsonSchema()
      if (params && typeof params === 'object' && params.type === 'object' && params.properties) {
        result[t.name] = aiTool({
          description: t.description,
          inputSchema: jsonSchema(params),
        });
        continue;
      }

      // Flat format conversion (original Operor Tool parameter format)
      const properties: Record<string, any> = {};
      const required: string[] = [];

      if (params && typeof params === 'object') {
        for (const [key, value] of Object.entries(params)) {
          if (!value || typeof value !== 'object') continue;

          const prop: any = { description: value.description || '' };

          if (value.enum) {
            prop.type = 'string';
            prop.enum = value.enum;
          } else {
            switch (value.type) {
              case 'number':
              case 'integer':
                prop.type = value.type;
                break;
              case 'boolean':
                prop.type = 'boolean';
                break;
              case 'array':
                prop.type = 'array';
                break;
              default:
                prop.type = 'string';
            }
          }

          properties[key] = prop;
          if (value.required) {
            required.push(key);
          }
        }
      }

      result[t.name] = aiTool({
        description: t.description,
        inputSchema: jsonSchema({
          type: 'object',
          properties,
          required: required.length > 0 ? required : undefined,
          additionalProperties: false,
        }),
      });
    }

    return result;
  }

  async complete(
    messages: LLMMessage[],
    options: {
      tools?: SimpleTool[];
      temperature?: number;
      maxTokens?: number;
    } = {}
  ): Promise<LLMResponse> {
    const maxTokens = options.maxTokens ?? this.config.maxTokens;

    // Reasoning models (GPT-5 family, o-series) do not support temperature
    const modelName = this.config.model || '';
    const isReasoningModel = /^(gpt-5|o[1-4])/.test(modelName);
    const temperature = isReasoningModel ? undefined : (options.temperature ?? this.config.temperature);

    const sdkMessages = messages.map((msg) => {
      if (msg.role === 'function') {
        return { role: 'user' as const, content: `[Function result: ${msg.content}]` };
      }
      return { role: msg.role as 'system' | 'user' | 'assistant', content: msg.content };
    });

    const tools = options.tools && options.tools.length > 0
      ? this.convertTools(options.tools)
      : undefined;

    const result = await generateText({
      model: this.getModel(),
      messages: sdkMessages,
      temperature,
      maxTokens,
      tools,
      maxSteps: 1,
    });

    const toolCalls: LLMToolCall[] | undefined = result.toolCalls?.length
      ? result.toolCalls.map((tc) => ({
          id: tc.toolCallId,
          name: tc.toolName,
          arguments: (tc as any).input ?? (tc as any).args as Record<string, any>,
        }))
      : undefined;

    return {
      text: result.text || '',
      toolCalls,
      usage: {
        promptTokens: result.usage?.promptTokens || 0,
        completionTokens: result.usage?.completionTokens || 0,
        totalTokens: (result.usage?.promptTokens || 0) + (result.usage?.completionTokens || 0),
      },
      finishReason: result.finishReason,
    };
  }

  getModelName(): string {
    return this.config.model || 'default';
  }

  getProviderName(): string {
    return this.config.provider;
  }
}
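One subtlety in the key handling above: keys stored via setApiKey take precedence over the constructor key, and the constructor key is only returned for the provider the instance was configured with. A behavior sketch (key values are illustrative):

import { AIProvider } from '@operor/llm';

const ai = new AIProvider({ provider: 'openai', apiKey: 'sk-ctor' });
ai.getApiKey();            // 'sk-ctor' (falls back to the constructor key)
ai.getApiKey('anthropic'); // undefined (the constructor key is openai-only)

ai.setApiKey('anthropic', 'sk-ant');
ai.setConfig({ provider: 'anthropic' });
ai.getApiKey();            // 'sk-ant' (stored keys take precedence)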
package/src/LLMGateway.ts
ADDED
@@ -0,0 +1,25 @@
import type { LLMProvider, LLMConfig } from './types.js';
import { AIProvider } from './AIProvider.js';

export class LLMGateway {
  private provider: LLMProvider;

  constructor(config: LLMConfig) {
    if (!config.apiKey && config.provider !== 'ollama') {
      throw new Error(`API key required for provider: ${config.provider}`);
    }

    this.provider = new AIProvider({
      provider: config.provider,
      apiKey: config.apiKey,
      model: config.model,
      temperature: config.temperature,
      maxTokens: config.maxTokens,
      baseURL: config.baseURL,
    });
  }

  getProvider(): LLMProvider {
    return this.provider;
  }
}
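The only construction-time validation is this API-key check, with ollama exempt because a local endpoint needs no key (AIProvider later substitutes the placeholder key 'ollama'). A sketch of the resulting behavior:

new LLMGateway({ provider: 'openai' });  // throws: API key required for provider: openai
new LLMGateway({ provider: 'ollama' });  // ok; defaults to http://localhost:11434/v1 and model llama3.2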
package/src/index.ts
ADDED
@@ -0,0 +1,11 @@
export { LLMGateway } from './LLMGateway.js';
export { AIProvider, MODEL_CATALOG, DEFAULT_MODELS } from './AIProvider.js';
export type { AIProviderConfig } from './AIProvider.js';
export type {
  LLMConfig,
  LLMMessage,
  LLMResponse,
  LLMToolCall,
  LLMProvider,
  SimpleTool,
} from './types.js';
package/src/types.ts
ADDED
@@ -0,0 +1,52 @@
import type { Tool } from '@operor/core';

// Re-export simple version for external use
export interface SimpleTool {
  name: string;
  description: string;
  parameters: Record<string, any>;
}

export interface LLMConfig {
  provider: 'openai' | 'anthropic' | 'google' | 'groq' | 'ollama';
  apiKey?: string;
  model?: string;
  temperature?: number;
  maxTokens?: number;
  baseURL?: string;
}

export interface LLMMessage {
  role: 'system' | 'user' | 'assistant' | 'function';
  content: string;
  name?: string;
}

export interface LLMToolCall {
  id: string;
  name: string;
  arguments: Record<string, any>;
}

export interface LLMResponse {
  text: string;
  toolCalls?: LLMToolCall[];
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
  cost?: number;
  finishReason?: string;
}

export interface LLMProvider {
  complete(
    messages: LLMMessage[],
    options?: {
      tools?: SimpleTool[];
      temperature?: number;
      maxTokens?: number;
    }
  ): Promise<LLMResponse>;
}
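Since LLMProvider is a structural interface, any object with a conforming complete() can stand in for AIProvider, which is handy in tests. A minimal stub sketch, assuming nothing beyond the types above (not part of the package):

import type { LLMMessage, LLMProvider, LLMResponse } from '@operor/llm';

// Echoes the last message back; no network calls, no API keys.
class EchoProvider implements LLMProvider {
  async complete(messages: LLMMessage[]): Promise<LLMResponse> {
    const last = messages[messages.length - 1];
    return { text: `echo: ${last?.content ?? ''}` };
  }
}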
package/tsconfig.json
ADDED