@tigmart/ai-core 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +53 -0
- package/dist/index.d.ts +53 -0
- package/dist/index.js +267 -0
- package/dist/index.mjs +227 -0
- package/package.json +37 -0
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import { ProviderAdapter, Model, Message, StreamOptions, StreamChunk } from '@topsoft/ai-types';
|
|
2
|
+
|
|
3
|
+
/**
 * Server-side adapter for the OpenAI Chat Completions API.
 * Implements the shared ProviderAdapter contract.
 */
declare class OpenAIAdapter implements ProviderAdapter {
    readonly id = "openai";
    readonly name = "OpenAI";
    /** Static model catalogue exposed to callers (gpt-4o, gpt-4o-mini). */
    readonly models: Model[];
    /* Underlying OpenAI SDK client, constructed once per adapter instance. */
    private readonly client;
    /**
     * @param apiKey If omitted, falls back to process.env.OPENAI_API_KEY.
     * Must never be passed from client-side code.
     */
    constructor(apiKey?: string);
    /**
     * Streams a chat completion as StreamChunk values: zero or more "delta"
     * chunks followed by a single "done" chunk (with token usage when the
     * API reports it), or an "error" chunk on failure.
     * Throws synchronously if modelId is not in `models`.
     */
    stream(messages: Message[], modelId: string, options?: StreamOptions): AsyncGenerator<StreamChunk>;
}
|
|
15
|
+
|
|
16
|
+
/**
 * Placeholder adapter for Anthropic Claude.
 * NOTE(review): the bundled implementation only yields a fixed placeholder
 * "delta" followed by "done" — no Anthropic API calls are made yet.
 */
declare class ClaudeAdapter implements ProviderAdapter {
    readonly id = "claude";
    readonly name = "Claude";
    readonly models: Model[];
    /** Yields a placeholder delta then a done chunk; performs no network I/O. */
    stream(_messages: Message[], _modelId: string): AsyncIterable<StreamChunk>;
}
|
|
22
|
+
|
|
23
|
+
/**
 * Server-side adapter for Google Gemini via the @google/genai SDK.
 */
declare class GeminiAdapter implements ProviderAdapter {
    readonly id = "gemini";
    readonly name = "Gemini";
    readonly models: Model[];
    /* Underlying @google/genai client, constructed once per adapter instance. */
    private readonly client;
    /** @param apiKey If omitted, falls back to process.env.GEMINI_API_KEY. */
    constructor(apiKey?: string);
    /**
     * NOTE(review): options is an inline `{ signal?: AbortSignal }` rather
     * than the shared StreamOptions used by OpenAIAdapter — maxTokens and
     * temperature are not forwarded here; confirm this asymmetry is intended.
     */
    stream(messages: Message[], modelId: string, options?: {
        signal?: AbortSignal;
    }): AsyncIterable<StreamChunk>;
}
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* Holds all registered provider adapters and resolves them by ID.
|
|
36
|
+
* Instantiated once in the API route handler — adapters are stateless
|
|
37
|
+
* beyond their config, so a singleton per server process is correct.
|
|
38
|
+
*
|
|
39
|
+
* Usage in apps/playground/app/api/chat/route.ts:
|
|
40
|
+
* const registry = new ProviderRegistry([
|
|
41
|
+
* new OpenAIAdapter(),
|
|
42
|
+
* ]);
|
|
43
|
+
*/
|
|
44
|
+
declare class ProviderRegistry {
    /* Map from provider id -> adapter, built once in the constructor. */
    private readonly adapters;
    constructor(adapters: ProviderAdapter[]);
    /** Returns the adapter for the given provider ID. Throws if not registered. */
    get(providerId: string): ProviderAdapter;
    /** Returns all registered adapters. Used to expose available providers to the client. */
    getAll(): ProviderAdapter[];
}
|
|
52
|
+
|
|
53
|
+
export { ClaudeAdapter, GeminiAdapter, OpenAIAdapter, ProviderRegistry };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import { ProviderAdapter, Model, Message, StreamOptions, StreamChunk } from '@topsoft/ai-types';
|
|
2
|
+
|
|
3
|
+
/**
 * Server-side adapter for the OpenAI Chat Completions API.
 * Implements the shared ProviderAdapter contract.
 */
declare class OpenAIAdapter implements ProviderAdapter {
    readonly id = "openai";
    readonly name = "OpenAI";
    /** Static model catalogue exposed to callers (gpt-4o, gpt-4o-mini). */
    readonly models: Model[];
    /* Underlying OpenAI SDK client, constructed once per adapter instance. */
    private readonly client;
    /**
     * @param apiKey If omitted, falls back to process.env.OPENAI_API_KEY.
     * Must never be passed from client-side code.
     */
    constructor(apiKey?: string);
    /**
     * Streams a chat completion as StreamChunk values: zero or more "delta"
     * chunks followed by a single "done" chunk (with token usage when the
     * API reports it), or an "error" chunk on failure.
     * Throws synchronously if modelId is not in `models`.
     */
    stream(messages: Message[], modelId: string, options?: StreamOptions): AsyncGenerator<StreamChunk>;
}
|
|
15
|
+
|
|
16
|
+
/**
 * Placeholder adapter for Anthropic Claude.
 * NOTE(review): the bundled implementation only yields a fixed placeholder
 * "delta" followed by "done" — no Anthropic API calls are made yet.
 */
declare class ClaudeAdapter implements ProviderAdapter {
    readonly id = "claude";
    readonly name = "Claude";
    readonly models: Model[];
    /** Yields a placeholder delta then a done chunk; performs no network I/O. */
    stream(_messages: Message[], _modelId: string): AsyncIterable<StreamChunk>;
}
|
|
22
|
+
|
|
23
|
+
/**
 * Server-side adapter for Google Gemini via the @google/genai SDK.
 */
declare class GeminiAdapter implements ProviderAdapter {
    readonly id = "gemini";
    readonly name = "Gemini";
    readonly models: Model[];
    /* Underlying @google/genai client, constructed once per adapter instance. */
    private readonly client;
    /** @param apiKey If omitted, falls back to process.env.GEMINI_API_KEY. */
    constructor(apiKey?: string);
    /**
     * NOTE(review): options is an inline `{ signal?: AbortSignal }` rather
     * than the shared StreamOptions used by OpenAIAdapter — maxTokens and
     * temperature are not forwarded here; confirm this asymmetry is intended.
     */
    stream(messages: Message[], modelId: string, options?: {
        signal?: AbortSignal;
    }): AsyncIterable<StreamChunk>;
}
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* Holds all registered provider adapters and resolves them by ID.
|
|
36
|
+
* Instantiated once in the API route handler — adapters are stateless
|
|
37
|
+
* beyond their config, so a singleton per server process is correct.
|
|
38
|
+
*
|
|
39
|
+
* Usage in apps/playground/app/api/chat/route.ts:
|
|
40
|
+
* const registry = new ProviderRegistry([
|
|
41
|
+
* new OpenAIAdapter(),
|
|
42
|
+
* ]);
|
|
43
|
+
*/
|
|
44
|
+
declare class ProviderRegistry {
    /* Map from provider id -> adapter, built once in the constructor. */
    private readonly adapters;
    constructor(adapters: ProviderAdapter[]);
    /** Returns the adapter for the given provider ID. Throws if not registered. */
    get(providerId: string): ProviderAdapter;
    /** Returns all registered adapters. Used to expose available providers to the client. */
    getAll(): ProviderAdapter[];
}
|
|
52
|
+
|
|
53
|
+
export { ClaudeAdapter, GeminiAdapter, OpenAIAdapter, ProviderRegistry };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
1
|
+
"use strict";
// ---- esbuild CommonJS/ESM interop helpers (bundler-generated) ----
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines lazy, enumerable getters on `target` for every export in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and any key `to` already defines.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a required CommonJS module so it can be consumed like an ES module.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
  // If the importer is in node compatibility mode or this is not an ESM
  // file that has been converted to a CommonJS file using a Babel-
  // compatible transform (i.e. "__esModule" has not been set), then set
  // "default" to the CommonJS "module.exports" for node compatibility.
  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
  mod
));
// Marks the export object as an ES module and copies the exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
|
+
|
|
30
|
+
// src/index.ts
// Public export surface of the bundle, wired up as lazy getters so the
// classes below are resolved on first access.
var index_exports = {};
__export(index_exports, {
  ClaudeAdapter: () => ClaudeAdapter,
  GeminiAdapter: () => GeminiAdapter,
  OpenAIAdapter: () => OpenAIAdapter,
  ProviderRegistry: () => ProviderRegistry
});
module.exports = __toCommonJS(index_exports);
|
|
39
|
+
|
|
40
|
+
// src/adapters/openai.ts
|
|
41
|
+
var import_openai = __toESM(require("openai"));
|
|
42
|
+
// Static catalogue of OpenAI chat models exposed by this adapter.
var MODELS = [
  {
    id: "gpt-4o",
    name: "GPT-4o",
    contextWindow: 128e3,
    capabilities: { streaming: true, images: true, tools: false }
  },
  {
    id: "gpt-4o-mini",
    name: "GPT-4o Mini",
    contextWindow: 128e3,
    capabilities: { streaming: true, images: false, tools: false }
  }
];
// Roles the OpenAI chat API accepts; messages with any other role are dropped.
var VALID_ROLES = /* @__PURE__ */ new Set(["system", "user", "assistant"]);
function isValidRole(role) {
  return VALID_ROLES.has(role);
}
// Flattens internal multi-part messages into OpenAI's { role, content } shape.
// Messages with an unknown role, or with no non-empty text parts, are skipped.
function toOpenAIMessages(messages) {
  return messages.reduce((acc, msg) => {
    if (isValidRole(msg.role)) {
      const textParts = [];
      for (const part of msg.parts) {
        if (part.type === "text") textParts.push(part.text);
      }
      const content = textParts.join("\n").trim();
      if (content) {
        acc.push({ role: msg.role, content });
      }
    }
    return acc;
  }, []);
}
|
|
70
|
+
var OpenAIAdapter = class {
  id = "openai";
  name = "OpenAI";
  models = MODELS;
  // OpenAI SDK client, created once in the constructor.
  client;
  /**
   * @param apiKey If omitted, falls back to process.env.OPENAI_API_KEY.
   * Must never be passed from client-side code.
   */
  constructor(apiKey) {
    const key = apiKey ?? process.env["OPENAI_API_KEY"];
    if (!key) {
      throw new Error(
        "OpenAIAdapter: API key is required. Set OPENAI_API_KEY environment variable or pass it to the constructor."
      );
    }
    this.client = new import_openai.default({ apiKey: key });
  }
  // Streams a chat completion. Yields { type: "delta" } chunks as content
  // arrives, then one { type: "done", usage } chunk. API failures are yielded
  // as { type: "error" } chunks rather than thrown; an unknown modelId throws
  // synchronously before any network call is made.
  async *stream(messages, modelId, options) {
    if (!this.models.some((m) => m.id === modelId)) {
      throw new Error(`Model "${modelId}" not supported by OpenAI adapter`);
    }
    const openAiMessages = toOpenAIMessages(messages);
    try {
      const completion = await this.client.chat.completions.create(
        {
          model: modelId,
          messages: openAiMessages,
          stream: true,
          // Ask OpenAI to append a usage summary chunk to the stream.
          stream_options: { include_usage: true },
          // Only forward sampling options the caller actually set.
          ...options?.maxTokens !== void 0 && { max_tokens: options.maxTokens },
          ...options?.temperature !== void 0 && { temperature: options.temperature }
        },
        { signal: options?.signal }
      );
      let usage;
      for await (const chunk of completion) {
        const choice = chunk.choices?.[0];
        const content = choice?.delta?.content;
        if (typeof content === "string" && content.length > 0) {
          yield { type: "delta", content };
        }
        // Keep the latest chunk that carries a complete usage triple; it is
        // reported on the final "done" chunk below.
        const u = chunk.usage;
        if (u != null && typeof u.prompt_tokens === "number" && typeof u.completion_tokens === "number" && typeof u.total_tokens === "number") {
          usage = {
            promptTokens: u.prompt_tokens,
            completionTokens: u.completion_tokens,
            totalTokens: u.total_tokens
          };
        }
      }
      yield { type: "done", usage };
    } catch (err) {
      // A caller-initiated abort is not an error; end the stream quietly.
      if (options?.signal?.aborted) return;
      yield {
        type: "error",
        error: err instanceof Error ? err.message : JSON.stringify(err)
      };
    }
  }
};
|
131
|
+
|
|
132
|
+
// src/adapters/claude.ts
// Model catalogue for the (stubbed) Claude adapter.
var MODELS2 = [
  {
    id: "claude-3-5-sonnet-20241022",
    name: "Claude 3.5 Sonnet",
    contextWindow: 2e5,
    capabilities: { streaming: true, images: true, tools: false }
  }
];
// TODO: Implement real Anthropic API integration.
var ClaudeAdapter = class {
  id = "claude";
  name = "Claude";
  models = MODELS2;
  // Emits a fixed placeholder delta followed by a done chunk; performs no I/O.
  async *stream(_messages, _modelId) {
    const known = this.models.some((m) => m.id === _modelId);
    if (!known) {
      yield { type: "error", error: `Model "${_modelId}" is not supported by the Claude adapter` };
    } else {
      yield {
        type: "delta",
        content: "[Claude integration is not yet implemented. This is a placeholder.]"
      };
      yield { type: "done" };
    }
  }
};
|
|
158
|
+
|
|
159
|
+
// src/adapters/gemini.ts
|
|
160
|
+
var import_genai = require("@google/genai");
|
|
161
|
+
// Model catalogue for the Gemini adapter.
var MODELS3 = [
  {
    id: "gemini-2.5-flash",
    name: "Gemini 2.5 Flash",
    contextWindow: 1e6,
    capabilities: { streaming: true, images: false, tools: false }
  },
  {
    id: "gemini-2.5-pro",
    name: "Gemini 2.5 Pro",
    contextWindow: 2e6,
    capabilities: { streaming: true, images: false, tools: false }
  }
];
// Converts internal messages into Gemini "contents". Gemini only knows the
// roles "user" and "model" (every non-assistant role maps to "user"), and
// consecutive turns with the same role are merged into a single entry.
// Messages with no non-empty text parts are skipped.
function toGeminiContents(messages) {
  const contents = [];
  for (const msg of messages) {
    const pieces = msg.parts.filter((p) => p.type === "text").map((p) => p.text);
    const text = pieces.join("\n").trim();
    if (!text) continue;
    const role = msg.role === "assistant" ? "model" : "user";
    const last = contents[contents.length - 1];
    if (last && last.role === role) {
      const head = last.parts[0];
      if (head) {
        head.text += "\n" + text;
      }
    } else {
      contents.push({ role, parts: [{ text }] });
    }
  }
  return contents;
}
|
|
193
|
+
var GeminiAdapter = class {
  id = "gemini";
  name = "Gemini";
  models = MODELS3;
  // @google/genai client, created once in the constructor.
  client;
  /** @param apiKey If omitted, falls back to process.env.GEMINI_API_KEY. */
  constructor(apiKey) {
    const key = apiKey ?? process.env["GEMINI_API_KEY"];
    if (!key) {
      throw new Error(
        "GeminiAdapter: API key is required. Set GEMINI_API_KEY environment variable or pass it to the constructor."
      );
    }
    this.client = new import_genai.GoogleGenAI({ apiKey: key });
  }
  // Streams a Gemini response as StreamChunk values. Unlike
  // OpenAIAdapter.stream, an unknown modelId is reported as an
  // { type: "error" } chunk rather than thrown, and no token usage is
  // attached to the final { type: "done" } chunk.
  async *stream(messages, modelId, options) {
    if (!this.models.some((m) => m.id === modelId)) {
      yield { type: "error", error: `Model "${modelId}" is not supported by the Gemini adapter` };
      return;
    }
    const contents = toGeminiContents(messages);
    if (contents.length === 0) {
      yield { type: "error", error: "No valid messages to send" };
      return;
    }
    try {
      const result = await this.client.models.generateContentStream({
        model: modelId,
        contents
      });
      for await (const chunk of result) {
        // Cooperative cancellation: the SDK request itself is not aborted;
        // we simply stop consuming chunks once the caller's signal fires.
        if (options?.signal?.aborted) break;
        const text = chunk.text;
        if (text) {
          yield { type: "delta", content: text };
        }
      }
      yield { type: "done" };
    } catch (err) {
      // A caller-initiated abort is not an error; end the stream quietly.
      if (options?.signal?.aborted) return;
      yield {
        type: "error",
        error: err instanceof Error ? err.message : "Gemini streaming error"
      };
    }
  }
};
|
|
239
|
+
|
|
240
|
+
// src/registry.ts
// Holds all registered provider adapters and resolves them by ID.
var ProviderRegistry = class {
  // Provider id -> adapter lookup table.
  adapters;
  constructor(adapters) {
    const entries = adapters.map((a) => [a.id, a]);
    this.adapters = new Map(entries);
  }
  /** Returns the adapter for the given provider ID. Throws if not registered. */
  get(providerId) {
    const found = this.adapters.get(providerId);
    if (found) {
      return found;
    }
    throw new Error(
      `Provider "${providerId}" is not registered. Registered providers: ${[...this.adapters.keys()].join(", ")}`
    );
  }
  /** Returns all registered adapters. Used to expose available providers to the client. */
  getAll() {
    return [...this.adapters.values()];
  }
};
|
|
261
|
+
// Annotate the CommonJS export names for ESM import in node:
// Dead code by construction — `0 &&` never executes. Node's static CJS
// lexer reads this pattern so `import { X } from ...` works on this file.
0 && (module.exports = {
  ClaudeAdapter,
  GeminiAdapter,
  OpenAIAdapter,
  ProviderRegistry
});
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
// src/adapters/openai.ts
|
|
2
|
+
import OpenAI from "openai";
|
|
3
|
+
// Static catalogue of OpenAI chat models exposed by this adapter.
var MODELS = [
  {
    id: "gpt-4o",
    name: "GPT-4o",
    contextWindow: 128e3,
    capabilities: { streaming: true, images: true, tools: false }
  },
  {
    id: "gpt-4o-mini",
    name: "GPT-4o Mini",
    contextWindow: 128e3,
    capabilities: { streaming: true, images: false, tools: false }
  }
];
// Roles the OpenAI chat API accepts; messages with any other role are dropped.
var VALID_ROLES = /* @__PURE__ */ new Set(["system", "user", "assistant"]);
function isValidRole(role) {
  return VALID_ROLES.has(role);
}
// Flattens internal multi-part messages into OpenAI's { role, content } shape.
// Messages with an unknown role, or with no non-empty text parts, are skipped.
function toOpenAIMessages(messages) {
  return messages.reduce((acc, msg) => {
    if (isValidRole(msg.role)) {
      const textParts = [];
      for (const part of msg.parts) {
        if (part.type === "text") textParts.push(part.text);
      }
      const content = textParts.join("\n").trim();
      if (content) {
        acc.push({ role: msg.role, content });
      }
    }
    return acc;
  }, []);
}
|
|
31
|
+
var OpenAIAdapter = class {
  id = "openai";
  name = "OpenAI";
  models = MODELS;
  // OpenAI SDK client, created once in the constructor.
  client;
  /**
   * @param apiKey If omitted, falls back to process.env.OPENAI_API_KEY.
   * Must never be passed from client-side code.
   */
  constructor(apiKey) {
    const key = apiKey ?? process.env["OPENAI_API_KEY"];
    if (!key) {
      throw new Error(
        "OpenAIAdapter: API key is required. Set OPENAI_API_KEY environment variable or pass it to the constructor."
      );
    }
    this.client = new OpenAI({ apiKey: key });
  }
  // Streams a chat completion. Yields { type: "delta" } chunks as content
  // arrives, then one { type: "done", usage } chunk. API failures are yielded
  // as { type: "error" } chunks rather than thrown; an unknown modelId throws
  // synchronously before any network call is made.
  async *stream(messages, modelId, options) {
    if (!this.models.some((m) => m.id === modelId)) {
      throw new Error(`Model "${modelId}" not supported by OpenAI adapter`);
    }
    const openAiMessages = toOpenAIMessages(messages);
    try {
      const completion = await this.client.chat.completions.create(
        {
          model: modelId,
          messages: openAiMessages,
          stream: true,
          // Ask OpenAI to append a usage summary chunk to the stream.
          stream_options: { include_usage: true },
          // Only forward sampling options the caller actually set.
          ...options?.maxTokens !== void 0 && { max_tokens: options.maxTokens },
          ...options?.temperature !== void 0 && { temperature: options.temperature }
        },
        { signal: options?.signal }
      );
      let usage;
      for await (const chunk of completion) {
        const choice = chunk.choices?.[0];
        const content = choice?.delta?.content;
        if (typeof content === "string" && content.length > 0) {
          yield { type: "delta", content };
        }
        // Keep the latest chunk that carries a complete usage triple; it is
        // reported on the final "done" chunk below.
        const u = chunk.usage;
        if (u != null && typeof u.prompt_tokens === "number" && typeof u.completion_tokens === "number" && typeof u.total_tokens === "number") {
          usage = {
            promptTokens: u.prompt_tokens,
            completionTokens: u.completion_tokens,
            totalTokens: u.total_tokens
          };
        }
      }
      yield { type: "done", usage };
    } catch (err) {
      // A caller-initiated abort is not an error; end the stream quietly.
      if (options?.signal?.aborted) return;
      yield {
        type: "error",
        error: err instanceof Error ? err.message : JSON.stringify(err)
      };
    }
  }
};
|
|
92
|
+
|
|
93
|
+
// src/adapters/claude.ts
// Model catalogue for the (stubbed) Claude adapter.
var MODELS2 = [
  {
    id: "claude-3-5-sonnet-20241022",
    name: "Claude 3.5 Sonnet",
    contextWindow: 2e5,
    capabilities: { streaming: true, images: true, tools: false }
  }
];
// TODO: Implement real Anthropic API integration.
var ClaudeAdapter = class {
  id = "claude";
  name = "Claude";
  models = MODELS2;
  // Emits a fixed placeholder delta followed by a done chunk; performs no I/O.
  async *stream(_messages, _modelId) {
    const known = this.models.some((m) => m.id === _modelId);
    if (!known) {
      yield { type: "error", error: `Model "${_modelId}" is not supported by the Claude adapter` };
    } else {
      yield {
        type: "delta",
        content: "[Claude integration is not yet implemented. This is a placeholder.]"
      };
      yield { type: "done" };
    }
  }
};
|
|
119
|
+
|
|
120
|
+
// src/adapters/gemini.ts
|
|
121
|
+
import { GoogleGenAI } from "@google/genai";
|
|
122
|
+
// Model catalogue for the Gemini adapter.
var MODELS3 = [
  {
    id: "gemini-2.5-flash",
    name: "Gemini 2.5 Flash",
    contextWindow: 1e6,
    capabilities: { streaming: true, images: false, tools: false }
  },
  {
    id: "gemini-2.5-pro",
    name: "Gemini 2.5 Pro",
    contextWindow: 2e6,
    capabilities: { streaming: true, images: false, tools: false }
  }
];
// Converts internal messages into Gemini "contents". Gemini only knows the
// roles "user" and "model" (every non-assistant role maps to "user"), and
// consecutive turns with the same role are merged into a single entry.
// Messages with no non-empty text parts are skipped.
function toGeminiContents(messages) {
  const contents = [];
  for (const msg of messages) {
    const pieces = msg.parts.filter((p) => p.type === "text").map((p) => p.text);
    const text = pieces.join("\n").trim();
    if (!text) continue;
    const role = msg.role === "assistant" ? "model" : "user";
    const last = contents[contents.length - 1];
    if (last && last.role === role) {
      const head = last.parts[0];
      if (head) {
        head.text += "\n" + text;
      }
    } else {
      contents.push({ role, parts: [{ text }] });
    }
  }
  return contents;
}
|
|
154
|
+
var GeminiAdapter = class {
  id = "gemini";
  name = "Gemini";
  models = MODELS3;
  // @google/genai client, created once in the constructor.
  client;
  /** @param apiKey If omitted, falls back to process.env.GEMINI_API_KEY. */
  constructor(apiKey) {
    const key = apiKey ?? process.env["GEMINI_API_KEY"];
    if (!key) {
      throw new Error(
        "GeminiAdapter: API key is required. Set GEMINI_API_KEY environment variable or pass it to the constructor."
      );
    }
    this.client = new GoogleGenAI({ apiKey: key });
  }
  // Streams a Gemini response as StreamChunk values. Unlike
  // OpenAIAdapter.stream, an unknown modelId is reported as an
  // { type: "error" } chunk rather than thrown, and no token usage is
  // attached to the final { type: "done" } chunk.
  async *stream(messages, modelId, options) {
    if (!this.models.some((m) => m.id === modelId)) {
      yield { type: "error", error: `Model "${modelId}" is not supported by the Gemini adapter` };
      return;
    }
    const contents = toGeminiContents(messages);
    if (contents.length === 0) {
      yield { type: "error", error: "No valid messages to send" };
      return;
    }
    try {
      const result = await this.client.models.generateContentStream({
        model: modelId,
        contents
      });
      for await (const chunk of result) {
        // Cooperative cancellation: the SDK request itself is not aborted;
        // we simply stop consuming chunks once the caller's signal fires.
        if (options?.signal?.aborted) break;
        const text = chunk.text;
        if (text) {
          yield { type: "delta", content: text };
        }
      }
      yield { type: "done" };
    } catch (err) {
      // A caller-initiated abort is not an error; end the stream quietly.
      if (options?.signal?.aborted) return;
      yield {
        type: "error",
        error: err instanceof Error ? err.message : "Gemini streaming error"
      };
    }
  }
};
|
|
200
|
+
|
|
201
|
+
// src/registry.ts
// Holds all registered provider adapters and resolves them by ID.
var ProviderRegistry = class {
  // Provider id -> adapter lookup table.
  adapters;
  constructor(adapters) {
    const entries = adapters.map((a) => [a.id, a]);
    this.adapters = new Map(entries);
  }
  /** Returns the adapter for the given provider ID. Throws if not registered. */
  get(providerId) {
    const found = this.adapters.get(providerId);
    if (found) {
      return found;
    }
    throw new Error(
      `Provider "${providerId}" is not registered. Registered providers: ${[...this.adapters.keys()].join(", ")}`
    );
  }
  /** Returns all registered adapters. Used to expose available providers to the client. */
  getAll() {
    return [...this.adapters.values()];
  }
};
|
|
222
|
+
export {
|
|
223
|
+
ClaudeAdapter,
|
|
224
|
+
GeminiAdapter,
|
|
225
|
+
OpenAIAdapter,
|
|
226
|
+
ProviderRegistry
|
|
227
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@tigmart/ai-core",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "Server-side LLM provider adapters — Node.js only",
|
|
5
|
+
"main": "./dist/index.js",
|
|
6
|
+
"module": "./dist/index.mjs",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"import": "./dist/index.mjs",
|
|
12
|
+
"require": "./dist/index.js"
|
|
13
|
+
}
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"dist"
|
|
17
|
+
],
|
|
18
|
+
"license": "MIT",
|
|
19
|
+
"publishConfig": {
|
|
20
|
+
"access": "public"
|
|
21
|
+
},
|
|
22
|
+
"scripts": {
|
|
23
|
+
"build": "tsup",
|
|
24
|
+
"dev": "tsup --watch",
|
|
25
|
+
"typecheck": "tsc --noEmit"
|
|
26
|
+
},
|
|
27
|
+
"dependencies": {
|
|
28
|
+
"@google/genai": "^1.48.0",
|
|
29
|
+
"@tigmart/ai-types": "workspace:*",
|
|
30
|
+
"openai": "^4.76.0"
|
|
31
|
+
},
|
|
32
|
+
"devDependencies": {
|
|
33
|
+
"@types/node": "^22.19.17",
|
|
34
|
+
"tsup": "^8.3.0",
|
|
35
|
+
"typescript": "^5.7.0"
|
|
36
|
+
}
|
|
37
|
+
}
|