koishi-plugin-elysia-api-aggregator 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,9 @@
1
import { Schema } from 'koishi';
import { AutoFetchSource, ManualModel } from '@elysia-api/shared';
/**
 * Plugin configuration.
 *
 * Combines the upstream sources that are polled automatically with the
 * models that the user registered by hand, plus a verbose-logging switch.
 */
export interface Config {
    /** Upstream API endpoints whose model lists are fetched automatically. */
    autoFetchSources: AutoFetchSource[];
    /** Models registered manually by the user (no capability probing). */
    manualModels: ManualModel[];
    /** When true, emits detailed `loadModels:` trace logging. */
    debugMode?: boolean;
}
/** Koishi schema used to render and validate the plugin's config form. */
export declare const Config: Schema<Config>;
/** Plugin name as registered with Koishi. */
export declare const name = "elysia-api-aggregator";
package/lib/index.cjs ADDED
@@ -0,0 +1,349 @@
1
// esbuild-generated CommonJS interop prelude.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Sets fn.name so stack traces show the original identifier after bundling.
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
// Defines a lazy getter on `target` for every entry in `all`
// (emulates live ESM export bindings on a CJS exports object).
var __export = (target, all) => {
  for (var name2 in all)
    __defProp(target, name2, { get: all[name2], enumerable: true });
};
// Copies own properties from `from` onto `to` (skipping `except`),
// preserving each property's enumerability via a forwarding getter.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Marks the namespace object as an ES module and copies the exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var src_exports = {};
// Public plugin surface: service class, config schema, entry point, plugin name.
__export(src_exports, {
  AggregatorService: () => AggregatorService,
  Config: () => Config,
  apply: () => apply,
  name: () => name
});
module.exports = __toCommonJS(src_exports);
var import_koishi2 = require("koishi");
30
+
31
// src/model-fetcher.ts
/**
 * Fetches the list of available models from one configured upstream source
 * (OpenAI / OpenAI-compatible, Claude, or Gemini) and normalizes every
 * entry into the aggregator's common model record shape.
 */
var ModelFetcher = class {
  /** Koishi context; used only for logging here. */
  ctx;
  constructor(ctx) {
    this.ctx = ctx;
  }
  /**
   * Dispatch on the source's platform and return its normalized models.
   * Failures are logged and swallowed: one broken source must not prevent
   * the remaining sources from loading, so this resolves to [] on error.
   */
  async fetchModels(source) {
    this.ctx.logger.info(`Fetching models from ${source.name} (${source.platform})`);
    try {
      switch (source.platform) {
        case "openai":
        case "openai-compatible":
          return await this.fetchOpenAIModels(source);
        case "claude":
          return await this.fetchClaudeModels(source);
        case "gemini":
          return await this.fetchGeminiModels(source);
        default:
          // Unknown platform: nothing to fetch.
          return [];
      }
    } catch (error) {
      this.ctx.logger.error(`Failed to fetch models from ${source.name}: ${error}`);
      return [];
    }
  }
  // GET {baseUrl}/models with a bearer token and map the OpenAI list response.
  async fetchOpenAIModels(source) {
    const response = await fetch(`${source.baseUrl}/models`, {
      headers: { "Authorization": `Bearer ${source.apiKey}` }
    });
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }
    const data = await response.json();
    const models = data.data || [];
    return models.map((model) => ({
      id: `${source.name}:${model.id}`,
      name: model.id,
      source: "auto",
      sourceName: source.name,
      baseUrl: source.baseUrl,
      apiKey: source.apiKey,
      platform: "openai",
      // The /models endpoint reports no capabilities, so infer them from the id.
      type: this.inferModelType(model.id),
      maxTokens: this.inferMaxTokens(model.id),
      visionCapable: this.hasVisionCapability(model.id),
      toolsCapable: this.hasToolsCapability(model.id),
      structuredOutput: this.hasStructuredOutput(model.id),
      thinkingMode: "both",
      available: true,
      lastChecked: new Date()
    }));
  }
  // No list endpoint is queried for Claude; return a hard-coded snapshot of
  // known models (all 200k-context). NOTE(review): this list will go stale —
  // consider Anthropic's GET /v1/models endpoint.
  async fetchClaudeModels(source) {
    const knownModels = [
      { id: "claude-3-7-sonnet-20250219", maxTokens: 2e5 },
      { id: "claude-3-5-sonnet-20241022", maxTokens: 2e5 },
      { id: "claude-3-5-haiku-20241022", maxTokens: 2e5 },
      { id: "claude-3-opus-20240229", maxTokens: 2e5 }
    ];
    return knownModels.map((model) => ({
      id: `${source.name}:${model.id}`,
      name: model.id,
      source: "auto",
      sourceName: source.name,
      baseUrl: source.baseUrl,
      apiKey: source.apiKey,
      platform: "claude",
      type: "llm",
      maxTokens: model.maxTokens,
      visionCapable: true,
      toolsCapable: true,
      structuredOutput: true,
      thinkingMode: "both",
      available: true,
      lastChecked: new Date()
    }));
  }
  // GET {baseUrl}/v1beta/models (API key in query string, per Gemini's REST
  // convention) and keep only models that can generateContent.
  async fetchGeminiModels(source) {
    const response = await fetch(
      `${source.baseUrl}/v1beta/models?key=${source.apiKey}`
    );
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }
    const data = await response.json();
    const models = data.models || [];
    return models.filter((m) => m.supportedGenerationMethods?.includes("generateContent")).map((model) => ({
      // NOTE(review): Gemini reports names as "models/<id>", so the composed
      // id is "<source>:models/<id>" — confirm consumers expect that form.
      id: `${source.name}:${model.name}`,
      name: model.name,
      source: "auto",
      sourceName: source.name,
      baseUrl: source.baseUrl,
      apiKey: source.apiKey,
      platform: "gemini",
      type: "llm",
      maxTokens: this.parseGeminiMaxTokens(model),
      visionCapable: true,
      toolsCapable: true,
      structuredOutput: false,
      thinkingMode: "both",
      available: true,
      lastChecked: new Date()
    }));
  }
  // Classify a model by conventional id substrings; defaults to "llm".
  inferModelType(modelId) {
    const id = modelId.toLowerCase();
    if (id.includes("embed") || id.includes("text-embedding")) {
      return "embedding";
    }
    if (id.includes("rerank")) {
      return "reranker";
    }
    return "llm";
  }
  // Best-effort context-window lookup for well-known OpenAI model families.
  // First substring match wins; unknown models default to 128000.
  inferMaxTokens(modelId) {
    const limits = {
      "gpt-4o": 128e3,
      "gpt-4o-mini": 128e3,
      "gpt-4-turbo": 128e3,
      "gpt-4": 8192,
      "gpt-3.5-turbo": 16385,
      "text-embedding-3-small": 8191,
      "text-embedding-3-large": 8191,
      "text-embedding-ada-002": 8191
    };
    for (const [key, value] of Object.entries(limits)) {
      if (modelId.toLowerCase().includes(key)) {
        return value;
      }
    }
    return 128e3;
  }
  // Heuristics below are id-substring guesses, not authoritative capability data.
  hasVisionCapability(modelId) {
    const id = modelId.toLowerCase();
    return id.includes("vision") || id.includes("gpt-4o") || id.includes("gpt-4-turbo");
  }
  hasToolsCapability(modelId) {
    const id = modelId.toLowerCase();
    return !id.includes("gpt-3.5");
  }
  hasStructuredOutput(modelId) {
    const id = modelId.toLowerCase();
    return id.includes("gpt-4o") || id.includes("gpt-4-turbo");
  }
  parseGeminiMaxTokens(model) {
    // BUG FIX: the Gemini models.list resource exposes `outputTokenLimit` at
    // the top level and `topK` is a number, so the previous
    // `model.topK?.outputTokenLimit` was always undefined and every Gemini
    // model silently fell back to 128000. The old path is kept as a fallback.
    return model.outputTokenLimit || model.topK?.outputTokenLimit || 128e3;
  }
};
181
+
182
// src/config.ts
var import_koishi = require("koishi");
// Plugin configuration schema: auto-fetch sources, manually added models,
// and a debug-logging switch. Descriptions are user-facing (Chinese) strings
// rendered in the Koishi console and must not be altered.
var Config = import_koishi.Schema.intersect([
  // Auto-fetch sources configuration
  import_koishi.Schema.object({
    autoFetchSources: import_koishi.Schema.array(
      import_koishi.Schema.intersect([
        import_koishi.Schema.object({
          name: import_koishi.Schema.string().required().description("源名称"),
          baseUrl: import_koishi.Schema.string().required().description("API 端点"),
          apiKey: import_koishi.Schema.string().required().role("secret").description("API Key"),
          platform: import_koishi.Schema.union([
            import_koishi.Schema.const("openai").description("OpenAI"),
            import_koishi.Schema.const("claude").description("Claude"),
            import_koishi.Schema.const("gemini").description("Gemini"),
            import_koishi.Schema.const("openai-compatible").description("OpenAI 兼容")
          ]).description("平台类型"),
          enabled: import_koishi.Schema.boolean().default(true).description("启用")
        })
      ])
    ).role("table").description("自动拉取源")
  }),
  // Manual models
  import_koishi.Schema.object({
    manualModels: import_koishi.Schema.array(
      import_koishi.Schema.object({
        id: import_koishi.Schema.string().required().description("模型 ID"),
        name: import_koishi.Schema.string().required().description("模型名称"),
        sourceName: import_koishi.Schema.string().required().description("源名称"),
        baseUrl: import_koishi.Schema.string().required().description("API 端点"),
        apiKey: import_koishi.Schema.string().required().role("secret").description("API Key"),
        platform: import_koishi.Schema.union([
          import_koishi.Schema.const("openai").description("OpenAI"),
          import_koishi.Schema.const("claude").description("Claude"),
          import_koishi.Schema.const("gemini").description("Gemini")
        ]).description("平台类型")
      })
    ).role("table").description("手动添加的模型")
  }),
  // Debug options
  import_koishi.Schema.object({
    debugMode: import_koishi.Schema.boolean().default(false).description("启用调试日志")
  }).description("调试选项")
]);
// Plugin name registered with Koishi.
var name = "elysia-api-aggregator";
227
+
228
// src/index.ts
/**
 * Koishi service holding the aggregated model registry. Other plugins read
 * it through `ctx['elysia-api-aggregator']` or the `ctx.elysiaApi` facade.
 */
var AggregatorService = class extends import_koishi2.Service {
  ctx;
  config;
  // Backing store; replaced in place so existing references stay valid.
  models = [];
  constructor(ctx, config) {
    super(ctx, "elysia-api-aggregator");
    this.ctx = ctx;
    this.config = config;
  }
  /** Every model currently registered. */
  getAll() {
    return this.models;
  }
  /** Look a model up by its unique id, or undefined when absent. */
  getById(id) {
    return this.models.find((entry) => entry.id === id);
  }
  /** All models of the given type (e.g. llm / embedding / reranker). */
  getByType(type) {
    return this.models.filter((entry) => entry.type === type);
  }
  /** Swap in a new model list without replacing the array object itself. */
  updateModels(newModels) {
    this.models.splice(0, this.models.length, ...newModels);
  }
};
254
/**
 * Koishi plugin entry point.
 *
 * Registers the aggregator service on the context, exposes a read-only
 * `ctx.elysiaApi.models` facade for consumer plugins, loads the model list
 * on `ready` and on every `config` change, and adds two commands
 * (reload / list). Broadcasts `elysia-api/models-updated` after each load.
 */
function apply(ctx, config) {
  const service = new AggregatorService(ctx, config);
  ctx["elysia-api-aggregator"] = service;
  const fetcher = new ModelFetcher(ctx);
  // Facade so consumers don't need to depend on the service class itself.
  ctx.elysiaApi = {
    models: {
      getAll: () => service.getAll(),
      getById: (id) => service.getById(id),
      getByType: (type) => service.getByType(type)
    }
  };
  async function loadModels() {
    if (config.debugMode) {
      ctx.logger.info("=== loadModels: Starting to load models ===");
    } else {
      ctx.logger.info("Loading models...");
    }
    const fetchedModels = [];
    // Defensive `?? []`: tolerate a partially populated config object.
    for (const source of config.autoFetchSources ?? []) {
      if (!source.enabled) continue;
      if (config.debugMode) {
        ctx.logger.info(`loadModels: Fetching from ${source.name}`);
      }
      // fetchModels never rejects: per-source failures are logged and yield [].
      const sourceModels = await fetcher.fetchModels(source);
      fetchedModels.push(...sourceModels);
      if (config.debugMode) {
        ctx.logger.info(`loadModels: Fetched ${sourceModels.length} models from ${source.name}`);
      } else {
        ctx.logger.info(`Fetched ${sourceModels.length} models from ${source.name}`);
      }
    }
    const manualEntries = config.manualModels ?? [];
    if (config.debugMode) {
      ctx.logger.info(`loadModels: Processing ${manualEntries.length} manual models`);
    }
    const manualModels = manualEntries.map((m) => {
      if (config.debugMode) {
        ctx.logger.info(`loadModels: Adding manual model ${m.id}`);
      }
      return {
        id: m.id,
        name: m.name,
        source: "manual",
        sourceName: m.sourceName,
        baseUrl: m.baseUrl,
        apiKey: m.apiKey,
        platform: m.platform,
        // Capabilities are unknown for manual entries; use conservative defaults.
        type: "llm",
        maxTokens: 128e3,
        visionCapable: false,
        toolsCapable: false,
        structuredOutput: false,
        thinkingMode: "both",
        available: true,
        lastChecked: new Date()
      };
    });
    const allModels = [...fetchedModels, ...manualModels];
    service.updateModels(allModels);
    if (config.debugMode) {
      ctx.logger.info(`loadModels: Total models loaded: ${allModels.length}`);
      ctx.logger.info(`loadModels: Model IDs: ${allModels.map((m) => m.id).join(", ")}`);
      ctx.logger.info(`loadModels: ctx.elysiaApi exists: ${ctx.elysiaApi != null}`);
      ctx.logger.info(`loadModels: ctx.elysiaApi.models exists: ${ctx.elysiaApi?.models != null}`);
    } else {
      ctx.logger.info(`Total models loaded: ${allModels.length}`);
    }
    if (config.debugMode) {
      ctx.logger.info(`loadModels: Emitting elysia-api/models-updated event with ${allModels.length} models`);
    }
    // Emit a copy so listeners cannot mutate the service's internal list.
    ctx.emit("elysia-api/models-updated", [...allModels]);
  }
  // BUG FIX: the "config" handler previously dropped the promise returned by
  // loadModels(), so any rejection became an unhandled promise rejection.
  // Route both lifecycle hooks through a handler that logs failures.
  const safeLoad = () => loadModels().catch((error) => {
    ctx.logger.error(`Failed to load models: ${error}`);
  });
  ctx.on("ready", safeLoad);
  ctx.on("config", safeLoad);
  ctx.command("elysia-api.models.reload", "重新加载模型列表").action(async () => {
    await loadModels();
    const count = service.getAll().length;
    return `已加载 ${count} 个模型`;
  });
  ctx.command("elysia-api.models.list", "列出所有模型").action(() => {
    const all = ctx.elysiaApi.models.getAll();
    return `可用模型列表 (${all.length}):
` + all.map((m) => `- ${m.name} (${m.type})`).join("\n");
  });
}
__name(apply, "apply");
343
// Annotate the CommonJS export names for ESM import in node:
// (dead `0 && …` expression never executes; it exists only so Node's
// cjs-module-lexer can statically detect the named exports)
0 && (module.exports = {
  AggregatorService,
  Config,
  apply,
  name
});
package/lib/index.d.ts ADDED
@@ -0,0 +1,30 @@
1
import { Context, Service } from 'koishi';
import { Model, ModelType } from '@elysia-api/shared';
import { Config, name } from './config';
export { Config, name };
/**
 * Koishi service holding the aggregated model registry. Populated by the
 * plugin's load routine and queried by consumer plugins.
 */
export declare class AggregatorService extends Service {
    ctx: Context;
    config: Config;
    /** Internal model registry; exposed only through the getters below. */
    private models;
    constructor(ctx: Context, config: Config);
    /** Every model currently registered. */
    getAll(): Model[];
    /** Look a model up by its unique id, or undefined when absent. */
    getById(id: string): Model | undefined;
    /** All models of the given type. */
    getByType(type: ModelType): Model[];
    /** Replace the registry contents with a new model list. */
    updateModels(newModels: Model[]): void;
}
declare module 'koishi' {
    interface Context {
        /** The aggregator service instance registered by this plugin. */
        'elysia-api-aggregator': AggregatorService;
        /** Read-only facade for consumers that only need model queries. */
        elysiaApi?: {
            models: {
                getAll(): Model[];
                getById(id: string): Model | undefined;
                getByType(type: ModelType): Model[];
            };
        };
    }
    interface Events {
        /** Fired after every (re)load with a snapshot of all models. */
        'elysia-api/models-updated': (models: Model[]) => void;
    }
}
/** Koishi plugin entry point; wires up the service, events, and commands. */
export declare function apply(ctx: Context, config: Config): void;
package/lib/index.mjs ADDED
@@ -0,0 +1,324 @@
1
// esbuild-generated ESM prelude.
var __defProp = Object.defineProperty;
// Sets fn.name so stack traces show the original identifier after bundling.
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });

// src/index.ts
import { Service } from "koishi";
6
+
7
// src/model-fetcher.ts
/**
 * Fetches the list of available models from one configured upstream source
 * (OpenAI / OpenAI-compatible, Claude, or Gemini) and normalizes every
 * entry into the aggregator's common model record shape.
 */
var ModelFetcher = class {
  /** Koishi context; used only for logging here. */
  ctx;
  constructor(ctx) {
    this.ctx = ctx;
  }
  /**
   * Dispatch on the source's platform and return its normalized models.
   * Failures are logged and swallowed: one broken source must not prevent
   * the remaining sources from loading, so this resolves to [] on error.
   */
  async fetchModels(source) {
    this.ctx.logger.info(`Fetching models from ${source.name} (${source.platform})`);
    try {
      switch (source.platform) {
        case "openai":
        case "openai-compatible":
          return await this.fetchOpenAIModels(source);
        case "claude":
          return await this.fetchClaudeModels(source);
        case "gemini":
          return await this.fetchGeminiModels(source);
        default:
          // Unknown platform: nothing to fetch.
          return [];
      }
    } catch (error) {
      this.ctx.logger.error(`Failed to fetch models from ${source.name}: ${error}`);
      return [];
    }
  }
  // GET {baseUrl}/models with a bearer token and map the OpenAI list response.
  async fetchOpenAIModels(source) {
    const response = await fetch(`${source.baseUrl}/models`, {
      headers: { "Authorization": `Bearer ${source.apiKey}` }
    });
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }
    const data = await response.json();
    const models = data.data || [];
    return models.map((model) => ({
      id: `${source.name}:${model.id}`,
      name: model.id,
      source: "auto",
      sourceName: source.name,
      baseUrl: source.baseUrl,
      apiKey: source.apiKey,
      platform: "openai",
      // The /models endpoint reports no capabilities, so infer them from the id.
      type: this.inferModelType(model.id),
      maxTokens: this.inferMaxTokens(model.id),
      visionCapable: this.hasVisionCapability(model.id),
      toolsCapable: this.hasToolsCapability(model.id),
      structuredOutput: this.hasStructuredOutput(model.id),
      thinkingMode: "both",
      available: true,
      lastChecked: new Date()
    }));
  }
  // No list endpoint is queried for Claude; return a hard-coded snapshot of
  // known models (all 200k-context). NOTE(review): this list will go stale —
  // consider Anthropic's GET /v1/models endpoint.
  async fetchClaudeModels(source) {
    const knownModels = [
      { id: "claude-3-7-sonnet-20250219", maxTokens: 2e5 },
      { id: "claude-3-5-sonnet-20241022", maxTokens: 2e5 },
      { id: "claude-3-5-haiku-20241022", maxTokens: 2e5 },
      { id: "claude-3-opus-20240229", maxTokens: 2e5 }
    ];
    return knownModels.map((model) => ({
      id: `${source.name}:${model.id}`,
      name: model.id,
      source: "auto",
      sourceName: source.name,
      baseUrl: source.baseUrl,
      apiKey: source.apiKey,
      platform: "claude",
      type: "llm",
      maxTokens: model.maxTokens,
      visionCapable: true,
      toolsCapable: true,
      structuredOutput: true,
      thinkingMode: "both",
      available: true,
      lastChecked: new Date()
    }));
  }
  // GET {baseUrl}/v1beta/models (API key in query string, per Gemini's REST
  // convention) and keep only models that can generateContent.
  async fetchGeminiModels(source) {
    const response = await fetch(
      `${source.baseUrl}/v1beta/models?key=${source.apiKey}`
    );
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }
    const data = await response.json();
    const models = data.models || [];
    return models.filter((m) => m.supportedGenerationMethods?.includes("generateContent")).map((model) => ({
      // NOTE(review): Gemini reports names as "models/<id>", so the composed
      // id is "<source>:models/<id>" — confirm consumers expect that form.
      id: `${source.name}:${model.name}`,
      name: model.name,
      source: "auto",
      sourceName: source.name,
      baseUrl: source.baseUrl,
      apiKey: source.apiKey,
      platform: "gemini",
      type: "llm",
      maxTokens: this.parseGeminiMaxTokens(model),
      visionCapable: true,
      toolsCapable: true,
      structuredOutput: false,
      thinkingMode: "both",
      available: true,
      lastChecked: new Date()
    }));
  }
  // Classify a model by conventional id substrings; defaults to "llm".
  inferModelType(modelId) {
    const id = modelId.toLowerCase();
    if (id.includes("embed") || id.includes("text-embedding")) {
      return "embedding";
    }
    if (id.includes("rerank")) {
      return "reranker";
    }
    return "llm";
  }
  // Best-effort context-window lookup for well-known OpenAI model families.
  // First substring match wins; unknown models default to 128000.
  inferMaxTokens(modelId) {
    const limits = {
      "gpt-4o": 128e3,
      "gpt-4o-mini": 128e3,
      "gpt-4-turbo": 128e3,
      "gpt-4": 8192,
      "gpt-3.5-turbo": 16385,
      "text-embedding-3-small": 8191,
      "text-embedding-3-large": 8191,
      "text-embedding-ada-002": 8191
    };
    for (const [key, value] of Object.entries(limits)) {
      if (modelId.toLowerCase().includes(key)) {
        return value;
      }
    }
    return 128e3;
  }
  // Heuristics below are id-substring guesses, not authoritative capability data.
  hasVisionCapability(modelId) {
    const id = modelId.toLowerCase();
    return id.includes("vision") || id.includes("gpt-4o") || id.includes("gpt-4-turbo");
  }
  hasToolsCapability(modelId) {
    const id = modelId.toLowerCase();
    return !id.includes("gpt-3.5");
  }
  hasStructuredOutput(modelId) {
    const id = modelId.toLowerCase();
    return id.includes("gpt-4o") || id.includes("gpt-4-turbo");
  }
  parseGeminiMaxTokens(model) {
    // BUG FIX: the Gemini models.list resource exposes `outputTokenLimit` at
    // the top level and `topK` is a number, so the previous
    // `model.topK?.outputTokenLimit` was always undefined and every Gemini
    // model silently fell back to 128000. The old path is kept as a fallback.
    return model.outputTokenLimit || model.topK?.outputTokenLimit || 128e3;
  }
};
157
+
158
// src/config.ts
import { Schema } from "koishi";
// Plugin configuration schema: auto-fetch sources, manually added models,
// and a debug-logging switch. Descriptions are user-facing (Chinese) strings
// rendered in the Koishi console and must not be altered.
var Config = Schema.intersect([
  // Auto-fetch sources configuration
  Schema.object({
    autoFetchSources: Schema.array(
      Schema.intersect([
        Schema.object({
          name: Schema.string().required().description("源名称"),
          baseUrl: Schema.string().required().description("API 端点"),
          apiKey: Schema.string().required().role("secret").description("API Key"),
          platform: Schema.union([
            Schema.const("openai").description("OpenAI"),
            Schema.const("claude").description("Claude"),
            Schema.const("gemini").description("Gemini"),
            Schema.const("openai-compatible").description("OpenAI 兼容")
          ]).description("平台类型"),
          enabled: Schema.boolean().default(true).description("启用")
        })
      ])
    ).role("table").description("自动拉取源")
  }),
  // Manual models
  Schema.object({
    manualModels: Schema.array(
      Schema.object({
        id: Schema.string().required().description("模型 ID"),
        name: Schema.string().required().description("模型名称"),
        sourceName: Schema.string().required().description("源名称"),
        baseUrl: Schema.string().required().description("API 端点"),
        apiKey: Schema.string().required().role("secret").description("API Key"),
        platform: Schema.union([
          Schema.const("openai").description("OpenAI"),
          Schema.const("claude").description("Claude"),
          Schema.const("gemini").description("Gemini")
        ]).description("平台类型")
      })
    ).role("table").description("手动添加的模型")
  }),
  // Debug options
  Schema.object({
    debugMode: Schema.boolean().default(false).description("启用调试日志")
  }).description("调试选项")
]);
// Plugin name registered with Koishi.
var name = "elysia-api-aggregator";
203
+
204
// src/index.ts
/**
 * Koishi service holding the aggregated model registry. Other plugins read
 * it through `ctx['elysia-api-aggregator']` or the `ctx.elysiaApi` facade.
 */
var AggregatorService = class extends Service {
  ctx;
  config;
  // Backing store; replaced in place so existing references stay valid.
  models = [];
  constructor(ctx, config) {
    super(ctx, "elysia-api-aggregator");
    this.ctx = ctx;
    this.config = config;
  }
  /** Every model currently registered. */
  getAll() {
    return this.models;
  }
  /** Look a model up by its unique id, or undefined when absent. */
  getById(id) {
    return this.models.find((entry) => entry.id === id);
  }
  /** All models of the given type (e.g. llm / embedding / reranker). */
  getByType(type) {
    return this.models.filter((entry) => entry.type === type);
  }
  /** Swap in a new model list without replacing the array object itself. */
  updateModels(newModels) {
    this.models.splice(0, this.models.length, ...newModels);
  }
};
230
/**
 * Koishi plugin entry point.
 *
 * Registers the aggregator service on the context, exposes a read-only
 * `ctx.elysiaApi.models` facade for consumer plugins, loads the model list
 * on `ready` and on every `config` change, and adds two commands
 * (reload / list). Broadcasts `elysia-api/models-updated` after each load.
 */
function apply(ctx, config) {
  const service = new AggregatorService(ctx, config);
  ctx["elysia-api-aggregator"] = service;
  const fetcher = new ModelFetcher(ctx);
  // Facade so consumers don't need to depend on the service class itself.
  ctx.elysiaApi = {
    models: {
      getAll: () => service.getAll(),
      getById: (id) => service.getById(id),
      getByType: (type) => service.getByType(type)
    }
  };
  async function loadModels() {
    if (config.debugMode) {
      ctx.logger.info("=== loadModels: Starting to load models ===");
    } else {
      ctx.logger.info("Loading models...");
    }
    const fetchedModels = [];
    // Defensive `?? []`: tolerate a partially populated config object.
    for (const source of config.autoFetchSources ?? []) {
      if (!source.enabled) continue;
      if (config.debugMode) {
        ctx.logger.info(`loadModels: Fetching from ${source.name}`);
      }
      // fetchModels never rejects: per-source failures are logged and yield [].
      const sourceModels = await fetcher.fetchModels(source);
      fetchedModels.push(...sourceModels);
      if (config.debugMode) {
        ctx.logger.info(`loadModels: Fetched ${sourceModels.length} models from ${source.name}`);
      } else {
        ctx.logger.info(`Fetched ${sourceModels.length} models from ${source.name}`);
      }
    }
    const manualEntries = config.manualModels ?? [];
    if (config.debugMode) {
      ctx.logger.info(`loadModels: Processing ${manualEntries.length} manual models`);
    }
    const manualModels = manualEntries.map((m) => {
      if (config.debugMode) {
        ctx.logger.info(`loadModels: Adding manual model ${m.id}`);
      }
      return {
        id: m.id,
        name: m.name,
        source: "manual",
        sourceName: m.sourceName,
        baseUrl: m.baseUrl,
        apiKey: m.apiKey,
        platform: m.platform,
        // Capabilities are unknown for manual entries; use conservative defaults.
        type: "llm",
        maxTokens: 128e3,
        visionCapable: false,
        toolsCapable: false,
        structuredOutput: false,
        thinkingMode: "both",
        available: true,
        lastChecked: new Date()
      };
    });
    const allModels = [...fetchedModels, ...manualModels];
    service.updateModels(allModels);
    if (config.debugMode) {
      ctx.logger.info(`loadModels: Total models loaded: ${allModels.length}`);
      ctx.logger.info(`loadModels: Model IDs: ${allModels.map((m) => m.id).join(", ")}`);
      ctx.logger.info(`loadModels: ctx.elysiaApi exists: ${ctx.elysiaApi != null}`);
      ctx.logger.info(`loadModels: ctx.elysiaApi.models exists: ${ctx.elysiaApi?.models != null}`);
    } else {
      ctx.logger.info(`Total models loaded: ${allModels.length}`);
    }
    if (config.debugMode) {
      ctx.logger.info(`loadModels: Emitting elysia-api/models-updated event with ${allModels.length} models`);
    }
    // Emit a copy so listeners cannot mutate the service's internal list.
    ctx.emit("elysia-api/models-updated", [...allModels]);
  }
  // BUG FIX: the "config" handler previously dropped the promise returned by
  // loadModels(), so any rejection became an unhandled promise rejection.
  // Route both lifecycle hooks through a handler that logs failures.
  const safeLoad = () => loadModels().catch((error) => {
    ctx.logger.error(`Failed to load models: ${error}`);
  });
  ctx.on("ready", safeLoad);
  ctx.on("config", safeLoad);
  ctx.command("elysia-api.models.reload", "重新加载模型列表").action(async () => {
    await loadModels();
    const count = service.getAll().length;
    return `已加载 ${count} 个模型`;
  });
  ctx.command("elysia-api.models.list", "列出所有模型").action(() => {
    const all = ctx.elysiaApi.models.getAll();
    return `可用模型列表 (${all.length}):
` + all.map((m) => `- ${m.name} (${m.type})`).join("\n");
  });
}
__name(apply, "apply");
319
// Public plugin surface: service class, config schema, entry point, plugin name.
export {
  AggregatorService,
  Config,
  apply,
  name
};
@@ -0,0 +1,15 @@
1
import { Model, AutoFetchSource } from '@elysia-api/shared';
/**
 * Fetches model lists from configured upstream sources (OpenAI-compatible,
 * Claude, Gemini) and normalizes each entry into the shared Model shape.
 */
export declare class ModelFetcher {
    private ctx;
    constructor(ctx: import('koishi').Context);
    /**
     * Fetch and normalize all models for one source. Failures are logged
     * and swallowed, so this resolves to [] rather than rejecting.
     */
    fetchModels(source: AutoFetchSource): Promise<Model[]>;
    private fetchOpenAIModels;
    private fetchClaudeModels;
    private fetchGeminiModels;
    private inferModelType;
    private inferMaxTokens;
    private hasVisionCapability;
    private hasToolsCapability;
    private hasStructuredOutput;
    private parseGeminiMaxTokens;
}
@@ -0,0 +1,5 @@
1
import { Model } from '@elysia-api/shared';
/**
 * Checks whether a model is actually reachable/usable.
 * NOTE(review): no implementation is visible in this chunk — semantics of
 * the returned boolean (reachability vs. full capability check) should be
 * confirmed against the implementation.
 */
export declare class ModelValidator {
    /** Resolve to true when the given model passes validation. */
    validateModel(model: Model): Promise<boolean>;
    /** Resolve to true when the given embedding model passes validation. */
    validateEmbeddingModel(model: Model): Promise<boolean>;
}
package/package.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "name": "koishi-plugin-elysia-api-aggregator",
3
+ "description": "Inspired by New-API, the Elysia-API model aggregator plugin allows automatic fetching and manual configuration of available AI models, designed to work with the orchestrator plugin.",
4
+ "version": "0.1.0",
5
+ "main": "lib/index.cjs",
6
+ "module": "lib/index.mjs",
7
+ "typings": "lib/index.d.ts",
8
+ "files": [
9
+ "lib",
10
+ "dist"
11
+ ],
12
+ "type": "module",
13
+ "exports": {
14
+ ".": {
15
+ "types": "./lib/index.d.ts",
16
+ "require": "./lib/index.cjs",
17
+ "import": "./lib/index.mjs"
18
+ },
19
+ "./package.json": "./package.json"
20
+ },
21
+ "license": "MIT",
22
+ "scripts": {
23
+ "build": "atsc -b"
24
+ },
25
+ "keywords": [
26
+ "chatbot",
27
+ "koishi",
28
+ "plugin",
29
+ "elysia-api",
30
+ "ai",
31
+ "llm"
32
+ ],
33
+ "peerDependencies": {
34
+ "koishi": "^4.18.9"
35
+ },
36
+ "koishi": {
37
+ "description": {
38
+ "zh": "借鉴自 New-API 的 Elysia-API 模型聚合插件,允许自动拉取和手动配置可用的 AI 模型,需要与模型编排插件配合工作。",
39
+ "en": "Inspired by New-API, the Elysia-API model aggregator plugin allows automatic fetching and manual configuration of available AI models, designed to work with the orchestrator plugin."
40
+ },
41
+ "service": {
42
+ "implements": [
43
+ "elysia-api-aggregator"
44
+ ]
45
+ }
46
+ }
47
+ }