@weisiren000/oiiai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,112 @@
1
+ # @weisiren000/oiiai
2
+
3
+ A unified AI provider interface wrapper, supporting OpenRouter, OpenAI, Anthropic, and other AI services.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @weisiren000/oiiai
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ```typescript
14
+ import { OpenRouterProvider } from '@weisiren000/oiiai';
15
+
16
+ const ai = new OpenRouterProvider('your-api-key');
17
+
18
+ // Simple Q&A
19
+ const answer = await ai.ask('deepseek/deepseek-chat', 'Hello');
20
+
21
+ // With a system prompt
22
+ const response = await ai.askWithSystem(
23
+ 'deepseek/deepseek-chat',
24
+ 'You are a helpful assistant',
25
+ 'Introduce yourself'
26
+ );
27
+
28
+ // Full chat request
29
+ const result = await ai.chat({
30
+ model: 'deepseek/deepseek-chat',
31
+ messages: [
32
+ { role: 'system', content: 'You are a concise assistant' },
33
+ { role: 'user', content: '1+1=?' }
34
+ ],
35
+ temperature: 0.7,
36
+ maxTokens: 100
37
+ });
38
+
39
+ console.log(result.content);
40
+ console.log(result.usage);
41
+ ```
42
+
43
+ ## Streaming Output
44
+
45
+ ```typescript
46
+ for await (const chunk of ai.chatStream({
47
+ model: 'deepseek/deepseek-chat',
48
+ messages: [{ role: 'user', content: 'Write a poem' }]
49
+ })) {
50
+ if (chunk.type === 'content') {
51
+ process.stdout.write(chunk.text);
52
+ }
53
+ }
54
+ ```
55
+
56
+ ## Reasoning Models
57
+
58
+ Supports reasoning models such as OpenAI o1/o3, DeepSeek R1, and Claude:
59
+
60
+ ```typescript
61
+ const result = await ai.chat({
62
+ model: 'deepseek/deepseek-r1',
63
+ messages: [{ role: 'user', content: 'Which is larger, 9.11 or 9.9?' }],
64
+ reasoning: {
65
+ effort: 'high', // OpenAI/Grok style
66
+ // maxTokens: 2000, // Anthropic/Gemini style
67
+ // exclude: true, // omit reasoning content from the response
68
+ }
69
+ });
70
+
71
+ console.log('Reasoning:', result.reasoning);
72
+ console.log('Answer:', result.content);
73
+ ```
74
+
75
+ ## Unified Interface
76
+
77
+ All providers implement the shared `AIProvider` interface:
78
+
79
+ ```typescript
80
+ interface AIProvider {
81
+ readonly name: string;
82
+ chat(options: ChatOptions): Promise<ChatResult>;
83
+ chatStream(options: ChatOptions): AsyncGenerator<StreamChunk>;
84
+ ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;
85
+ askWithSystem(model: string, systemPrompt: string, userMessage: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;
86
+ listModels?(): Promise<ModelInfo[]>;
87
+ }
88
+ ```
89
+
90
+ ## Custom Providers
91
+
92
+ Extend `BaseAIProvider` to implement a custom provider:
93
+
94
+ ```typescript
95
+ import { BaseAIProvider, ChatOptions, ChatResult, StreamChunk } from '@weisiren000/oiiai';
96
+
97
+ class MyProvider extends BaseAIProvider {
98
+ readonly name = 'my-provider';
99
+
100
+ async chat(options: ChatOptions): Promise<ChatResult> {
101
+ // Implement the non-streaming request here
102
+ }
103
+
104
+ async *chatStream(options: ChatOptions): AsyncGenerator<StreamChunk> {
105
+ // Implement the streaming request here
106
+ }
107
+ }
108
+ ```
109
+
110
+ ## License
111
+
112
+ MIT
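The README stops short of demonstrating the optional `listModels()` method declared in the typings below, even though `OpenRouterProvider` implements it. A minimal sketch, assuming an ESM entry point with top-level await and a key in the `OPENROUTER_API_KEY` environment variable:

```typescript
import { OpenRouterProvider } from '@weisiren000/oiiai';

const ai = new OpenRouterProvider(process.env.OPENROUTER_API_KEY ?? '');

// listModels() returns OpenRouter's catalog with pricing and context info.
const models = await ai.listModels();
for (const m of models.slice(0, 5)) {
  // Pricing values are strings in $/token, per the ModelPricing typings.
  console.log(`${m.id}: ctx=${m.contextLength}, prompt=$${m.pricing.prompt}/token`);
}
```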
package/dist/index.d.ts ADDED
@@ -0,0 +1,173 @@
1
+ /**
2
+ * Unified type definitions for AI providers
3
+ */
4
+ interface ChatMessage {
5
+ role: 'system' | 'user' | 'assistant';
6
+ content: string;
7
+ }
8
+ /**
9
+ * Reasoning configuration
10
+ * - effort: used by OpenAI o1/o3 and Grok
11
+ * - maxTokens: used by Anthropic Claude, Gemini, and Qwen
12
+ */
13
+ interface ReasoningConfig {
14
+ /** Reasoning effort level */
15
+ effort?: 'high' | 'medium' | 'low';
16
+ /** Maximum number of reasoning tokens */
17
+ maxTokens?: number;
18
+ /** Whether to exclude reasoning content from the response */
19
+ exclude?: boolean;
20
+ /** Whether to enable reasoning */
21
+ enabled?: boolean;
22
+ }
23
+ interface ChatOptions {
24
+ /** Model ID */
25
+ model: string;
26
+ /** Conversation message list */
27
+ messages: ChatMessage[];
28
+ /** Temperature, 0-2 */
29
+ temperature?: number;
30
+ /** Maximum number of output tokens */
31
+ maxTokens?: number;
32
+ /** Reasoning configuration */
33
+ reasoning?: ReasoningConfig;
34
+ }
35
+ interface ChatResult {
36
+ /** Final answer content */
37
+ content: string;
38
+ /** Reasoning trace (only present for reasoning models) */
39
+ reasoning: string | null;
40
+ /** Model actually used */
41
+ model: string;
42
+ /** Token usage */
43
+ usage: TokenUsage;
44
+ /** Finish reason */
45
+ finishReason: string | null;
46
+ }
47
+ interface TokenUsage {
48
+ promptTokens: number;
49
+ completionTokens: number;
50
+ totalTokens: number;
51
+ }
52
+ interface StreamChunk {
53
+ type: 'reasoning' | 'content';
54
+ text: string;
55
+ }
56
+ interface ModelPricing {
57
+ /** Input token price ($/token) */
58
+ prompt: string;
59
+ /** Output token price ($/token) */
60
+ completion: string;
61
+ /** Per-request price */
62
+ request: string;
63
+ /** Per-image price */
64
+ image: string;
65
+ }
66
+ interface ModelInfo {
67
+ /** Model ID */
68
+ id: string;
69
+ /** Model name */
70
+ name: string;
71
+ /** Model description */
72
+ description: string;
73
+ /** Pricing info */
74
+ pricing: ModelPricing;
75
+ /** Context length */
76
+ contextLength: number;
77
+ /** Supported parameters */
78
+ supportedParameters: string[];
79
+ }
80
+
81
+ /**
82
+ * Unified AI provider interface
83
+ */
84
+
85
+ /**
86
+ * Base AI provider interface
87
+ * Every provider implementation must implement this interface
88
+ */
89
+ interface AIProvider {
90
+ /** Provider name */
91
+ readonly name: string;
92
+ /**
93
+ * Send a chat request (non-streaming)
94
+ */
95
+ chat(options: ChatOptions): Promise<ChatResult>;
96
+ /**
97
+ * Send a streaming chat request
98
+ */
99
+ chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;
100
+ /**
101
+ * Simple conversation: single-turn Q&A
102
+ */
103
+ ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;
104
+ /**
105
+ * Conversation with a system prompt
106
+ */
107
+ askWithSystem(model: string, systemPrompt: string, userMessage: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;
108
+ /**
109
+ * List available models (optional to implement)
110
+ */
111
+ listModels?(): Promise<ModelInfo[]>;
112
+ }
113
+ /**
114
+ * Abstract base class for AI providers
115
+ * Provides common default implementations; subclasses only need to implement the core methods
116
+ */
117
+ declare abstract class BaseAIProvider implements AIProvider {
118
+ abstract readonly name: string;
119
+ abstract chat(options: ChatOptions): Promise<ChatResult>;
120
+ abstract chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;
121
+ /**
122
+ * Simple conversation: single-turn Q&A (default implementation)
123
+ */
124
+ ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;
125
+ /**
126
+ * Conversation with a system prompt (default implementation)
127
+ */
128
+ askWithSystem(model: string, systemPrompt: string, userMessage: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;
129
+ }
130
+
131
+ /**
132
+ * OpenRouter provider implementation
133
+ */
134
+
135
+ /**
136
+ * Model info extended with OpenRouter-specific fields
137
+ */
138
+ interface OpenRouterModelInfo extends ModelInfo {
139
+ /** Canonical slug */
140
+ canonicalSlug: string;
141
+ /** Creation timestamp */
142
+ created: number;
143
+ /** Architecture info */
144
+ architecture: {
145
+ modality: string;
146
+ inputModalities: string[];
147
+ outputModalities: string[];
148
+ tokenizer: string;
149
+ instructType: string;
150
+ };
151
+ }
152
+ /**
153
+ * OpenRouter Provider
154
+ */
155
+ declare class OpenRouterProvider extends BaseAIProvider {
156
+ readonly name = "openrouter";
157
+ private client;
158
+ constructor(apiKey: string);
159
+ /**
160
+ * Send a chat request (non-streaming)
161
+ */
162
+ chat(options: ChatOptions): Promise<ChatResult>;
163
+ /**
164
+ * Send a streaming chat request
165
+ */
166
+ chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;
167
+ /**
168
+ * List available models
169
+ */
170
+ listModels(): Promise<OpenRouterModelInfo[]>;
171
+ }
172
+
173
+ export { type AIProvider, BaseAIProvider, type ChatMessage, type ChatOptions, type ChatResult, type ModelInfo, type ModelPricing, type OpenRouterModelInfo, OpenRouterProvider, type ReasoningConfig, type StreamChunk, type TokenUsage };
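Taken together, `TokenUsage` and `ModelPricing` above are enough to estimate the dollar cost of a call. A sketch under the assumption that the pricing strings parse as decimal $/token values, as the comments state; `estimateCostUSD` is a hypothetical helper, not part of the package:

```typescript
import type { ChatResult, ModelPricing } from '@weisiren000/oiiai';

// Hypothetical helper: estimated cost in USD of one completed chat call.
function estimateCostUSD(result: ChatResult, pricing: ModelPricing): number {
  return (
    result.usage.promptTokens * Number(pricing.prompt) +         // input tokens
    result.usage.completionTokens * Number(pricing.completion) + // output tokens
    Number(pricing.request)                                      // flat per-request fee
  );
}
```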
package/dist/index.js ADDED
@@ -0,0 +1,192 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
+ BaseAIProvider: () => BaseAIProvider,
24
+ OpenRouterProvider: () => OpenRouterProvider
25
+ });
26
+ module.exports = __toCommonJS(index_exports);
27
+
28
+ // src/base.ts
29
+ var BaseAIProvider = class {
30
+ /**
31
+ * Simple conversation: single-turn Q&A (default implementation)
32
+ */
33
+ async ask(model, question, options) {
34
+ const result = await this.chat({
35
+ model,
36
+ messages: [{ role: "user", content: question }],
37
+ ...options
38
+ });
39
+ return result.content;
40
+ }
41
+ /**
42
+ * Conversation with a system prompt (default implementation)
43
+ */
44
+ async askWithSystem(model, systemPrompt, userMessage, options) {
45
+ const result = await this.chat({
46
+ model,
47
+ messages: [
48
+ { role: "system", content: systemPrompt },
49
+ { role: "user", content: userMessage }
50
+ ],
51
+ ...options
52
+ });
53
+ return result.content;
54
+ }
55
+ };
56
+
57
+ // src/providers/openrouter.ts
58
+ var import_sdk = require("@openrouter/sdk");
59
+ function extractTextContent(content) {
60
+ if (typeof content === "string") {
61
+ return content;
62
+ }
63
+ if (Array.isArray(content)) {
64
+ return content.filter(
65
+ (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
66
+ ).map((item) => item.text).join("");
67
+ }
68
+ return "";
69
+ }
70
+ function buildReasoningParam(config) {
71
+ if (!config) return void 0;
72
+ const param = {};
73
+ if (config.effort !== void 0) {
74
+ param.effort = config.effort;
75
+ }
76
+ if (config.maxTokens !== void 0) {
77
+ param.max_tokens = config.maxTokens;
78
+ }
79
+ if (config.exclude !== void 0) {
80
+ param.exclude = config.exclude;
81
+ }
82
+ if (config.enabled !== void 0) {
83
+ param.enabled = config.enabled;
84
+ }
85
+ return Object.keys(param).length > 0 ? param : void 0;
86
+ }
87
+ var OpenRouterProvider = class extends BaseAIProvider {
88
+ name = "openrouter";
89
+ client;
90
+ constructor(apiKey) {
91
+ super();
92
+ this.client = new import_sdk.OpenRouter({ apiKey });
93
+ }
94
+ /**
95
+ * Send a chat request (non-streaming)
96
+ */
97
+ async chat(options) {
98
+ const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;
99
+ const reasoningParam = buildReasoningParam(reasoning);
100
+ const requestParams = {
101
+ model,
102
+ messages,
103
+ temperature,
104
+ maxTokens,
105
+ stream: false
106
+ };
107
+ if (reasoningParam) {
108
+ requestParams.reasoning = reasoningParam;
109
+ }
110
+ const result = await this.client.chat.send(requestParams);
111
+ const choice = result.choices[0];
112
+ if (!choice) {
113
+ throw new Error("No response from model");
114
+ }
115
+ const msg = choice.message;
116
+ const reasoningContent = msg.reasoning_content ?? msg.reasoning ?? null;
117
+ return {
118
+ content: extractTextContent(msg.content),
119
+ reasoning: reasoningContent ? extractTextContent(reasoningContent) : null,
120
+ model: result.model,
121
+ usage: {
122
+ promptTokens: result.usage?.promptTokens ?? 0,
123
+ completionTokens: result.usage?.completionTokens ?? 0,
124
+ totalTokens: result.usage?.totalTokens ?? 0
125
+ },
126
+ finishReason: choice.finishReason
127
+ };
128
+ }
129
+ /**
130
+ * Send a streaming chat request
131
+ */
132
+ async *chatStream(options) {
133
+ const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;
134
+ const reasoningParam = buildReasoningParam(reasoning);
135
+ const requestParams = {
136
+ model,
137
+ messages,
138
+ temperature,
139
+ maxTokens,
140
+ stream: true
141
+ };
142
+ if (reasoningParam) {
143
+ requestParams.reasoning = reasoningParam;
144
+ }
145
+ const stream = await this.client.chat.send(requestParams);
146
+ for await (const chunk of stream) {
147
+ const delta = chunk.choices?.[0]?.delta;
148
+ if (!delta) continue;
149
+ const reasoningContent = delta.reasoning_content ?? delta.reasoning;
150
+ if (reasoningContent) {
151
+ yield { type: "reasoning", text: extractTextContent(reasoningContent) };
152
+ }
153
+ if (delta.content) {
154
+ yield { type: "content", text: extractTextContent(delta.content) };
155
+ }
156
+ }
157
+ }
158
+ /**
159
+ * List available models
160
+ */
161
+ async listModels() {
162
+ const result = await this.client.models.list();
163
+ return (result.data ?? []).map((m) => ({
164
+ id: m.id,
165
+ canonicalSlug: m.canonical_slug ?? m.id,
166
+ name: m.name,
167
+ description: m.description ?? "",
168
+ created: m.created ?? 0,
169
+ pricing: {
170
+ prompt: m.pricing?.prompt ?? "0",
171
+ completion: m.pricing?.completion ?? "0",
172
+ request: m.pricing?.request ?? "0",
173
+ image: m.pricing?.image ?? "0"
174
+ },
175
+ contextLength: m.context_length ?? 0,
176
+ architecture: {
177
+ modality: m.architecture?.modality ?? "",
178
+ inputModalities: m.architecture?.input_modalities ?? [],
179
+ outputModalities: m.architecture?.output_modalities ?? [],
180
+ tokenizer: m.architecture?.tokenizer ?? "",
181
+ instructType: m.architecture?.instruct_type ?? ""
182
+ },
183
+ supportedParameters: m.supported_parameters ?? []
184
+ }));
185
+ }
186
+ };
187
+ // Annotate the CommonJS export names for ESM import in node:
188
+ 0 && (module.exports = {
189
+ BaseAIProvider,
190
+ OpenRouterProvider
191
+ });
192
+ //# sourceMappingURL=index.js.map
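A detail worth noting in the bundle above: `buildReasoningParam` maps the public camelCase `ReasoningConfig` onto the snake_case shape sent to the API, and collapses an empty config to `undefined` so no `reasoning` key is attached at all. A self-contained restatement of that logic for illustration:

```typescript
// Restatement of the compiled buildReasoningParam above, runnable standalone.
type ReasoningConfig = {
  effort?: 'high' | 'medium' | 'low';
  maxTokens?: number;
  exclude?: boolean;
  enabled?: boolean;
};

function buildReasoningParam(config?: ReasoningConfig): Record<string, unknown> | undefined {
  if (!config) return undefined;
  const param: Record<string, unknown> = {};
  if (config.effort !== undefined) param.effort = config.effort;
  if (config.maxTokens !== undefined) param.max_tokens = config.maxTokens; // camelCase -> snake_case
  if (config.exclude !== undefined) param.exclude = config.exclude;
  if (config.enabled !== undefined) param.enabled = config.enabled;
  return Object.keys(param).length > 0 ? param : undefined;
}

console.log(buildReasoningParam({ effort: 'high', maxTokens: 2000 }));
// -> { effort: 'high', max_tokens: 2000 }
console.log(buildReasoningParam({}));
// -> undefined (no `reasoning` key is sent)
```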
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts","../src/base.ts","../src/providers/openrouter.ts"],"sourcesContent":["// 基础接口和类型\r\nexport type { AIProvider } from './base';\r\nexport { BaseAIProvider } from './base';\r\nexport type {\r\n ChatMessage,\r\n ChatOptions,\r\n ChatResult,\r\n ReasoningConfig,\r\n StreamChunk,\r\n TokenUsage,\r\n ModelInfo,\r\n ModelPricing,\r\n} from './types';\r\n\r\n// Provider 实现\r\nexport { OpenRouterProvider } from './providers';\r\nexport type { OpenRouterModelInfo } from './providers';\r\n","/**\r\n * AI Provider 统一接口\r\n */\r\n\r\nimport type { ChatOptions, ChatResult, StreamChunk, ModelInfo } from './types';\r\n\r\n/**\r\n * AI Provider 基础接口\r\n * 所有 provider 实现都需要实现这个接口\r\n */\r\nexport interface AIProvider {\r\n /** Provider 名称 */\r\n readonly name: string;\r\n\r\n /**\r\n * 发送聊天请求(非流式)\r\n */\r\n chat(options: ChatOptions): Promise<ChatResult>;\r\n\r\n /**\r\n * 发送流式聊天请求\r\n */\r\n chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;\r\n\r\n /**\r\n * 简单对话:单轮问答\r\n */\r\n ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;\r\n\r\n /**\r\n * 带系统提示的对话\r\n */\r\n askWithSystem(\r\n model: string,\r\n systemPrompt: string,\r\n userMessage: string,\r\n options?: Omit<ChatOptions, 'model' | 'messages'>\r\n ): Promise<string>;\r\n\r\n /**\r\n * 获取可用模型列表(可选实现)\r\n */\r\n listModels?(): Promise<ModelInfo[]>;\r\n}\r\n\r\n/**\r\n * AI Provider 基础抽象类\r\n * 提供一些通用实现,子类只需实现核心方法\r\n */\r\nexport abstract class BaseAIProvider implements AIProvider {\r\n abstract readonly name: string;\r\n\r\n abstract chat(options: ChatOptions): Promise<ChatResult>;\r\n\r\n abstract chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;\r\n\r\n /**\r\n * 简单对话:单轮问答(默认实现)\r\n */\r\n async ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string> {\r\n const result = await this.chat({\r\n model,\r\n messages: [{ role: 'user', content: question }],\r\n ...options,\r\n });\r\n return result.content;\r\n }\r\n\r\n /**\r\n * 带系统提示的对话(默认实现)\r\n */\r\n async askWithSystem(\r\n model: string,\r\n systemPrompt: string,\r\n userMessage: string,\r\n options?: Omit<ChatOptions, 'model' | 'messages'>\r\n ): Promise<string> {\r\n const result = await this.chat({\r\n model,\r\n messages: [\r\n { role: 'system', content: systemPrompt },\r\n { role: 'user', content: userMessage },\r\n ],\r\n ...options,\r\n });\r\n return result.content;\r\n }\r\n}\r\n","/**\r\n * OpenRouter Provider 实现\r\n */\r\n\r\nimport { OpenRouter } from '@openrouter/sdk';\r\nimport { BaseAIProvider } from '../base';\r\nimport type { ChatOptions, ChatResult, StreamChunk, ModelInfo, ReasoningConfig } from '../types';\r\n\r\n/**\r\n * OpenRouter 扩展的模型信息\r\n */\r\nexport interface OpenRouterModelInfo extends ModelInfo {\r\n /** 规范化的 slug */\r\n canonicalSlug: string;\r\n /** 创建时间戳 */\r\n created: number;\r\n /** 架构信息 */\r\n architecture: {\r\n modality: string;\r\n inputModalities: string[];\r\n outputModalities: string[];\r\n tokenizer: string;\r\n instructType: string;\r\n };\r\n}\r\n\r\n/**\r\n * 从 SDK 返回的 content 中提取文本\r\n */\r\nfunction extractTextContent(content: unknown): string {\r\n if (typeof content === 'string') {\r\n return content;\r\n }\r\n if (Array.isArray(content)) {\r\n return content\r\n .filter(\r\n (item): item is { type: 'text'; text: string } =>\r\n typeof item === 'object' && item !== null && item.type === 'text' && typeof item.text === 'string'\r\n )\r\n 
.map((item) => item.text)\r\n .join('');\r\n }\r\n return '';\r\n}\r\n\r\n/**\r\n * 构建 reasoning 参数(转换为 API 需要的 snake_case 格式)\r\n */\r\nfunction buildReasoningParam(config?: ReasoningConfig): Record<string, unknown> | undefined {\r\n if (!config) return undefined;\r\n\r\n const param: Record<string, unknown> = {};\r\n\r\n if (config.effort !== undefined) {\r\n param.effort = config.effort;\r\n }\r\n if (config.maxTokens !== undefined) {\r\n param.max_tokens = config.maxTokens;\r\n }\r\n if (config.exclude !== undefined) {\r\n param.exclude = config.exclude;\r\n }\r\n if (config.enabled !== undefined) {\r\n param.enabled = config.enabled;\r\n }\r\n\r\n return Object.keys(param).length > 0 ? param : undefined;\r\n}\r\n\r\n\r\n/**\r\n * OpenRouter Provider\r\n */\r\nexport class OpenRouterProvider extends BaseAIProvider {\r\n readonly name = 'openrouter';\r\n private client: OpenRouter;\r\n\r\n constructor(apiKey: string) {\r\n super();\r\n this.client = new OpenRouter({ apiKey });\r\n }\r\n\r\n /**\r\n * 发送聊天请求(非流式)\r\n */\r\n async chat(options: ChatOptions): Promise<ChatResult> {\r\n const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;\r\n\r\n const reasoningParam = buildReasoningParam(reasoning);\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const requestParams: any = {\r\n model,\r\n messages,\r\n temperature,\r\n maxTokens,\r\n stream: false,\r\n };\r\n\r\n if (reasoningParam) {\r\n requestParams.reasoning = reasoningParam;\r\n }\r\n\r\n const result = await this.client.chat.send(requestParams);\r\n\r\n const choice = result.choices[0];\r\n if (!choice) {\r\n throw new Error('No response from model');\r\n }\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const msg = choice.message as any;\r\n const reasoningContent = msg.reasoning_content ?? msg.reasoning ?? null;\r\n\r\n return {\r\n content: extractTextContent(msg.content),\r\n reasoning: reasoningContent ? extractTextContent(reasoningContent) : null,\r\n model: result.model,\r\n usage: {\r\n promptTokens: result.usage?.promptTokens ?? 0,\r\n completionTokens: result.usage?.completionTokens ?? 0,\r\n totalTokens: result.usage?.totalTokens ?? 0,\r\n },\r\n finishReason: choice.finishReason,\r\n };\r\n }\r\n\r\n /**\r\n * 发送流式聊天请求\r\n */\r\n async *chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown> {\r\n const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;\r\n\r\n const reasoningParam = buildReasoningParam(reasoning);\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const requestParams: any = {\r\n model,\r\n messages,\r\n temperature,\r\n maxTokens,\r\n stream: true,\r\n };\r\n\r\n if (reasoningParam) {\r\n requestParams.reasoning = reasoningParam;\r\n }\r\n\r\n const stream = (await this.client.chat.send(requestParams)) as unknown as AsyncIterable<{\r\n choices?: Array<{ delta?: unknown }>;\r\n }>;\r\n\r\n for await (const chunk of stream) {\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const delta = chunk.choices?.[0]?.delta as any;\r\n if (!delta) continue;\r\n\r\n const reasoningContent = delta.reasoning_content ?? 
delta.reasoning;\r\n if (reasoningContent) {\r\n yield { type: 'reasoning', text: extractTextContent(reasoningContent) };\r\n }\r\n\r\n if (delta.content) {\r\n yield { type: 'content', text: extractTextContent(delta.content) };\r\n }\r\n }\r\n }\r\n\r\n /**\r\n * 获取可用模型列表\r\n */\r\n async listModels(): Promise<OpenRouterModelInfo[]> {\r\n const result = await this.client.models.list();\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n return (result.data ?? []).map((m: any) => ({\r\n id: m.id,\r\n canonicalSlug: m.canonical_slug ?? m.id,\r\n name: m.name,\r\n description: m.description ?? '',\r\n created: m.created ?? 0,\r\n pricing: {\r\n prompt: m.pricing?.prompt ?? '0',\r\n completion: m.pricing?.completion ?? '0',\r\n request: m.pricing?.request ?? '0',\r\n image: m.pricing?.image ?? '0',\r\n },\r\n contextLength: m.context_length ?? 0,\r\n architecture: {\r\n modality: m.architecture?.modality ?? '',\r\n inputModalities: m.architecture?.input_modalities ?? [],\r\n outputModalities: m.architecture?.output_modalities ?? [],\r\n tokenizer: m.architecture?.tokenizer ?? '',\r\n instructType: m.architecture?.instruct_type ?? '',\r\n },\r\n supportedParameters: m.supported_parameters ?? [],\r\n }));\r\n }\r\n}\r\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACiDO,IAAe,iBAAf,MAAoD;AAAA;AAAA;AAAA;AAAA,EAUzD,MAAM,IAAI,OAAe,UAAkB,SAAoE;AAC7G,UAAM,SAAS,MAAM,KAAK,KAAK;AAAA,MAC7B;AAAA,MACA,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,SAAS,CAAC;AAAA,MAC9C,GAAG;AAAA,IACL,CAAC;AACD,WAAO,OAAO;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,cACJ,OACA,cACA,aACA,SACiB;AACjB,UAAM,SAAS,MAAM,KAAK,KAAK;AAAA,MAC7B;AAAA,MACA,UAAU;AAAA,QACR,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,YAAY;AAAA,MACvC;AAAA,MACA,GAAG;AAAA,IACL,CAAC;AACD,WAAO,OAAO;AAAA,EAChB;AACF;;;ACnFA,iBAA2B;AAyB3B,SAAS,mBAAmB,SAA0B;AACpD,MAAI,OAAO,YAAY,UAAU;AAC/B,WAAO;AAAA,EACT;AACA,MAAI,MAAM,QAAQ,OAAO,GAAG;AAC1B,WAAO,QACJ;AAAA,MACC,CAAC,SACC,OAAO,SAAS,YAAY,SAAS,QAAQ,KAAK,SAAS,UAAU,OAAO,KAAK,SAAS;AAAA,IAC9F,EACC,IAAI,CAAC,SAAS,KAAK,IAAI,EACvB,KAAK,EAAE;AAAA,EACZ;AACA,SAAO;AACT;AAKA,SAAS,oBAAoB,QAA+D;AAC1F,MAAI,CAAC,OAAQ,QAAO;AAEpB,QAAM,QAAiC,CAAC;AAExC,MAAI,OAAO,WAAW,QAAW;AAC/B,UAAM,SAAS,OAAO;AAAA,EACxB;AACA,MAAI,OAAO,cAAc,QAAW;AAClC,UAAM,aAAa,OAAO;AAAA,EAC5B;AACA,MAAI,OAAO,YAAY,QAAW;AAChC,UAAM,UAAU,OAAO;AAAA,EACzB;AACA,MAAI,OAAO,YAAY,QAAW;AAChC,UAAM,UAAU,OAAO;AAAA,EACzB;AAEA,SAAO,OAAO,KAAK,KAAK,EAAE,SAAS,IAAI,QAAQ;AACjD;AAMO,IAAM,qBAAN,cAAiC,eAAe;AAAA,EAC5C,OAAO;AAAA,EACR;AAAA,EAER,YAAY,QAAgB;AAC1B,UAAM;AACN,SAAK,SAAS,IAAI,sBAAW,EAAE,OAAO,CAAC;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,KAAK,SAA2C;AACpD,UAAM,EAAE,OAAO,UAAU,cAAc,KAAK,WAAW,UAAU,IAAI;AAErE,UAAM,iBAAiB,oBAAoB,SAAS;AAGpD,UAAM,gBAAqB;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,IACV;AAEA,QAAI,gBAAgB;AAClB,oBAAc,YAAY;AAAA,IAC5B;AAEA,UAAM,SAAS,MAAM,KAAK,OAAO,KAAK,KAAK,aAAa;AAExD,UAAM,SAAS,OAAO,QAAQ,CAAC;AAC/B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,wBAAwB;AAAA,IAC1C;AAGA,UAAM,MAAM,OAAO;AACnB,UAAM,mBAAmB,IAAI,qBAAqB,IAAI,aAAa;AAEnE,WAAO;AAAA,MACL,SAAS,mBAAmB,IAAI,OAAO;AAAA,MACvC,WAAW,mBAAmB,mBAAmB,gBAAgB,IAAI;AAAA,MACrE,OAAO,OAAO;AAAA,MACd,OAAO;AAAA,QACL,cAAc,OAAO,OAAO,gBAAgB;AAAA,QAC5C,kBAAkB,OAAO,OAAO,oBAAoB;AAAA,QACpD,aAAa,OAAO,OAAO,eAAe;AAAA,MAC5C;AAAA,MACA,cAAc,OAAO;AAAA,IACvB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,WAAW,SAAkE;AAClF,UAAM,EAAE,OAAO,UAAU,cAAc,KAAK,WAAW,UAAU,IAAI;AAErE,UAAM,iBAAiB,oBAAoB,SAAS;AAGpD,UAAM,gBAAqB;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,IACV;AAEA,QAAI,gBAAgB;AAClB,oBAAc,YAAY;AAAA,IAC5B;AAEA
,UAAM,SAAU,MAAM,KAAK,OAAO,KAAK,KAAK,aAAa;AAIzD,qBAAiB,SAAS,QAAQ;AAEhC,YAAM,QAAQ,MAAM,UAAU,CAAC,GAAG;AAClC,UAAI,CAAC,MAAO;AAEZ,YAAM,mBAAmB,MAAM,qBAAqB,MAAM;AAC1D,UAAI,kBAAkB;AACpB,cAAM,EAAE,MAAM,aAAa,MAAM,mBAAmB,gBAAgB,EAAE;AAAA,MACxE;AAEA,UAAI,MAAM,SAAS;AACjB,cAAM,EAAE,MAAM,WAAW,MAAM,mBAAmB,MAAM,OAAO,EAAE;AAAA,MACnE;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,aAA6C;AACjD,UAAM,SAAS,MAAM,KAAK,OAAO,OAAO,KAAK;AAG7C,YAAQ,OAAO,QAAQ,CAAC,GAAG,IAAI,CAAC,OAAY;AAAA,MAC1C,IAAI,EAAE;AAAA,MACN,eAAe,EAAE,kBAAkB,EAAE;AAAA,MACrC,MAAM,EAAE;AAAA,MACR,aAAa,EAAE,eAAe;AAAA,MAC9B,SAAS,EAAE,WAAW;AAAA,MACtB,SAAS;AAAA,QACP,QAAQ,EAAE,SAAS,UAAU;AAAA,QAC7B,YAAY,EAAE,SAAS,cAAc;AAAA,QACrC,SAAS,EAAE,SAAS,WAAW;AAAA,QAC/B,OAAO,EAAE,SAAS,SAAS;AAAA,MAC7B;AAAA,MACA,eAAe,EAAE,kBAAkB;AAAA,MACnC,cAAc;AAAA,QACZ,UAAU,EAAE,cAAc,YAAY;AAAA,QACtC,iBAAiB,EAAE,cAAc,oBAAoB,CAAC;AAAA,QACtD,kBAAkB,EAAE,cAAc,qBAAqB,CAAC;AAAA,QACxD,WAAW,EAAE,cAAc,aAAa;AAAA,QACxC,cAAc,EAAE,cAAc,iBAAiB;AAAA,MACjD;AAAA,MACA,qBAAqB,EAAE,wBAAwB,CAAC;AAAA,IAClD,EAAE;AAAA,EACJ;AACF;","names":[]}
package/dist/index.mjs ADDED
@@ -0,0 +1,164 @@
1
+ // src/base.ts
2
+ var BaseAIProvider = class {
3
+ /**
4
+ * Simple conversation: single-turn Q&A (default implementation)
5
+ */
6
+ async ask(model, question, options) {
7
+ const result = await this.chat({
8
+ model,
9
+ messages: [{ role: "user", content: question }],
10
+ ...options
11
+ });
12
+ return result.content;
13
+ }
14
+ /**
15
+ * Conversation with a system prompt (default implementation)
16
+ */
17
+ async askWithSystem(model, systemPrompt, userMessage, options) {
18
+ const result = await this.chat({
19
+ model,
20
+ messages: [
21
+ { role: "system", content: systemPrompt },
22
+ { role: "user", content: userMessage }
23
+ ],
24
+ ...options
25
+ });
26
+ return result.content;
27
+ }
28
+ };
29
+
30
+ // src/providers/openrouter.ts
31
+ import { OpenRouter } from "@openrouter/sdk";
32
+ function extractTextContent(content) {
33
+ if (typeof content === "string") {
34
+ return content;
35
+ }
36
+ if (Array.isArray(content)) {
37
+ return content.filter(
38
+ (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
39
+ ).map((item) => item.text).join("");
40
+ }
41
+ return "";
42
+ }
43
+ function buildReasoningParam(config) {
44
+ if (!config) return void 0;
45
+ const param = {};
46
+ if (config.effort !== void 0) {
47
+ param.effort = config.effort;
48
+ }
49
+ if (config.maxTokens !== void 0) {
50
+ param.max_tokens = config.maxTokens;
51
+ }
52
+ if (config.exclude !== void 0) {
53
+ param.exclude = config.exclude;
54
+ }
55
+ if (config.enabled !== void 0) {
56
+ param.enabled = config.enabled;
57
+ }
58
+ return Object.keys(param).length > 0 ? param : void 0;
59
+ }
60
+ var OpenRouterProvider = class extends BaseAIProvider {
61
+ name = "openrouter";
62
+ client;
63
+ constructor(apiKey) {
64
+ super();
65
+ this.client = new OpenRouter({ apiKey });
66
+ }
67
+ /**
68
+ * Send a chat request (non-streaming)
69
+ */
70
+ async chat(options) {
71
+ const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;
72
+ const reasoningParam = buildReasoningParam(reasoning);
73
+ const requestParams = {
74
+ model,
75
+ messages,
76
+ temperature,
77
+ maxTokens,
78
+ stream: false
79
+ };
80
+ if (reasoningParam) {
81
+ requestParams.reasoning = reasoningParam;
82
+ }
83
+ const result = await this.client.chat.send(requestParams);
84
+ const choice = result.choices[0];
85
+ if (!choice) {
86
+ throw new Error("No response from model");
87
+ }
88
+ const msg = choice.message;
89
+ const reasoningContent = msg.reasoning_content ?? msg.reasoning ?? null;
90
+ return {
91
+ content: extractTextContent(msg.content),
92
+ reasoning: reasoningContent ? extractTextContent(reasoningContent) : null,
93
+ model: result.model,
94
+ usage: {
95
+ promptTokens: result.usage?.promptTokens ?? 0,
96
+ completionTokens: result.usage?.completionTokens ?? 0,
97
+ totalTokens: result.usage?.totalTokens ?? 0
98
+ },
99
+ finishReason: choice.finishReason
100
+ };
101
+ }
102
+ /**
103
+ * Send a streaming chat request
104
+ */
105
+ async *chatStream(options) {
106
+ const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;
107
+ const reasoningParam = buildReasoningParam(reasoning);
108
+ const requestParams = {
109
+ model,
110
+ messages,
111
+ temperature,
112
+ maxTokens,
113
+ stream: true
114
+ };
115
+ if (reasoningParam) {
116
+ requestParams.reasoning = reasoningParam;
117
+ }
118
+ const stream = await this.client.chat.send(requestParams);
119
+ for await (const chunk of stream) {
120
+ const delta = chunk.choices?.[0]?.delta;
121
+ if (!delta) continue;
122
+ const reasoningContent = delta.reasoning_content ?? delta.reasoning;
123
+ if (reasoningContent) {
124
+ yield { type: "reasoning", text: extractTextContent(reasoningContent) };
125
+ }
126
+ if (delta.content) {
127
+ yield { type: "content", text: extractTextContent(delta.content) };
128
+ }
129
+ }
130
+ }
131
+ /**
132
+ * List available models
133
+ */
134
+ async listModels() {
135
+ const result = await this.client.models.list();
136
+ return (result.data ?? []).map((m) => ({
137
+ id: m.id,
138
+ canonicalSlug: m.canonical_slug ?? m.id,
139
+ name: m.name,
140
+ description: m.description ?? "",
141
+ created: m.created ?? 0,
142
+ pricing: {
143
+ prompt: m.pricing?.prompt ?? "0",
144
+ completion: m.pricing?.completion ?? "0",
145
+ request: m.pricing?.request ?? "0",
146
+ image: m.pricing?.image ?? "0"
147
+ },
148
+ contextLength: m.context_length ?? 0,
149
+ architecture: {
150
+ modality: m.architecture?.modality ?? "",
151
+ inputModalities: m.architecture?.input_modalities ?? [],
152
+ outputModalities: m.architecture?.output_modalities ?? [],
153
+ tokenizer: m.architecture?.tokenizer ?? "",
154
+ instructType: m.architecture?.instruct_type ?? ""
155
+ },
156
+ supportedParameters: m.supported_parameters ?? []
157
+ }));
158
+ }
159
+ };
160
+ export {
161
+ BaseAIProvider,
162
+ OpenRouterProvider
163
+ };
164
+ //# sourceMappingURL=index.mjs.map
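Because `chatStream` interleaves `reasoning` and `content` chunks, a consumer that wants them separated can bucket by `type`. A minimal sketch against the ESM build, assuming a valid API key and a reasoning-capable model:

```typescript
import { OpenRouterProvider } from '@weisiren000/oiiai';

const ai = new OpenRouterProvider(process.env.OPENROUTER_API_KEY ?? '');

let reasoning = '';
let answer = '';
for await (const chunk of ai.chatStream({
  model: 'deepseek/deepseek-r1',
  messages: [{ role: 'user', content: 'Which is larger, 9.11 or 9.9?' }],
  reasoning: { effort: 'high' },
})) {
  // Each chunk is tagged, so the two streams never mix.
  if (chunk.type === 'reasoning') reasoning += chunk.text;
  else answer += chunk.text;
}

console.log('--- reasoning ---\n' + reasoning);
console.log('--- answer ---\n' + answer);
```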
package/dist/index.mjs.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/base.ts","../src/providers/openrouter.ts"],"sourcesContent":["/**\r\n * AI Provider 统一接口\r\n */\r\n\r\nimport type { ChatOptions, ChatResult, StreamChunk, ModelInfo } from './types';\r\n\r\n/**\r\n * AI Provider 基础接口\r\n * 所有 provider 实现都需要实现这个接口\r\n */\r\nexport interface AIProvider {\r\n /** Provider 名称 */\r\n readonly name: string;\r\n\r\n /**\r\n * 发送聊天请求(非流式)\r\n */\r\n chat(options: ChatOptions): Promise<ChatResult>;\r\n\r\n /**\r\n * 发送流式聊天请求\r\n */\r\n chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;\r\n\r\n /**\r\n * 简单对话:单轮问答\r\n */\r\n ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string>;\r\n\r\n /**\r\n * 带系统提示的对话\r\n */\r\n askWithSystem(\r\n model: string,\r\n systemPrompt: string,\r\n userMessage: string,\r\n options?: Omit<ChatOptions, 'model' | 'messages'>\r\n ): Promise<string>;\r\n\r\n /**\r\n * 获取可用模型列表(可选实现)\r\n */\r\n listModels?(): Promise<ModelInfo[]>;\r\n}\r\n\r\n/**\r\n * AI Provider 基础抽象类\r\n * 提供一些通用实现,子类只需实现核心方法\r\n */\r\nexport abstract class BaseAIProvider implements AIProvider {\r\n abstract readonly name: string;\r\n\r\n abstract chat(options: ChatOptions): Promise<ChatResult>;\r\n\r\n abstract chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;\r\n\r\n /**\r\n * 简单对话:单轮问答(默认实现)\r\n */\r\n async ask(model: string, question: string, options?: Omit<ChatOptions, 'model' | 'messages'>): Promise<string> {\r\n const result = await this.chat({\r\n model,\r\n messages: [{ role: 'user', content: question }],\r\n ...options,\r\n });\r\n return result.content;\r\n }\r\n\r\n /**\r\n * 带系统提示的对话(默认实现)\r\n */\r\n async askWithSystem(\r\n model: string,\r\n systemPrompt: string,\r\n userMessage: string,\r\n options?: Omit<ChatOptions, 'model' | 'messages'>\r\n ): Promise<string> {\r\n const result = await this.chat({\r\n model,\r\n messages: [\r\n { role: 'system', content: systemPrompt },\r\n { role: 'user', content: userMessage },\r\n ],\r\n ...options,\r\n });\r\n return result.content;\r\n }\r\n}\r\n","/**\r\n * OpenRouter Provider 实现\r\n */\r\n\r\nimport { OpenRouter } from '@openrouter/sdk';\r\nimport { BaseAIProvider } from '../base';\r\nimport type { ChatOptions, ChatResult, StreamChunk, ModelInfo, ReasoningConfig } from '../types';\r\n\r\n/**\r\n * OpenRouter 扩展的模型信息\r\n */\r\nexport interface OpenRouterModelInfo extends ModelInfo {\r\n /** 规范化的 slug */\r\n canonicalSlug: string;\r\n /** 创建时间戳 */\r\n created: number;\r\n /** 架构信息 */\r\n architecture: {\r\n modality: string;\r\n inputModalities: string[];\r\n outputModalities: string[];\r\n tokenizer: string;\r\n instructType: string;\r\n };\r\n}\r\n\r\n/**\r\n * 从 SDK 返回的 content 中提取文本\r\n */\r\nfunction extractTextContent(content: unknown): string {\r\n if (typeof content === 'string') {\r\n return content;\r\n }\r\n if (Array.isArray(content)) {\r\n return content\r\n .filter(\r\n (item): item is { type: 'text'; text: string } =>\r\n typeof item === 'object' && item !== null && item.type === 'text' && typeof item.text === 'string'\r\n )\r\n .map((item) => item.text)\r\n .join('');\r\n }\r\n return '';\r\n}\r\n\r\n/**\r\n * 构建 reasoning 参数(转换为 API 需要的 snake_case 格式)\r\n */\r\nfunction buildReasoningParam(config?: ReasoningConfig): Record<string, unknown> | undefined {\r\n if (!config) return undefined;\r\n\r\n const param: Record<string, unknown> = {};\r\n\r\n if (config.effort !== undefined) {\r\n param.effort = config.effort;\r\n }\r\n if (config.maxTokens !== undefined) 
{\r\n param.max_tokens = config.maxTokens;\r\n }\r\n if (config.exclude !== undefined) {\r\n param.exclude = config.exclude;\r\n }\r\n if (config.enabled !== undefined) {\r\n param.enabled = config.enabled;\r\n }\r\n\r\n return Object.keys(param).length > 0 ? param : undefined;\r\n}\r\n\r\n\r\n/**\r\n * OpenRouter Provider\r\n */\r\nexport class OpenRouterProvider extends BaseAIProvider {\r\n readonly name = 'openrouter';\r\n private client: OpenRouter;\r\n\r\n constructor(apiKey: string) {\r\n super();\r\n this.client = new OpenRouter({ apiKey });\r\n }\r\n\r\n /**\r\n * 发送聊天请求(非流式)\r\n */\r\n async chat(options: ChatOptions): Promise<ChatResult> {\r\n const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;\r\n\r\n const reasoningParam = buildReasoningParam(reasoning);\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const requestParams: any = {\r\n model,\r\n messages,\r\n temperature,\r\n maxTokens,\r\n stream: false,\r\n };\r\n\r\n if (reasoningParam) {\r\n requestParams.reasoning = reasoningParam;\r\n }\r\n\r\n const result = await this.client.chat.send(requestParams);\r\n\r\n const choice = result.choices[0];\r\n if (!choice) {\r\n throw new Error('No response from model');\r\n }\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const msg = choice.message as any;\r\n const reasoningContent = msg.reasoning_content ?? msg.reasoning ?? null;\r\n\r\n return {\r\n content: extractTextContent(msg.content),\r\n reasoning: reasoningContent ? extractTextContent(reasoningContent) : null,\r\n model: result.model,\r\n usage: {\r\n promptTokens: result.usage?.promptTokens ?? 0,\r\n completionTokens: result.usage?.completionTokens ?? 0,\r\n totalTokens: result.usage?.totalTokens ?? 0,\r\n },\r\n finishReason: choice.finishReason,\r\n };\r\n }\r\n\r\n /**\r\n * 发送流式聊天请求\r\n */\r\n async *chatStream(options: ChatOptions): AsyncGenerator<StreamChunk, void, unknown> {\r\n const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;\r\n\r\n const reasoningParam = buildReasoningParam(reasoning);\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const requestParams: any = {\r\n model,\r\n messages,\r\n temperature,\r\n maxTokens,\r\n stream: true,\r\n };\r\n\r\n if (reasoningParam) {\r\n requestParams.reasoning = reasoningParam;\r\n }\r\n\r\n const stream = (await this.client.chat.send(requestParams)) as unknown as AsyncIterable<{\r\n choices?: Array<{ delta?: unknown }>;\r\n }>;\r\n\r\n for await (const chunk of stream) {\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n const delta = chunk.choices?.[0]?.delta as any;\r\n if (!delta) continue;\r\n\r\n const reasoningContent = delta.reasoning_content ?? delta.reasoning;\r\n if (reasoningContent) {\r\n yield { type: 'reasoning', text: extractTextContent(reasoningContent) };\r\n }\r\n\r\n if (delta.content) {\r\n yield { type: 'content', text: extractTextContent(delta.content) };\r\n }\r\n }\r\n }\r\n\r\n /**\r\n * 获取可用模型列表\r\n */\r\n async listModels(): Promise<OpenRouterModelInfo[]> {\r\n const result = await this.client.models.list();\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\r\n return (result.data ?? []).map((m: any) => ({\r\n id: m.id,\r\n canonicalSlug: m.canonical_slug ?? m.id,\r\n name: m.name,\r\n description: m.description ?? '',\r\n created: m.created ?? 0,\r\n pricing: {\r\n prompt: m.pricing?.prompt ?? '0',\r\n completion: m.pricing?.completion ?? 
'0',\r\n request: m.pricing?.request ?? '0',\r\n image: m.pricing?.image ?? '0',\r\n },\r\n contextLength: m.context_length ?? 0,\r\n architecture: {\r\n modality: m.architecture?.modality ?? '',\r\n inputModalities: m.architecture?.input_modalities ?? [],\r\n outputModalities: m.architecture?.output_modalities ?? [],\r\n tokenizer: m.architecture?.tokenizer ?? '',\r\n instructType: m.architecture?.instruct_type ?? '',\r\n },\r\n supportedParameters: m.supported_parameters ?? [],\r\n }));\r\n }\r\n}\r\n"],"mappings":";AAiDO,IAAe,iBAAf,MAAoD;AAAA;AAAA;AAAA;AAAA,EAUzD,MAAM,IAAI,OAAe,UAAkB,SAAoE;AAC7G,UAAM,SAAS,MAAM,KAAK,KAAK;AAAA,MAC7B;AAAA,MACA,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,SAAS,CAAC;AAAA,MAC9C,GAAG;AAAA,IACL,CAAC;AACD,WAAO,OAAO;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,cACJ,OACA,cACA,aACA,SACiB;AACjB,UAAM,SAAS,MAAM,KAAK,KAAK;AAAA,MAC7B;AAAA,MACA,UAAU;AAAA,QACR,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,YAAY;AAAA,MACvC;AAAA,MACA,GAAG;AAAA,IACL,CAAC;AACD,WAAO,OAAO;AAAA,EAChB;AACF;;;ACnFA,SAAS,kBAAkB;AAyB3B,SAAS,mBAAmB,SAA0B;AACpD,MAAI,OAAO,YAAY,UAAU;AAC/B,WAAO;AAAA,EACT;AACA,MAAI,MAAM,QAAQ,OAAO,GAAG;AAC1B,WAAO,QACJ;AAAA,MACC,CAAC,SACC,OAAO,SAAS,YAAY,SAAS,QAAQ,KAAK,SAAS,UAAU,OAAO,KAAK,SAAS;AAAA,IAC9F,EACC,IAAI,CAAC,SAAS,KAAK,IAAI,EACvB,KAAK,EAAE;AAAA,EACZ;AACA,SAAO;AACT;AAKA,SAAS,oBAAoB,QAA+D;AAC1F,MAAI,CAAC,OAAQ,QAAO;AAEpB,QAAM,QAAiC,CAAC;AAExC,MAAI,OAAO,WAAW,QAAW;AAC/B,UAAM,SAAS,OAAO;AAAA,EACxB;AACA,MAAI,OAAO,cAAc,QAAW;AAClC,UAAM,aAAa,OAAO;AAAA,EAC5B;AACA,MAAI,OAAO,YAAY,QAAW;AAChC,UAAM,UAAU,OAAO;AAAA,EACzB;AACA,MAAI,OAAO,YAAY,QAAW;AAChC,UAAM,UAAU,OAAO;AAAA,EACzB;AAEA,SAAO,OAAO,KAAK,KAAK,EAAE,SAAS,IAAI,QAAQ;AACjD;AAMO,IAAM,qBAAN,cAAiC,eAAe;AAAA,EAC5C,OAAO;AAAA,EACR;AAAA,EAER,YAAY,QAAgB;AAC1B,UAAM;AACN,SAAK,SAAS,IAAI,WAAW,EAAE,OAAO,CAAC;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,KAAK,SAA2C;AACpD,UAAM,EAAE,OAAO,UAAU,cAAc,KAAK,WAAW,UAAU,IAAI;AAErE,UAAM,iBAAiB,oBAAoB,SAAS;AAGpD,UAAM,gBAAqB;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,IACV;AAEA,QAAI,gBAAgB;AAClB,oBAAc,YAAY;AAAA,IAC5B;AAEA,UAAM,SAAS,MAAM,KAAK,OAAO,KAAK,KAAK,aAAa;AAExD,UAAM,SAAS,OAAO,QAAQ,CAAC;AAC/B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,wBAAwB;AAAA,IAC1C;AAGA,UAAM,MAAM,OAAO;AACnB,UAAM,mBAAmB,IAAI,qBAAqB,IAAI,aAAa;AAEnE,WAAO;AAAA,MACL,SAAS,mBAAmB,IAAI,OAAO;AAAA,MACvC,WAAW,mBAAmB,mBAAmB,gBAAgB,IAAI;AAAA,MACrE,OAAO,OAAO;AAAA,MACd,OAAO;AAAA,QACL,cAAc,OAAO,OAAO,gBAAgB;AAAA,QAC5C,kBAAkB,OAAO,OAAO,oBAAoB;AAAA,QACpD,aAAa,OAAO,OAAO,eAAe;AAAA,MAC5C;AAAA,MACA,cAAc,OAAO;AAAA,IACvB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,WAAW,SAAkE;AAClF,UAAM,EAAE,OAAO,UAAU,cAAc,KAAK,WAAW,UAAU,IAAI;AAErE,UAAM,iBAAiB,oBAAoB,SAAS;AAGpD,UAAM,gBAAqB;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,IACV;AAEA,QAAI,gBAAgB;AAClB,oBAAc,YAAY;AAAA,IAC5B;AAEA,UAAM,SAAU,MAAM,KAAK,OAAO,KAAK,KAAK,aAAa;AAIzD,qBAAiB,SAAS,QAAQ;AAEhC,YAAM,QAAQ,MAAM,UAAU,CAAC,GAAG;AAClC,UAAI,CAAC,MAAO;AAEZ,YAAM,mBAAmB,MAAM,qBAAqB,MAAM;AAC1D,UAAI,kBAAkB;AACpB,cAAM,EAAE,MAAM,aAAa,MAAM,mBAAmB,gBAAgB,EAAE;AAAA,MACxE;AAEA,UAAI,MAAM,SAAS;AACjB,cAAM,EAAE,MAAM,WAAW,MAAM,mBAAmB,MAAM,OAAO,EAAE;AAAA,MACnE;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,aAA6C;AACjD,UAAM,SAAS,MAAM,KAAK,OAAO,OAAO,KAAK;AAG7C,YAAQ,OAAO,QAAQ,CAAC,GAAG,IAAI,CAAC,OAAY;AAAA,MAC1C,IAAI,EAAE;AAAA,MACN,eAAe,EAAE,kBAAkB,EAAE;AAAA,MACrC,MAAM,EAAE;AAAA,MACR,aAAa,EAAE,eAAe;AAAA,MAC9B,SAAS,EAAE,WAAW;AAAA,MACtB,SAAS;AAAA,QACP,QAAQ,EAAE,SAAS,UAAU;AAAA,QAC7B,YAAY,EAAE,SAAS,cAAc;AAAA,QACrC,SAAS,EAAE,SAAS,WAAW;AAAA,QAC/B,OAAO,EAAE,SAAS,SAAS;AAAA,MAC7B;AAAA,MACA,eAAe,EAAE,kBAAkB;AAAA,MACnC,cAAc;AAAA,QACZ,UAAU,EA
AE,cAAc,YAAY;AAAA,QACtC,iBAAiB,EAAE,cAAc,oBAAoB,CAAC;AAAA,QACtD,kBAAkB,EAAE,cAAc,qBAAqB,CAAC;AAAA,QACxD,WAAW,EAAE,cAAc,aAAa;AAAA,QACxC,cAAc,EAAE,cAAc,iBAAiB;AAAA,MACjD;AAAA,MACA,qBAAqB,EAAE,wBAAwB,CAAC;AAAA,IAClD,EAAE;AAAA,EACJ;AACF;","names":[]}
package/package.json ADDED
@@ -0,0 +1,49 @@
1
+ {
2
+ "name": "@weisiren000/oiiai",
3
+ "version": "0.1.0",
4
+ "description": "统一的 AI Provider 接口封装,支持 OpenRouter、OpenAI、Anthropic 等",
5
+ "main": "dist/index.js",
6
+ "module": "dist/index.mjs",
7
+ "types": "dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "import": "./dist/index.mjs",
11
+ "require": "./dist/index.js",
12
+ "types": "./dist/index.d.ts"
13
+ }
14
+ },
15
+ "files": [
16
+ "dist"
17
+ ],
18
+ "scripts": {
19
+ "build": "tsup",
20
+ "dev": "tsup --watch",
21
+ "prepublishOnly": "npm run build",
22
+ "test": "tsx src/__tests__/openrouter.test.ts"
23
+ },
24
+ "keywords": [
25
+ "ai",
26
+ "llm",
27
+ "openrouter",
28
+ "openai",
29
+ "anthropic",
30
+ "provider"
31
+ ],
32
+ "author": "",
33
+ "license": "MIT",
34
+ "dependencies": {
35
+ "@openrouter/sdk": "^0.2.11"
36
+ },
37
+ "devDependencies": {
38
+ "tsup": "^8.0.0",
39
+ "typescript": "^5.0.0",
40
+ "tsx": "^4.0.0",
41
+ "dotenv": "^16.0.0"
42
+ },
43
+ "peerDependencies": {
44
+ "typescript": ">=5.0.0"
45
+ },
46
+ "engines": {
47
+ "node": ">=18.0.0"
48
+ }
49
+ }
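The `exports` map routes `import` to the ESM bundle and `require` to the CJS bundle, so both module systems see the same API (one caveat: TypeScript's guidance is to list the `types` condition first, since conditions are matched in order; here the top-level `types` field still covers most resolvers). A quick check of both paths from a single ESM file, assuming Node >= 18 per `engines`:

```typescript
// ESM consumers resolve ./dist/index.mjs via the "import" condition.
import { OpenRouterProvider } from '@weisiren000/oiiai';

// CJS consumers resolve ./dist/index.js via the "require" condition;
// demonstrated here from ESM with createRequire.
import { createRequire } from 'node:module';
const require = createRequire(import.meta.url);
const { BaseAIProvider } = require('@weisiren000/oiiai');

console.log(typeof OpenRouterProvider, typeof BaseAIProvider); // "function" "function"
```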