@weisiren000/oiiai 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,201 +1,165 @@
1
- # @weisiren000/oiiai
2
-
3
- 统一的 AI Provider 接口封装,支持 OpenRouter、OpenAI、Anthropic 等多种 AI 服务。
4
-
5
- ## 安装
6
-
7
- ```bash
8
- npm install @weisiren000/oiiai
9
- ```
10
-
11
- ## 快速开始
12
-
13
- ```typescript
14
- import { OpenRouterProvider } from '@weisiren000/oiiai';
15
-
16
- const ai = new OpenRouterProvider('your-api-key');
17
-
18
- // 简单问答
19
- const answer = await ai.ask('deepseek/deepseek-chat', '你好');
20
-
21
- // 带系统提示
22
- const response = await ai.askWithSystem(
23
- 'deepseek/deepseek-chat',
24
- '你是一个有帮助的助手',
25
- '介绍一下自己'
26
- );
27
-
28
- // 完整对话
29
- const result = await ai.chat({
30
- model: 'deepseek/deepseek-chat',
31
- messages: [
32
- { role: 'system', content: '你是一个简洁的助手' },
33
- { role: 'user', content: '1+1=?' }
34
- ],
35
- temperature: 0.7,
36
- maxTokens: 100
37
- });
38
-
39
- console.log(result.content);
40
- console.log(result.usage);
41
- ```
42
-
43
- ## 流式输出
44
-
45
- ```typescript
46
- for await (const chunk of ai.chatStream({
47
- model: 'deepseek/deepseek-chat',
48
- messages: [{ role: 'user', content: '写一首诗' }]
49
- })) {
50
- if (chunk.type === 'content') {
51
- process.stdout.write(chunk.text);
52
- } else if (chunk.type === 'reasoning') {
53
- // 思考过程
54
- process.stdout.write(`[思考] ${chunk.text}`);
55
- }
56
- }
57
- ```
58
-
59
- ## 思考模型 (Reasoning)
60
-
61
- 支持 OpenAI o1/o3、DeepSeek R1、Claude 等思考模型:
62
-
63
- ```typescript
64
- const result = await ai.chat({
65
- model: 'deepseek/deepseek-r1',
66
- messages: [{ role: 'user', content: '9.11 和 9.9 哪个大?' }],
67
- reasoning: {
68
- effort: 'high', // OpenAI o1/o3, Grok
69
- // maxTokens: 2000, // Anthropic, Gemini, Qwen
70
- // exclude: true, // 不返回思考内容
71
- // enabled: true, // 显式启用
72
- }
73
- });
74
-
75
- console.log('思考过程:', result.reasoning);
76
- console.log('最终答案:', result.content);
77
- ```
78
-
79
- ## API
80
-
81
- ### AIProvider 接口
82
-
83
- 所有 Provider 实现统一接口:
84
-
85
- ```typescript
86
- interface AIProvider {
87
- readonly name: string;
88
- chat(options: ChatOptions): Promise<ChatResult>;
89
- chatStream(options: ChatOptions): AsyncGenerator<StreamChunk>;
90
- ask(model: string, question: string, options?): Promise<string>;
91
- askWithSystem(model: string, systemPrompt: string, userMessage: string, options?): Promise<string>;
92
- listModels?(): Promise<ModelInfo[]>;
93
- }
94
- ```
95
-
96
- ### 类型定义
97
-
98
- ```typescript
99
- interface ChatMessage {
100
- role: 'system' | 'user' | 'assistant';
101
- content: string;
102
- }
103
-
104
- interface ChatOptions {
105
- model: string;
106
- messages: ChatMessage[];
107
- temperature?: number;
108
- maxTokens?: number;
109
- reasoning?: ReasoningConfig;
110
- }
111
-
112
- interface ChatResult {
113
- content: string;
114
- reasoning: string | null;
115
- model: string;
116
- usage: TokenUsage;
117
- finishReason: string | null;
118
- }
119
-
120
- interface StreamChunk {
121
- type: 'reasoning' | 'content';
122
- text: string;
123
- }
124
-
125
- interface ReasoningConfig {
126
- effort?: 'high' | 'medium' | 'low';
127
- maxTokens?: number;
128
- exclude?: boolean;
129
- enabled?: boolean;
130
- }
131
-
132
- interface TokenUsage {
133
- promptTokens: number;
134
- completionTokens: number;
135
- totalTokens: number;
136
- }
137
- ```
138
-
139
- ## ModelScope Provider
140
-
141
- ```typescript
142
- import { ModelScopeProvider } from '@weisiren000/oiiai';
143
-
144
- const ai = new ModelScopeProvider('your-modelscope-token');
145
-
146
- // 流式输出(自动启用思考模式)
147
- for await (const chunk of ai.chatStream({
148
- model: 'deepseek-ai/DeepSeek-V3.2',
149
- messages: [{ role: 'user', content: '9.9和9.11谁大' }]
150
- })) {
151
- if (chunk.type === 'reasoning') {
152
- process.stdout.write(chunk.text);
153
- } else {
154
- process.stdout.write(chunk.text);
155
- }
156
- }
157
- ```
158
-
159
- ## 自定义 Provider
160
-
161
- 继承 `BaseProvider` 实现自定义 Provider:
162
-
163
- ```typescript
164
- import { BaseProvider } from '@weisiren000/oiiai';
165
- import type { ChatOptions, ChatResult, StreamChunk } from '@weisiren000/oiiai';
166
-
167
- class MyProvider extends BaseProvider {
168
- readonly name = 'my-provider';
169
-
170
- constructor(apiKey: string) {
171
- super();
172
- // 初始化 SDK 客户端
173
- }
174
-
175
- async chat(options: ChatOptions): Promise<ChatResult> {
176
- // 实现非流式请求
177
- }
178
-
179
- async *chatStream(options: ChatOptions): AsyncGenerator<StreamChunk> {
180
- // 实现流式请求
181
- }
182
- }
183
- ```
184
-
185
- `BaseProvider` 已实现 `ask` 和 `askWithSystem` 的默认逻辑,子类只需实现 `chat` 和 `chatStream`。
186
-
187
- ## 获取模型列表
188
-
189
- ```typescript
190
- const models = await ai.listModels();
191
-
192
- for (const model of models) {
193
- console.log(`${model.id}: ${model.name}`);
194
- console.log(` 上下文长度: ${model.contextLength}`);
195
- console.log(` 价格: $${model.pricing.prompt}/token (输入)`);
196
- }
197
- ```
198
-
199
- ## License
200
-
201
- MIT
1
+ <div align="center">
2
+
3
+ # 🤖 @weisiren000/oiiai
4
+
5
+ **统一的 AI Provider 接口封装库**
6
+
7
+ [![TypeScript](https://img.shields.io/badge/TypeScript-5.0+-3178C6?style=flat-square&logo=typescript&logoColor=white)](https://www.typescriptlang.org/)
8
+ [![Node.js](https://img.shields.io/badge/Node.js-18+-339933?style=flat-square&logo=node.js&logoColor=white)](https://nodejs.org/)
9
+ [![License](https://img.shields.io/badge/License-MIT-yellow?style=flat-square)](LICENSE)
10
+ [![npm](https://img.shields.io/npm/v/@weisiren000/oiiai?style=flat-square&color=CB3837&logo=npm)](https://www.npmjs.com/package/@weisiren000/oiiai)
11
+
12
+ 支持 **OpenRouter** · **Gemini** · **Groq** · **HuggingFace** · **ModelScope**
13
+
14
+ [📖 详细文档](./docs/providers.md) · [🚀 快速开始](#-快速开始) · [💡 示例](#-使用示例)
15
+
16
+ </div>
17
+
18
+ ---
19
+
20
+ ## ✨ 特性
21
+
22
+ - 🔌 **统一接口** - 所有 Provider 使用相同 API,学会一个就会全部
23
+ - 🧠 **Reasoning 支持** - 统一的思考模式配置,自动转换各 Provider 格式
24
+ - 🌊 **流式输出** - 支持实时流式响应,区分思考/回答内容
25
+ - 📦 **TypeScript** - 完整类型定义,开发体验友好
26
+ - 🔧 **可扩展** - 轻松实现自定义 Provider
27
+
28
+ ## 📦 安装
29
+
30
+ ```bash
31
+ npm install @weisiren000/oiiai
32
+ ```
33
+
34
+ ## 🚀 快速开始
35
+
36
+ ```typescript
37
+ import { ai } from '@weisiren000/oiiai';
38
+
39
+ // 创建 Provider(所有 Provider 使用方式完全一致)
40
+ const openrouter = ai.openrouter('your-api-key');
41
+ const gemini = ai.gemini('your-api-key');
42
+ const groq = ai.groq('your-api-key');
43
+
44
+ // 简单问答
45
+ const answer = await openrouter.ask('openai/gpt-4o', '你好');
46
+ ```
47
+
48
+ ## 💡 使用示例
49
+
50
+ ### 简单问答
51
+
52
+ ```typescript
53
+ const answer = await provider.ask('model-id', '什么是 TypeScript?');
54
+ ```
55
+
56
+ ### 带系统提示
57
+
58
+ ```typescript
59
+ const answer = await provider.askWithSystem(
60
+ 'model-id',
61
+ '你是一个专业的代码助手',
62
+ '如何定义 TypeScript 接口?'
63
+ );
64
+ ```
65
+
66
+ ### 完整对话
67
+
68
+ ```typescript
69
+ const result = await provider.chat({
70
+ model: 'model-id',
71
+ messages: [
72
+ { role: 'system', content: '你是一个友好的助手' },
73
+ { role: 'user', content: '你好!' },
74
+ ],
75
+ temperature: 0.7,
76
+ maxTokens: 1000,
77
+ });
78
+
79
+ console.log(result.content); // 回答内容
80
+ console.log(result.usage); // Token 使用情况
81
+ ```
82
+
83
+ ### 流式输出
84
+
85
+ ```typescript
86
+ for await (const chunk of provider.chatStream({
87
+ model: 'model-id',
88
+ messages: [{ role: 'user', content: '写一首诗' }],
89
+ })) {
90
+ if (chunk.type === 'reasoning') {
91
+ process.stdout.write(`[思考] ${chunk.text}`);
92
+ } else {
93
+ process.stdout.write(chunk.text);
94
+ }
95
+ }
96
+ ```
97
+
98
+ ### 思考模式 (Reasoning)
99
+
100
+ ```typescript
101
+ const result = await provider.chat({
102
+ model: 'deepseek/deepseek-r1',
103
+ messages: [{ role: 'user', content: '9.11 和 9.9 哪个大?' }],
104
+ reasoning: {
105
+ effort: 'high', // 'off' | 'low' | 'medium' | 'high'
106
+ },
107
+ });
108
+
109
+ console.log('思考过程:', result.reasoning);
110
+ console.log('最终答案:', result.content);
111
+ ```
112
+
113
+ ## 🔧 支持的 Provider
114
+
115
+ | Provider | 服务商 | Reasoning 参数 | 参考文档 |
116
+ | ------------- | ------------- | ---------------------------- | ---------------------------------------------------------------------- |
117
+ | `openrouter` | OpenRouter | `reasoning.effort` | [Docs](https://openrouter.ai/docs/requests) |
118
+ | `gemini` | Google Gemini | `reasoning_effort` | [Docs](https://ai.google.dev/gemini-api/docs/text-generation) |
119
+ | `groq` | Groq | `reasoning_format: 'parsed'` | [Docs](https://console.groq.com/docs/reasoning) |
120
+ | `huggingface` | HuggingFace | 不支持 | - |
121
+ | `modelscope` | 魔搭社区 | `enable_thinking` | [Docs](https://www.alibabacloud.com/help/en/model-studio/deepseek-api) |
122
+
123
+ ## 📝 常用模型
124
+
125
+ ```typescript
126
+ // OpenRouter
127
+ 'openai/gpt-4o';
128
+ 'anthropic/claude-sonnet-4';
129
+ 'deepseek/deepseek-r1';
130
+
131
+ // Gemini
132
+ 'gemini-2.5-flash';
133
+ 'gemini-2.5-pro';
134
+
135
+ // Groq
136
+ 'llama-3.3-70b-versatile';
137
+ 'qwen/qwen3-32b';
138
+
139
+ // ModelScope
140
+ 'deepseek-ai/DeepSeek-R1';
141
+ 'Qwen/Qwen2.5-72B-Instruct';
142
+ ```
143
+
144
+ ## 🛠️ 自定义 Provider
145
+
146
+ ```typescript
147
+ import { BaseProvider } from '@weisiren000/oiiai';
148
+ import type { ChatOptions, ChatResult, StreamChunk } from '@weisiren000/oiiai';
149
+
150
+ class MyProvider extends BaseProvider {
151
+ readonly name = 'my-provider';
152
+
153
+ async chat(options: ChatOptions): Promise<ChatResult> {
154
+ // 实现非流式请求
155
+ }
156
+
157
+ async *chatStream(options: ChatOptions): AsyncGenerator<StreamChunk> {
158
+ // 实现流式请求
159
+ }
160
+ }
161
+ ```
162
+
163
+ ## 📄 License
164
+
165
+ MIT