ai-world-sdk 1.1.6 → 1.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +117 -6
- package/dist/__tests__/example.test.js +48 -0
- package/dist/base.d.ts +3 -0
- package/dist/base.js +4 -0
- package/dist/chat_models/anthropic.d.ts +33 -0
- package/dist/chat_models/google.d.ts +28 -0
- package/dist/chat_models/openai.d.ts +36 -0
- package/dist/config.d.ts +3 -3
- package/dist/config.js +2 -2
- package/dist/messages.d.ts +4 -1
- package/dist/messages.js +30 -0
- package/package.json +1 -1
package/README.md
CHANGED

@@ -339,6 +339,12 @@ const model = createChatModel('gemini-2.5-flash-image', {
 **Structured output parameters:**
 - `jsonSchema?: Record<string, any>` - JSON Schema definition used for structured output (via `with_structured_output`)

+**Reasoning parameters:**
+- `reasoning?: object` - Reasoning configuration; the shape differs per model:
+  - **OpenAI (o1, gpt-5 series)**: `{ effort: 'low' | 'medium' | 'high', summary?: 'detailed' | 'auto' }`
+  - **Gemini**: `{ thinking_level: 'low' | 'high' }`
+  - **Claude**: `{ type: 'enabled', budget_tokens: number }`
+
 ### Image generation

 #### DoubaoImageGenerationClient
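The hunk above documents `reasoning` as an option of the `createChatModel` factory named in its header. A minimal sketch of passing it through the factory, assuming the factory forwards `provider` and `reasoning` to the underlying model class (the forwarding itself is not shown in this diff):

```typescript
import { createChatModel, HumanMessage } from 'ai-world-sdk';

async function main() {
  const model = createChatModel('gemini-3-pro-preview', {
    temperature: 0.7,
    provider: 'gemini',                    // assumed: the factory accepts the same provider option as the model classes
    reasoning: { thinking_level: 'high' }, // Gemini shape; OpenAI and Claude use the shapes listed above
  });

  const reply = await model.invoke([new HumanMessage('Explain quantum entanglement.')]);
  console.log(reply.content);              // the reply message exposes `content` like the other message types
}

main().catch(console.error);
```

The README examples added further down use the model classes (`ChatOpenAI`, `ChatGoogleGenerativeAI`, `ChatAnthropic`) directly instead of the factory.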
@@ -856,6 +862,107 @@ const response2 = await model.invoke([
 ]);
 ```

+### Reasoning mode
+
+Reasoning mode lets a model expose its thinking process while it generates a response. Each provider has its own reasoning configuration.
+
+#### OpenAI reasoning mode (o1, gpt-5 series)
+
+```typescript
+import { ChatOpenAI, HumanMessage, AIMessageChunk } from 'ai-world-sdk';
+
+const model = new ChatOpenAI({
+  modelName: 'gpt-5.2',
+  temperature: 0.7,
+  provider: 'aihubmix',
+  reasoning: {
+    effort: 'high', // 'low' | 'medium' | 'high'
+    summary: 'detailed', // 'detailed' | 'auto' (optional)
+  },
+});
+
+// Stream the call to capture the reasoning process
+let fullText = '';
+let reasoningText = '';
+for await (const chunk of model.stream([
+  new HumanMessage('Explain the principle of quantum entanglement'),
+])) {
+  fullText += chunk.content || '';
+  reasoningText += chunk.reasoning || ''; // reasoning content
+}
+
+console.log('Reasoning:', reasoningText);
+console.log('Final reply:', fullText);
+```
+
+#### Google Gemini reasoning mode
+
+```typescript
+import { ChatGoogleGenerativeAI, HumanMessage } from 'ai-world-sdk';
+
+const model = new ChatGoogleGenerativeAI({
+  modelName: 'gemini-3-pro-preview',
+  temperature: 0.7,
+  provider: 'gemini',
+  reasoning: {
+    thinking_level: 'high', // 'low' | 'high'
+  },
+});
+
+// Stream the call to capture the thinking process
+for await (const chunk of model.stream([
+  new HumanMessage('Analyze this math problem'),
+])) {
+  console.log('Content:', chunk.content);
+  console.log('Thinking:', chunk.reasoning); // thinking content
+}
+```
+
+#### Anthropic Claude reasoning mode (Extended Thinking)
+
+```typescript
+import { ChatAnthropic, HumanMessage } from 'ai-world-sdk';
+
+const model = new ChatAnthropic({
+  modelName: 'claude-3-5-sonnet-20241022',
+  temperature: 0.7,
+  provider: 'aihubmix',
+  reasoning: {
+    type: 'enabled',
+    budget_tokens: 5000, // thinking budget (in tokens)
+  },
+});
+
+// Stream the call to capture the thinking process
+for await (const chunk of model.stream([
+  new HumanMessage('Solve this complex problem'),
+])) {
+  console.log('Content:', chunk.content);
+  console.log('Thinking:', chunk.reasoning);
+}
+```
+
+**Reasoning parameter reference:**
+
+| Model family | Parameter | Description |
+|---------|------|------|
+| OpenAI (o1, gpt-5) | `effort: 'low' \| 'medium' \| 'high'` | Reasoning effort level |
+| OpenAI (o1, gpt-5) | `summary: 'detailed' \| 'auto'` | Reasoning summary mode (optional) |
+| Gemini | `thinking_level: 'low' \| 'high'` | Thinking depth level |
+| Claude | `type: 'enabled'` | Enables Extended Thinking |
+| Claude | `budget_tokens: number` | Thinking budget (recommended: 2000-10000) |
+
+**Accessing the reasoning content:**
+
+During streaming, the model's reasoning/thinking content can be read from the `AIMessageChunk.reasoning` property:
+
+```typescript
+for await (const chunk of model.stream([...])) {
+  // chunk.content - final response content
+  // chunk.reasoning - reasoning/thinking process (if the model supports it)
+}
+```
+
 ### Structured output (JSON Schema)

 Use the `jsonSchema` parameter to have the model return structured JSON data instead of free-form text.

@@ -1486,16 +1593,20 @@ for await (const chunk of client.streamDownload({

 ### Chat models

-| Vendor | Provider | Example models | Model class |
-
-| OpenAI | `aihubmix` | `gpt-4o-mini`, `gpt-4
-
-
-
+| Vendor | Provider | Example models | Model class | Reasoning support |
+|--------|----------|----------|--------|---------|
+| OpenAI | `aihubmix` | `gpt-4o-mini`, `gpt-4` | `ChatOpenAI` | ❌ |
+| OpenAI | `aihubmix` | `o1-preview`, `o1-mini`, `gpt-5.2` | `ChatOpenAI` | ✅ |
+| Google Gemini | `gemini` or `aihubmix` | `gemini-2.5-flash-image` | `ChatGoogleGenerativeAI` | ❌ |
+| Google Gemini | `gemini` or `aihubmix` | `gemini-3-pro-preview` | `ChatGoogleGenerativeAI` | ✅ |
+| Anthropic Claude | `aihubmix` | `claude-3-sonnet-20240229` | `ChatAnthropic` | ❌ |
+| Anthropic Claude | `aihubmix` | `claude-3-5-sonnet-20241022` | `ChatAnthropic` | ✅ |
+| Doubao | `doubao` or `aihubmix` | `doubao-pro-4k`, `doubao-seedream-4-5-251128` | `ChatOpenAI` | ❌ |

 **Notes:**
 - The `aihubmix` and `api2img` providers can access all models and are recommended for multi-model scenarios
 - A model-specific provider (such as `gemini` or `doubao`) calls the corresponding official API directly
+- Models that support reasoning can enable thinking/reasoning via the `reasoning` parameter

 ### Image generation models

package/dist/__tests__/example.test.js
CHANGED

@@ -96,6 +96,31 @@ describe("Langchain SDK Tests", () => {
         console.log("✅ streaming test passed");
         console.log(`Full reply length: ${fullText.length} characters`);
     }, 30000);
+    test("ChatGoogleGenerativeAI - streaming with reasoning", async () => {
+        const gemini = new index_1.ChatGoogleGenerativeAI({
+            modelName: "gemini-3-pro-preview",
+            temperature: 0.7,
+            provider: "gemini",
+            reasoning: {
+                thinking_level: "high",
+            },
+        });
+        let fullText = "";
+        let reasoningText = "";
+        for await (const chunk of gemini.stream([
+            new index_1.HumanMessage("Please introduce artificial intelligence."),
+        ])) {
+            // chunk is an AIMessageChunk object; extract its text content
+            fullText += extractTextFromChunk(chunk);
+            reasoningText += chunk.reasoning ?? "";
+            console.log("AI stream chunk:", chunk);
+            console.log("AI stream text:", fullText);
+        }
+        console.log("✅ ChatGoogleGenerativeAI streaming test reasoning: " + reasoningText);
+        console.log("✅ ChatGoogleGenerativeAI streaming test content: " + fullText);
+        expect(fullText.length).toBeGreaterThan(0);
+        console.log(`Full reply length: ${fullText.length} characters`);
+    }, 30000);
     test("createChatModel factory function", async () => {
         const model = (0, index_1.createChatModel)("gemini-2.0-flash-exp-image-generation", {
             temperature: 0.7,

@@ -894,6 +919,29 @@ describe("Langchain SDK Tests", () => {
         console.log("✅ ChatOpenAI streaming test passed " + fullText);
         console.log(`Full reply length: ${fullText.length} characters`);
     }, 30000);
+    test("ChatOpenAI reasoning - streaming", async () => {
+        const openai = new index_1.ChatOpenAI({
+            modelName: "gpt-5.2",
+            temperature: 0.7,
+            provider: "api2img",
+            reasoning: {
+                effort: "high",
+                summary: "detailed",
+            },
+        });
+        let fullText = "";
+        let reasoningText = "";
+        for await (const chunk of openai.stream([
+            new index_1.HumanMessage("Please introduce artificial intelligence."),
+        ])) {
+            fullText += extractTextFromChunk(chunk);
+            reasoningText += chunk.reasoning ?? "";
+        }
+        console.log("✅ ChatOpenAI streaming test reasoning: " + reasoningText);
+        expect(fullText.length).toBeGreaterThan(0);
+        console.log("✅ ChatOpenAI streaming test content: " + fullText);
+        console.log(`Full reply length: ${fullText.length} characters`);
+    }, 30000);
     test("ChatOpenAI - batch method", async () => {
         const openai = new index_1.ChatOpenAI({
             modelName: "gpt-4",
package/dist/base.d.ts
CHANGED

@@ -39,10 +39,12 @@ export interface BindOptions {
             name: string;
         };
     };
+    reasoning?: any;
 }
 export declare abstract class BaseChatModel {
     protected headers: Record<string, string>;
     protected temperature: number;
+    protected reasoning?: any;
     protected maxTokens?: number;
     protected topP?: number;
     protected modelName: string;

@@ -62,6 +64,7 @@ export declare abstract class BaseChatModel {
         apiKey?: string;
         vertexai?: boolean;
         jsonSchema?: Record<string, any>;
+        reasoning?: any;
     });
     /**
      * Invoke the model with messages (non-streaming)
package/dist/base.js
CHANGED

@@ -23,6 +23,7 @@ class BaseChatModel {
         this.apiKey = config.apiKey;
         this.vertexai = config.vertexai;
         this.jsonSchema = config.jsonSchema;
+        this.reasoning = config.reasoning;
         this.temperature = config.temperature ?? 0.7;
         this.maxTokens = config.maxTokens;
         this.topP = config.topP;

@@ -44,6 +45,7 @@ class BaseChatModel {
                 top_p: options.topP ?? this.topP,
                 tools: options.tools,
                 tool_choice: options.toolChoice,
+                reasoning: this.reasoning,
             },
             model: this.modelName,
             provider: this.provider,

@@ -95,6 +97,7 @@ class BaseChatModel {
                 top_p: options.topP ?? this.topP,
                 tools: options.tools,
                 tool_choice: options.toolChoice,
+                reasoning: this.reasoning,
             },
             model: this.modelName,
             provider: this.provider,

@@ -216,6 +219,7 @@ class BaseChatModel {
                 temperature: this.temperature,
                 max_tokens: this.maxTokens,
                 top_p: this.topP,
+                reasoning: this.reasoning,
             },
             model: this.modelName,
             provider: this.provider,
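Taken together, the base.js hunks copy the stored `reasoning` value into the `options` object of every request body the base class builds, next to `temperature`, `tools`, and `tool_choice`, with `model` and `provider` as siblings. A rough sketch of the resulting payload shape; only the field names come from the hunks above, the concrete values and the surrounding request logic are assumptions:

```typescript
// Shape sketch only - not the SDK's actual request code.
const requestBody = {
  options: {
    temperature: 0.7,
    max_tokens: 1024,
    top_p: 1,
    // tools / tool_choice omitted here for brevity
    // Forwarded verbatim from the constructor config; the shape is provider-specific
    // (OpenAI effort/summary, Gemini thinking_level, Claude type/budget_tokens).
    reasoning: { effort: 'high', summary: 'detailed' },
  },
  model: 'gpt-5.2',
  provider: 'aihubmix',
};

console.log(JSON.stringify(requestBody, null, 2));
```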
package/dist/chat_models/anthropic.d.ts
CHANGED

@@ -3,8 +3,41 @@
  * Similar to LangChain.js ChatAnthropic
  */
 import { BaseChatModel, BaseChatModelParams } from "../base";
+/**
+ * Anthropic Claude reasoning configuration
+ * Controls the Extended Thinking feature of Claude models
+ */
+export interface AnthropicReasoningConfig {
+    /**
+     * Thinking type; currently only "enabled" is supported
+     */
+    type: "enabled";
+    /**
+     * Thinking budget (number of tokens)
+     * Caps the maximum number of tokens the model may spend on thinking
+     * Recommended range: 2000-10000
+     */
+    budget_tokens: number;
+}
 export interface ChatAnthropicConfig extends BaseChatModelParams {
     modelName: string;
+    /**
+     * Reasoning configuration for Claude models
+     * Enables Extended Thinking; the model includes its thinking process in the response
+     *
+     * @example
+     * ```typescript
+     * const model = new ChatAnthropic({
+     *   modelName: 'claude-3-5-sonnet-20241022',
+     *   provider: 'aihubmix',
+     *   reasoning: {
+     *     type: 'enabled',
+     *     budget_tokens: 5000,
+     *   },
+     * });
+     * ```
+     */
+    reasoning?: AnthropicReasoningConfig;
 }
 export declare class ChatAnthropic extends BaseChatModel {
     constructor(config: ChatAnthropicConfig);
package/dist/chat_models/google.d.ts
CHANGED

@@ -3,8 +3,36 @@
  * Similar to LangChain.js ChatGoogleGenerativeAI
  */
 import { BaseChatModel, BaseChatModelParams } from "../base";
+/**
+ * Google Gemini reasoning configuration
+ * Controls the thinking depth of Gemini models
+ */
+export interface GeminiReasoningConfig {
+    /**
+     * Thinking depth level
+     * - "low": shallower thinking, faster responses
+     * - "high": deeper thinking, more thorough reasoning
+     */
+    thinking_level: "low" | "high";
+}
 export interface ChatGoogleGenerativeAIConfig extends BaseChatModelParams {
     modelName: string;
+    /**
+     * Reasoning configuration for Gemini models
+     * When enabled, the model includes its thinking process in the response
+     *
+     * @example
+     * ```typescript
+     * const model = new ChatGoogleGenerativeAI({
+     *   modelName: 'gemini-3-pro-preview',
+     *   provider: 'gemini',
+     *   reasoning: {
+     *     thinking_level: 'high',
+     *   },
+     * });
+     * ```
+     */
+    reasoning?: GeminiReasoningConfig;
 }
 export declare class ChatGoogleGenerativeAI extends BaseChatModel {
     constructor(config: ChatGoogleGenerativeAIConfig);
package/dist/chat_models/openai.d.ts
CHANGED

@@ -3,8 +3,44 @@
  * Similar to LangChain.js ChatOpenAI
  */
 import { BaseChatModel, BaseChatModelParams } from "../base";
+/**
+ * OpenAI reasoning configuration
+ * Controls the reasoning depth of OpenAI models (o1, gpt-5 series)
+ */
+export interface OpenAIReasoningConfig {
+    /**
+     * Reasoning effort level
+     * - "low": less effort, faster responses
+     * - "medium": moderate effort
+     * - "high": more effort, deeper reasoning
+     */
+    effort: "low" | "medium" | "high";
+    /**
+     * Reasoning summary mode
+     * - "detailed": detailed summary
+     * - "auto": chosen automatically
+     */
+    summary?: "detailed" | "auto";
+}
 export interface ChatOpenAIConfig extends BaseChatModelParams {
     modelName: string;
+    /**
+     * Reasoning configuration for OpenAI models (o1, gpt-5 series)
+     * When enabled, the model includes its reasoning process in the response
+     *
+     * @example
+     * ```typescript
+     * const model = new ChatOpenAI({
+     *   modelName: 'gpt-5.2',
+     *   provider: 'aihubmix',
+     *   reasoning: {
+     *     effort: 'high',
+     *     summary: 'detailed',
+     *   },
+     * });
+     * ```
+     */
+    reasoning?: OpenAIReasoningConfig;
 }
 export declare class ChatOpenAI extends BaseChatModel {
     constructor(config: ChatOpenAIConfig);
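The three declaration files above each define their own reasoning shape. Reading them as a single union makes the per-provider differences easy to compare; this type is only a reading aid for the diff and is not exported by the package:

```typescript
// Not part of ai-world-sdk - a summary of the three interfaces above.
type AnyReasoningConfig =
  | { effort: 'low' | 'medium' | 'high'; summary?: 'detailed' | 'auto' } // OpenAIReasoningConfig
  | { thinking_level: 'low' | 'high' }                                   // GeminiReasoningConfig
  | { type: 'enabled'; budget_tokens: number };                          // AnthropicReasoningConfig
```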
package/dist/config.d.ts
CHANGED

@@ -9,7 +9,7 @@
  *
  * Note: the {VERSION} placeholder is replaced with the actual version number at build time
  */
-export declare const SDK_SIGNATURE = "AI_WORLD_SDK_V:1.1.6";
+export declare const SDK_SIGNATURE = "AI_WORLD_SDK_V:1.1.7";
 /**
  * Version compatibility error
  */

@@ -24,8 +24,8 @@ declare class SDKConfig {
     private _pluginId;
     private _versionCompatible;
     private _versionCheckPromise;
-    readonly sdkSignature = "AI_WORLD_SDK_V:1.1.6";
-    readonly sdkVersion = "1.1.6";
+    readonly sdkSignature = "AI_WORLD_SDK_V:1.1.7";
+    readonly sdkVersion = "1.1.7";
     constructor();
     /**
      * Set global base URL
package/dist/config.js
CHANGED

@@ -7,7 +7,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.sdkConfig = exports.VersionCompatibilityError = exports.SDK_SIGNATURE = void 0;
 // SDK version number (updated automatically from package.json at build time)
 // This value is read from package.json and refreshed when running npm run build
-const SDK_VERSION = "1.1.6";
+const SDK_VERSION = "1.1.7";
 /**
  * SDK signature - used to identify the SDK version in built JS files
  * Format: AI_WORLD_SDK_V:<version>

@@ -15,7 +15,7 @@ const SDK_VERSION = "1.1.6";
  *
  * Note: the {VERSION} placeholder is replaced with the actual version number at build time
  */
-exports.SDK_SIGNATURE = "AI_WORLD_SDK_V:1.1.6";
+exports.SDK_SIGNATURE = "AI_WORLD_SDK_V:1.1.7";
 /**
  * Version compatibility error
  */
package/dist/messages.d.ts
CHANGED

@@ -3,9 +3,11 @@
  * Similar to LangChain.js message interface
  */
 export interface MessageContent {
-    type: "text" | "image_url";
+    type: "text" | "image_url" | "reasoning" | "thinking";
     text?: string;
     image_url?: string;
+    reasoning?: string;
+    thinking?: string;
 }
 export interface BaseMessage {
     content: string | MessageContent[] | Record<string, any>;

@@ -52,6 +54,7 @@ export declare class AIMessageChunk implements BaseMessage {
     usage_metadata?: Record<string, any>;
     additional_kwargs?: Record<string, any>;
     constructor(data: AIMessageChunkData | string | MessageContent[]);
+    get reasoning(): string | undefined;
     toJSON(): {
         role: string;
         content: string | MessageContent[];
package/dist/messages.js
CHANGED

@@ -51,6 +51,36 @@ class AIMessageChunk {
             this.additional_kwargs = data.additional_kwargs;
         }
     }
+    get reasoning() {
+        if (Array.isArray(this.content)) {
+            // Derive the reasoning text from `content`.
+            // `content` may be a MessageContent[]; find blocks whose type is "reasoning" or "thinking"
+            // and concatenate the text of their `summary` entries in order.
+            // Example: [{'summary': [{'index': 0, 'type': 'summary_text', 'text': ' in'}], 'index': 0, 'type': 'reasoning'}]
+            //          [{'type': 'thinking', 'thinking': "**Initiating AI Overview**\n", 'index': 0}]
+            // The type may also be "thinking", so both "reasoning" and "thinking" blocks are handled.
+            const reasoningText = this.content
+                .filter((block) => typeof block === "object" &&
+                    block !== null &&
+                    "type" in block &&
+                    (block.type === "reasoning" || block.type === "thinking"))
+                .map((block) => {
+                    if (block.type === "thinking") {
+                        return block.thinking ?? "";
+                    }
+                    if (Array.isArray(block.summary)) {
+                        // Sort the summary entries by index and join their text
+                        return block.summary
+                            .sort((a, b) => (a.index ?? 0) - (b.index ?? 0))
+                            .map((summaryItem) => summaryItem.text ?? "")
+                            .join("");
+                    }
+                    return "";
+                })
+                .join("");
+            return reasoningText;
+        }
+        return "";
+    }
     toJSON() {
         return {
             role: this.role,
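A small sketch of what the new getter yields for the two block shapes quoted in its comments. It assumes the `AIMessageChunk` constructor stores a `MessageContent[]` argument as `content`, as the overload in messages.d.ts suggests; the `as any` cast is only needed because the OpenAI-style `summary` field is not declared on `MessageContent`:

```typescript
import { AIMessageChunk } from 'ai-world-sdk';

// OpenAI-style block: the reasoning text lives under summary[].text.
const openaiStyle = new AIMessageChunk([
  { type: 'reasoning', index: 0, summary: [{ index: 0, type: 'summary_text', text: ' in' }] } as any,
]);

// Claude/Gemini-style block: the text lives directly under `thinking`.
const thinkingStyle = new AIMessageChunk([
  { type: 'thinking', thinking: '**Initiating AI Overview**\n' },
]);

console.log(openaiStyle.reasoning);   // " in"
console.log(thinkingStyle.reasoning); // "**Initiating AI Overview**\n"
```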
package/package.json
CHANGED