@plolink/sdk 0.0.10 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,179 @@
+ import { P as PlolinkClient } from '../../client-CAjIQKPm.cjs';
+ import '../../core-77EbLgbp.cjs';
+ import 'axios';
+
+ /**
+  * LLM module type definitions
+  */
+ /**
+  * Message role
+  */
+ type MessageRole = 'system' | 'user' | 'assistant';
+ /**
+  * Chat message
+  */
+ interface ChatMessage {
+     role: MessageRole;
+     content: string;
+ }
+ /**
+  * Chat completion request parameters
+  */
+ interface ChatCompletionParams {
+     /** Model name */
+     model: string;
+     /** Message list */
+     messages: ChatMessage[];
+     /** Temperature, range 0-2, defaults to 0.7 */
+     temperature?: number;
+     /** Maximum number of tokens */
+     max_tokens?: number;
+     /** Sampling parameter, range 0-1 */
+     top_p?: number;
+ }
+ /**
+  * A single choice in a chat completion response
+  */
+ interface ChatCompletionChoice {
+     index: number;
+     message: ChatMessage;
+     finish_reason: string;
+ }
+ /**
+  * Token usage
+  */
+ interface TokenUsage {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+ }
+ /**
+  * Chat completion response
+  */
+ interface ChatCompletionResponse {
+     id: string;
+     object: string;
+     created: number;
+     model: string;
+     choices: ChatCompletionChoice[];
+     usage: TokenUsage;
+ }
+ /**
+  * Model information
+  */
+ interface ModelInfo {
+     /** Model ID */
+     id: string;
+     /** Model display name */
+     name: string;
+     /** Provider */
+     provider: string;
+     /** Input price (CNY per 1000 tokens) */
+     inputPrice: string;
+     /** Output price (CNY per 1000 tokens) */
+     outputPrice: string;
+ }
+
+ /**
+  * LLM module
+  *
+  * @description
+  * Provides general-purpose LLM calling, compatible with the OpenAI API format.
+  * Supports:
+  * - chat completions
+  * - model list queries
+  * - automatic billing and deduction
+  *
+  * @example
+  * ```typescript
+  * import { PlolinkClient } from '@plolink/sdk';
+  * import { LLM } from '@plolink/sdk/llm';
+  *
+  * const client = new PlolinkClient({
+  *   token: 'sk-your-api-key'
+  * });
+  *
+  * const llm = new LLM(client);
+  *
+  * // Fetch the model list
+  * const models = await llm.getModels();
+  * console.log('Available models:', models.map(m => m.id));
+  *
+  * // Call chat completion
+  * const response = await llm.chat({
+  *   model: 'deepseek-v3-1-terminus',
+  *   messages: [
+  *     { role: 'system', content: 'You are a helpful assistant' },
+  *     { role: 'user', content: 'Hello, please introduce yourself' }
+  *   ],
+  *   temperature: 0.7
+  * });
+  *
+  * console.log('Reply:', response.choices[0].message.content);
+  * console.log('Tokens used:', response.usage);
+  * ```
+  *
+  * @module llm
+  */
+
+ /**
+  * LLM module class
+  */
+ declare class LLM {
+     private client;
+     constructor(client: PlolinkClient);
+     /**
+      * Chat completion
+      *
+      * @description
+      * Calls an LLM model for a chat conversation. Supports multi-turn dialogue and handles billing and deduction automatically.
+      *
+      * @param params - Chat completion parameters
+      * @returns Chat completion response, including the generated content and token usage
+      * @throws {PlolinkError} Thrown when parameters are invalid, the balance is insufficient, or the call fails
+      *
+      * @example
+      * ```typescript
+      * const response = await llm.chat({
+      *   model: 'deepseek-v3-1-terminus',
+      *   messages: [
+      *     { role: 'system', content: 'You are a professional programming assistant' },
+      *     { role: 'user', content: 'How do I implement the singleton pattern in TypeScript?' }
+      *   ],
+      *   temperature: 0.7
+      * });
+      *
+      * console.log('Reply:', response.choices[0].message.content);
+      * console.log('Input tokens:', response.usage.prompt_tokens);
+      * console.log('Output tokens:', response.usage.completion_tokens);
+      * ```
+      */
+     chat(params: ChatCompletionParams): Promise<ChatCompletionResponse>;
+     /**
+      * List all available models
+      *
+      * @description
+      * Fetches every available LLM model with its details, including provider and pricing information.
+      *
+      * @returns List of model information
+      * @throws {PlolinkError} Thrown when the request fails
+      *
+      * @example
+      * ```typescript
+      * const models = await llm.getModels();
+      *
+      * console.log(`${models.length} models available`);
+      *
+      * models.forEach(model => {
+      *   console.log(`Model: ${model.name}`);
+      *   console.log(`  ID: ${model.id}`);
+      *   console.log(`  Provider: ${model.provider}`);
+      *   console.log(`  Input price: ${model.inputPrice} CNY/1000 tokens`);
+      *   console.log(`  Output price: ${model.outputPrice} CNY/1000 tokens`);
+      * });
+      * ```
+      */
+     getModels(): Promise<ModelInfo[]>;
+ }
+
+ export { type ChatCompletionChoice, type ChatCompletionParams, type ChatCompletionResponse, type ChatMessage, LLM, type MessageRole, type ModelInfo, type TokenUsage };
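The declarations above type a thin, OpenAI-compatible wrapper: construct a `PlolinkClient`, hand it to `LLM`, and call `chat` or `getModels`. A minimal error-handling sketch follows; it assumes `PlolinkError` is importable from the package root and that its `message` carries the validation text seen in the compiled output further down, neither of which is guaranteed by these declaration files alone.

```typescript
import { PlolinkClient, PlolinkError } from '@plolink/sdk'; // PlolinkError export path is an assumption
import { LLM } from '@plolink/sdk/llm';

async function main() {
  const client = new PlolinkClient({ token: 'sk-your-api-key' });
  const llm = new LLM(client);

  try {
    const response = await llm.chat({
      model: 'deepseek-v3-1-terminus',
      messages: [{ role: 'user', content: 'Hello' }],
      temperature: 0.7, // documented range is 0-2
    });
    console.log(response.choices[0].message.content);
    console.log('Total tokens:', response.usage.total_tokens);
  } catch (err) {
    // Assumption: PlolinkError is a distinct class, so instanceof narrows it
    if (err instanceof PlolinkError) {
      console.error('Plolink request failed:', err.message);
    } else {
      throw err;
    }
  }
}

main();
```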
@@ -0,0 +1,179 @@
+ import { P as PlolinkClient } from '../../client-CwNikk7i.js';
+ import '../../core-77EbLgbp.js';
+ import 'axios';
+
+ /**
+  * LLM module type definitions
+  */
+ /**
+  * Message role
+  */
+ type MessageRole = 'system' | 'user' | 'assistant';
+ /**
+  * Chat message
+  */
+ interface ChatMessage {
+     role: MessageRole;
+     content: string;
+ }
+ /**
+  * Chat completion request parameters
+  */
+ interface ChatCompletionParams {
+     /** Model name */
+     model: string;
+     /** Message list */
+     messages: ChatMessage[];
+     /** Temperature, range 0-2, defaults to 0.7 */
+     temperature?: number;
+     /** Maximum number of tokens */
+     max_tokens?: number;
+     /** Sampling parameter, range 0-1 */
+     top_p?: number;
+ }
+ /**
+  * A single choice in a chat completion response
+  */
+ interface ChatCompletionChoice {
+     index: number;
+     message: ChatMessage;
+     finish_reason: string;
+ }
+ /**
+  * Token usage
+  */
+ interface TokenUsage {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+ }
+ /**
+  * Chat completion response
+  */
+ interface ChatCompletionResponse {
+     id: string;
+     object: string;
+     created: number;
+     model: string;
+     choices: ChatCompletionChoice[];
+     usage: TokenUsage;
+ }
+ /**
+  * Model information
+  */
+ interface ModelInfo {
+     /** Model ID */
+     id: string;
+     /** Model display name */
+     name: string;
+     /** Provider */
+     provider: string;
+     /** Input price (CNY per 1000 tokens) */
+     inputPrice: string;
+     /** Output price (CNY per 1000 tokens) */
+     outputPrice: string;
+ }
+
+ /**
+  * LLM module
+  *
+  * @description
+  * Provides general-purpose LLM calling, compatible with the OpenAI API format.
+  * Supports:
+  * - chat completions
+  * - model list queries
+  * - automatic billing and deduction
+  *
+  * @example
+  * ```typescript
+  * import { PlolinkClient } from '@plolink/sdk';
+  * import { LLM } from '@plolink/sdk/llm';
+  *
+  * const client = new PlolinkClient({
+  *   token: 'sk-your-api-key'
+  * });
+  *
+  * const llm = new LLM(client);
+  *
+  * // Fetch the model list
+  * const models = await llm.getModels();
+  * console.log('Available models:', models.map(m => m.id));
+  *
+  * // Call chat completion
+  * const response = await llm.chat({
+  *   model: 'deepseek-v3-1-terminus',
+  *   messages: [
+  *     { role: 'system', content: 'You are a helpful assistant' },
+  *     { role: 'user', content: 'Hello, please introduce yourself' }
+  *   ],
+  *   temperature: 0.7
+  * });
+  *
+  * console.log('Reply:', response.choices[0].message.content);
+  * console.log('Tokens used:', response.usage);
+  * ```
+  *
+  * @module llm
+  */
+
+ /**
+  * LLM module class
+  */
+ declare class LLM {
+     private client;
+     constructor(client: PlolinkClient);
+     /**
+      * Chat completion
+      *
+      * @description
+      * Calls an LLM model for a chat conversation. Supports multi-turn dialogue and handles billing and deduction automatically.
+      *
+      * @param params - Chat completion parameters
+      * @returns Chat completion response, including the generated content and token usage
+      * @throws {PlolinkError} Thrown when parameters are invalid, the balance is insufficient, or the call fails
+      *
+      * @example
+      * ```typescript
+      * const response = await llm.chat({
+      *   model: 'deepseek-v3-1-terminus',
+      *   messages: [
+      *     { role: 'system', content: 'You are a professional programming assistant' },
+      *     { role: 'user', content: 'How do I implement the singleton pattern in TypeScript?' }
+      *   ],
+      *   temperature: 0.7
+      * });
+      *
+      * console.log('Reply:', response.choices[0].message.content);
+      * console.log('Input tokens:', response.usage.prompt_tokens);
+      * console.log('Output tokens:', response.usage.completion_tokens);
+      * ```
+      */
+     chat(params: ChatCompletionParams): Promise<ChatCompletionResponse>;
+     /**
+      * List all available models
+      *
+      * @description
+      * Fetches every available LLM model with its details, including provider and pricing information.
+      *
+      * @returns List of model information
+      * @throws {PlolinkError} Thrown when the request fails
+      *
+      * @example
+      * ```typescript
+      * const models = await llm.getModels();
+      *
+      * console.log(`${models.length} models available`);
+      *
+      * models.forEach(model => {
+      *   console.log(`Model: ${model.name}`);
+      *   console.log(`  ID: ${model.id}`);
+      *   console.log(`  Provider: ${model.provider}`);
+      *   console.log(`  Input price: ${model.inputPrice} CNY/1000 tokens`);
+      *   console.log(`  Output price: ${model.outputPrice} CNY/1000 tokens`);
+      * });
+      * ```
+      */
+     getModels(): Promise<ModelInfo[]>;
+ }
+
+ export { type ChatCompletionChoice, type ChatCompletionParams, type ChatCompletionResponse, type ChatMessage, LLM, type MessageRole, type ModelInfo, type TokenUsage };
@@ -0,0 +1,102 @@
+ import { PlolinkError } from '../../chunk-MD4O7FWT.js';
+
+ // src/modules/llm/index.ts
+ var LLM = class {
+   constructor(client) {
+     this.client = client;
+   }
+   /**
+    * Chat completion
+    *
+    * @description
+    * Calls an LLM model for a chat conversation. Supports multi-turn dialogue and handles billing and deduction automatically.
+    *
+    * @param params - Chat completion parameters
+    * @returns Chat completion response, including the generated content and token usage
+    * @throws {PlolinkError} Thrown when parameters are invalid, the balance is insufficient, or the call fails
+    *
+    * @example
+    * ```typescript
+    * const response = await llm.chat({
+    *   model: 'deepseek-v3-1-terminus',
+    *   messages: [
+    *     { role: 'system', content: 'You are a professional programming assistant' },
+    *     { role: 'user', content: 'How do I implement the singleton pattern in TypeScript?' }
+    *   ],
+    *   temperature: 0.7
+    * });
+    *
+    * console.log('Reply:', response.choices[0].message.content);
+    * console.log('Input tokens:', response.usage.prompt_tokens);
+    * console.log('Output tokens:', response.usage.completion_tokens);
+    * ```
+    */
+   async chat(params) {
+     if (!params.model) {
+       throw new PlolinkError("Model is required", "INVALID_PARAMS");
+     }
+     if (!params.messages || params.messages.length === 0) {
+       throw new PlolinkError("Messages array cannot be empty", "INVALID_PARAMS");
+     }
+     for (const message of params.messages) {
+       if (!message.role || !["system", "user", "assistant"].includes(message.role)) {
+         throw new PlolinkError(
+           `Invalid message role: ${message.role}. Must be one of: system, user, assistant`,
+           "INVALID_PARAMS"
+         );
+       }
+       if (!message.content) {
+         throw new PlolinkError("Message content cannot be empty", "INVALID_PARAMS");
+       }
+     }
+     if (params.temperature !== void 0) {
+       if (params.temperature < 0 || params.temperature > 2) {
+         throw new PlolinkError("Temperature must be between 0 and 2", "INVALID_PARAMS");
+       }
+     }
+     if (params.max_tokens !== void 0 && params.max_tokens <= 0) {
+       throw new PlolinkError("max_tokens must be a positive number", "INVALID_PARAMS");
+     }
+     if (params.top_p !== void 0) {
+       if (params.top_p < 0 || params.top_p > 1) {
+         throw new PlolinkError("top_p must be between 0 and 1", "INVALID_PARAMS");
+       }
+     }
+     this.client.logger.info("Calling LLM chat completion", {
+       model: params.model,
+       messageCount: params.messages.length
+     });
+     return this.client.axiosInstance.post("/api/v1/llm/chat/completions", params);
+   }
+   /**
+    * List all available models
+    *
+    * @description
+    * Fetches every available LLM model with its details, including provider and pricing information.
+    *
+    * @returns List of model information
+    * @throws {PlolinkError} Thrown when the request fails
+    *
+    * @example
+    * ```typescript
+    * const models = await llm.getModels();
+    *
+    * console.log(`${models.length} models available`);
+    *
+    * models.forEach(model => {
+    *   console.log(`Model: ${model.name}`);
+    *   console.log(`  ID: ${model.id}`);
+    *   console.log(`  Provider: ${model.provider}`);
+    *   console.log(`  Input price: ${model.inputPrice} CNY/1000 tokens`);
+    *   console.log(`  Output price: ${model.outputPrice} CNY/1000 tokens`);
+    * });
+    * ```
+    */
+   async getModels() {
+     this.client.logger.info("Fetching available models");
+     return this.client.axiosInstance.get("/api/v1/llm/models");
+   }
+ };
+
+ export { LLM };
+ //# sourceMappingURL=index.js.map
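Note that the compiled `chat` rejects malformed parameters before anything is posted to `/api/v1/llm/chat/completions`, so these failures cost nothing and surface as rejected promises. A minimal sketch of the guards in effect, reusing the `llm` instance from the earlier example:

```typescript
// Each call below trips one of the guards in chat() above; no HTTP request is sent.
// Per the compiled output, the rejections are PlolinkError instances created with
// the "INVALID_PARAMS" code.
const badCalls = [
  { model: '', messages: [{ role: 'user' as const, content: 'hi' }] },  // "Model is required"
  { model: 'deepseek-v3-1-terminus', messages: [] },                    // "Messages array cannot be empty"
  {
    model: 'deepseek-v3-1-terminus',
    messages: [{ role: 'user' as const, content: 'hi' }],
    temperature: 3,                                                     // "Temperature must be between 0 and 2"
  },
];

for (const params of badCalls) {
  await llm.chat(params).catch(err => console.error(err.message));
}
```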
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../../src/modules/llm/index.ts"],"names":[],"file":"index.js","mappings":"…","sourcesContent":["…"]}
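One detail worth flagging for integrators: `ModelInfo.inputPrice` and `outputPrice` are declared as strings (CNY per 1000 tokens), so they must be parsed before any cost arithmetic. A small sketch under that assumption; the helper name is hypothetical:

```typescript
import type { ModelInfo, TokenUsage } from '@plolink/sdk/llm';

// Hypothetical helper: estimate the cost of one completion in CNY.
// Prices are strings of CNY per 1000 tokens, per the ModelInfo declarations above.
function estimateCostCny(model: ModelInfo, usage: TokenUsage): number {
  const inputCost = (usage.prompt_tokens / 1000) * parseFloat(model.inputPrice);
  const outputCost = (usage.completion_tokens / 1000) * parseFloat(model.outputPrice);
  return inputCost + outputCost;
}
```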