iflow2api_sdk-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,123 @@
+ """iflow2api-sdk
+
+ Python SDK for iflow2api - iFlow CLI AI Service Client.
+
+ Provides synchronous and asynchronous access to the iFlow API, with support
+ for the OpenAI-compatible Chat Completions API.
+
+ Example:
+     >>> from iflow2api_sdk import IFlowClient
+     >>>
+     >>> # Use the local proxy
+     >>> client = IFlowClient(
+     ...     api_key="not-needed",
+     ...     base_url="http://localhost:28000/v1"
+     ... )
+     >>>
+     >>> # Chat Completions
+     >>> response = client.chat.completions.create(
+     ...     model="glm-5",
+     ...     messages=[{"role": "user", "content": "Hello!"}]
+     ... )
+     >>> print(response.choices[0].message.content)
+     >>>
+     >>> # Streaming responses
+     >>> for chunk in client.chat.completions.create(
+     ...     model="glm-5",
+     ...     messages=[{"role": "user", "content": "Write a poem"}],
+     ...     stream=True
+     ... ):
+     ...     if chunk.content:
+     ...         print(chunk.content, end="", flush=True)
+ """
+
+ __version__ = "0.1.0"
+
+ # Clients
+ from .client import IFlowClient
+ from .client_async import AsyncIFlowClient
+
+ # Exceptions
+ from .exceptions import (
+     APIError,
+     AuthenticationError,
+     ConfigurationError,
+     ConnectionError,
+     IFlowError,
+     InvalidRequestError,
+     ModelNotFoundError,
+     RateLimitError,
+     StreamError,
+     TimeoutError,
+     ValidationError,
+ )
+
+ # Models
+ from .models import (
+     ChatCompletionChoice,
+     ChatCompletionMessage,
+     ChatCompletionRequest,
+     ChatCompletionResponse,
+     ChatCompletionStreamChoice,
+     ChatCompletionStreamChunk,
+     ChatCompletionStreamDelta,
+     ChatMessage,
+     ModelInfo,
+     ModelListResponse,
+     Role,
+     Usage,
+     assistant,
+     create_message,
+     system,
+     user,
+ )
+
+ # Streaming responses
+ from .streaming import AsyncStreamResponse, StreamChunk, StreamResponse
+
+ # Configuration
+ from .config import ClientConfig, RequestOptions
+
+ __all__ = [
+     # Version
+     "__version__",
+     # Clients
+     "IFlowClient",
+     "AsyncIFlowClient",
+     # Exceptions
+     "IFlowError",
+     "APIError",
+     "AuthenticationError",
+     "ConfigurationError",
+     "ConnectionError",
+     "InvalidRequestError",
+     "ModelNotFoundError",
+     "RateLimitError",
+     "StreamError",
+     "TimeoutError",
+     "ValidationError",
+     # Models
+     "Role",
+     "ChatMessage",
+     "ChatCompletionRequest",
+     "ChatCompletionMessage",
+     "ChatCompletionChoice",
+     "ChatCompletionResponse",
+     "ChatCompletionStreamDelta",
+     "ChatCompletionStreamChoice",
+     "ChatCompletionStreamChunk",
+     "Usage",
+     "ModelInfo",
+     "ModelListResponse",
+     # Convenience functions
+     "create_message",
+     "system",
+     "user",
+     "assistant",
+     # Streaming responses
+     "StreamChunk",
+     "StreamResponse",
+     "AsyncStreamResponse",
+     # Configuration
+     "ClientConfig",
+     "RequestOptions",
+ ]
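
The package also exports message helpers (system, user, assistant, create_message) without an example. Below is a minimal sketch of how they would plug into create(), assuming each helper returns a ChatMessage for its role (their signatures are not shown in this diff); create() itself accepts ChatMessage instances as well as plain dicts, per the message-conversion code in chat.py:

    from iflow2api_sdk import IFlowClient, system, user

    client = IFlowClient(api_key="not-needed", base_url="http://localhost:28000/v1")

    # Assumption: system()/user() build ChatMessage objects for their role.
    # create() converts ChatMessage instances via model_dump() and passes
    # plain dicts through unchanged.
    response = client.chat.completions.create(
        model="glm-5",
        messages=[
            system("You are a concise assistant."),
            user("Hello!"),
        ],
    )
    print(response.choices[0].message.content)
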
@@ -0,0 +1,21 @@
+ """API module
+
+ Re-exports all API wrappers.
+ """
+
+ from .chat import (
+     AsyncChatAPI,
+     AsyncCompletionsAPI,
+     ChatAPI,
+     CompletionsAPI,
+ )
+ from .models import AsyncModelsAPI, ModelsAPI
+
+ __all__ = [
+     "ChatAPI",
+     "CompletionsAPI",
+     "ModelsAPI",
+     "AsyncChatAPI",
+     "AsyncCompletionsAPI",
+     "AsyncModelsAPI",
+ ]
@@ -0,0 +1,434 @@
+ """Chat Completions API module
+
+ Wraps the Chat Completions API, supporting both streaming and
+ non-streaming responses.
+ """
+
+ import re
+ from typing import TYPE_CHECKING, Literal, Union, overload
+
+ from ..exceptions import APIError, InvalidRequestError, ModelNotFoundError, ValidationError
+ from ..models.chat import (
+     ChatCompletionResponse,
+     ChatMessage,
+ )
+ from ..streaming import AsyncStreamResponse, StreamResponse
+
+ if TYPE_CHECKING:
+     from ..client import IFlowClient
+     from ..client_async import AsyncIFlowClient
+
+
+ def _configure_model_request(request_body: dict, model: str) -> dict:
+     """Configure the request parameters required by specific models.
+
+     Source: the model configuration in the iFlow CLI source code (iflow.js).
+
+     Args:
+         request_body: Original request body
+         model: Model ID
+
+     Returns:
+         The configured request body (a copy)
+     """
+     body = request_body.copy()
+     model_lower = model.lower()
+
+     # DeepSeek models
+     if model_lower.startswith("deepseek"):
+         if "thinking_mode" not in body:
+             body["thinking_mode"] = True
+         if "reasoning" not in body:
+             body["reasoning"] = True
+
+     # GLM-5 (special configuration)
+     elif model == "glm-5":
+         if "chat_template_kwargs" not in body:
+             body["chat_template_kwargs"] = {"enable_thinking": True}
+         if "enable_thinking" not in body:
+             body["enable_thinking"] = True
+         if "thinking" not in body:
+             body["thinking"] = {"type": "enabled"}
+
+     # GLM-4.7
+     elif model == "glm-4.7":
+         if "chat_template_kwargs" not in body:
+             body["chat_template_kwargs"] = {"enable_thinking": True}
+
+     # Other GLM models
+     elif model_lower.startswith("glm-"):
+         if "chat_template_kwargs" not in body:
+             body["chat_template_kwargs"] = {"enable_thinking": True}
+
+     # Kimi-K2.5 models
+     elif model_lower.startswith("kimi-k2.5"):
+         if "thinking" not in body:
+             body["thinking"] = {"type": "enabled"}
+
+     # Models whose ID contains "thinking"
+     elif "thinking" in model_lower:
+         if "thinking_mode" not in body:
+             body["thinking_mode"] = True
+
+     # mimo- models
+     elif model_lower.startswith("mimo-"):
+         if "thinking" not in body:
+             body["thinking"] = {"type": "enabled"}
+
+     # Claude models
+     elif "claude" in model_lower:
+         if "chat_template_kwargs" not in body:
+             body["chat_template_kwargs"] = {"enable_thinking": True}
+
+     # sonnet- models
+     elif "sonnet-" in model_lower:
+         if "chat_template_kwargs" not in body:
+             body["chat_template_kwargs"] = {"enable_thinking": True}
+
+     # Models whose ID contains "reasoning"
+     elif "reasoning" in model_lower:
+         if "reasoning" not in body:
+             body["reasoning"] = True
+
+     # Qwen 4B models do not support thinking; strip the related parameters.
+     # (model_lower is already lowercased, so no IGNORECASE flag is needed.)
+     if re.match(r"qwen.*4b", model_lower):
+         for key in ("thinking_mode", "reasoning", "chat_template_kwargs"):
+             if key in body:
+                 del body[key]
+
+     return body
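
To make the model-specific configuration concrete, here is what _configure_model_request does to two request bodies. It is a private helper, so this is for reading the diff rather than a public API; the dict values are illustrative:

    # GLM-5 gains the thinking switches that the elif chain above injects.
    body = {"model": "glm-5", "messages": [], "stream": False}
    configured = _configure_model_request(body, "glm-5")
    # configured == {
    #     "model": "glm-5", "messages": [], "stream": False,
    #     "chat_template_kwargs": {"enable_thinking": True},
    #     "enable_thinking": True,
    #     "thinking": {"type": "enabled"},
    # }

    # A Qwen 4B model goes the other way: thinking-related keys are stripped.
    stripped = _configure_model_request(
        {"model": "qwen3-4b", "messages": [], "chat_template_kwargs": {}},
        "qwen3-4b",
    )
    assert "chat_template_kwargs" not in stripped
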
+
+
+ def _normalize_response(result: dict) -> dict:
+     """Normalize an OpenAI-format response.
+
+     Some models (such as GLM-5) return their output in reasoning_content
+     rather than content, which prevents OpenAI-compatible clients from
+     reading the assistant message.
+     """
+     choices = result.get("choices", [])
+     for choice in choices:
+         message = choice.get("message", {})
+         content = message.get("content")
+         reasoning_content = message.get("reasoning_content")
+
+         if not content and reasoning_content:
+             # content is empty but reasoning_content has a value:
+             # move reasoning_content into content.
+             message["content"] = reasoning_content
+             del message["reasoning_content"]
+         elif content and reasoning_content:
+             # Both have values: drop reasoning_content.
+             del message["reasoning_content"]
+
+     return result
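
For illustration, this is the transformation _normalize_response applies to a GLM-5-style payload (the values are made up):

    raw = {
        "choices": [
            {"message": {"role": "assistant", "content": "",
                         "reasoning_content": "Hi!"}}
        ]
    }
    normalized = _normalize_response(raw)
    # The assistant text is now where OpenAI-compatible clients expect it.
    assert normalized["choices"][0]["message"]["content"] == "Hi!"
    assert "reasoning_content" not in normalized["choices"][0]["message"]
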
+
+
+ def _check_error_response(response: dict) -> None:
+     """Detect API error responses.
+
+     When the API reports an error status, raise a specific exception
+     instead of a ValidationError.
+
+     Args:
+         response: API response data
+
+     Raises:
+         APIError or one of its subclasses
+     """
+     # Check for an explicit error-status field.
+     status = response.get("status")
+     if status is not None and str(status) != "200":
+         message = response.get("msg") or response.get("message") or "Unknown error"
+         try:
+             status_code = int(status)
+         except (TypeError, ValueError):
+             status_code = 500
+
+         # Raise a different exception depending on the status code.
+         if status_code == 404:
+             raise ModelNotFoundError(
+                 message,
+                 details=response,
+             )
+         elif status_code in (400, 422):
+             raise InvalidRequestError(
+                 message,
+                 status_code=status_code,
+                 details=response,
+             )
+         else:
+             raise APIError(
+                 message,
+                 status_code=status_code,
+                 details=response,
+             )
+
+     # Check for an OpenAI-format error response.
+     if "error" in response and "choices" not in response:
+         error = response["error"]
+         message = error.get("message", "Unknown error")
+         error_type = error.get("type", "api_error")
+         status_code = error.get("code", 500)
+
+         if isinstance(status_code, str) and status_code.isdigit():
+             status_code = int(status_code)
+
+         raise APIError(
+             message,
+             # "code" may be a non-numeric string (e.g. an error slug);
+             # fall back to 500 rather than crashing on int().
+             status_code=status_code if isinstance(status_code, int) else 500,
+             error_type=error_type,
+             details=response,
+         )
+
+
+ def _validate_request_params(model: str, messages: list) -> None:
+     """Validate request parameters.
+
+     Args:
+         model: Model ID
+         messages: Message list
+
+     Raises:
+         ValidationError: Parameter validation failed
+     """
+     if not model or not model.strip():
+         raise ValidationError("Model ID cannot be empty", field="model")
+
+     if messages is None or len(messages) == 0:
+         raise ValidationError("Messages list cannot be empty", field="messages")
+
+
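Seen from the caller's side, the two helpers above surface as the exported exception types. A sketch using the client constructor shown in the package docstring; which branch fires depends on the proxy's error payload:

    from iflow2api_sdk import (
        APIError,
        IFlowClient,
        ModelNotFoundError,
        ValidationError,
    )

    client = IFlowClient(api_key="not-needed", base_url="http://localhost:28000/v1")

    try:
        response = client.chat.completions.create(
            model="glm-5",
            messages=[{"role": "user", "content": "Hello"}],
        )
    except ValidationError as exc:
        # Raised locally by _validate_request_params before any request is sent.
        print(f"Bad arguments: {exc}")
    except ModelNotFoundError as exc:
        # Mapped from a 404-style status in the response body.
        print(f"Unknown model: {exc}")
    except APIError as exc:
        # Catch-all for other error payloads detected by _check_error_response.
        print(f"API error: {exc}")
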
+ class CompletionsAPI:
+     """Chat Completions API (synchronous)."""
+
+     def __init__(self, client: "IFlowClient"):
+         self._client = client
+
+     @overload
+     def create(
+         self,
+         *,
+         model: str,
+         messages: list[Union[ChatMessage, dict]],
+         stream: Literal[False] = False,
+         **kwargs,
+     ) -> ChatCompletionResponse: ...
+
+     @overload
+     def create(
+         self,
+         *,
+         model: str,
+         messages: list[Union[ChatMessage, dict]],
+         stream: Literal[True],
+         **kwargs,
+     ) -> StreamResponse: ...
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: list[Union[ChatMessage, dict]],
+         stream: bool = False,
+         **kwargs,
+     ) -> Union[ChatCompletionResponse, StreamResponse]:
+         """Create a Chat Completion.
+
+         Args:
+             model: Model ID
+             messages: Message list
+             stream: Whether to stream the response
+             **kwargs: Additional parameters (temperature, max_tokens, etc.)
+
+         Returns:
+             Non-streaming: ChatCompletionResponse
+             Streaming: StreamResponse
+
+         Raises:
+             ValidationError: Parameter validation failed
+
+         Example:
+             >>> # Non-streaming
+             >>> response = client.chat.completions.create(
+             ...     model="glm-5",
+             ...     messages=[{"role": "user", "content": "Hello"}]
+             ... )
+             >>> print(response.choices[0].message.content)
+
+             >>> # Streaming
+             >>> for chunk in client.chat.completions.create(
+             ...     model="glm-5",
+             ...     messages=[{"role": "user", "content": "Hello"}],
+             ...     stream=True
+             ... ):
+             ...     if chunk.content:
+             ...         print(chunk.content, end="", flush=True)
+         """
+         # Validate parameters.
+         _validate_request_params(model, messages)
+
+         # Build the request body.
+         request_body = {
+             "model": model,
+             "messages": [
+                 msg.model_dump() if isinstance(msg, ChatMessage) else msg
+                 for msg in messages
+             ],
+             "stream": stream,
+             **kwargs,
+         }
+
+         # Apply model-specific configuration.
+         request_body = _configure_model_request(request_body, model)
+
+         if stream:
+             return self._create_stream(request_body)
+         else:
+             return self._create_non_stream(request_body)
+
+     def _create_non_stream(self, request_body: dict) -> ChatCompletionResponse:
+         """Non-streaming request."""
+         response = self._client._request(
+             "POST",
+             "/chat/completions",
+             json=request_body,
+         )
+
+         # Detect error responses.
+         _check_error_response(response)
+
+         # Normalize the response.
+         response = _normalize_response(response)
+
+         # Ensure usage statistics are present.
+         if "usage" not in response:
+             response["usage"] = {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0,
+             }
+
+         return ChatCompletionResponse(**response)
+
+     def _create_stream(self, request_body: dict) -> StreamResponse:
+         """Streaming request."""
+         response_iter = self._client._request_stream(
+             "POST",
+             "/chat/completions",
+             json=request_body,
+         )
+         return StreamResponse(response_iter)
+
+
+ class ChatAPI:
+     """Chat API (synchronous)."""
+
+     def __init__(self, client: "IFlowClient"):
+         self._client = client
+         self.completions = CompletionsAPI(client)
+
+
+ class AsyncCompletionsAPI:
+     """Chat Completions API (asynchronous)."""
+
+     def __init__(self, client: "AsyncIFlowClient"):
+         self._client = client
+
+     @overload
+     async def create(
+         self,
+         *,
+         model: str,
+         messages: list[Union[ChatMessage, dict]],
+         stream: Literal[False] = False,
+         **kwargs,
+     ) -> ChatCompletionResponse: ...
+
+     @overload
+     async def create(
+         self,
+         *,
+         model: str,
+         messages: list[Union[ChatMessage, dict]],
+         stream: Literal[True],
+         **kwargs,
+     ) -> AsyncStreamResponse: ...
+
+     async def create(
+         self,
+         *,
+         model: str,
+         messages: list[Union[ChatMessage, dict]],
+         stream: bool = False,
+         **kwargs,
+     ) -> Union[ChatCompletionResponse, AsyncStreamResponse]:
+         """Create a Chat Completion (asynchronous).
+
+         Args:
+             model: Model ID
+             messages: Message list
+             stream: Whether to stream the response
+             **kwargs: Additional parameters
+
+         Returns:
+             Non-streaming: ChatCompletionResponse
+             Streaming: AsyncStreamResponse
+
+         Raises:
+             ValidationError: Parameter validation failed
+         """
+         # Validate parameters.
+         _validate_request_params(model, messages)
+
+         request_body = {
+             "model": model,
+             "messages": [
+                 msg.model_dump() if isinstance(msg, ChatMessage) else msg
+                 for msg in messages
+             ],
+             "stream": stream,
+             **kwargs,
+         }
+
+         request_body = _configure_model_request(request_body, model)
+
+         if stream:
+             return await self._create_stream(request_body)
+         else:
+             return await self._create_non_stream(request_body)
+
+     async def _create_non_stream(self, request_body: dict) -> ChatCompletionResponse:
+         """Non-streaming request (asynchronous)."""
+         response = await self._client._request(
+             "POST",
+             "/chat/completions",
+             json=request_body,
+         )
+
+         # Detect error responses.
+         _check_error_response(response)
+
+         response = _normalize_response(response)
+
+         if "usage" not in response:
+             response["usage"] = {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0,
+             }
+
+         return ChatCompletionResponse(**response)
+
+     async def _create_stream(self, request_body: dict) -> AsyncStreamResponse:
+         """Streaming request (asynchronous)."""
+         # Note: _request_stream is an async generator and must not be awaited.
+         response_iter = self._client._request_stream(
+             "POST",
+             "/chat/completions",
+             json=request_body,
+         )
+         return AsyncStreamResponse(response_iter)
+
+
+ class AsyncChatAPI:
+     """Chat API (asynchronous)."""
+
+     def __init__(self, client: "AsyncIFlowClient"):
+         self._client = client
+         self.completions = AsyncCompletionsAPI(client)
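
A usage sketch for the async path. This assumes AsyncStreamResponse supports async for and yields chunks with a .content attribute, mirroring the sync example in the package docstring; neither detail is shown in this diff:

    import asyncio

    from iflow2api_sdk import AsyncIFlowClient

    async def main() -> None:
        client = AsyncIFlowClient(api_key="not-needed", base_url="http://localhost:28000/v1")
        # With stream=True, create() resolves to an AsyncStreamResponse.
        stream = await client.chat.completions.create(
            model="glm-5",
            messages=[{"role": "user", "content": "Write a poem"}],
            stream=True,
        )
        async for chunk in stream:
            if chunk.content:
                print(chunk.content, end="", flush=True)

    asyncio.run(main())
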
@@ -0,0 +1,71 @@
+ """Models API module
+
+ Wraps the model-list API.
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ from ..models.common import ModelListResponse
+
+ if TYPE_CHECKING:
+     from ..client import IFlowClient
+     from ..client_async import AsyncIFlowClient
+
+
+ class ModelsAPI:
+     """Models API (synchronous)."""
+
+     def __init__(self, client: "IFlowClient"):
+         self._client = client
+
+     def list(self) -> ModelListResponse:
+         """Fetch the list of available models.
+
+         Returns:
+             ModelListResponse: Model-list response
+
+         Example:
+             >>> models = client.models.list()
+             >>> for model in models.data:
+             ...     print(model.id)
+         """
+         response = self._client._request("GET", "/models")
+         return ModelListResponse(**response)
+
+     def get_model_ids(self) -> list[str]:
+         """Fetch the list of model IDs.
+
+         Convenience method that returns the IDs of all models.
+
+         Returns:
+             List of model IDs
+         """
+         models = self.list()
+         return [m.id for m in models.data]
+
+
+ class AsyncModelsAPI:
+     """Models API (asynchronous)."""
+
+     def __init__(self, client: "AsyncIFlowClient"):
+         self._client = client
+
+     async def list(self) -> ModelListResponse:
+         """Fetch the list of available models.
+
+         Returns:
+             ModelListResponse: Model-list response
+         """
+         response = await self._client._request("GET", "/models")
+         return ModelListResponse(**response)
+
+     async def get_model_ids(self) -> list[str]:
+         """Fetch the list of model IDs.
+
+         Returns:
+             List of model IDs
+         """
+         models = await self.list()
+         return [m.id for m in models.data]
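
A closing sketch tying the Models API to Chat Completions, reusing the local-proxy settings from the package docstring (model availability depends on the proxy, so the "glm-5" check is illustrative):

    from iflow2api_sdk import IFlowClient

    client = IFlowClient(api_key="not-needed", base_url="http://localhost:28000/v1")

    # Discover what the proxy exposes before picking a model.
    available = client.models.get_model_ids()
    print(available)

    if "glm-5" in available:
        response = client.chat.completions.create(
            model="glm-5",
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(response.choices[0].message.content)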