coze-coding-utils 0.2.1__py3-none-any.whl → 0.2.2a1__py3-none-any.whl

This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
Files changed (36)
  1. coze_coding_utils/__init__.py +1 -1
  2. coze_coding_utils/error/__init__.py +31 -0
  3. coze_coding_utils/error/classifier.py +320 -0
  4. coze_coding_utils/error/codes.py +356 -0
  5. coze_coding_utils/error/exceptions.py +439 -0
  6. coze_coding_utils/error/patterns.py +939 -0
  7. coze_coding_utils/error/test_classifier.py +0 -0
  8. coze_coding_utils/file/__init__.py +0 -0
  9. coze_coding_utils/file/file.py +327 -0
  10. coze_coding_utils/helper/__init__.py +0 -0
  11. coze_coding_utils/helper/agent_helper.py +599 -0
  12. coze_coding_utils/helper/graph_helper.py +231 -0
  13. coze_coding_utils/log/__init__.py +0 -0
  14. coze_coding_utils/log/common.py +8 -0
  15. coze_coding_utils/log/config.py +10 -0
  16. coze_coding_utils/log/err_trace.py +88 -0
  17. coze_coding_utils/log/loop_trace.py +72 -0
  18. coze_coding_utils/log/node_log.py +487 -0
  19. coze_coding_utils/log/parser.py +255 -0
  20. coze_coding_utils/log/write_log.py +183 -0
  21. coze_coding_utils/messages/__init__.py +0 -0
  22. coze_coding_utils/messages/client.py +48 -0
  23. coze_coding_utils/messages/server.py +173 -0
  24. coze_coding_utils/openai/__init__.py +5 -0
  25. coze_coding_utils/openai/converter/__init__.py +6 -0
  26. coze_coding_utils/openai/converter/request_converter.py +165 -0
  27. coze_coding_utils/openai/converter/response_converter.py +467 -0
  28. coze_coding_utils/openai/handler.py +298 -0
  29. coze_coding_utils/openai/types/__init__.py +37 -0
  30. coze_coding_utils/openai/types/request.py +24 -0
  31. coze_coding_utils/openai/types/response.py +178 -0
  32. {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/METADATA +2 -2
  33. coze_coding_utils-0.2.2a1.dist-info/RECORD +37 -0
  34. coze_coding_utils-0.2.1.dist-info/RECORD +0 -7
  35. {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/WHEEL +0 -0
  36. {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/licenses/LICENSE +0 -0
coze_coding_utils/openai/handler.py
@@ -0,0 +1,298 @@
+ """OpenAI Chat Completions handler"""
+
+ import asyncio
+ import logging
+ import threading
+ import contextvars
+ from typing import Dict, Any, Union, AsyncGenerator
+
+ from fastapi.responses import StreamingResponse, JSONResponse
+
+ from coze_coding_utils.runtime_ctx.context import Context
+ from coze_coding_utils.openai.types.response import OpenAIError, OpenAIErrorResponse
+ from coze_coding_utils.openai.converter.request_converter import RequestConverter
+ from coze_coding_utils.openai.converter.response_converter import ResponseConverter
+ from coze_coding_utils.error import classify_error
+
+ logger = logging.getLogger(__name__)
+
+
+ class OpenAIChatHandler:
+     """OpenAI Chat Completions handler"""
+
+     def __init__(self, graph_service: Any):
+         """
+         Initialize the handler
+
+         Args:
+             graph_service: GraphService instance
+         """
+         self.graph_service = graph_service
+         self.request_converter = RequestConverter()
+
+     async def handle(
+         self,
+         payload: Dict[str, Any],
+         ctx: Context,
+     ) -> Union[StreamingResponse, JSONResponse]:
+         """
+         Handle the request; return a streaming or non-streaming response depending on the stream parameter
+
+         Args:
+             payload: request body
+             ctx: context
+
+         Returns:
+             StreamingResponse or JSONResponse
+         """
+         try:
+             # 1. Parse the request
+             request = self.request_converter.parse(payload)
+             session_id = self.request_converter.get_session_id(request)
+
+             if not session_id:
+                 return self._error_response(
+                     message="session_id is required",
+                     error_type="invalid_request_error",
+                     code="400001",
+                     status_code=400,
+                 )
+
+             # 2. Initialize the response converter
+             response_converter = ResponseConverter(
+                 request_id=f"chatcmpl-{ctx.run_id}",
+                 model=request.model,
+             )
+
+             # 3. Convert to LangGraph input
+             stream_input = self.request_converter.to_stream_input(request)
+
+             if not stream_input.get("messages"):
+                 return self._error_response(
+                     message="No user message found",
+                     error_type="invalid_request_error",
+                     code="400002",
+                     status_code=400,
+                 )
+
+             # 4. Dispatch on the stream parameter
+             if request.stream:
+                 return self._handle_stream(
+                     stream_input,
+                     session_id,
+                     response_converter,
+                     ctx,
+                 )
+             else:
+                 return await self._handle_non_stream(
+                     stream_input,
+                     session_id,
+                     response_converter,
+                     ctx,
+                 )
+
+         except Exception as e:
+             logger.error(f"Error in OpenAIChatHandler.handle: {e}", exc_info=True)
+             return self._handle_error(e)
+
+     def _handle_stream(
+         self,
+         stream_input: Dict[str, Any],
+         session_id: str,
+         response_converter: ResponseConverter,
+         ctx: Context,
+     ) -> StreamingResponse:
+         """Streaming response handling"""
+
+         async def stream_generator() -> AsyncGenerator[str, None]:
+             """Async streaming generator"""
+             loop = asyncio.get_running_loop()
+             queue: asyncio.Queue = asyncio.Queue()
+             context = contextvars.copy_context()
+
+             def producer():
+                 """Background-thread producer"""
+                 try:
+                     # Fetch and configure the graph
+                     from utils.helper import graph_helper
+                     graph = self.graph_service._get_graph(ctx)
+
+                     if graph_helper.is_agent_proj():
+                         from utils.log.loop_trace import init_agent_config
+                         run_config = init_agent_config(graph, ctx)
+                     else:
+                         from utils.log.loop_trace import init_run_config
+                         run_config = init_run_config(graph, ctx)
+
+                     run_config["recursion_limit"] = 100
+                     run_config["configurable"] = {"thread_id": session_id}
+
+                     # Streaming execution - consume the raw LangGraph stream directly
+                     items = graph.stream(
+                         stream_input,
+                         stream_mode="messages",
+                         config=run_config,
+                         context=ctx,
+                     )
+
+                     # Use iter_langgraph_stream, which supports streaming tool-call arguments
+                     for sse_data in response_converter.iter_langgraph_stream(items):
+                         if sse_data != "data: [DONE]\n\n":  # do not emit DONE here
+                             loop.call_soon_threadsafe(queue.put_nowait, sse_data)
+
+                 except Exception as ex:
+                     logger.error(f"Stream producer error: {ex}", exc_info=True)
+                     err = classify_error(ex, {"node_name": "openai_stream"})
+                     error_chunk = self._create_error_sse_chunk(
+                         str(err.code),
+                         str(ex),
+                         response_converter.request_id,
+                     )
+                     loop.call_soon_threadsafe(queue.put_nowait, error_chunk)
+                 finally:
+                     loop.call_soon_threadsafe(queue.put_nowait, "data: [DONE]\n\n")
+                     loop.call_soon_threadsafe(queue.put_nowait, None)
+
+             # Start the background thread
+             threading.Thread(target=lambda: context.run(producer), daemon=True).start()
+
+             # Consume from the queue
+             try:
+                 while True:
+                     item = await queue.get()
+                     if item is None:
+                         break
+                     yield item
+             except asyncio.CancelledError:
+                 logger.info(f"Stream cancelled for run_id: {ctx.run_id}")
+                 raise
+
+         return StreamingResponse(
+             stream_generator(),
+             media_type="text/event-stream",
+         )
+
+     async def _handle_non_stream(
+         self,
+         stream_input: Dict[str, Any],
+         session_id: str,
+         response_converter: ResponseConverter,
+         ctx: Context,
+     ) -> JSONResponse:
+         """Non-streaming response handling"""
+         loop = asyncio.get_running_loop()
+         context = contextvars.copy_context()
+         result_future: asyncio.Future = loop.create_future()
+
+         def producer():
+             """Background-thread producer"""
+             try:
+                 # Fetch and configure the graph
+                 from utils.helper import graph_helper
+                 graph = self.graph_service._get_graph(ctx)
+
+                 if graph_helper.is_agent_proj():
+                     from utils.log.loop_trace import init_agent_config
+                     run_config = init_agent_config(graph, ctx)
+                 else:
+                     from utils.log.loop_trace import init_run_config
+                     run_config = init_run_config(graph, ctx)
+
+                 run_config["recursion_limit"] = 100
+                 run_config["configurable"] = {"thread_id": session_id}
+
+                 # Streaming execution - consume the raw LangGraph stream directly
+                 items = graph.stream(
+                     stream_input,
+                     stream_mode="messages",
+                     config=run_config,
+                     context=ctx,
+                 )
+
+                 # Collect the result with collect_langgraph_to_response
+                 response = response_converter.collect_langgraph_to_response(items)
+                 loop.call_soon_threadsafe(
+                     result_future.set_result,
+                     response.to_dict()
+                 )
+
+             except Exception as ex:
+                 logger.error(f"Non-stream producer error: {ex}", exc_info=True)
+                 loop.call_soon_threadsafe(
+                     result_future.set_exception,
+                     ex
+                 )
+
+         # Start the background thread
+         threading.Thread(target=lambda: context.run(producer), daemon=True).start()
+
+         try:
+             result = await result_future
+             return JSONResponse(content=result)
+         except Exception as e:
+             return self._handle_error(e)
+
+     def _handle_error(self, error: Exception) -> JSONResponse:
+         """Error handling; returns the standard OpenAI error format"""
+         err = classify_error(error, {"node_name": "openai_handler"})
+
+         error_type = "internal_error"
+         status_code = 500
+
+         # Map by error category
+         error_category = err.category.name if err.category else "System"
+         if error_category in ("Invalid", "BadRequest"):
+             error_type = "invalid_request_error"
+             status_code = 400
+         elif error_category == "TimeOut":
+             error_type = "timeout_error"
+             status_code = 408
+         elif error_category == "NotFound":
+             error_type = "not_found_error"
+             status_code = 404
+
+         return self._error_response(
+             message=str(error),
+             error_type=error_type,
+             code=str(err.code),
+             status_code=status_code,
+         )
+
+     @staticmethod
+     def _error_response(
+         message: str,
+         error_type: str,
+         code: str,
+         status_code: int = 500,
+     ) -> JSONResponse:
+         """Build an error response"""
+         error_resp = OpenAIErrorResponse(
+             error=OpenAIError(
+                 message=message,
+                 type=error_type,
+                 code=code,
+             )
+         )
+         return JSONResponse(
+             content=error_resp.to_dict(),
+             status_code=status_code,
+         )
+
+     @staticmethod
+     def _create_error_sse_chunk(
+         code: str,
+         message: str,
+         request_id: str,
+     ) -> str:
+         """Build an error SSE chunk"""
+         import json
+         error_data = {
+             "id": request_id,
+             "object": "chat.completion.chunk",
+             "error": {
+                 "message": message,
+                 "type": "internal_error",
+                 "code": code,
+             }
+         }
+         return f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
coze_coding_utils/openai/types/__init__.py
@@ -0,0 +1,37 @@
+ """OpenAI type definitions"""
+
+ from coze_coding_utils.openai.types.request import (
+     ChatMessage,
+     ChatCompletionRequest,
+ )
+ from coze_coding_utils.openai.types.response import (
+     ToolCallFunction,
+     ToolCallChunk,
+     Delta,
+     ChunkChoice,
+     ChatCompletionChunk,
+     Usage,
+     Message,
+     Choice,
+     ChatCompletionResponse,
+     OpenAIError,
+     OpenAIErrorResponse,
+ )
+
+ __all__ = [
+     # Request types
+     "ChatMessage",
+     "ChatCompletionRequest",
+     # Response types
+     "ToolCallFunction",
+     "ToolCallChunk",
+     "Delta",
+     "ChunkChoice",
+     "ChatCompletionChunk",
+     "Usage",
+     "Message",
+     "Choice",
+     "ChatCompletionResponse",
+     "OpenAIError",
+     "OpenAIErrorResponse",
+ ]
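Per the __all__ above, consumers can pull the full request/response type surface from the one module path rather than the individual request/response modules; for example:

    from coze_coding_utils.openai.types import (
        ChatCompletionRequest,
        ChatMessage,
        OpenAIErrorResponse,
    )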
coze_coding_utils/openai/types/request.py
@@ -0,0 +1,24 @@
+ """OpenAI Chat Completions API request type definitions"""
+
+ from dataclasses import dataclass, field
+ from typing import List, Optional, Union, Dict, Any
+
+
+ @dataclass
+ class ChatMessage:
+     """Chat message"""
+     role: str  # "user" | "assistant" | "tool" | "system"
+     content: Union[str, List[Dict[str, Any]], None] = None  # supports multimodal content
+     tool_calls: Optional[List[Dict[str, Any]]] = None  # tool calls (assistant)
+     tool_call_id: Optional[str] = None  # ID linking a tool response to its call (tool)
+
+
+ @dataclass
+ class ChatCompletionRequest:
+     """Chat completion request"""
+     messages: List[ChatMessage] = field(default_factory=list)
+     model: str = "default"
+     stream: bool = False
+     session_id: str = ""  # extension field: session ID
+     temperature: Optional[float] = None
+     max_tokens: Optional[int] = None
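The request dataclasses mirror the OpenAI Chat Completions body, with session_id as the package's extension field (the handler above returns a 400 error when it is missing). A sketch of constructing a request directly from these definitions; the session ID value is hypothetical:

    from coze_coding_utils.openai.types import ChatCompletionRequest, ChatMessage

    # Build a streaming request; session_id is the package-specific extension field
    request = ChatCompletionRequest(
        messages=[
            ChatMessage(role="system", content="You are a helpful assistant."),
            ChatMessage(role="user", content="Hello!"),
        ],
        stream=True,
        session_id="sess-123",  # hypothetical session ID
    )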
coze_coding_utils/openai/types/response.py
@@ -0,0 +1,178 @@
+ """OpenAI Chat Completions API response type definitions"""
+
+ from dataclasses import dataclass, field, asdict
+ from typing import List, Optional, Dict, Any
+
+
+ @dataclass
+ class ToolCallFunction:
+     """Tool call function"""
+     name: str = ""
+     arguments: str = ""
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"name": self.name, "arguments": self.arguments}
+
+
+ @dataclass
+ class ToolCallChunk:
+     """Tool call delta (streaming)"""
+     index: int = 0
+     id: Optional[str] = None
+     type: str = "function"
+     function: Optional[ToolCallFunction] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         result: Dict[str, Any] = {"index": self.index, "type": self.type}
+         if self.id is not None:
+             result["id"] = self.id
+         if self.function is not None:
+             result["function"] = self.function.to_dict()
+         return result
+
+
+ @dataclass
+ class Delta:
+     """Streaming response delta"""
+     role: Optional[str] = None  # "assistant" | "tool"
+     content: Optional[str] = None
+     tool_calls: Optional[List[ToolCallChunk]] = None
+     tool_call_id: Optional[str] = None  # used when role=tool
+
+     def to_dict(self) -> Dict[str, Any]:
+         result: Dict[str, Any] = {}
+         if self.role is not None:
+             result["role"] = self.role
+         if self.content is not None:
+             result["content"] = self.content
+         if self.tool_calls is not None:
+             result["tool_calls"] = [tc.to_dict() for tc in self.tool_calls]
+         if self.tool_call_id is not None:
+             result["tool_call_id"] = self.tool_call_id
+         return result
+
+
+ @dataclass
+ class ChunkChoice:
+     """Streaming response choice"""
+     index: int = 0
+     delta: Delta = field(default_factory=Delta)
+     finish_reason: Optional[str] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         result: Dict[str, Any] = {
+             "index": self.index,
+             "delta": self.delta.to_dict(),
+         }
+         # finish_reason must be emitted explicitly, even when null
+         result["finish_reason"] = self.finish_reason
+         return result
+
+
+ @dataclass
+ class ChatCompletionChunk:
+     """Streaming response chunk"""
+     id: str = ""
+     object: str = "chat.completion.chunk"
+     created: int = 0
+     model: str = "default"
+     choices: List[ChunkChoice] = field(default_factory=list)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "id": self.id,
+             "object": self.object,
+             "created": self.created,
+             "model": self.model,
+             "choices": [c.to_dict() for c in self.choices],
+         }
+
+
+ @dataclass
+ class Usage:
+     """Token usage"""
+     prompt_tokens: int = 0
+     completion_tokens: int = 0
+     total_tokens: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return asdict(self)
+
+
+ @dataclass
+ class Message:
+     """Non-streaming response message"""
+     role: str = "assistant"
+     content: Optional[str] = None
+     tool_calls: Optional[List[Dict[str, Any]]] = None
+     tool_call_id: Optional[str] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         result: Dict[str, Any] = {"role": self.role}
+         if self.content is not None:
+             result["content"] = self.content
+         if self.tool_calls is not None:
+             result["tool_calls"] = self.tool_calls
+         if self.tool_call_id is not None:
+             result["tool_call_id"] = self.tool_call_id
+         return result
+
+
+ @dataclass
+ class Choice:
+     """Non-streaming response choice"""
+     index: int = 0
+     message: Message = field(default_factory=Message)
+     finish_reason: str = "stop"
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "index": self.index,
+             "message": self.message.to_dict(),
+             "finish_reason": self.finish_reason,
+         }
+
+
+ @dataclass
+ class ChatCompletionResponse:
+     """Non-streaming response"""
+     id: str = ""
+     object: str = "chat.completion"
+     created: int = 0
+     model: str = "default"
+     choices: List[Choice] = field(default_factory=list)
+     usage: Usage = field(default_factory=Usage)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "id": self.id,
+             "object": self.object,
+             "created": self.created,
+             "model": self.model,
+             "choices": [c.to_dict() for c in self.choices],
+             "usage": self.usage.to_dict(),
+         }
+
+
+ @dataclass
+ class OpenAIError:
+     """OpenAI error"""
+     message: str = ""
+     type: str = "internal_error"
+     code: str = ""
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "message": self.message,
+             "type": self.type,
+             "code": self.code,
+         }
+
+
+ @dataclass
+ class OpenAIErrorResponse:
+     """OpenAI error response"""
+     error: OpenAIError = field(default_factory=OpenAIError)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"error": self.error.to_dict()}
{coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: coze-coding-utils
- Version: 0.2.1
+ Version: 0.2.2a1
  Summary: Utilities for Coze coding client runtime context and helpers.
  Project-URL: Homepage, https://code.byted.org/stone/coze-coding-client
  Author: Bytedance Stone Team
@@ -39,4 +39,4 @@ Requires Python 3.10+.

  ## License

- MIT
+ MIT
coze_coding_utils-0.2.2a1.dist-info/RECORD
@@ -0,0 +1,37 @@
+ coze_coding_utils/__init__.py,sha256=OIMKOQLy07Uo5wQkLw3D7j6qRKt4o-smdW-dndYhpHo,37
+ coze_coding_utils/error/__init__.py,sha256=SbhsopZ8ZQsbXKZ-GPsw3Fq8AQAOC8W6bZgUZhIOw_k,886
+ coze_coding_utils/error/classifier.py,sha256=uXVmufL_sn4w7oNyvrEFXSI_8mCi4mXY353UK5d-d0Y,10028
+ coze_coding_utils/error/codes.py,sha256=34sC528UndVa96q0B2_BpvD-PVPPyZwL3wuVErxzx2U,17028
+ coze_coding_utils/error/exceptions.py,sha256=QjGk56ovGG-2V4gHcTeJq3-3ZIQQ8DF692zgIYcEJxI,17074
+ coze_coding_utils/error/patterns.py,sha256=YhhBcCoWQuvSbtA271eS3AB81pChypD7nDrPidQDu0s,44412
+ coze_coding_utils/error/test_classifier.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ coze_coding_utils/file/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ coze_coding_utils/file/file.py,sha256=CFhnxHpmh2aPeMOq3PwSsZFsIiQ-YigYxgYvzJAfx_0,11828
+ coze_coding_utils/helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ coze_coding_utils/helper/agent_helper.py,sha256=q1ZM30xLXoW-m0NJmJ_Y0M-kUAQCBstG_j7xkqsyRSU,22546
+ coze_coding_utils/helper/graph_helper.py,sha256=UNtqqiQNAQ4319qcC1vHiLYIL2eGzvGQRgXu3mgLq8Y,8893
+ coze_coding_utils/log/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ coze_coding_utils/log/common.py,sha256=mUNkCm68oaPaI6-a5UwLf87AfhrMnVPkEuri16guqKc,168
+ coze_coding_utils/log/config.py,sha256=Qkw3JRuGUKJ6CBY7WqHJOFeyCU47cArvUtMsSBifFMo,195
+ coze_coding_utils/log/err_trace.py,sha256=iwt5g8-AX0N2KuUpuLXjk5PocL6NZdAGddJh27Sxp_o,2954
+ coze_coding_utils/log/loop_trace.py,sha256=68sI1AHKd8oeLti-7trBygTZqIiUu_TcHt93DLItkXQ,2129
+ coze_coding_utils/log/node_log.py,sha256=AAreNJJc8Lx1QuCIwvss2ty9z5cDuHEzMobA4ZYSYBI,18040
+ coze_coding_utils/log/parser.py,sha256=XuzOkO8SzTbBP_XL1RMFzjqG4IG0iSRStAA2M6latt8,9728
+ coze_coding_utils/log/write_log.py,sha256=lDu4_Tjk6eYtWBXI-OOYLqe5ejqTtGnDcrH6M7fMQ2A,7223
+ coze_coding_utils/messages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ coze_coding_utils/messages/client.py,sha256=b-pfTeQXbXx_66ut9gSZY4SM_tP8gvPb5HRblcDnKV0,1249
+ coze_coding_utils/messages/server.py,sha256=AQtiKWqp3A1iYylIgqUMhRNimDHJZXQGEPcir7pe1k8,5397
+ coze_coding_utils/openai/__init__.py,sha256=3nrbbDovMXGDgSFRnJXml6cRHAdlm2MlZA6jaXixcJ4,141
+ coze_coding_utils/openai/handler.py,sha256=IBrRG6JyqMAlTG6bmKnJsNMlI2E0uYmHLm0ZCL4Hbsg,10325
+ coze_coding_utils/openai/converter/__init__.py,sha256=UC6O3vfQItCFf2ur08ZPuqT8NK1gQQpC3pTkITBzc9M,243
+ coze_coding_utils/openai/converter/request_converter.py,sha256=P0tvLu7_nFoovHyHrJgkPBx7mQI7cmprvnAleF8pfLs,5838
+ coze_coding_utils/openai/converter/response_converter.py,sha256=atUwAa-sYaUpp-y-JUwRCw_QOnm0js0-S1JBO9Ucnco,18378
+ coze_coding_utils/openai/types/__init__.py,sha256=sFBXOYmTABfAXYTmhBLJ8ZLs-xUoWJJ97i8eFx08x3Y,708
+ coze_coding_utils/openai/types/request.py,sha256=IuNMT2Ce1--_32R30Q2q7Lb2dAwKNy3hZc4AguDeD9E,805
+ coze_coding_utils/openai/types/response.py,sha256=pjHHVR8LSMVFCc3fGzKqXrdoKDIfSCJEfICd_X9Nohc,4808
+ coze_coding_utils/runtime_ctx/__init__.py,sha256=4W8VliAYUP1KY2gLJ_YDy2TmcXYVm-PY7XikQD_bFwA,2
+ coze_coding_utils/runtime_ctx/context.py,sha256=G8ld-WnQ1pTJe5OOXC_dTbagXj9IxmpRiPM4X_jWW6o,3992
+ coze_coding_utils-0.2.2a1.dist-info/METADATA,sha256=v1GSxZ6VbVzMuIQfLHE5n37z1iYG4UJnKJ3BTCV-lag,979
+ coze_coding_utils-0.2.2a1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ coze_coding_utils-0.2.2a1.dist-info/licenses/LICENSE,sha256=lzckZhAjHlpSJcWvppoST095IHFpBwKiB2pKcBv7vP4,1078
+ coze_coding_utils-0.2.2a1.dist-info/RECORD,,
coze_coding_utils-0.2.1.dist-info/RECORD
@@ -1,7 +0,0 @@
- coze_coding_utils/__init__.py,sha256=dQtSrS3n4kQxFmK2BrA9n_e5r6YJEMGZ3enaYv-VLeg,36
- coze_coding_utils/runtime_ctx/__init__.py,sha256=4W8VliAYUP1KY2gLJ_YDy2TmcXYVm-PY7XikQD_bFwA,2
- coze_coding_utils/runtime_ctx/context.py,sha256=G8ld-WnQ1pTJe5OOXC_dTbagXj9IxmpRiPM4X_jWW6o,3992
- coze_coding_utils-0.2.1.dist-info/METADATA,sha256=PaeCjKPUN4nvLGpUYFo0BgyBmHbK9nhozzWxv2Efs-0,978
- coze_coding_utils-0.2.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- coze_coding_utils-0.2.1.dist-info/licenses/LICENSE,sha256=lzckZhAjHlpSJcWvppoST095IHFpBwKiB2pKcBv7vP4,1078
- coze_coding_utils-0.2.1.dist-info/RECORD,,