auto-coder 0.1.311__py3-none-any.whl → 0.1.312__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

@@ -7,7 +7,11 @@ import os
 import sys
 import re
 import byzerllm
+from byzerllm import MetaHolder
+import time
 from loguru import logger as global_logger
+from autocoder.common.token_cost_caculate import TokenCostCalculator, TokenUsageStats
+from autocoder.common import AutoCoderArgs
 
 class ActivePackage:
     """
@@ -18,20 +22,25 @@ class ActivePackage:
     Then an updated document is generated from both the existing information and the new information.
     """
 
-    def __init__(self, llm: byzerllm.ByzerLLM):
+    def __init__(self, llm: byzerllm.ByzerLLM, product_mode: str = "lite"):
         """
         Initialize the active package generator.
 
         Args:
             llm: ByzerLLM instance used to generate the document content
+            product_mode: product mode, used to look up model pricing information
         """
         self.llm = llm
+        self.product_mode = product_mode
         # Create a dedicated logger instance
         self.logger = global_logger.bind(name="ActivePackage")
-
+        # Create the token cost calculator
+        self.token_calculator = TokenCostCalculator(logger_name="ActivePackage.TokenCost")
+
     def generate_active_file(self, context: Dict[str, Any], query: str,
                              existing_file_path: Optional[str] = None,
-                             file_changes: Optional[Dict[str, Tuple[str, str]]] = None) -> str:
+                             file_changes: Optional[Dict[str, Tuple[str, str]]] = None,
+                             args: Optional[AutoCoderArgs] = None) -> Tuple[str, Dict[str, Any]]:
         """
         Generate the complete active file content.
 
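Taken together, these changes alter the public surface of ActivePackage: the constructor gains a product_mode argument and generate_active_file now returns a (content, stats) tuple instead of a bare string. A minimal caller-side sketch of the updated interface (illustrative only: the llm instance, the import of ActivePackage, and the minimal single-key context dict are assumptions, not shown in this diff):

from typing import Any, Dict
import byzerllm

def build_active_doc(llm: byzerllm.ByzerLLM, directory_path: str, query: str) -> Dict[str, Any]:
    # Hypothetical helper; assumes ActivePackage is imported from its module in autocoder.
    pkg = ActivePackage(llm, product_mode="lite")                  # new product_mode argument
    context: Dict[str, Any] = {"directory_path": directory_path}   # minimal context, for illustration
    content, stats = pkg.generate_active_file(context, query)      # now returns (content, usage/cost stats)
    print(f"{directory_path}: {stats['total_tokens']} tokens, ${stats['cost']:.6f}")
    return {"content": content, "stats": stats}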
@@ -40,11 +49,20 @@ class ActivePackage:
             query: the user query / requirement
             existing_file_path: optional path to an existing file; if provided, its content will be read and used as a reference
             file_changes: dict of file changes, keyed by file path, with (content before change, content after change) tuples as values
+            args: AutoCoderArgs instance containing configuration
 
         Returns:
-            str: the generated active file content
+            Tuple[str, Dict[str, Any]]: the generated active file content plus token usage and cost information
         """
         try:
+            # Initialize token and cost statistics
+            total_stats = {
+                "total_tokens": 0,
+                "input_tokens": 0,
+                "output_tokens": 0,
+                "cost": 0.0
+            }
+
             # Check whether an existing file is present
             existing_content = None
             if existing_file_path and os.path.exists(existing_file_path):
@@ -61,15 +79,34 @@ class ActivePackage:
             # Choose the generation mode based on whether existing content is present
             if existing_content:
                 # Existing content found: use update mode
-                file_content = self.generate_updated_active_file(enhanced_context, query, existing_content)
+                file_content, usage_stats = self.generate_updated_active_file(enhanced_context, query, existing_content)
+                # Merge token and cost statistics
+                total_stats["total_tokens"] += usage_stats["total_tokens"]
+                total_stats["input_tokens"] += usage_stats["input_tokens"]
+                total_stats["output_tokens"] += usage_stats["output_tokens"]
+                total_stats["cost"] += usage_stats["cost"]
             else:
                 # No existing content: use create mode
-                file_content = self.generate_new_active_file(enhanced_context, query)
+                file_content, usage_stats = self.generate_new_active_file(enhanced_context, query)
+                # Merge token and cost statistics
+                total_stats["total_tokens"] += usage_stats["total_tokens"]
+                total_stats["input_tokens"] += usage_stats["input_tokens"]
+                total_stats["output_tokens"] += usage_stats["output_tokens"]
+                total_stats["cost"] += usage_stats["cost"]
 
-            return file_content
+            return file_content, total_stats
         except Exception as e:
             self.logger.error(f"Error generating active file: {e}")
-            return f"# 生成文档时出错\n\n错误: {str(e)}"
+            # Build empty statistics
+            empty_stats = {
+                "total_tokens": 0,
+                "input_tokens": 0,
+                "output_tokens": 0,
+                "cost": 0.0
+            }
+            # Return the error message together with the empty statistics
+            dir_name = os.path.basename(context.get('directory_path', '未知目录')) if context else '未知目录'
+            return f"# 生成文档时出错 - {dir_name}\n\n错误: {str(e)}", empty_stats
 
     def _enhance_context_with_changes(self, context: Dict[str, Any],
                                       file_changes: Optional[Dict[str, Tuple[str, str]]]) -> Dict[str, Any]:
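Because the success and error paths now report the same flat four-key stats dict, a caller that documents many directories can aggregate usage and cost with plain addition; a small sketch (hypothetical helper, not part of the package):

from typing import Any, Dict, Iterable

def sum_usage(stats_list: Iterable[Dict[str, Any]]) -> Dict[str, Any]:
    # Accumulate the per-call stats dicts returned by generate_active_file.
    totals: Dict[str, Any] = {"total_tokens": 0, "input_tokens": 0, "output_tokens": 0, "cost": 0.0}
    for stats in stats_list:
        for key in totals:
            totals[key] += stats.get(key, 0)
    return totals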
@@ -83,6 +120,11 @@ class ActivePackage:
         Returns:
             Dict[str, Any]: the enhanced context dict
         """
+        # Guard against an empty context
+        if not context:
+            self.logger.warning("调用_enhance_context_with_changes时传入空context")
+            return {}
+
         if not file_changes:
             return context
 
@@ -132,7 +174,7 @@ class ActivePackage:
 
         return enhanced_context
 
-    def generate_new_active_file(self, context: Dict[str, Any], query: str) -> str:
+    def generate_new_active_file(self, context: Dict[str, Any], query: str) -> Tuple[str, Dict[str, Any]]:
         """
         Generate brand-new active file content.
 
@@ -141,24 +183,79 @@ class ActivePackage:
             query: the user query / requirement
 
         Returns:
-            str: the newly generated active file content
+            Tuple[str, Dict[str, Any]]: the newly generated active file content plus token usage and cost information
         """
         try:
             # 1. Generate the current change section
-            current_change = self.generate_current_change.with_llm(self.llm).run(context, query)
+            meta_holder_current_change = MetaHolder()
+            start_time_current_change = time.monotonic()
+            current_change = self.generate_current_change.with_llm(self.llm).with_meta(
+                meta_holder_current_change).run(context, query)
+            end_time_current_change = time.monotonic()
+
+            # Track token usage with TokenCostCalculator
+            current_change_stats: TokenUsageStats = self.token_calculator.track_token_usage(
+                llm=self.llm,
+                meta_holder=meta_holder_current_change,
+                operation_name="Current Change Generation",
+                start_time=start_time_current_change,
+                end_time=end_time_current_change,
+                product_mode=self.product_mode
+            )
+
+            self.logger.info(f"Current Change Generation - Total tokens: {current_change_stats.total_tokens}, Total cost: ${current_change_stats.total_cost:.6f}")
 
             # 2. Generate the document section
-            document = self.generate_document.with_llm(self.llm).run(context, query)
+            meta_holder_document = MetaHolder()
+            start_time_document = time.monotonic()
+            document = self.generate_document.with_llm(self.llm).with_meta(
+                meta_holder_document).run(context, query)
+            end_time_document = time.monotonic()
+
+            # Track token usage with TokenCostCalculator
+            document_stats: TokenUsageStats = self.token_calculator.track_token_usage(
+                llm=self.llm,
+                meta_holder=meta_holder_document,
+                operation_name="Document Generation",
+                start_time=start_time_document,
+                end_time=end_time_document,
+                product_mode=self.product_mode
+            )
+
+            self.logger.info(f"Document Generation - Total tokens: {document_stats.total_tokens}, Total cost: ${document_stats.total_cost:.6f}")
+
+            # Compute the combined token usage statistics
+            total_tokens = current_change_stats.total_tokens + document_stats.total_tokens
+            input_tokens = current_change_stats.input_tokens + document_stats.input_tokens
+            output_tokens = current_change_stats.output_tokens + document_stats.output_tokens
+            total_cost = current_change_stats.total_cost + document_stats.total_cost
+            self.logger.info(f"Total Usage - Tokens: {total_tokens}, Input: {input_tokens}, Output: {output_tokens}, Cost: ${total_cost:.6f}")
+
+            # Safely resolve the directory name
+            dir_name = os.path.basename(context.get('directory_path', '未知目录'))
 
             # 3. Assemble the complete active file content
-            file_content = f"# 活动上下文 - {os.path.basename(context['directory_path'])}\n\n"
+            file_content = f"# 活动上下文 - {dir_name}\n\n"
             file_content += f"## 当前变更\n\n{current_change}\n\n"
             file_content += f"## 文档\n\n{document}\n"
 
-            return file_content
+            return file_content, {
+                "total_tokens": total_tokens,
+                "input_tokens": input_tokens,
+                "output_tokens": output_tokens,
+                "cost": total_cost
+            }
         except Exception as e:
             self.logger.error(f"Error generating new active file: {e}")
-            raise
+            # Return the error message and empty statistics
+            empty_stats = {
+                "total_tokens": 0,
+                "input_tokens": 0,
+                "output_tokens": 0,
+                "cost": 0.0
+            }
+            err_content = f"# 生成文档时出错 - {os.path.basename(context.get('directory_path', '未知目录'))}\n\n错误: {str(e)}"
+            return err_content, empty_stats
 
     def extract_sections(self, content: str) -> Tuple[str, str, str]:
         """
@@ -196,46 +293,90 @@ class ActivePackage:
             self.logger.error(f"Error extracting sections: {e}")
             return header, current_change_section, document_section
 
-    def generate_updated_active_file(self, context: Dict[str, Any], query: str, existing_content: str) -> str:
+    def generate_updated_active_file(self, context: Dict[str, Any], query: str, existing_content: str) -> Tuple[str, Dict[str, Any]]:
         """
         Generate updated active file content based on the existing content.
 
         Args:
             context: the directory context dict
             query: the user query / requirement
-            existing_content: the existing file content
+            existing_content: the existing active file content
 
         Returns:
-            str: the updated active file content
+            Tuple[str, Dict[str, Any]]: the updated active file content plus token usage and cost information
         """
         try:
-            # 1. Extract each of the sections from the existing content
+            # 1. Extract the sections from the existing content
             header, existing_current_change, existing_document = self.extract_sections(existing_content)
 
-            # 2. Update each section separately
-            updated_current_change = self.update_current_change.with_llm(self.llm).run(
-                context=context,
-                query=query,
-                existing_current_change=existing_current_change
+            # 2. Update the current change section
+            meta_holder_current_change = MetaHolder()
+            start_time_current_change = time.monotonic()
+            updated_current_change = self.update_current_change.with_llm(self.llm).with_meta(
+                meta_holder_current_change).run(context, query, existing_current_change)
+            end_time_current_change = time.monotonic()
+
+            # Track token usage with TokenCostCalculator
+            update_current_change_stats: TokenUsageStats = self.token_calculator.track_token_usage(
+                llm=self.llm,
+                meta_holder=meta_holder_current_change,
+                operation_name="Update Current Change",
+                start_time=start_time_current_change,
+                end_time=end_time_current_change,
+                product_mode=self.product_mode
             )
 
-            updated_document = self.update_document.with_llm(self.llm).run(
-                context=context,
-                query=query,
-                existing_document=existing_document
+            self.logger.info(f"Current Change Update - Total tokens: {update_current_change_stats.total_tokens}, Total cost: ${update_current_change_stats.total_cost:.6f}")
+
+            # 3. Update the document section
+            meta_holder_document = MetaHolder()
+            start_time_document = time.monotonic()
+            updated_document = self.update_document.with_llm(self.llm).with_meta(
+                meta_holder_document).run(context, query, existing_document)
+            end_time_document = time.monotonic()
+
+            # Track token usage with TokenCostCalculator
+            update_document_stats: TokenUsageStats = self.token_calculator.track_token_usage(
+                llm=self.llm,
+                meta_holder=meta_holder_document,
+                operation_name="Update Document",
+                start_time=start_time_document,
+                end_time=end_time_document,
+                product_mode=self.product_mode
            )
 
-            # 3. Assemble the updated active file content
-            file_content = f"{header}"
+            self.logger.info(f"Document Update - Total tokens: {update_document_stats.total_tokens}, Total cost: ${update_document_stats.total_cost:.6f}")
+
+            # Compute the combined token usage statistics
+            total_tokens = update_current_change_stats.total_tokens + update_document_stats.total_tokens
+            input_tokens = update_current_change_stats.input_tokens + update_document_stats.input_tokens
+            output_tokens = update_current_change_stats.output_tokens + update_document_stats.output_tokens
+            total_cost = update_current_change_stats.total_cost + update_document_stats.total_cost
+            self.logger.info(f"Total Usage - Tokens: {total_tokens}, Input: {input_tokens}, Output: {output_tokens}, Cost: ${total_cost:.6f}")
+
+            # 4. Assemble the complete active file content
+            file_content = header
             file_content += f"## 当前变更\n\n{updated_current_change}\n\n"
             file_content += f"## 文档\n\n{updated_document}\n"
 
-            return file_content
+            return file_content, {
+                "total_tokens": total_tokens,
+                "input_tokens": input_tokens,
+                "output_tokens": output_tokens,
+                "cost": total_cost
+            }
         except Exception as e:
             self.logger.error(f"Error updating active file: {e}")
-            # If updating fails, fall back to generating a new document
-            self.logger.info("Falling back to generating new active file")
-            return self.generate_new_active_file(context, query)
+            # Return the error message and empty statistics
+            empty_stats = {
+                "total_tokens": 0,
+                "input_tokens": 0,
+                "output_tokens": 0,
+                "cost": 0.0
+            }
+            dir_name = os.path.basename(context.get('directory_path', '未知目录'))
+            err_content = f"# 更新文档时出错 - {dir_name}\n\n错误: {str(e)}\n\n## 原始内容\n\n{existing_content}"
+            return err_content, empty_stats
 
     @byzerllm.prompt()
     def update_current_change(self, context: Dict[str, Any], query: str, existing_current_change: str) -> str:
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.311"
+__version__ = "0.1.312"