auto-coder 0.1.362__py3-none-any.whl → 0.1.363__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.362.dist-info → auto_coder-0.1.363.dist-info}/METADATA +1 -1
- {auto_coder-0.1.362.dist-info → auto_coder-0.1.363.dist-info}/RECORD +46 -18
- autocoder/agent/base_agentic/__init__.py +0 -0
- autocoder/agent/base_agentic/agent_hub.py +169 -0
- autocoder/agent/base_agentic/agentic_lang.py +112 -0
- autocoder/agent/base_agentic/agentic_tool_display.py +180 -0
- autocoder/agent/base_agentic/base_agent.py +1582 -0
- autocoder/agent/base_agentic/default_tools.py +683 -0
- autocoder/agent/base_agentic/test_base_agent.py +82 -0
- autocoder/agent/base_agentic/tool_registry.py +425 -0
- autocoder/agent/base_agentic/tools/__init__.py +12 -0
- autocoder/agent/base_agentic/tools/ask_followup_question_tool_resolver.py +72 -0
- autocoder/agent/base_agentic/tools/attempt_completion_tool_resolver.py +37 -0
- autocoder/agent/base_agentic/tools/base_tool_resolver.py +35 -0
- autocoder/agent/base_agentic/tools/example_tool_resolver.py +46 -0
- autocoder/agent/base_agentic/tools/execute_command_tool_resolver.py +72 -0
- autocoder/agent/base_agentic/tools/list_files_tool_resolver.py +110 -0
- autocoder/agent/base_agentic/tools/plan_mode_respond_tool_resolver.py +35 -0
- autocoder/agent/base_agentic/tools/read_file_tool_resolver.py +54 -0
- autocoder/agent/base_agentic/tools/replace_in_file_tool_resolver.py +156 -0
- autocoder/agent/base_agentic/tools/search_files_tool_resolver.py +134 -0
- autocoder/agent/base_agentic/tools/talk_to_group_tool_resolver.py +96 -0
- autocoder/agent/base_agentic/tools/talk_to_tool_resolver.py +79 -0
- autocoder/agent/base_agentic/tools/use_mcp_tool_resolver.py +44 -0
- autocoder/agent/base_agentic/tools/write_to_file_tool_resolver.py +58 -0
- autocoder/agent/base_agentic/types.py +189 -0
- autocoder/agent/base_agentic/utils.py +100 -0
- autocoder/auto_coder_runner.py +4 -4
- autocoder/chat/conf_command.py +11 -10
- autocoder/common/rulefiles/autocoderrules_utils.py +24 -0
- autocoder/common/save_formatted_log.py +1 -1
- autocoder/common/v2/agent/agentic_edit.py +21 -19
- autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +73 -1
- autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py +132 -4
- autocoder/common/v2/agent/agentic_edit_types.py +1 -2
- autocoder/common/v2/agent/agentic_tool_display.py +2 -3
- autocoder/rag/long_context_rag.py +424 -397
- autocoder/rag/test_doc_filter.py +393 -0
- autocoder/rag/test_long_context_rag.py +473 -0
- autocoder/rag/test_token_limiter.py +342 -0
- autocoder/shadows/shadow_manager.py +1 -3
- autocoder/version.py +1 -1
- {auto_coder-0.1.362.dist-info → auto_coder-0.1.363.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.362.dist-info → auto_coder-0.1.363.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.362.dist-info → auto_coder-0.1.363.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.362.dist-info → auto_coder-0.1.363.dist-info}/top_level.txt +0 -0
autocoder/rag/test_token_limiter.py
ADDED
@@ -0,0 +1,342 @@
+import pytest
+import os
+import shutil
+import tempfile
+from loguru import logger
+from pathlib import Path
+import time
+import json
+from typing import Dict, Any, List, Optional
+
+# Import the modules under test
+from autocoder.rag.token_limiter import TokenLimiter
+from autocoder.common import AutoCoderArgs, SourceCode
+from autocoder.rag.long_context_rag import RAGStat, RecallStat, ChunkStat, AnswerStat
+from byzerllm.utils.types import SingleOutputMeta
+
+# 1. Initialize FileMonitor (must run before everything else)
+@pytest.fixture(scope="function")
+def setup_file_monitor(temp_test_dir):
+    """Initialize FileMonitor; must run before everything else."""
+    try:
+        from autocoder.common.file_monitor.monitor import FileMonitor
+        monitor = FileMonitor(temp_test_dir)
+        monitor.reset_instance()
+        if not monitor.is_running():
+            monitor.start()
+            logger.info(f"File monitor started: {temp_test_dir}")
+        else:
+            logger.info(f"File monitor already running: {monitor.root_dir}")
+    except Exception as e:
+        logger.error(f"Error initializing file monitor: {e}")
+
+    # 2. Load rule files
+    try:
+        from autocoder.common.rulefiles.autocoderrules_utils import get_rules, reset_rules_manager
+        reset_rules_manager()
+        rules = get_rules(temp_test_dir)
+        logger.info(f"Loaded rules: {len(rules)}")
+    except Exception as e:
+        logger.error(f"Error loading rules: {e}")
+
+    return temp_test_dir
+
+# Pytest fixture: temporary test directory
+@pytest.fixture(scope="function")
+def temp_test_dir():
+    """Provide a temporary directory that is cleaned up after the test."""
+    temp_dir = tempfile.mkdtemp()
+    logger.info(f"Created temporary test directory: {temp_dir}")
+    yield temp_dir
+    logger.info(f"Cleaning up temporary test directory: {temp_dir}")
+    shutil.rmtree(temp_dir)
+
+# Pytest fixture: test file structure
+@pytest.fixture(scope="function")
+def test_files(temp_test_dir):
+    """Create the file/directory structure needed by the tests."""
+    # Create sample files
+    file_structure = {
+        "docs/guide.md": "# TokenLimiter 使用指南\n使用TokenLimiter可以控制文档分块和令牌限制。",
+        "docs/api.md": "# API说明\n## 初始化\n```python\nlimiter = TokenLimiter(count_tokens, full_text_limit, segment_limit, buff_limit, llm)\n```",
+        "src/example.py": "def add(a, b):\n return a + b\n\ndef subtract(a, b):\n return a - b",
+        "src/utils/helpers.py": "def format_text(text):\n return text.strip()\n\ndef count_words(text):\n return len(text.split())",
+        ".gitignore": "*.log\n__pycache__/\n.cache/",
+        ".autocoderignore": "*.log\n__pycache__/\n.cache/"
+    }
+
+    for file_path, content in file_structure.items():
+        full_path = os.path.join(temp_test_dir, file_path)
+        os.makedirs(os.path.dirname(full_path), exist_ok=True)
+        with open(full_path, 'w', encoding='utf-8') as f:
+            f.write(content)
+
+    return temp_test_dir
+
+# Pytest fixture: configuration arguments
+@pytest.fixture
+def test_args():
+    """Create configuration arguments for testing."""
+    return AutoCoderArgs(
+        source_dir=".",
+        context_prune=True,
+        context_prune_strategy="extract",
+        conversation_prune_safe_zone_tokens=400,
+        context_prune_sliding_window_size=10,
+        context_prune_sliding_window_overlap=2,
+        rag_context_window_limit=8000,
+        rag_doc_filter_relevance=3,
+        full_text_ratio=0.7,
+        segment_ratio=0.2,
+        index_filter_workers=1,
+        required_exts=".py,.md",
+        monitor_mode=False,
+        enable_hybrid_index=False,
+        disable_segment_reorder=False
+    )
+
+# 3. Load the tokenizer (must come after FileMonitor and rules initialization)
+@pytest.fixture
+def load_tokenizer_fixture(setup_file_monitor):
+    """Load the tokenizer; must come after FileMonitor and rules initialization."""
+    from autocoder.auto_coder_runner import load_tokenizer
+    load_tokenizer()
+    logger.info("Tokenizer loaded")
+    return True
+
+# 4. Initialize the LLM
+@pytest.fixture
+def real_llm(load_tokenizer_fixture):
+    """Create a real LLM object; must come after the tokenizer is loaded."""
+    from autocoder.utils.llms import get_single_llm
+    llm = get_single_llm("v3_chat", product_mode="lite")
+    logger.info(f"LLM initialized: {llm.default_model_name}")
+    return llm
+
+# 5. TokenCounter instance
+@pytest.fixture
+def token_counter(load_tokenizer_fixture):
+    """Create a TokenCounter instance."""
+    from autocoder.rag.token_counter import TokenCounter
+    from autocoder.rag.variable_holder import VariableHolder
+
+    tokenizer_path = None
+    if hasattr(VariableHolder, "TOKENIZER_PATH") and VariableHolder.TOKENIZER_PATH:
+        tokenizer_path = VariableHolder.TOKENIZER_PATH
+        return TokenCounter(tokenizer_path)
+
+    # Fall back to RemoteTokenCounter when no tokenizer_path is available
+    from autocoder.rag.token_counter import RemoteTokenCounter
+    from byzerllm import ByzerLLM
+
+    tokenizer_llm = ByzerLLM()
+    if tokenizer_llm.is_model_exist("deepseek_tokenizer"):
+        tokenizer_llm.setup_default_model_name("deepseek_tokenizer")
+        return RemoteTokenCounter(tokenizer_llm)
+
+    pytest.skip("No tokenizer available; skipping test")
+
+# 6. TokenLimiter instance
+@pytest.fixture
+def token_limiter(real_llm, token_counter, test_args):
+    """Create a TokenLimiter instance."""
+    from autocoder.rag.token_limiter import TokenLimiter
+
+    full_text_limit = int(test_args.rag_context_window_limit * test_args.full_text_ratio)
+    segment_limit = int(test_args.rag_context_window_limit * test_args.segment_ratio)
+    buff_limit = int(test_args.rag_context_window_limit * (1 - test_args.full_text_ratio - test_args.segment_ratio))
+
+    limiter = TokenLimiter(
+        count_tokens=token_counter.count_tokens,
+        full_text_limit=full_text_limit,
+        segment_limit=segment_limit,
+        buff_limit=buff_limit,
+        llm=real_llm,
+        disable_segment_reorder=test_args.disable_segment_reorder
+    )
+
+    return limiter
+
+# --- Test cases ---
+
+def test_limit_tokens_basic(token_limiter, test_files):
+    """Test the basic behavior of TokenLimiter."""
+    # Create test documents
+    relevant_docs = [
+        SourceCode(
+            module_name=os.path.join(test_files, "docs/guide.md"),
+            source_code="# TokenLimiter 使用指南\n使用TokenLimiter可以控制文档分块和令牌限制。"
+        ),
+        SourceCode(
+            module_name=os.path.join(test_files, "docs/api.md"),
+            source_code="# API说明\n## 初始化\n```python\nlimiter = TokenLimiter(count_tokens, full_text_limit, segment_limit, buff_limit, llm)\n```"
+        )
+    ]
+
+    # Create the conversation
+    conversations = [{"role": "user", "content": "如何使用TokenLimiter进行文档分块?"}]
+
+    # Apply the token limit
+    result = token_limiter.limit_tokens(
+        relevant_docs=relevant_docs,
+        conversations=conversations,
+        index_filter_workers=1
+    )
+
+    # Verify the result
+    assert result is not None, "A result should be returned"
+    assert hasattr(result, "docs"), "The result should contain docs"
+    assert len(result.docs) > 0, "At least one document should be returned"
+    assert hasattr(result, "input_tokens_counts"), "The result should contain input token counts"
+    assert hasattr(result, "generated_tokens_counts"), "The result should contain generated token counts"
+
+    # Check the first-round full-text docs
+    assert hasattr(token_limiter, "first_round_full_docs"), "Should have a first_round_full_docs attribute"
+
+    # Check the second-round extracted docs
+    assert hasattr(token_limiter, "second_round_extracted_docs"), "Should have a second_round_extracted_docs attribute"
+
+    # Log test result details
+    logger.info("="*80)
+    logger.info("TokenLimiter basic test results:")
+    logger.info("-"*80)
+    logger.info(f"Input documents: {len(relevant_docs)}")
+    logger.info(f"Output documents: {len(result.docs)}")
+    logger.info(f"First-round full-text documents: {len(token_limiter.first_round_full_docs)}")
+    logger.info(f"Second-round extracted documents: {len(token_limiter.second_round_extracted_docs)}")
+    logger.info(f"Input token counts: {result.input_tokens_counts}")
+    logger.info(f"Generated token counts: {result.generated_tokens_counts}")
+    logger.info("="*80)
+
+def test_limit_tokens_with_large_docs(token_limiter, test_files):
+    """Test TokenLimiter's handling of large documents."""
+    # Create a large test document
+    large_content = "# 大文档测试\n\n" + "这是一个很长的文档。" * 100
+
+    relevant_docs = [
+        SourceCode(
+            module_name=os.path.join(test_files, "docs/large_doc.md"),
+            source_code=large_content
+        ),
+        SourceCode(
+            module_name=os.path.join(test_files, "docs/guide.md"),
+            source_code="# TokenLimiter 使用指南\n使用TokenLimiter可以控制文档分块和令牌限制。"
+        )
+    ]
+
+    # Create the conversation
+    conversations = [{"role": "user", "content": "如何处理大型文档?"}]
+
+    # Apply the token limit
+    result = token_limiter.limit_tokens(
+        relevant_docs=relevant_docs,
+        conversations=conversations,
+        index_filter_workers=1
+    )
+
+    # Verify the result
+    assert result is not None, "A result should be returned"
+    assert len(result.docs) > 0, "At least one document should be returned"
+
+    # Check that the total token count of all documents stays within the limit
+    total_tokens = sum([
+        token_limiter.count_tokens(doc.source_code) for doc in result.docs
+    ])
+    total_limit = token_limiter.full_text_limit + token_limiter.segment_limit + token_limiter.buff_limit
+
+    assert total_tokens <= total_limit, f"Total tokens ({total_tokens}) should not exceed the limit ({total_limit})"
+
+    # Log test result details
+    logger.info("="*80)
+    logger.info("TokenLimiter large-document test results:")
+    logger.info("-"*80)
+    logger.info(f"Input documents: {len(relevant_docs)}")
+    logger.info(f"Output documents: {len(result.docs)}")
+    logger.info(f"Total tokens: {total_tokens}")
+    logger.info(f"Total limit: {total_limit}")
+    logger.info(f"First-round full-text documents: {len(token_limiter.first_round_full_docs)}")
+    logger.info(f"Second-round extracted documents: {len(token_limiter.second_round_extracted_docs)}")
+    logger.info("="*80)
+
+def test_limit_tokens_integration(token_limiter, token_counter, real_llm, test_args, test_files):
+    """Integration test: simulate the _process_document_chunking call in LongContextRAG."""
+    # Create test documents
+    relevant_docs = [
+        SourceCode(
+            module_name=os.path.join(test_files, "docs/guide.md"),
+            source_code="# TokenLimiter 使用指南\n使用TokenLimiter可以控制文档分块和令牌限制。"
+        ),
+        SourceCode(
+            module_name=os.path.join(test_files, "docs/api.md"),
+            source_code="# API说明\n## 初始化\n```python\nlimiter = TokenLimiter(count_tokens, full_text_limit, segment_limit, buff_limit, llm)\n```"
+        ),
+        SourceCode(
+            module_name=os.path.join(test_files, "src/example.py"),
+            source_code="def add(a, b):\n return a + b\n\ndef subtract(a, b):\n return a - b"
+        )
+    ]
+
+    # Create the conversation
+    conversations = [{"role": "user", "content": "如何使用TokenLimiter?"}]
+
+    # Prepare RAG statistics
+    rag_stat = RAGStat(
+        recall_stat=RecallStat(
+            total_input_tokens=10,  # assume some tokens have already been counted
+            total_generated_tokens=5,
+            model_name=real_llm.default_model_name,
+        ),
+        chunk_stat=ChunkStat(
+            total_input_tokens=0,
+            total_generated_tokens=0,
+            model_name=real_llm.default_model_name,
+        ),
+        answer_stat=AnswerStat(
+            total_input_tokens=0,
+            total_generated_tokens=0,
+            model_name=real_llm.default_model_name,
+        ),
+    )
+
+    # Simulate the processing logic of _process_document_chunking
+    first_round_full_docs = []
+    second_round_extracted_docs = []
+    sencond_round_time = 0
+
+    start_time = time.time()
+    token_limiter_result = token_limiter.limit_tokens(
+        relevant_docs=relevant_docs,
+        conversations=conversations,
+        index_filter_workers=test_args.index_filter_workers or 1,
+    )
+    sencond_round_time = time.time() - start_time
+
+    # Update statistics
+    rag_stat.chunk_stat.total_input_tokens += sum(token_limiter_result.input_tokens_counts)
+    rag_stat.chunk_stat.total_generated_tokens += sum(token_limiter_result.generated_tokens_counts)
+    rag_stat.chunk_stat.model_name = token_limiter_result.model_name
+
+    final_relevant_docs = token_limiter_result.docs
+    first_round_full_docs = token_limiter.first_round_full_docs
+    second_round_extracted_docs = token_limiter.second_round_extracted_docs
+
+    # Verify the result
+    assert final_relevant_docs is not None, "Processed documents should be returned"
+    assert len(final_relevant_docs) > 0, "At least one document should be returned"
+    assert rag_stat.chunk_stat.total_input_tokens > 0, "The input token count should increase"
+
+    # Log test result details
+    logger.info("="*80)
+    logger.info("TokenLimiter integration test results:")
+    logger.info("-"*80)
+    logger.info(f"Processing time: {sencond_round_time:.4f}s")
+    logger.info(f"Input documents: {len(relevant_docs)}")
+    logger.info(f"Output documents: {len(final_relevant_docs)}")
+    logger.info(f"First-round full-text documents: {len(first_round_full_docs)}")
+    logger.info(f"Second-round extracted documents: {len(second_round_extracted_docs)}")
+    logger.info(f"Total input tokens: {rag_stat.chunk_stat.total_input_tokens}")
+    logger.info(f"Total generated tokens: {rag_stat.chunk_stat.total_generated_tokens}")
+    logger.info("="*80)
+
+if __name__ == "__main__":
+    pytest.main(["-xvs", "test_token_limiter.py"])
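For reference, the token_limiter fixture above splits the configured RAG context window into a full-text pool, a segment pool, and a buffer. The following is a minimal standalone sketch of that arithmetic; it mirrors the fixture with the test_args values (rag_context_window_limit=8000, full_text_ratio=0.7, segment_ratio=0.2), but the helper name is hypothetical and nothing here is imported from the auto-coder package.

# Standalone sketch of the budget split computed in the token_limiter fixture above
# (illustration only; split_budget is a hypothetical helper, not part of auto-coder).
def split_budget(window_limit: int, full_text_ratio: float, segment_ratio: float):
    full_text_limit = int(window_limit * full_text_ratio)
    segment_limit = int(window_limit * segment_ratio)
    buff_limit = int(window_limit * (1 - full_text_ratio - segment_ratio))
    return full_text_limit, segment_limit, buff_limit

print(split_budget(8000, 0.7, 0.2))  # (5600, 1600, 800)

With these values the bound checked in test_limit_tokens_with_large_docs (full_text_limit + segment_limit + buff_limit) works out to 8000 tokens, i.e. the full rag_context_window_limit.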
autocoder/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.1.362"
+__version__ = "0.1.363"