auto-coder-web 0.1.80__py3-none-any.whl → 0.1.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_coder_web/auto_coder_runner_wrapper.py +1 -2
- auto_coder_web/common_router/model_router.py +23 -12
- auto_coder_web/init_project.py +9 -0
- auto_coder_web/proxy.py +2 -2
- auto_coder_web/routers/auto_router.py +21 -17
- auto_coder_web/routers/chat_router.py +4 -4
- auto_coder_web/routers/coding_router.py +7 -15
- auto_coder_web/routers/direct_chat_router.py +42 -0
- auto_coder_web/version.py +1 -1
- auto_coder_web/web/assets/{main-X9KQjWaa.css → main-B3_hzhoO.css} +2 -2
- auto_coder_web/web/assets/main.js +290 -290
- auto_coder_web/web/index.html +1 -1
- {auto_coder_web-0.1.80.dist-info → auto_coder_web-0.1.82.dist-info}/METADATA +2 -2
- {auto_coder_web-0.1.80.dist-info → auto_coder_web-0.1.82.dist-info}/RECORD +17 -16
- {auto_coder_web-0.1.80.dist-info → auto_coder_web-0.1.82.dist-info}/WHEEL +0 -0
- {auto_coder_web-0.1.80.dist-info → auto_coder_web-0.1.82.dist-info}/entry_points.txt +0 -0
- {auto_coder_web-0.1.80.dist-info → auto_coder_web-0.1.82.dist-info}/top_level.txt +0 -0
auto_coder_web/common_router/model_router.py
CHANGED
@@ -175,7 +175,7 @@ class ModelInfo(BaseModel):
     input_price: float
     output_price: float
     is_reasoning: bool
-
+    max_output_tokens: int = 8096
 class ProviderConfig(BaseModel):
     name: str
     base_url: str
@@ -234,25 +234,36 @@ def load_providers() -> List[Dict]:
                 "is_reasoning": False
             },
             {
-                "id": "
-                "name": "
-                "input_price": 
-                "output_price": 
-                "is_reasoning": False
+                "id": "openai/gpt-4.1",
+                "name": "gpt-4.1",
+                "input_price": 14,
+                "output_price": 42,
+                "is_reasoning": False,
+                "max_output_tokens": 8096*3
             },
             {
-                "id": "
-                "name": "
-                "input_price": 
-                "output_price": 
-                "is_reasoning": False
+                "id": "openai/gpt-4.1-mini",
+                "name": "gpt-4.1-mini",
+                "input_price": 2.8,
+                "output_price": 11.2,
+                "is_reasoning": False,
+                "max_output_tokens": 8096*3
+            },
+            {
+                "id": "openai/gpt-4.1-nano",
+                "name": "gpt-4.1-nano",
+                "input_price": 0.7,
+                "output_price": 2.8,
+                "is_reasoning": False,
+                "max_output_tokens": 8096*3
             },
             {
                 "id": "google/gemini-2.5-pro-preview-03-25",
                 "name": "gemini-2.5-pro-preview-03-25",
                 "input_price": 0.0,
                 "output_price": 0.0,
-                "is_reasoning": False
+                "is_reasoning": False,
+                "max_output_tokens": 8096*2
             }
         ]
     },
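The practical effect of this hunk: `ModelInfo` gains a `max_output_tokens` field defaulting to 8096, so provider entries that predate the field still validate, while the new `gpt-4.1` family and the Gemini entry override it. A minimal sketch of that behavior, assuming `ModelInfo` is a plain Pydantic model whose `id`/`name` string fields are implied by the provider dicts (they are not visible in this hunk):

```python
from pydantic import BaseModel

class ModelInfo(BaseModel):
    # id/name are assumed from the provider dicts; only the fields
    # visible in the hunk above are certain.
    id: str
    name: str
    input_price: float
    output_price: float
    is_reasoning: bool
    max_output_tokens: int = 8096  # new in 0.1.82

# A legacy entry without the new key falls back to the default...
legacy = ModelInfo(id="m", name="m", input_price=0.0,
                   output_price=0.0, is_reasoning=False)
assert legacy.max_output_tokens == 8096

# ...while the new gpt-4.1 entries override it to 8096*3 == 24288.
gpt41 = ModelInfo(id="openai/gpt-4.1", name="gpt-4.1",
                  input_price=14, output_price=42,
                  is_reasoning=False, max_output_tokens=8096 * 3)
assert gpt41.max_output_tokens == 24288
```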
auto_coder_web/init_project.py
CHANGED
@@ -20,6 +20,15 @@ def init_project(project_path: str):
         f.write("\n/actions/")
         f.write("\n/output.txt")
 
+
+    # Generate the .autocoderignore file, using .gitignore format
+    autocoderignore_path = os.path.join(source_dir, ".autocoderignore")
+    autocoderignore_content = (
+        "target\n"
+    )
+    with open(autocoderignore_path, "w", encoding="utf-8") as f:
+        f.write(autocoderignore_content)
+
     print(
         f"""Successfully initialized auto-coder project in {os.path.abspath(project_path)}."""
     )
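For reference, the new init step writes a single ignore pattern. A standalone sketch of the same logic; `source_dir` is a stand-in here, since the real function derives it from `project_path` in code outside this hunk:

```python
import os

def write_autocoderignore(source_dir: str) -> str:
    """Write a .autocoderignore (in .gitignore format) containing
    the single pattern "target", as init_project now does."""
    path = os.path.join(source_dir, ".autocoderignore")
    with open(path, "w", encoding="utf-8") as f:
        f.write("target\n")
    return path

# Usage: afterwards the directory contains a .autocoderignore
# whose entire content is the line "target".
print(open(write_autocoderignore("."), encoding="utf-8").read())
```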
auto_coder_web/proxy.py
CHANGED
@@ -19,7 +19,7 @@ import sys
 from auto_coder_web.terminal import terminal_manager
 from autocoder.common import AutoCoderArgs
 from auto_coder_web.auto_coder_runner_wrapper import AutoCoderRunnerWrapper
-from auto_coder_web.routers import todo_router, settings_router, auto_router, commit_router, chat_router, coding_router, index_router, config_router, upload_router, rag_router, editable_preview_router,mcp_router
+from auto_coder_web.routers import todo_router, settings_router, auto_router, commit_router, chat_router, coding_router, index_router, config_router, upload_router, rag_router, editable_preview_router, mcp_router, direct_chat_router
 from auto_coder_web.expert_routers import history_router
 from auto_coder_web.common_router import completions_router, file_router, auto_coder_conf_router, chat_list_router, file_group_router, model_router, compiler_router
 from auto_coder_web.common_router import active_context_router
@@ -106,7 +106,7 @@ class ProxyServer:
         self.app.include_router(editable_preview_router.router)
         self.app.include_router(mcp_router.router)
         self.app.include_router(active_context_router.router)
-
+        self.app.include_router(direct_chat_router.router)
 
         @self.app.on_event("shutdown")
         async def shutdown_event():
auto_coder_web/routers/auto_router.py
CHANGED
@@ -23,7 +23,7 @@ cancel_thread_pool = ThreadPoolExecutor(max_workers=5)
 
 class AutoCommandRequest(BaseModel):
     command: str
-    include_conversation_history: bool = 
+    include_conversation_history: bool = True
     buildin_conversation_history: bool = False
 
 class EventPollRequest(BaseModel):
@@ -64,7 +64,7 @@ def ensure_task_dir(project_path: str) -> str:
 @byzerllm.prompt()
 def coding_prompt(messages: List[Dict[str, Any]], query: str):
     '''
-    Below is a list of messages we have already produced, where USER_RESPONSE
+    Below is a list of messages we have already produced, where USER_RESPONSE marks the user's input and everything else is your output:
     <messages>
     {% for message in messages %}
     <message>
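Both `coding_prompt` definitions in this release rely on `@byzerllm.prompt()`, which treats the function's docstring as a Jinja2 template over its arguments; calling `coding_prompt.prompt(messages, query)` just renders the template to a string (no LLM call), which is how the handlers below build `prompt_text`. A minimal sketch of the pattern — the `<message>` body is truncated in the hunk above, so the fields rendered inside it here are an assumption:

```python
from typing import Any, Dict, List
import byzerllm

@byzerllm.prompt()
def coding_prompt(messages: List[Dict[str, Any]], query: str):
    '''
    Below is a list of messages we have already produced, where
    USER_RESPONSE marks the user's input and everything else is your output:
    <messages>
    {% for message in messages %}
    <message>{{ message.type }}: {{ message.content }}</message>
    {% endfor %}
    </messages>
    {{ query }}
    '''

# .prompt(...) renders the docstring template; no model is invoked.
rendered = coding_prompt.prompt(
    [{"type": "USER_RESPONSE", "content": "add a login page"}],
    "now wire it into the router",
)
print(rendered)
```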
@@ -96,7 +96,8 @@ async def auto_command(request: AutoCommandRequest, project_path: str = Depends(
     try:
         # Create an AutoCoderRunnerWrapper instance, using the project path obtained from the app context
         wrapper = AutoCoderRunnerWrapper(project_path)
-        wrapper.configure_wrapper(f"event_file:{event_file}")
+        wrapper.configure_wrapper(f"event_file:{event_file}")
+        global_cancel.register_token(event_file)
         prompt_text = request.command
 
         if request.include_conversation_history:
@@ -110,33 +111,38 @@ async def auto_command(request: AutoCommandRequest, project_path: str = Depends(
                     current_session_name = session_data.get("session_name", "")
             except Exception as e:
                 logger.error(f"Error reading current session: {str(e)}")
+                logger.exception(e)
 
             # Fetch the history messages
             messages = []
             if current_session_name:
                 chat_list_file = os.path.join(project_path, ".auto-coder", "auto-coder.web", "chat-lists", f"{current_session_name}.json")
+                logger.info(f"loading chat history from {chat_list_file}")
                 if os.path.exists(chat_list_file):
                     try:
-                        with open(chat_list_file, 'r', encoding="utf-8") as f:
+                        with open(chat_list_file, 'r', encoding="utf-8") as f:
                             chat_data = json.load(f)
                             # Extract the messages from the chat history
-                            for msg in chat_data.get("messages", []):
-                                #
-
-
-
-                                if msg.get("contentType","") in ["token_stat"]:
-                                    continue
+                            for msg in chat_data.get("messages", []):
+                                # if msg.get("metadata",{}).get("stream_out_type","") == "/agent/edit":
+                                #     messages.append(msg)
+                                #     continue
 
+                                # if msg.get("type","") not in ["USER_RESPONSE","RESULT","COMPLETION"]:
+                                #     continue
+                                if msg.get("contentType","") in ["token_stat"]:
+                                    continue
                                 messages.append(msg)
-                    except Exception as e:
+                    except Exception as e:
                         logger.error(f"Error reading chat history: {str(e)}")
+                        logger.exception(e)
 
             if messages:
                 # Call coding_prompt to build a prompt that includes the history messages
                 prompt_text = coding_prompt.prompt(messages, request.command)
 
-        # Call the auto_command_wrapper method
+        # Call the auto_command_wrapper method
+        logger.info(f"Executing auto command {file_id} with prompt: {prompt_text}")
         result = wrapper.auto_command_wrapper(prompt_text, {
             "event_file_id": file_id
         })
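The history loader above keeps every message except `token_stat` entries; the filters on `metadata.stream_out_type` and `type` remain in the file but commented out. A sketch of that filtering against an illustrative chat-list payload — only the keys the handler actually reads (`messages`, `contentType`) come from the diff, the rest of the shape is invented for the example:

```python
import json

# Illustrative chat-list payload; real files live under
# .auto-coder/auto-coder.web/chat-lists/<session>.json.
chat_data = json.loads("""
{
  "messages": [
    {"type": "USER_RESPONSE", "contentType": "text", "content": "hi"},
    {"type": "SYSTEM", "contentType": "token_stat", "content": "{}"},
    {"type": "RESULT", "contentType": "text", "content": "done"}
  ]
}
""")

# Same filter as the handler: drop token_stat entries, keep the rest.
messages = []
for msg in chat_data.get("messages", []):
    if msg.get("contentType", "") in ["token_stat"]:
        continue
    messages.append(msg)

assert len(messages) == 2  # the token_stat entry is dropped
```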
@@ -369,12 +375,10 @@ async def cancel_task(request: CancelTaskRequest, project_path: str = Depends(ge
     """
     # Define the cancel-task function to run in a thread
     def cancel_task_thread(event_file_id: str, project_path: str):
-        try:
-            # Set the global cancel flag
-            global_cancel.set()
-
+        try:
             # Get the event file path and the event manager
             event_file = get_event_file_path(file_id=event_file_id, project_path=project_path)
+            global_cancel.set(token=event_file)
             event_manager = get_event_manager(event_file)
 
             # Write a cancellation event to the event stream
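All three routers in this release follow the same pattern: `global_cancel.register_token(event_file)` when a task starts, then `global_cancel.set(token=event_file)` to cancel just that task, replacing the old process-wide `global_cancel.set()`. The real `GlobalCancel` lives in the `autocoder` package and is not part of this diff; the sketch below is a hypothetical stand-in, purely to illustrate the per-token call shape:

```python
import threading
from typing import Dict

class GlobalCancel:
    """Hypothetical token-scoped cancel flag: only the two methods
    the routers call (register_token, set) plus a check helper."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._flags: Dict[str, bool] = {}

    def register_token(self, token: str) -> None:
        with self._lock:
            self._flags.setdefault(token, False)

    def set(self, token: str) -> None:
        with self._lock:
            self._flags[token] = True

    def is_set(self, token: str) -> bool:
        with self._lock:
            return self._flags.get(token, False)

global_cancel = GlobalCancel()
global_cancel.register_token("/tmp/events/abc.jsonl")  # task start
global_cancel.set(token="/tmp/events/abc.jsonl")       # cancel this task only
assert global_cancel.is_set("/tmp/events/abc.jsonl")
assert not global_cancel.is_set("/tmp/events/other.jsonl")
```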
auto_coder_web/routers/chat_router.py
CHANGED
@@ -68,7 +68,8 @@ async def chat_command(request: ChatCommandRequest, project_path: str = Depends(
     try:
         # Create an AutoCoderRunnerWrapper instance, using the project path obtained from the app context
         wrapper = AutoCoderRunnerWrapper(project_path)
-        wrapper.configure_wrapper(f"event_file:{event_file}")
+        wrapper.configure_wrapper(f"event_file:{event_file}")
+        global_cancel.register_token(event_file)
 
         # Call the chat method
         result = wrapper.chat_wrapper(request.command)
@@ -304,9 +305,8 @@ async def cancel_task(request: CancelTaskRequest, project_path: str = Depends(ge
     event_file = get_event_file_path(file_id=request.event_file_id, project_path=project_path)
 
     def cancel_in_thread():
-        try:
-
-            global_cancel.set_cancel(request.event_file_id)
+        try:
+            global_cancel.set(token=event_file)
 
             # Get the event manager
             event_manager = get_event_manager(event_file)
auto_coder_web/routers/coding_router.py
CHANGED
@@ -58,7 +58,7 @@ def ensure_task_dir(project_path: str) -> str:
 @byzerllm.prompt()
 def coding_prompt(messages: List[Dict[str, Any]], request: CodingCommandRequest):
     '''
-    Below is a list of messages we have already produced, where USER_RESPONSE
+    Below is a list of messages we have already produced, where USER_RESPONSE marks the user's input and everything else is your output:
     <messages>
     {% for message in messages %}
     <message>
@@ -89,6 +89,7 @@ async def coding_command(request: CodingCommandRequest, project_path: str = Depe
     # Create an AutoCoderRunnerWrapper instance, using the project path obtained from the app context
     wrapper = AutoCoderRunnerWrapper(project_path)
     wrapper.configure_wrapper(f"event_file:{event_file}")
+    global_cancel.register_token(event_file)
 
     # Get the current session name
     current_session_file = os.path.join(project_path, ".auto-coder", "auto-coder.web", "current-session.json")
@@ -123,20 +124,12 @@ async def coding_command(request: CodingCommandRequest, project_path: str = Depe
             logger.error(f"Error reading chat history: {str(e)}")
 
     # Build the prompt text
-    prompt_text = 
+    prompt_text = request.command
     if messages:
         # Call coding_prompt to build a prompt that includes the history messages
-        prompt_text = 
-        logger.info(prompt_text)
+        prompt_text = coding_prompt.prompt(messages, request.command)
 
-
-    if prompt_text:
-        logger.info(f"Using conversation history with {len(messages)} messages for coding command")
-        result = wrapper.coding_wapper(prompt_text)
-    else:
-        # If there are no history messages, or fetching them failed, pass the original command directly
-        logger.info("Using original command without conversation history")
-        result = wrapper.coding_wapper(prompt_text + request.command)
+    result = wrapper.coding_wapper(prompt_text)
 
     get_event_manager(event_file).write_completion(
         EventContentCreator.create_completion(
@@ -370,9 +363,8 @@ async def cancel_task(request: CancelTaskRequest, project_path: str = Depends(ge
     event_file = get_event_file_path(file_id=request.event_file_id, project_path=project_path)
 
     def cancel_in_thread():
-        try:
-
-            global_cancel.set()
+        try:
+            global_cancel.set(token=event_file)
 
             # Get the event manager
             event_manager = get_event_manager(event_file)
auto_coder_web/routers/direct_chat_router.py
ADDED
@@ -0,0 +1,42 @@
+import traceback
+from fastapi import APIRouter, Request
+from pydantic import BaseModel
+from typing import Any, Dict
+from loguru import logger
+import byzerllm
+
+from autocoder.utils.llms import get_single_llm
+
+router = APIRouter()
+
+class DirectChatRequest(BaseModel):
+    model: str
+    content: str
+    product_mode: str = "lite"
+    options: Dict[str, Any] = {}
+
+class DirectChatResponse(BaseModel):
+    success: bool
+    result: Any = None
+    error: str = None
+
+@router.post("/api/direct_chat", response_model=DirectChatResponse)
+async def direct_chat(req: DirectChatRequest, request: Request):
+    """
+    Simple direct-chat API: specify a model and some content, and get the model's reply back.
+    """
+    try:
+        # Get the model
+        llm = get_single_llm(req.model, product_mode=req.product_mode)
+
+        @byzerllm.prompt()
+        def chat_func(content: str) -> str:
+            """
+            {{ content }}
+            """
+        # Custom parameters such as llm_config are also supported
+        result = chat_func.with_llm(llm).run(req.content)
+        return DirectChatResponse(success=True, result=result)
+    except Exception as e:
+        logger.error(f"direct_chat error: {e}\n{traceback.format_exc()}")
+        return DirectChatResponse(success=False, error=f"{str(e)}")
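The new endpoint accepts the four `DirectChatRequest` fields and returns `success`/`result`/`error` as defined above. A client example; the host and port are placeholders for wherever your auto-coder-web instance is listening, and the model name must be one configured on your side:

```python
import requests

resp = requests.post(
    "http://127.0.0.1:8007/api/direct_chat",  # placeholder host/port
    json={
        "model": "v3_chat",                   # placeholder model name
        "content": "Summarize this project in one sentence.",
        "product_mode": "lite",               # default in DirectChatRequest
        "options": {},
    },
    timeout=300,
)
data = resp.json()
if data["success"]:
    print(data["result"])
else:
    print("error:", data["error"])
```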
auto_coder_web/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.1.80"
+__version__ = "0.1.82"