auto-coder-web 0.1.79__py3-none-any.whl → 0.1.81__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -68,7 +68,7 @@ async def add_model(model: Model):
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-@router.put("/api/models/{model_name}", response_model=Model)
+@router.put("/api/models", response_model=Model)
 async def update_model(model_name: str, model: Model):
     """
     Update an existing model
@@ -91,7 +91,7 @@ async def update_model(model_name: str, model: Model):
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-@router.delete("/api/models/{model_name}")
+@router.delete("/api/models")
 async def delete_model(model_name: str):
     """
     Delete a model by name
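The two route changes above drop `{model_name}` from the path while leaving `model_name: str` in the handler signatures. In FastAPI, a parameter that is not in the path and has no default becomes a required query parameter, so 0.1.81 clients pass the name as `?model_name=...` instead of a path segment — plausibly motivated by model ids that contain slashes, such as `openai/gpt-4.1` below. A minimal client sketch; only the route shapes come from the diff, the base URL and payload fields are illustrative guesses:

```python
# Client-side view of the route change. Only the route shapes come from the
# diff; the base URL and the Model payload fields are illustrative guesses.
import httpx

BASE = "http://127.0.0.1:8007"  # hypothetical auto-coder-web address
payload = {"name": "gpt-4.1"}   # hypothetical Model fields

# 0.1.79: model_name travelled in the path
# httpx.put(f"{BASE}/api/models/gpt-4.1", json=payload)

# 0.1.81: model_name is a required query parameter
httpx.put(f"{BASE}/api/models", params={"model_name": "gpt-4.1"}, json=payload)
httpx.delete(f"{BASE}/api/models", params={"model_name": "gpt-4.1"})
```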
@@ -175,7 +175,7 @@ class ModelInfo(BaseModel):
     input_price: float
     output_price: float
     is_reasoning: bool
-
+    max_output_tokens: int = 8096
 class ProviderConfig(BaseModel):
     name: str
     base_url: str
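`ModelInfo` gains a `max_output_tokens` field defaulting to 8096 (as released; 8192 is the usual power of two, but 8096 is what both this default and the provider entries below use). A minimal sketch of the updated model — `id` and `name` are inferred from the provider entries and may not match the full class in the package:

```python
# Sketch of the updated ModelInfo, reconstructed from the fields visible in
# this hunk; `id` and `name` are inferred from the provider entries below
# and may not match the class in the package exactly.
from pydantic import BaseModel

class ModelInfo(BaseModel):
    id: str
    name: str
    input_price: float
    output_price: float
    is_reasoning: bool
    max_output_tokens: int = 8096  # new in 0.1.81; 8096 as released

m = ModelInfo(id="openai/gpt-4.1", name="gpt-4.1",
              input_price=14, output_price=42, is_reasoning=False)
print(m.max_output_tokens)  # 8096 when a provider entry omits the key
```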
@@ -234,25 +234,36 @@ def load_providers() -> List[Dict]:
                 "is_reasoning": False
             },
             {
-                "id": "openrouter/quasar-alpha",
-                "name": "quasar-alpha",
-                "input_price": 0.0,
-                "output_price": 0.0,
-                "is_reasoning": False
+                "id": "openai/gpt-4.1",
+                "name": "gpt-4.1",
+                "input_price": 14,
+                "output_price": 42,
+                "is_reasoning": False,
+                "max_output_tokens": 8096*3
             },
             {
-                "id": "openrouter/optimus-alpha",
-                "name": "optimus-alpha",
-                "input_price": 0.0,
-                "output_price": 0.0,
-                "is_reasoning": False
+                "id": "openai/gpt-4.1-mini",
+                "name": "gpt-4.1-mini",
+                "input_price": 2.8,
+                "output_price": 11.2,
+                "is_reasoning": False,
+                "max_output_tokens": 8096*3
+            },
+            {
+                "id": "openai/gpt-4.1-nano",
+                "name": "gpt-4.1-nano",
+                "input_price": 0.7,
+                "output_price": 2.8,
+                "is_reasoning": False,
+                "max_output_tokens": 8096*3
             },
             {
                 "id": "google/gemini-2.5-pro-preview-03-25",
                 "name": "gemini-2.5-pro-preview-03-25",
                 "input_price": 0.0,
                 "output_price": 0.0,
-                "is_reasoning": False
+                "is_reasoning": False,
+                "max_output_tokens": 8096*2
             }
         ]
     },
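The OpenRouter stealth models (quasar-alpha, optimus-alpha) are replaced by the released GPT-4.1 family, with prices and explicit output caps. The caps are stored as literal arithmetic; evaluated:

```python
# The output caps in the new provider entries, evaluated:
print(8096 * 3)  # 24288 tokens for the gpt-4.1 / -mini / -nano entries
print(8096 * 2)  # 16192 tokens for gemini-2.5-pro-preview-03-25
```

Entries that omit the key, like the unchanged context entry at the top of this hunk, keep the 8096 default, assuming these dicts are validated through `ModelInfo`.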
@@ -23,7 +23,7 @@ cancel_thread_pool = ThreadPoolExecutor(max_workers=5)
 
 class AutoCommandRequest(BaseModel):
     command: str
-    include_conversation_history: bool = False
+    include_conversation_history: bool = True
     buildin_conversation_history: bool = False
 
 class EventPollRequest(BaseModel):
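`include_conversation_history` now defaults to `True`, so callers that omit the flag opt in to history where 0.1.79 opted out. A quick demonstration; the request model is re-declared here purely for illustration:

```python
# Effect of the flipped default on a request body that omits the flag.
# AutoCommandRequest is re-declared here purely for demonstration.
from pydantic import BaseModel

class AutoCommandRequest(BaseModel):
    command: str
    include_conversation_history: bool = True   # was False in 0.1.79
    buildin_conversation_history: bool = False

req = AutoCommandRequest(command="explain the models router")
print(req.include_conversation_history)  # True; 0.1.79 defaulted to False
```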
@@ -64,7 +64,7 @@ def ensure_task_dir(project_path: str) -> str:
 @byzerllm.prompt()
 def coding_prompt(messages: List[Dict[str, Any]], query: str):
     '''
-    下面是我们已经产生的一个消息列表,其中 USER_RESPONSE 表示用户的输入,RESULT 你的输出:
+    下面是我们已经产生的一个消息列表,其中 USER_RESPONSE 表示用户的输入,其他都是你的输出:
     <messages>
     {% for message in messages %}
     <message>
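The prompt preamble changes from roughly "Below is the list of messages we have produced so far, where USER_RESPONSE is the user's input and RESULT is your output" to "…where USER_RESPONSE is the user's input and everything else is your output", matching the looser message filter introduced below; the same wording change is applied to the second `coding_prompt` further down. The docstring body is a Jinja2 template (note the `{% for %}` tag). Rendering the visible structure with plain jinja2 shows what the model receives — the per-message fields are assumptions, and byzerllm's own rendering pipeline may differ:

```python
# Rendering the visible template structure with plain jinja2. The
# per-message fields (type, content) are assumptions, and byzerllm's own
# rendering pipeline may differ in details.
from jinja2 import Template

tpl = Template(
    "<messages>\n"
    "{% for message in messages %}"
    "<message>{{ message.type }}: {{ message.content }}</message>\n"
    "{% endfor %}"
    "</messages>"
)
print(tpl.render(messages=[
    {"type": "USER_RESPONSE", "content": "add a delete endpoint"},
    {"type": "RESULT", "content": "done"},
]))
```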
@@ -110,33 +110,38 @@ async def auto_command(request: AutoCommandRequest, project_path: str = Depends(
         current_session_name = session_data.get("session_name", "")
     except Exception as e:
         logger.error(f"Error reading current session: {str(e)}")
+        logger.exception(e)
 
     # 获取历史消息
     messages = []
     if current_session_name:
         chat_list_file = os.path.join(project_path, ".auto-coder", "auto-coder.web", "chat-lists", f"{current_session_name}.json")
+        logger.info(f"loading chat history from {chat_list_file}")
         if os.path.exists(chat_list_file):
             try:
-                with open(chat_list_file, 'r', encoding="utf-8") as f:
+                with open(chat_list_file, 'r', encoding="utf-8") as f:
                     chat_data = json.load(f)
                     # 从聊天历史中提取消息
-                    for msg in chat_data.get("messages", []):
-                        # 只保留用户和中间结果信息
-                        if msg.get("type","") not in ["USER_RESPONSE","RESULT"]:
-                            continue
-
-                        if msg.get("contentType","") in ["token_stat"]:
-                            continue
+                    for msg in chat_data.get("messages", []):
+                        # if msg.get("metadata",{}).get("stream_out_type","") == "/agent/edit":
+                        #     messages.append(msg)
+                        #     continue
 
+                        # if msg.get("type","") not in ["USER_RESPONSE","RESULT","COMPLETION"]:
+                        #     continue
+                        if msg.get("contentType","") in ["token_stat"]:
+                            continue
                         messages.append(msg)
-            except Exception as e:
+            except Exception as e:
                 logger.error(f"Error reading chat history: {str(e)}")
+                logger.exception(e)
 
     if messages:
         # 调用coding_prompt生成包含历史消息的提示
         prompt_text = coding_prompt.prompt(messages, request.command)
 
-        # 调用auto_command_wrapper方法
+        # 调用auto_command_wrapper方法
+        logger.info(f"Executing auto command {file_id} with prompt: {prompt_text}")
         result = wrapper.auto_command_wrapper(prompt_text, {
             "event_file_id": file_id
         })
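Besides the added `logger.exception`/`logger.info` calls, the history filter in `auto_command` is relaxed: 0.1.79 whitelisted only USER_RESPONSE and RESULT messages, while 0.1.81 keeps every message except token-statistics entries (the stricter type checks survive only as comments). Extracted as a standalone sketch:

```python
# The relaxed history filter, extracted as a standalone sketch: everything
# survives except token-statistics entries. 0.1.79 instead whitelisted only
# USER_RESPONSE and RESULT message types.
from typing import Any, Dict, List

def filter_history(chat_data: Dict[str, Any]) -> List[Dict[str, Any]]:
    return [
        msg for msg in chat_data.get("messages", [])
        if msg.get("contentType", "") not in ["token_stat"]
    ]

print(filter_history({"messages": [
    {"type": "USER_RESPONSE", "content": "hi"},
    {"type": "SYSTEM", "contentType": "token_stat", "content": "tokens: 123"},
]}))  # only the USER_RESPONSE message survives
```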
@@ -58,7 +58,7 @@ def ensure_task_dir(project_path: str) -> str:
 @byzerllm.prompt()
 def coding_prompt(messages: List[Dict[str, Any]], request: CodingCommandRequest):
     '''
-    下面是我们已经产生的一个消息列表,其中 USER_RESPONSE 表示用户的输入,RESULT 你的输出:
+    下面是我们已经产生的一个消息列表,其中 USER_RESPONSE 表示用户的输入,其他都是你的输出:
     <messages>
     {% for message in messages %}
     <message>
@@ -123,20 +123,12 @@ async def coding_command(request: CodingCommandRequest, project_path: str = Depe
         logger.error(f"Error reading chat history: {str(e)}")
 
     # 构建提示信息
-    prompt_text = ""
+    prompt_text = request.command
     if messages:
         # 调用coding_prompt生成包含历史消息的提示
-        prompt_text = prompt_text + coding_prompt.prompt(messages, request)
-        logger.info(prompt_text)
+        prompt_text = coding_prompt.prompt(messages, request.command)
 
-    # 调用coding方法,如果有历史消息,传递包含历史的提示
-    if prompt_text:
-        logger.info(f"Using conversation history with {len(messages)} messages for coding command")
-        result = wrapper.coding_wapper(prompt_text)
-    else:
-        # 如果没有历史消息或获取失败,直接传递原始命令
-        logger.info("Using original command without conversation history")
-        result = wrapper.coding_wapper(prompt_text + request.command)
+    result = wrapper.coding_wapper(prompt_text)
 
     get_event_manager(event_file).write_completion(
         EventContentCreator.create_completion(
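`coding_command` loses its dead branching: `prompt_text` now starts as `request.command` and is overwritten by the history-aware prompt when messages exist, leaving a single `coding_wapper` call (sic, the spelling is the package's). Note that the new call passes `request.command` even though the `coding_prompt` signature above still declares `request: CodingCommandRequest`. The resulting control flow, sketched with illustrative stand-ins for the wrapper and the prompt renderer:

```python
# The simplified control flow, with a stand-in for the real wrapper and
# prompt renderer (names and bodies here are illustrative stubs).
def coding_wapper(prompt_text: str) -> str:   # spelling as in the package
    return f"coding ran with: {prompt_text}"

def run_coding_command(command: str, messages: list) -> str:
    prompt_text = command                     # 0.1.81 default: raw command
    if messages:
        # with history, the rendered coding_prompt replaces the raw command
        prompt_text = f"[{len(messages)} history messages] {command}"
    return coding_wapper(prompt_text)         # single call site now

print(run_coding_command("add tests", []))
print(run_coding_command("add tests", [{"type": "USER_RESPONSE"}]))
```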
auto_coder_web/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.79"
+__version__ = "0.1.81"