beswarm 0.2.38__py3-none-any.whl → 0.2.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of beswarm might be problematic.

beswarm/agents/planact.py CHANGED
@@ -1,6 +1,5 @@
  import os
  import re
- import sys
  import copy
  import json
  import difflib
@@ -14,7 +13,7 @@ from ..broker import MessageBroker
  from ..aient.src.aient.models import chatgpt
  from ..aient.src.aient.plugins import get_function_call_list, registry
  from ..prompt import worker_system_prompt, instruction_system_prompt
- from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools
+ from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools, setup_logger

  class BaseAgent:
      """Base class for agents, handling common initialization and disposal."""
@@ -24,6 +23,7 @@ class BaseAgent:
          self.work_dir = work_dir
          self.cache_file = Path(work_dir) / ".beswarm" / "work_agent_conversation_history.json"
          self.config = agent_config
+         self.logger = agent_config.get("logger", None)
          self.cache_messages = cache_messages
          if cache_messages and isinstance(cache_messages, bool) and cache_messages == True:
              self.cache_messages = json.loads(self.cache_file.read_text(encoding="utf-8"))
@@ -119,7 +119,7 @@ class InstructionAgent(BaseAgent):
              raise Exception(f"Model: {self.config['engine']} not found!")
          if "'status_code': 413" in raw_response or \
             "'status_code': 400" in raw_response:
-             self.broker.publish({"status": "error", "result": raw_response}, self.status_topic)
+             self.broker.publish({"status": "error", "result": "The request body is too long, please try again."}, self.status_topic)
              return

          self.broker.publish({"status": "new_message", "result": "\n🤖 指令智能体:\n" + raw_response}, self.status_topic)
@@ -146,7 +146,7 @@ class InstructionAgent(BaseAgent):
              )
              self.broker.publish({"instruction": instruction, "conversation": message["conversation"]}, self.publish_topic)
          else:
-             print("\n❌ 指令智能体生成的指令不符合要求,正在重新生成。")
+             self.logger.error("\n❌ 指令智能体生成的指令不符合要求,正在重新生成。")
              self.broker.publish(message, self.error_topic)


@@ -176,7 +176,7 @@ class WorkerAgent(BaseAgent):
          response = await self.agent.ask_async(instruction)

          if response.strip() == '':
-             print("\n❌ 工作智能体回复为空,请重新生成指令。")
+             self.logger.error("\n❌ 工作智能体回复为空,请重新生成指令。")
              self.broker.publish(message, self.error_topic)
          else:
              self.broker.publish({"status": "new_message", "result": "\n✅ 工作智能体:\n" + response}, self.status_topic)
@@ -184,19 +184,6 @@
                  "conversation": self.agent.conversation["default"]
              }, self.publish_topic)

- class Tee:
-     def __init__(self, *files):
-         self.files = files
-
-     def write(self, obj):
-         for f in self.files:
-             f.write(obj)
-             f.flush()
-
-     def flush(self):
-         for f in self.files:
-             f.flush()
-
  class BrokerWorker:
      """The 'glue' class that orchestrates agents via a MessageBroker."""
      def __init__(self, goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None, broker = None, mcp_manager = None, task_manager = None):
@@ -226,14 +213,10 @@ class BrokerWorker:
          if not self.cache_file.exists():
              self.cache_file.write_text("[]", encoding="utf-8")

-         DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
-         if DEBUG:
-             log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
-             log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
-             original_stdout = sys.stdout
-             original_stderr = sys.stderr
-             sys.stdout = Tee(original_stdout, log_file)
-             sys.stderr = Tee(original_stderr, log_file)
+         # Create a file handler that writes logs into the task's own directory
+         log_file_path = cache_dir / "agent.log"
+         self.logger = setup_logger(f"task_{self.work_dir.name}", log_file_path)
+         self.logger.info(f"Logger for task '{self.goal}' initialized. Log file: {log_file_path}")

      async def _configure_tools(self):
          mcp_list = [item for item in self.tools if isinstance(item, dict)]
@@ -256,10 +239,12 @@
          self.task_completion_event.set()

          if message.get("status") == "error":
-             raise Exception(message.get("result"))
+             self.logger.error(message.get("result"))
+             self.final_result = message.get("result")
+             self.task_completion_event.set()

          if message.get("status") == "new_message":
-             print(message.get("result"))
+             self.logger.info(message.get("result"))

      def _setup_agents(self):
          instruction_agent_config = {
@@ -270,7 +255,7 @@
                  workspace_path=self.work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
              ),
              "print_log": os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes"),
-             "temperature": 0.7, "use_plugins": False
+             "temperature": 0.7, "use_plugins": False, "logger": self.logger
          }

          worker_agent_config = {
@@ -281,7 +266,7 @@
                  shell=os.getenv('SHELL', 'Unknown'), current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                  tools_list=self.tools_json
              ),
-             "print_log": True, "temperature": 0.5, "function_call_max_loop": 100
+             "print_log": True, "temperature": 0.5, "function_call_max_loop": 100, "logger": self.logger
          }

          instruction_agent = InstructionAgent(
@@ -332,7 +317,9 @@
                      yield message.get("result")
                      break
                  elif message.get("status") == "error":
-                     raise Exception(message.get("result"))
+                     self.logger.error(message.get("result"))
+                     self.final_result = message.get("result")
+                     self.task_completion_event.set()
          finally:
              instruction_agent.dispose()
              worker_agent.dispose()
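
The hunks above replace the stdout/stderr `Tee` redirection with a per-task logger built by `setup_logger`, which this diff imports from `..utils` but does not show. A minimal sketch of what such a helper plausibly looks like (the formatter string and handler choices are assumptions, not the package's actual code):

# Hypothetical sketch of setup_logger; the real implementation lives in
# beswarm/utils and is not part of this diff.
import logging
from pathlib import Path

def setup_logger(name: str, log_file_path: Path) -> logging.Logger:
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # avoid stacking handlers on repeated calls
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        file_handler = logging.FileHandler(log_file_path, encoding="utf-8")
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    logger.propagate = False  # keep task logs out of the root logger
    return logger

Compared with the removed `Tee` redirection, a named logger keeps each task's output isolated in its own file and leaves the process-wide `sys.stdout`/`sys.stderr` untouched.
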
beswarm/aient/setup.py CHANGED
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages

  setup(
      name="aient",
-     version="1.1.54",
+     version="1.1.56",
      description="Aient: The Awakening of Agent.",
      long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
      long_description_content_type="text/markdown",
@@ -1051,7 +1051,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):

      for field, value in request.model_dump(exclude_unset=True).items():
          if field not in miss_fields and value is not None:
-             if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model):
+             if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model or "gpt-5" in original_model):
                  payload["max_completion_tokens"] = value
              else:
                  payload[field] = value
@@ -1071,7 +1071,9 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
      elif request.model.endswith("low"):
          payload["reasoning_effort"] = "low"

-     if "o1" in original_model or "o3" in original_model or "o4" in original_model:
+     if "o1" in original_model or \
+        "o3" in original_model or "o4" in original_model or \
+        "gpt-oss" in original_model or "gpt-5" in original_model:
          if request.model.endswith("high"):
              payload["reasoning_effort"] = "high"
          elif request.model.endswith("low"):
@@ -34,12 +34,14 @@ def gemini_json_poccess(response_str):
      candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
      totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)

-     content = safe_get(json_data, "parts", 0, "text", default="")
+     content = reasoning_content = safe_get(json_data, "parts", 0, "text", default="")
      b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
      if b64_json:
          image_base64 = b64_json

      is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
+     if is_thinking:
+         content = safe_get(json_data, "parts", 1, "text", default="")

      function_call_name = safe_get(json_data, "functionCall", "name", default=None)
      function_full_response = safe_get(json_data, "functionCall", "args", default="")
@@ -47,7 +49,7 @@ def gemini_json_poccess(response_str):

      blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)

-     return is_thinking, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount
+     return is_thinking, reasoning_content, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount

  async def fetch_gemini_response_stream(client, url, headers, payload, model):
      timestamp = int(datetime.timestamp(datetime.now()))
@@ -63,11 +65,18 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
          parts_json = ""
          async for chunk in response.aiter_text():
              buffer += chunk
+             if buffer and "\n" not in buffer:
+                 buffer += "\n"

              while "\n" in buffer:
                  line, buffer = buffer.split("\n", 1)
                  if line.startswith("data: "):
                      parts_json = line.lstrip("data: ").strip()
+                     try:
+                         json.loads(parts_json)
+                     except json.JSONDecodeError:
+                         logger.error(f"JSON decode error: {parts_json}")
+                         continue
                  else:
                      parts_json += line
                      parts_json = parts_json.lstrip("[,")
@@ -77,12 +86,12 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
                      continue

                  # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
-                 is_thinking, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)
+                 is_thinking, reasoning_content, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)

                  if is_thinking:
-                     sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
+                     sse_string = await generate_sse_response(timestamp, model, reasoning_content=reasoning_content)
                      yield sse_string
-                 elif not image_base64 and content:
+                 if not image_base64 and content:
                      sse_string = await generate_sse_response(timestamp, model, content=content)
                      yield sse_string
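
The `reasoning_content`/`content` split above handles streamed Gemini chunks whose first part is flagged as thought output. A simplified illustration of the mapping (the chunk below is hand-written to mirror that shape, not captured API output):

# Hand-constructed chunk mirroring the parts layout the new code handles;
# field values are illustrative assumptions, not real API output.
json_data = {
    "parts": [
        {"text": "Let me inspect the file first...", "thought": True},
        {"text": "The file defines three functions."},
    ]
}

is_thinking = json_data["parts"][0].get("thought", False)
reasoning_content = json_data["parts"][0].get("text", "")
# When part 0 is thought output, the visible answer lives in part 1.
content = json_data["parts"][1].get("text", "") if is_thinking else reasoning_content
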
@@ -4,6 +4,8 @@ import json
  import copy
  import httpx
  import asyncio
+ import logging
+ import inspect
  import requests
  from typing import Set
  from typing import Union, Optional, Callable, List, Dict, Any
@@ -11,6 +13,7 @@ from pathlib import Path


  from .base import BaseLLM
+ from ..plugins.registry import registry
  from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
  from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
  from ..core.request import prepare_request_payload
@@ -63,6 +66,7 @@ class chatgpt(BaseLLM):
          function_call_max_loop: int = 3,
          cut_history_by_function_name: str = "",
          cache_messages: list = None,
+         logger: logging.Logger = None,
      ) -> None:
          """
          Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
@@ -83,6 +87,14 @@
          self.cut_history_by_function_name = cut_history_by_function_name
          self.latest_file_content = {}

+         if logger:
+             self.logger = logger
+         else:
+             # If no logger is provided, create a default one that only logs to the console
+             self.logger = logging.getLogger("chatgpt_default")
+             if not self.logger.handlers:  # avoid adding duplicate handlers
+                 self.logger.addHandler(logging.StreamHandler())
+             self.logger.setLevel(logging.INFO if print_log else logging.WARNING)

          # Register and process the provided tools
          self._register_tools(tools)
@@ -122,7 +134,7 @@
          """
          Add a message to the conversation
          """
-         # print("role", role, "function_name", function_name, "message", message)
+         # self.logger.info(f"role: {role}, function_name: {function_name}, message: {message}")
          if convo_id not in self.conversation:
              self.reset(convo_id=convo_id)
          if function_name == "" and message:
@@ -175,16 +187,13 @@
              self.conversation[convo_id].append({"role": "assistant", "content": "我已经执行过这个工具了,接下来我需要做什么?"})

          else:
-             print('\033[31m')
-             print("error: add_to_conversation message is None or empty")
-             print("role", role, "function_name", function_name, "message", message)
-             print('\033[0m')
+             self.logger.error(f"error: add_to_conversation message is None or empty, role: {role}, function_name: {function_name}, message: {message}")

          conversation_len = len(self.conversation[convo_id]) - 1
          message_index = 0
          # if self.print_log:
          # replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(self.conversation[convo_id])))
-         # print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
+         # self.logger.info(json.dumps(replaced_text, indent=4, ensure_ascii=False))
          while message_index < conversation_len:
              if self.conversation[convo_id][message_index]["role"] == self.conversation[convo_id][message_index + 1]["role"]:
                  if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content") \
@@ -247,7 +256,7 @@
                  mess = self.conversation[convo_id].pop(1)
                  string_mess = json.dumps(mess, ensure_ascii=False)
                  self.current_tokens[convo_id] -= len(string_mess) / 4
-                 print("Truncate message:", mess)
+                 self.logger.info(f"Truncate message: {mess}")
              else:
                  break

@@ -327,7 +336,7 @@
              request_data["tools"] = tools_request_body
              request_data["tool_choice"] = "auto"

-         # print("request_data", json.dumps(request_data, indent=4, ensure_ascii=False))
+         # self.logger.info(f"request_data: {json.dumps(request_data, indent=4, ensure_ascii=False)}")

          # Call the core module's prepare_request_payload function
          url, headers, json_post_body, engine_type = await prepare_request_payload(provider, request_data)
@@ -388,7 +397,7 @@
              else:
                  return str(line)
          except:
-             print("json.loads error:", repr(line))
+             self.logger.error(f"json.loads error: {repr(line)}")
              return None

          resp = json.loads(line) if isinstance(line, str) else line
@@ -446,21 +455,56 @@
              yield chunk

          if self.print_log:
-             print("\n\rtotal_tokens", total_tokens)
+             self.logger.info(f"total_tokens: {total_tokens}")

          if response_role is None:
              response_role = "assistant"

+         missing_required_params = []
+
          if self.use_plugins == True:
              full_response = full_response.replace("<tool_code>", "").replace("</tool_code>", "")
              function_parameter = parse_function_xml(full_response)
              if function_parameter:
                  invalid_tools = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") not in self.plugins.keys()]
                  function_parameter = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") in self.plugins.keys()]
+
+                 # Check for missing required parameters
+                 valid_function_parameters = []
+                 for tool_dict in function_parameter:
+                     tool_name = tool_dict.get("function_name")
+                     # tool_name must be in registry.tools, because it is in self.plugins which is from registry.tools
+                     func = registry.tools.get(tool_name)
+                     if not func:
+                         continue
+
+                     sig = inspect.signature(func)
+                     provided_params = tool_dict.get("parameter", {})
+                     # Ensure provided_params is a dictionary
+                     if not isinstance(provided_params, dict):
+                         self.logger.warning(f"Parameters for {tool_name} are not a dict: {provided_params}. Skipping.")
+                         continue
+
+                     missing_required_params = []
+                     for param in sig.parameters.values():
+                         # Check if the parameter has no default value and is not in the provided parameters
+                         if param.default is inspect.Parameter.empty and param.name not in provided_params:
+                             missing_required_params.append(param.name)
+
+                     if not missing_required_params:
+                         valid_function_parameters.append(tool_dict)
+                     else:
+                         if self.print_log:
+                             self.logger.warning(
+                                 f"Skipping tool call for '{tool_name}' due to missing required parameters: {missing_required_params}"
+                             )
+                         missing_required_params.append(f"Error: {tool_name} missing required parameters: {missing_required_params}")
+                 function_parameter = valid_function_parameters
+
              if self.print_log and invalid_tools:
-                 print("invalid_tools", invalid_tools)
-                 print("function_parameter", function_parameter)
-                 print("full_response", full_response)
+                 self.logger.error(f"invalid_tools: {invalid_tools}")
+                 self.logger.error(f"function_parameter: {function_parameter}")
+                 self.logger.error(f"full_response: {full_response}")
              if function_parameter:
                  need_function_call = True
                  if isinstance(self.conversation[convo_id][-1]["content"], str) and \
@@ -470,14 +514,14 @@
              else:
                  need_function_call = False
                  if self.print_log:
-                     print("Failed to parse function_parameter full_response", full_response)
+                     self.logger.error(f"Failed to parse function_parameter full_response: {full_response}")
                  full_response = ""

          # Handle the function call
          if need_function_call and self.use_plugins == True:
              if self.print_log:
-                 print("function_parameter", function_parameter)
-                 print("function_full_response", function_full_response)
+                 self.logger.info(f"function_parameter: {function_parameter}")
+                 self.logger.info(f"function_full_response: {function_full_response}")

              function_response = ""
              # Helper function for handling a single tool call
@@ -496,7 +540,7 @@
                  if function_call_max_tokens <= 0:
                      function_call_max_tokens = int(self.truncate_limit / 2)
                  if self.print_log:
-                     print(f"\033[32m function_call {tool_name}, max token: {function_call_max_tokens} \033[0m")
+                     self.logger.info(f"function_call {tool_name}, max token: {function_call_max_tokens}")

                  # Process the function call result
                  if is_async:
@@ -529,7 +573,7 @@
                      if function_full_response:
                          function_parameter = parse_continuous_json(function_full_response, function_call_name)
                  except Exception as e:
-                     print(f"解析JSON失败: {e}")
+                     self.logger.error(f"解析JSON失败: {e}")
                      # Keep the original tool call
                      tool_calls = [{
                          'function_name': function_call_name,
@@ -579,6 +623,8 @@

              # Merge all tool responses
              function_response = "\n\n".join(all_responses).strip()
+             if missing_required_params:
+                 function_response += "\n\n" + "\n\n".join(missing_required_params)

              # Use the first tool's name and parameters for the conversation history
              function_call_name = tool_calls[0]['function_name']
@@ -668,27 +714,56 @@

          # Print the log
          if self.print_log:
-             print("api_url", kwargs.get('api_url', self.api_url.chat_url), url)
-             print("api_key", kwargs.get('api_key', self.api_key))
+             self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
+             self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")

          # Send the request and process the response
          for _ in range(3):
              if self.print_log:
                  replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-                 print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
+                 replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
+                 self.logger.info(f"Request Body:\n{replaced_text_str}")

              try:
                  # Improved handling: create an inner async function for the async calls
                  async def process_async():
                      # Asynchronously call fetch_response_stream
-                     async_generator = fetch_response_stream(
-                         self.aclient,
-                         url,
-                         headers,
-                         json_post,
-                         engine_type,
-                         model or self.engine,
-                     )
+                     # self.logger.info("--------------------------------")
+                     # self.logger.info(prompt)
+                     # self.logger.info(parse_function_xml(prompt))
+                     # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
+                     # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
+                     # self.logger.info("--------------------------------")
+                     if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
+                         tmp_response = {
+                             "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
+                             "object": "chat.completion.chunk",
+                             "created": 1754588695,
+                             "model": "gemini-2.5-flash",
+                             "choices": [
+                                 {
+                                     "index": 0,
+                                     "delta": {
+                                         "role": "assistant",
+                                         "content": prompt
+                                     },
+                                     "finish_reason": "stop"
+                                 }
+                             ],
+                             "system_fingerprint": "fp_d576307f90"
+                         }
+                         async def _mock_response_generator():
+                             yield f"data: {json.dumps(tmp_response)}\n\n"
+                         async_generator = _mock_response_generator()
+                     else:
+                         async_generator = fetch_response_stream(
+                             self.aclient,
+                             url,
+                             headers,
+                             json_post,
+                             engine_type,
+                             model or self.engine,
+                         )
                      # Asynchronously process the response stream
                      async for chunk in self._process_stream_response(
                          async_generator,
@@ -709,15 +784,15 @@
              # Convert the async function into a sync generator
              return async_generator_to_sync(process_async())
          except ConnectionError:
-             print("连接错误,请检查服务器状态或网络连接。")
+             self.logger.error("连接错误,请检查服务器状态或网络连接。")
              return
          except requests.exceptions.ReadTimeout:
-             print("请求超时,请检查网络连接或增加超时时间。")
+             self.logger.error("请求超时,请检查网络连接或增加超时时间。")
              return
          except httpx.RemoteProtocolError:
              continue
          except Exception as e:
-             print(f"发生了未预料的错误:{e}")
+             self.logger.error(f"发生了未预料的错误:{e}")
              if "Invalid URL" in str(e):
                  e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
              raise Exception(f"{e}")
@@ -755,28 +830,54 @@

          # Print the log
          if self.print_log:
-             # print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
-             # print("api_url", kwargs.get('api_url', self.api_url.chat_url))
-             print("api_url", url)
-             # print("headers", headers)
-             print("api_key", kwargs.get('api_key', self.api_key))
+             self.logger.info(f"api_url: {url}")
+             self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")

          # Send the request and process the response
          for _ in range(3):
              if self.print_log:
                  replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-                 print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
+                 replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
+                 self.logger.info(f"Request Body:\n{replaced_text_str}")

              try:
                  # Use fetch_response_stream to handle the response
-                 generator = fetch_response_stream(
-                     self.aclient,
-                     url,
-                     headers,
-                     json_post,
-                     engine_type,
-                     model or self.engine,
-                 )
+                 # self.logger.info("--------------------------------")
+                 # self.logger.info(prompt)
+                 # self.logger.info(parse_function_xml(prompt))
+                 # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
+                 # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
+                 # self.logger.info("--------------------------------")
+                 if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
+                     tmp_response = {
+                         "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
+                         "object": "chat.completion.chunk",
+                         "created": 1754588695,
+                         "model": "gemini-2.5-flash",
+                         "choices": [
+                             {
+                                 "index": 0,
+                                 "delta": {
+                                     "role": "assistant",
+                                     "content": prompt
+                                 },
+                                 "finish_reason": "stop"
+                             }
+                         ],
+                         "system_fingerprint": "fp_d576307f90"
+                     }
+                     async def _mock_response_generator():
+                         yield f"data: {json.dumps(tmp_response)}\n\n"
+                     generator = _mock_response_generator()
+                 else:
+                     generator = fetch_response_stream(
+                         self.aclient,
+                         url,
+                         headers,
+                         json_post,
+                         engine_type,
+                         model or self.engine,
+                     )
              # if isinstance(chunk, dict) and "error" in chunk:
              # # Handle the error response
              # if chunk["status_code"] in (400, 422, 503):
@@ -810,9 +911,9 @@
          except httpx.RemoteProtocolError:
              continue
          except Exception as e:
-             print(f"发生了未预料的错误:{e}")
+             self.logger.error(f"发生了未预料的错误:{e}")
              import traceback
-             traceback.print_exc()
+             self.logger.error(traceback.format_exc())
              if "Invalid URL" in str(e):
                  e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
              raise Exception(f"{e}")
@@ -979,7 +1080,7 @@
                  return json_post, True, None

          except Exception as e:
-             print(f"处理响应错误时出现异常: {e}")
+             self.logger.error(f"处理响应错误时出现异常: {e}")
              return json_post, False, str(e)

      def _handle_response_error_sync(self, response, json_post):
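
The parameter validation added in the `@@ -446,21 +455,56 @@` hunk uses `inspect.signature` to skip tool calls that omit required arguments. A standalone sketch of the same check (the tool function and call parameters below are made up for illustration, not beswarm tools):

# Standalone sketch of the required-parameter check added above;
# read_file and provided_params are hypothetical examples.
import inspect

def read_file(path, encoding="utf-8"):
    ...

provided_params = {"encoding": "gbk"}  # the required 'path' is missing

missing = [
    param.name
    for param in inspect.signature(read_file).parameters.values()
    if param.default is inspect.Parameter.empty and param.name not in provided_params
]
print(missing)  # ['path'] -> this tool call would be skipped and reported
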
@@ -4,7 +4,7 @@ from ..utils.scripts import unescape_html
  import os

  @register_tool()
- def write_to_file(path, content, mode='w'):
+ def write_to_file(path, content, mode='w', newline=False):
      """
  ## write_to_file
  Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file.
@@ -12,6 +12,7 @@ Parameters:
  - path: (required) The path of the file to write to (relative to the current working directory ${args.cwd})
  - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file.
  - mode: (optional) The mode to write to the file. Default is 'w'. 'w' for write, 'a' for append.
+ - newline: (optional) Whether to add a newline before the content. Default is False.
  Usage:
  <write_to_file>
  <path>File path here</path>
@@ -19,6 +20,7 @@ Usage:
  Your file content here
  </content>
  <mode>w</mode>
+ <newline>False</newline>
  </write_to_file>

  Example: Requesting to write to frontend-config.json
@@ -51,6 +53,9 @@ Example: Requesting to write to frontend-config.json
      if content.startswith("---\n") and (path.endswith(".md") or path.endswith(".txt")):
          content = "\n" + content

+     if newline:
+         content = '\n' + content
+
      # Write the file
      try:
          with open(path, mode, encoding='utf-8') as file:
@@ -1,6 +1,6 @@
  def replace_with_asterisk(string, start=15, end=40):
      return string[:start] + '*' * (end - start) + string[end:]

- original_string = "sk-zIuWeeuWY8vNCVhhHCXLroNmA6QhBxnv0ARMFcODVQwwqGRg"
+ original_string = "sk-bvgiugvigycycyrfctdyxdxrts"
  result = replace_with_asterisk(original_string)
  print(result)