aient 1.1.55__tar.gz → 1.1.57__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. {aient-1.1.55/src/aient.egg-info → aient-1.1.57}/PKG-INFO +1 -1
  2. {aient-1.1.55 → aient-1.1.57}/setup.py +1 -1
  3. {aient-1.1.55 → aient-1.1.57}/src/aient/core/request.py +1 -1
  4. {aient-1.1.55 → aient-1.1.57}/src/aient/models/chatgpt.py +154 -49
  5. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/write_file.py +6 -1
  6. {aient-1.1.55 → aient-1.1.57/src/aient.egg-info}/PKG-INFO +1 -1
  7. {aient-1.1.55 → aient-1.1.57}/test/test_API.py +1 -1
  8. {aient-1.1.55 → aient-1.1.57}/LICENSE +0 -0
  9. {aient-1.1.55 → aient-1.1.57}/MANIFEST.in +0 -0
  10. {aient-1.1.55 → aient-1.1.57}/README.md +0 -0
  11. {aient-1.1.55 → aient-1.1.57}/setup.cfg +0 -0
  12. {aient-1.1.55 → aient-1.1.57}/src/aient/__init__.py +0 -0
  13. {aient-1.1.55 → aient-1.1.57}/src/aient/core/.git +0 -0
  14. {aient-1.1.55 → aient-1.1.57}/src/aient/core/.gitignore +0 -0
  15. {aient-1.1.55 → aient-1.1.57}/src/aient/core/__init__.py +0 -0
  16. {aient-1.1.55 → aient-1.1.57}/src/aient/core/log_config.py +0 -0
  17. {aient-1.1.55 → aient-1.1.57}/src/aient/core/models.py +0 -0
  18. {aient-1.1.55 → aient-1.1.57}/src/aient/core/response.py +0 -0
  19. {aient-1.1.55 → aient-1.1.57}/src/aient/core/test/test_base_api.py +0 -0
  20. {aient-1.1.55 → aient-1.1.57}/src/aient/core/test/test_geminimask.py +0 -0
  21. {aient-1.1.55 → aient-1.1.57}/src/aient/core/test/test_image.py +0 -0
  22. {aient-1.1.55 → aient-1.1.57}/src/aient/core/test/test_payload.py +0 -0
  23. {aient-1.1.55 → aient-1.1.57}/src/aient/core/utils.py +0 -0
  24. {aient-1.1.55 → aient-1.1.57}/src/aient/models/__init__.py +0 -0
  25. {aient-1.1.55 → aient-1.1.57}/src/aient/models/audio.py +0 -0
  26. {aient-1.1.55 → aient-1.1.57}/src/aient/models/base.py +0 -0
  27. {aient-1.1.55 → aient-1.1.57}/src/aient/models/claude.py +0 -0
  28. {aient-1.1.55 → aient-1.1.57}/src/aient/models/duckduckgo.py +0 -0
  29. {aient-1.1.55 → aient-1.1.57}/src/aient/models/gemini.py +0 -0
  30. {aient-1.1.55 → aient-1.1.57}/src/aient/models/groq.py +0 -0
  31. {aient-1.1.55 → aient-1.1.57}/src/aient/models/vertex.py +0 -0
  32. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/__init__.py +0 -0
  33. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/arXiv.py +0 -0
  34. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/config.py +0 -0
  35. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/excute_command.py +0 -0
  36. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/get_time.py +0 -0
  37. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/image.py +0 -0
  38. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/list_directory.py +0 -0
  39. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/read_file.py +0 -0
  40. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/read_image.py +0 -0
  41. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/readonly.py +0 -0
  42. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/registry.py +0 -0
  43. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/run_python.py +0 -0
  44. {aient-1.1.55 → aient-1.1.57}/src/aient/plugins/websearch.py +0 -0
  45. {aient-1.1.55 → aient-1.1.57}/src/aient/utils/__init__.py +0 -0
  46. {aient-1.1.55 → aient-1.1.57}/src/aient/utils/prompt.py +0 -0
  47. {aient-1.1.55 → aient-1.1.57}/src/aient/utils/scripts.py +0 -0
  48. {aient-1.1.55 → aient-1.1.57}/src/aient.egg-info/SOURCES.txt +0 -0
  49. {aient-1.1.55 → aient-1.1.57}/src/aient.egg-info/dependency_links.txt +0 -0
  50. {aient-1.1.55 → aient-1.1.57}/src/aient.egg-info/requires.txt +0 -0
  51. {aient-1.1.55 → aient-1.1.57}/src/aient.egg-info/top_level.txt +0 -0
  52. {aient-1.1.55 → aient-1.1.57}/test/test.py +0 -0
  53. {aient-1.1.55 → aient-1.1.57}/test/test_Deepbricks.py +0 -0
  54. {aient-1.1.55 → aient-1.1.57}/test/test_Web_crawler.py +0 -0
  55. {aient-1.1.55 → aient-1.1.57}/test/test_aiwaves.py +0 -0
  56. {aient-1.1.55 → aient-1.1.57}/test/test_aiwaves_arxiv.py +0 -0
  57. {aient-1.1.55 → aient-1.1.57}/test/test_ask_gemini.py +0 -0
  58. {aient-1.1.55 → aient-1.1.57}/test/test_class.py +0 -0
  59. {aient-1.1.55 → aient-1.1.57}/test/test_claude.py +0 -0
  60. {aient-1.1.55 → aient-1.1.57}/test/test_claude_zh_char.py +0 -0
  61. {aient-1.1.55 → aient-1.1.57}/test/test_ddg_search.py +0 -0
  62. {aient-1.1.55 → aient-1.1.57}/test/test_download_pdf.py +0 -0
  63. {aient-1.1.55 → aient-1.1.57}/test/test_gemini.py +0 -0
  64. {aient-1.1.55 → aient-1.1.57}/test/test_get_token_dict.py +0 -0
  65. {aient-1.1.55 → aient-1.1.57}/test/test_google_search.py +0 -0
  66. {aient-1.1.55 → aient-1.1.57}/test/test_jieba.py +0 -0
  67. {aient-1.1.55 → aient-1.1.57}/test/test_json.py +0 -0
  68. {aient-1.1.55 → aient-1.1.57}/test/test_logging.py +0 -0
  69. {aient-1.1.55 → aient-1.1.57}/test/test_ollama.py +0 -0
  70. {aient-1.1.55 → aient-1.1.57}/test/test_plugin.py +0 -0
  71. {aient-1.1.55 → aient-1.1.57}/test/test_py_run.py +0 -0
  72. {aient-1.1.55 → aient-1.1.57}/test/test_requests.py +0 -0
  73. {aient-1.1.55 → aient-1.1.57}/test/test_search.py +0 -0
  74. {aient-1.1.55 → aient-1.1.57}/test/test_tikitoken.py +0 -0
  75. {aient-1.1.55 → aient-1.1.57}/test/test_token.py +0 -0
  76. {aient-1.1.55 → aient-1.1.57}/test/test_url.py +0 -0
  77. {aient-1.1.55 → aient-1.1.57}/test/test_whisper.py +0 -0
  78. {aient-1.1.55 → aient-1.1.57}/test/test_wildcard.py +0 -0
  79. {aient-1.1.55 → aient-1.1.57}/test/test_yjh.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.1.55
3
+ Version: 1.1.57
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Description-Content-Type: text/markdown
6
6
  License-File: LICENSE
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
4
4
 
5
5
  setup(
6
6
  name="aient",
7
- version="1.1.55",
7
+ version="1.1.57",
8
8
  description="Aient: The Awakening of Agent.",
9
9
  long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
10
10
  long_description_content_type="text/markdown",
@@ -1051,7 +1051,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1051
1051
 
1052
1052
  for field, value in request.model_dump(exclude_unset=True).items():
1053
1053
  if field not in miss_fields and value is not None:
1054
- if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model):
1054
+ if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model or "gpt-5" in original_model):
1055
1055
  payload["max_completion_tokens"] = value
1056
1056
  else:
1057
1057
  payload[field] = value
@@ -4,6 +4,8 @@ import json
4
4
  import copy
5
5
  import httpx
6
6
  import asyncio
7
+ import logging
8
+ import inspect
7
9
  import requests
8
10
  from typing import Set
9
11
  from typing import Union, Optional, Callable, List, Dict, Any
@@ -11,6 +13,7 @@ from pathlib import Path
11
13
 
12
14
 
13
15
  from .base import BaseLLM
16
+ from ..plugins.registry import registry
14
17
  from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
15
18
  from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
16
19
  from ..core.request import prepare_request_payload
@@ -63,6 +66,7 @@ class chatgpt(BaseLLM):
63
66
  function_call_max_loop: int = 3,
64
67
  cut_history_by_function_name: str = "",
65
68
  cache_messages: list = None,
69
+ logger: logging.Logger = None,
66
70
  ) -> None:
67
71
  """
68
72
  Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
@@ -83,6 +87,14 @@ class chatgpt(BaseLLM):
83
87
  self.cut_history_by_function_name = cut_history_by_function_name
84
88
  self.latest_file_content = {}
85
89
 
90
+ if logger:
91
+ self.logger = logger
92
+ else:
93
+ # 如果没有提供 logger,创建一个默认的,它只会打印到控制台
94
+ self.logger = logging.getLogger("chatgpt_default")
95
+ if not self.logger.handlers: # 防止重复添加 handler
96
+ self.logger.addHandler(logging.StreamHandler())
97
+ self.logger.setLevel(logging.INFO if print_log else logging.WARNING)
86
98
 
87
99
  # 注册和处理传入的工具
88
100
  self._register_tools(tools)
@@ -122,7 +134,7 @@ class chatgpt(BaseLLM):
122
134
  """
123
135
  Add a message to the conversation
124
136
  """
125
- # print("role", role, "function_name", function_name, "message", message)
137
+ # self.logger.info(f"role: {role}, function_name: {function_name}, message: {message}")
126
138
  if convo_id not in self.conversation:
127
139
  self.reset(convo_id=convo_id)
128
140
  if function_name == "" and message:
@@ -175,16 +187,13 @@ class chatgpt(BaseLLM):
175
187
  self.conversation[convo_id].append({"role": "assistant", "content": "我已经执行过这个工具了,接下来我需要做什么?"})
176
188
 
177
189
  else:
178
- print('\033[31m')
179
- print("error: add_to_conversation message is None or empty")
180
- print("role", role, "function_name", function_name, "message", message)
181
- print('\033[0m')
190
+ self.logger.error(f"error: add_to_conversation message is None or empty, role: {role}, function_name: {function_name}, message: {message}")
182
191
 
183
192
  conversation_len = len(self.conversation[convo_id]) - 1
184
193
  message_index = 0
185
194
  # if self.print_log:
186
195
  # replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(self.conversation[convo_id])))
187
- # print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
196
+ # self.logger.info(json.dumps(replaced_text, indent=4, ensure_ascii=False))
188
197
  while message_index < conversation_len:
189
198
  if self.conversation[convo_id][message_index]["role"] == self.conversation[convo_id][message_index + 1]["role"]:
190
199
  if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content") \
@@ -247,7 +256,7 @@ class chatgpt(BaseLLM):
247
256
  mess = self.conversation[convo_id].pop(1)
248
257
  string_mess = json.dumps(mess, ensure_ascii=False)
249
258
  self.current_tokens[convo_id] -= len(string_mess) / 4
250
- print("Truncate message:", mess)
259
+ self.logger.info(f"Truncate message: {mess}")
251
260
  else:
252
261
  break
253
262
 
@@ -327,7 +336,7 @@ class chatgpt(BaseLLM):
327
336
  request_data["tools"] = tools_request_body
328
337
  request_data["tool_choice"] = "auto"
329
338
 
330
- # print("request_data", json.dumps(request_data, indent=4, ensure_ascii=False))
339
+ # self.logger.info(f"request_data: {json.dumps(request_data, indent=4, ensure_ascii=False)}")
331
340
 
332
341
  # 调用核心模块的 prepare_request_payload 函数
333
342
  url, headers, json_post_body, engine_type = await prepare_request_payload(provider, request_data)
@@ -388,7 +397,7 @@ class chatgpt(BaseLLM):
388
397
  else:
389
398
  return str(line)
390
399
  except:
391
- print("json.loads error:", repr(line))
400
+ self.logger.error(f"json.loads error: {repr(line)}")
392
401
  return None
393
402
 
394
403
  resp = json.loads(line) if isinstance(line, str) else line
@@ -446,21 +455,60 @@ class chatgpt(BaseLLM):
446
455
  yield chunk
447
456
 
448
457
  if self.print_log:
449
- print("\n\rtotal_tokens", total_tokens)
458
+ self.logger.info(f"total_tokens: {total_tokens}")
450
459
 
451
460
  if response_role is None:
452
461
  response_role = "assistant"
453
462
 
463
+ missing_required_params = []
464
+
454
465
  if self.use_plugins == True:
455
466
  full_response = full_response.replace("<tool_code>", "").replace("</tool_code>", "")
456
467
  function_parameter = parse_function_xml(full_response)
457
468
  if function_parameter:
458
469
  invalid_tools = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") not in self.plugins.keys()]
459
470
  function_parameter = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") in self.plugins.keys()]
471
+
472
+ # Check for missing required parameters
473
+ valid_function_parameters = []
474
+ for tool_dict in function_parameter:
475
+ tool_name = tool_dict.get("function_name")
476
+ # tool_name must be in registry.tools, because it is in self.plugins which is from registry.tools
477
+ func = registry.tools.get(tool_name)
478
+ if not func:
479
+ continue
480
+
481
+ sig = inspect.signature(func)
482
+ provided_params = tool_dict.get("parameter", {})
483
+ # Ensure provided_params is a dictionary
484
+ if not isinstance(provided_params, dict):
485
+ self.logger.warning(f"Parameters for {tool_name} are not a dict: {provided_params}. Skipping.")
486
+ continue
487
+
488
+ missing_required_params = []
489
+ for param in sig.parameters.values():
490
+ # Check if the parameter has no default value and is not in the provided parameters
491
+ if param.default is inspect.Parameter.empty and param.name not in provided_params:
492
+ missing_required_params.append(param.name)
493
+
494
+ if not missing_required_params:
495
+ valid_function_parameters.append(tool_dict)
496
+ else:
497
+ if self.print_log:
498
+ self.logger.warning(
499
+ f"Skipping tool call for '{tool_name}' due to missing required parameters: {missing_required_params}"
500
+ )
501
+ missing_required_params.append(f"Error: {tool_name} missing required parameters: {missing_required_params}")
502
+ function_parameter = valid_function_parameters
503
+
504
+ # 删除 task_complete 跟其他工具一起调用的情况,因为 task_complete 必须单独调用
505
+ if len(function_parameter) > 1:
506
+ function_parameter = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") != "task_complete"]
507
+
460
508
  if self.print_log and invalid_tools:
461
- print("invalid_tools", invalid_tools)
462
- print("function_parameter", function_parameter)
463
- print("full_response", full_response)
509
+ self.logger.error(f"invalid_tools: {invalid_tools}")
510
+ self.logger.error(f"function_parameter: {function_parameter}")
511
+ self.logger.error(f"full_response: {full_response}")
464
512
  if function_parameter:
465
513
  need_function_call = True
466
514
  if isinstance(self.conversation[convo_id][-1]["content"], str) and \
@@ -470,14 +518,14 @@ class chatgpt(BaseLLM):
470
518
  else:
471
519
  need_function_call = False
472
520
  if self.print_log:
473
- print("Failed to parse function_parameter full_response", full_response)
521
+ self.logger.error(f"Failed to parse function_parameter full_response: {full_response}")
474
522
  full_response = ""
475
523
 
476
524
  # 处理函数调用
477
525
  if need_function_call and self.use_plugins == True:
478
526
  if self.print_log:
479
- print("function_parameter", function_parameter)
480
- print("function_full_response", function_full_response)
527
+ self.logger.info(f"function_parameter: {function_parameter}")
528
+ self.logger.info(f"function_full_response: {function_full_response}")
481
529
 
482
530
  function_response = ""
483
531
  # 定义处理单个工具调用的辅助函数
@@ -496,7 +544,7 @@ class chatgpt(BaseLLM):
496
544
  if function_call_max_tokens <= 0:
497
545
  function_call_max_tokens = int(self.truncate_limit / 2)
498
546
  if self.print_log:
499
- print(f"\033[32m function_call {tool_name}, max token: {function_call_max_tokens} \033[0m")
547
+ self.logger.info(f"function_call {tool_name}, max token: {function_call_max_tokens}")
500
548
 
501
549
  # 处理函数调用结果
502
550
  if is_async:
@@ -529,7 +577,7 @@ class chatgpt(BaseLLM):
529
577
  if function_full_response:
530
578
  function_parameter = parse_continuous_json(function_full_response, function_call_name)
531
579
  except Exception as e:
532
- print(f"解析JSON失败: {e}")
580
+ self.logger.error(f"解析JSON失败: {e}")
533
581
  # 保持原始工具调用
534
582
  tool_calls = [{
535
583
  'function_name': function_call_name,
@@ -579,6 +627,8 @@ class chatgpt(BaseLLM):
579
627
 
580
628
  # 合并所有工具响应
581
629
  function_response = "\n\n".join(all_responses).strip()
630
+ if missing_required_params:
631
+ function_response += "\n\n" + "\n\n".join(missing_required_params)
582
632
 
583
633
  # 使用第一个工具的名称和参数作为历史记录
584
634
  function_call_name = tool_calls[0]['function_name']
@@ -668,27 +718,56 @@ class chatgpt(BaseLLM):
668
718
 
669
719
  # 打印日志
670
720
  if self.print_log:
671
- print("api_url", kwargs.get('api_url', self.api_url.chat_url), url)
672
- print("api_key", kwargs.get('api_key', self.api_key))
721
+ self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
722
+ self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
673
723
 
674
724
  # 发送请求并处理响应
675
725
  for _ in range(3):
676
726
  if self.print_log:
677
727
  replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
678
- print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
728
+ replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
729
+ self.logger.info(f"Request Body:\n{replaced_text_str}")
679
730
 
680
731
  try:
681
732
  # 改进处理方式,创建一个内部异步函数来处理异步调用
682
733
  async def process_async():
683
734
  # 异步调用 fetch_response_stream
684
- async_generator = fetch_response_stream(
685
- self.aclient,
686
- url,
687
- headers,
688
- json_post,
689
- engine_type,
690
- model or self.engine,
691
- )
735
+ # self.logger.info("--------------------------------")
736
+ # self.logger.info(prompt)
737
+ # self.logger.info(parse_function_xml(prompt))
738
+ # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
739
+ # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
740
+ # self.logger.info("--------------------------------")
741
+ if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
742
+ tmp_response = {
743
+ "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
744
+ "object": "chat.completion.chunk",
745
+ "created": 1754588695,
746
+ "model": "gemini-2.5-flash",
747
+ "choices": [
748
+ {
749
+ "index": 0,
750
+ "delta": {
751
+ "role": "assistant",
752
+ "content": prompt
753
+ },
754
+ "finish_reason": "stop"
755
+ }
756
+ ],
757
+ "system_fingerprint": "fp_d576307f90"
758
+ }
759
+ async def _mock_response_generator():
760
+ yield f"data: {json.dumps(tmp_response)}\n\n"
761
+ async_generator = _mock_response_generator()
762
+ else:
763
+ async_generator = fetch_response_stream(
764
+ self.aclient,
765
+ url,
766
+ headers,
767
+ json_post,
768
+ engine_type,
769
+ model or self.engine,
770
+ )
692
771
  # 异步处理响应流
693
772
  async for chunk in self._process_stream_response(
694
773
  async_generator,
@@ -709,15 +788,15 @@ class chatgpt(BaseLLM):
709
788
  # 将异步函数转换为同步生成器
710
789
  return async_generator_to_sync(process_async())
711
790
  except ConnectionError:
712
- print("连接错误,请检查服务器状态或网络连接。")
791
+ self.logger.error("连接错误,请检查服务器状态或网络连接。")
713
792
  return
714
793
  except requests.exceptions.ReadTimeout:
715
- print("请求超时,请检查网络连接或增加超时时间。")
794
+ self.logger.error("请求超时,请检查网络连接或增加超时时间。")
716
795
  return
717
796
  except httpx.RemoteProtocolError:
718
797
  continue
719
798
  except Exception as e:
720
- print(f"发生了未预料的错误:{e}")
799
+ self.logger.error(f"发生了未预料的错误:{e}")
721
800
  if "Invalid URL" in str(e):
722
801
  e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
723
802
  raise Exception(f"{e}")
@@ -755,28 +834,54 @@ class chatgpt(BaseLLM):
755
834
 
756
835
  # 打印日志
757
836
  if self.print_log:
758
- # print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
759
- # print("api_url", kwargs.get('api_url', self.api_url.chat_url))
760
- print("api_url", url)
761
- # print("headers", headers)
762
- print("api_key", kwargs.get('api_key', self.api_key))
837
+ self.logger.info(f"api_url: {url}")
838
+ self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
763
839
 
764
840
  # 发送请求并处理响应
765
841
  for _ in range(3):
766
842
  if self.print_log:
767
843
  replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
768
- print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
844
+ replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
845
+ self.logger.info(f"Request Body:\n{replaced_text_str}")
769
846
 
770
847
  try:
771
848
  # 使用fetch_response_stream处理响应
772
- generator = fetch_response_stream(
773
- self.aclient,
774
- url,
775
- headers,
776
- json_post,
777
- engine_type,
778
- model or self.engine,
779
- )
849
+ # self.logger.info("--------------------------------")
850
+ # self.logger.info(prompt)
851
+ # self.logger.info(parse_function_xml(prompt))
852
+ # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
853
+ # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
854
+ # self.logger.info("--------------------------------")
855
+ if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
856
+ tmp_response = {
857
+ "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
858
+ "object": "chat.completion.chunk",
859
+ "created": 1754588695,
860
+ "model": "gemini-2.5-flash",
861
+ "choices": [
862
+ {
863
+ "index": 0,
864
+ "delta": {
865
+ "role": "assistant",
866
+ "content": prompt
867
+ },
868
+ "finish_reason": "stop"
869
+ }
870
+ ],
871
+ "system_fingerprint": "fp_d576307f90"
872
+ }
873
+ async def _mock_response_generator():
874
+ yield f"data: {json.dumps(tmp_response)}\n\n"
875
+ generator = _mock_response_generator()
876
+ else:
877
+ generator = fetch_response_stream(
878
+ self.aclient,
879
+ url,
880
+ headers,
881
+ json_post,
882
+ engine_type,
883
+ model or self.engine,
884
+ )
780
885
  # if isinstance(chunk, dict) and "error" in chunk:
781
886
  # # 处理错误响应
782
887
  # if chunk["status_code"] in (400, 422, 503):
@@ -810,9 +915,9 @@ class chatgpt(BaseLLM):
810
915
  except httpx.RemoteProtocolError:
811
916
  continue
812
917
  except Exception as e:
813
- print(f"发生了未预料的错误:{e}")
918
+ self.logger.error(f"发生了未预料的错误:{e}")
814
919
  import traceback
815
- traceback.print_exc()
920
+ self.logger.error(traceback.format_exc())
816
921
  if "Invalid URL" in str(e):
817
922
  e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
818
923
  raise Exception(f"{e}")
@@ -979,7 +1084,7 @@ class chatgpt(BaseLLM):
979
1084
  return json_post, True, None
980
1085
 
981
1086
  except Exception as e:
982
- print(f"处理响应错误时出现异常: {e}")
1087
+ self.logger.error(f"处理响应错误时出现异常: {e}")
983
1088
  return json_post, False, str(e)
984
1089
 
985
1090
  def _handle_response_error_sync(self, response, json_post):
@@ -4,7 +4,7 @@ from ..utils.scripts import unescape_html
4
4
  import os
5
5
 
6
6
  @register_tool()
7
- def write_to_file(path, content, mode='w'):
7
+ def write_to_file(path, content, mode='w', newline=False):
8
8
  """
9
9
  ## write_to_file
10
10
  Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file.
@@ -12,6 +12,7 @@ Parameters:
12
12
  - path: (required) The path of the file to write to (relative to the current working directory ${args.cwd})
13
13
  - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file.
14
14
  - mode: (optional) The mode to write to the file. Default is 'w'. 'w' for write, 'a' for append.
15
+ - newline: (optional) Whether to add a newline before the content. Default is False.
15
16
  Usage:
16
17
  <write_to_file>
17
18
  <path>File path here</path>
@@ -19,6 +20,7 @@ Usage:
19
20
  Your file content here
20
21
  </content>
21
22
  <mode>w</mode>
23
+ <newline>False</newline>
22
24
  </write_to_file>
23
25
 
24
26
  Example: Requesting to write to frontend-config.json
@@ -51,6 +53,9 @@ Example: Requesting to write to frontend-config.json
51
53
  if content.startswith("---\n") and (path.endswith(".md") or path.endswith(".txt")):
52
54
  content = "\n" + content
53
55
 
56
+ if newline:
57
+ content = '\n' + content
58
+
54
59
  # 写入文件
55
60
  try:
56
61
  with open(path, mode, encoding='utf-8') as file:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.1.55
3
+ Version: 1.1.57
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Description-Content-Type: text/markdown
6
6
  License-File: LICENSE
@@ -1,6 +1,6 @@
1
1
  def replace_with_asterisk(string, start=15, end=40):
2
2
  return string[:start] + '*' * (end - start) + string[end:]
3
3
 
4
- original_string = "sk-zIuWeeuWY8vNCVhhHCXLroNmA6QhBxnv0ARMFcODVQwwqGRg"
4
+ original_string = "sk-bvgiugvigycycyrfctdyxdxrts"
5
5
  result = replace_with_asterisk(original_string)
6
6
  print(result)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes