aient 1.1.55.tar.gz → 1.1.56.tar.gz

This diff shows the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (79)
  1. {aient-1.1.55/src/aient.egg-info → aient-1.1.56}/PKG-INFO +1 -1
  2. {aient-1.1.55 → aient-1.1.56}/setup.py +1 -1
  3. {aient-1.1.55 → aient-1.1.56}/src/aient/core/request.py +1 -1
  4. {aient-1.1.55 → aient-1.1.56}/src/aient/models/chatgpt.py +150 -49
  5. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/write_file.py +6 -1
  6. {aient-1.1.55 → aient-1.1.56/src/aient.egg-info}/PKG-INFO +1 -1
  7. {aient-1.1.55 → aient-1.1.56}/test/test_API.py +1 -1
  8. {aient-1.1.55 → aient-1.1.56}/LICENSE +0 -0
  9. {aient-1.1.55 → aient-1.1.56}/MANIFEST.in +0 -0
  10. {aient-1.1.55 → aient-1.1.56}/README.md +0 -0
  11. {aient-1.1.55 → aient-1.1.56}/setup.cfg +0 -0
  12. {aient-1.1.55 → aient-1.1.56}/src/aient/__init__.py +0 -0
  13. {aient-1.1.55 → aient-1.1.56}/src/aient/core/.git +0 -0
  14. {aient-1.1.55 → aient-1.1.56}/src/aient/core/.gitignore +0 -0
  15. {aient-1.1.55 → aient-1.1.56}/src/aient/core/__init__.py +0 -0
  16. {aient-1.1.55 → aient-1.1.56}/src/aient/core/log_config.py +0 -0
  17. {aient-1.1.55 → aient-1.1.56}/src/aient/core/models.py +0 -0
  18. {aient-1.1.55 → aient-1.1.56}/src/aient/core/response.py +0 -0
  19. {aient-1.1.55 → aient-1.1.56}/src/aient/core/test/test_base_api.py +0 -0
  20. {aient-1.1.55 → aient-1.1.56}/src/aient/core/test/test_geminimask.py +0 -0
  21. {aient-1.1.55 → aient-1.1.56}/src/aient/core/test/test_image.py +0 -0
  22. {aient-1.1.55 → aient-1.1.56}/src/aient/core/test/test_payload.py +0 -0
  23. {aient-1.1.55 → aient-1.1.56}/src/aient/core/utils.py +0 -0
  24. {aient-1.1.55 → aient-1.1.56}/src/aient/models/__init__.py +0 -0
  25. {aient-1.1.55 → aient-1.1.56}/src/aient/models/audio.py +0 -0
  26. {aient-1.1.55 → aient-1.1.56}/src/aient/models/base.py +0 -0
  27. {aient-1.1.55 → aient-1.1.56}/src/aient/models/claude.py +0 -0
  28. {aient-1.1.55 → aient-1.1.56}/src/aient/models/duckduckgo.py +0 -0
  29. {aient-1.1.55 → aient-1.1.56}/src/aient/models/gemini.py +0 -0
  30. {aient-1.1.55 → aient-1.1.56}/src/aient/models/groq.py +0 -0
  31. {aient-1.1.55 → aient-1.1.56}/src/aient/models/vertex.py +0 -0
  32. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/__init__.py +0 -0
  33. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/arXiv.py +0 -0
  34. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/config.py +0 -0
  35. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/excute_command.py +0 -0
  36. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/get_time.py +0 -0
  37. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/image.py +0 -0
  38. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/list_directory.py +0 -0
  39. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/read_file.py +0 -0
  40. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/read_image.py +0 -0
  41. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/readonly.py +0 -0
  42. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/registry.py +0 -0
  43. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/run_python.py +0 -0
  44. {aient-1.1.55 → aient-1.1.56}/src/aient/plugins/websearch.py +0 -0
  45. {aient-1.1.55 → aient-1.1.56}/src/aient/utils/__init__.py +0 -0
  46. {aient-1.1.55 → aient-1.1.56}/src/aient/utils/prompt.py +0 -0
  47. {aient-1.1.55 → aient-1.1.56}/src/aient/utils/scripts.py +0 -0
  48. {aient-1.1.55 → aient-1.1.56}/src/aient.egg-info/SOURCES.txt +0 -0
  49. {aient-1.1.55 → aient-1.1.56}/src/aient.egg-info/dependency_links.txt +0 -0
  50. {aient-1.1.55 → aient-1.1.56}/src/aient.egg-info/requires.txt +0 -0
  51. {aient-1.1.55 → aient-1.1.56}/src/aient.egg-info/top_level.txt +0 -0
  52. {aient-1.1.55 → aient-1.1.56}/test/test.py +0 -0
  53. {aient-1.1.55 → aient-1.1.56}/test/test_Deepbricks.py +0 -0
  54. {aient-1.1.55 → aient-1.1.56}/test/test_Web_crawler.py +0 -0
  55. {aient-1.1.55 → aient-1.1.56}/test/test_aiwaves.py +0 -0
  56. {aient-1.1.55 → aient-1.1.56}/test/test_aiwaves_arxiv.py +0 -0
  57. {aient-1.1.55 → aient-1.1.56}/test/test_ask_gemini.py +0 -0
  58. {aient-1.1.55 → aient-1.1.56}/test/test_class.py +0 -0
  59. {aient-1.1.55 → aient-1.1.56}/test/test_claude.py +0 -0
  60. {aient-1.1.55 → aient-1.1.56}/test/test_claude_zh_char.py +0 -0
  61. {aient-1.1.55 → aient-1.1.56}/test/test_ddg_search.py +0 -0
  62. {aient-1.1.55 → aient-1.1.56}/test/test_download_pdf.py +0 -0
  63. {aient-1.1.55 → aient-1.1.56}/test/test_gemini.py +0 -0
  64. {aient-1.1.55 → aient-1.1.56}/test/test_get_token_dict.py +0 -0
  65. {aient-1.1.55 → aient-1.1.56}/test/test_google_search.py +0 -0
  66. {aient-1.1.55 → aient-1.1.56}/test/test_jieba.py +0 -0
  67. {aient-1.1.55 → aient-1.1.56}/test/test_json.py +0 -0
  68. {aient-1.1.55 → aient-1.1.56}/test/test_logging.py +0 -0
  69. {aient-1.1.55 → aient-1.1.56}/test/test_ollama.py +0 -0
  70. {aient-1.1.55 → aient-1.1.56}/test/test_plugin.py +0 -0
  71. {aient-1.1.55 → aient-1.1.56}/test/test_py_run.py +0 -0
  72. {aient-1.1.55 → aient-1.1.56}/test/test_requests.py +0 -0
  73. {aient-1.1.55 → aient-1.1.56}/test/test_search.py +0 -0
  74. {aient-1.1.55 → aient-1.1.56}/test/test_tikitoken.py +0 -0
  75. {aient-1.1.55 → aient-1.1.56}/test/test_token.py +0 -0
  76. {aient-1.1.55 → aient-1.1.56}/test/test_url.py +0 -0
  77. {aient-1.1.55 → aient-1.1.56}/test/test_whisper.py +0 -0
  78. {aient-1.1.55 → aient-1.1.56}/test/test_wildcard.py +0 -0
  79. {aient-1.1.55 → aient-1.1.56}/test/test_yjh.py +0 -0
{aient-1.1.55/src/aient.egg-info → aient-1.1.56}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.1.55
+Version: 1.1.56
 Summary: Aient: The Awakening of Agent.
 Description-Content-Type: text/markdown
 License-File: LICENSE
{aient-1.1.55 → aient-1.1.56}/setup.py
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.55",
+    version="1.1.56",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
{aient-1.1.55 → aient-1.1.56}/src/aient/core/request.py
@@ -1051,7 +1051,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
 
     for field, value in request.model_dump(exclude_unset=True).items():
         if field not in miss_fields and value is not None:
-            if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model):
+            if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model or "gpt-5" in original_model):
                 payload["max_completion_tokens"] = value
             else:
                 payload[field] = value
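Illustrative note (not part of the package diff): the hunk above extends an existing mapping so that "gpt-5" models, like the o1/o3/o4 families, receive max_completion_tokens instead of max_tokens. A minimal standalone sketch of that mapping, with a hypothetical helper name:

def map_token_field(field, value, original_model, payload):
    # o1/o3/o4-style reasoning models, and now gpt-5, reject `max_tokens`
    # and expect `max_completion_tokens` instead.
    reasoning_markers = ("o1", "o3", "o4", "gpt-5")
    if field == "max_tokens" and any(m in original_model for m in reasoning_markers):
        payload["max_completion_tokens"] = value
    else:
        payload[field] = value

payload = {}
map_token_field("max_tokens", 1024, "gpt-5-mini", payload)
print(payload)  # {'max_completion_tokens': 1024}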
{aient-1.1.55 → aient-1.1.56}/src/aient/models/chatgpt.py
@@ -4,6 +4,8 @@ import json
 import copy
 import httpx
 import asyncio
+import logging
+import inspect
 import requests
 from typing import Set
 from typing import Union, Optional, Callable, List, Dict, Any
@@ -11,6 +13,7 @@ from pathlib import Path
 
 
 from .base import BaseLLM
+from ..plugins.registry import registry
 from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
 from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
 from ..core.request import prepare_request_payload
@@ -63,6 +66,7 @@ class chatgpt(BaseLLM):
         function_call_max_loop: int = 3,
         cut_history_by_function_name: str = "",
         cache_messages: list = None,
+        logger: logging.Logger = None,
     ) -> None:
         """
         Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
@@ -83,6 +87,14 @@ class chatgpt(BaseLLM):
         self.cut_history_by_function_name = cut_history_by_function_name
         self.latest_file_content = {}
 
+        if logger:
+            self.logger = logger
+        else:
+            # 如果没有提供 logger,创建一个默认的,它只会打印到控制台
+            self.logger = logging.getLogger("chatgpt_default")
+            if not self.logger.handlers: # 防止重复添加 handler
+                self.logger.addHandler(logging.StreamHandler())
+            self.logger.setLevel(logging.INFO if print_log else logging.WARNING)
 
         # 注册和处理传入的工具
         self._register_tools(tools)
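Illustrative note (not part of the package diff): the hunk above lets callers inject their own logger; otherwise a "chatgpt_default" console logger is created at INFO (print_log=True) or WARNING level. A hypothetical usage sketch — the import path and the api_key/engine arguments are assumptions based on the surrounding diff:

import logging
from aient.models.chatgpt import chatgpt  # assumed module path

app_logger = logging.getLogger("my_app.chatgpt")
app_logger.addHandler(logging.FileHandler("chatgpt.log"))
app_logger.setLevel(logging.DEBUG)

bot = chatgpt(api_key="sk-...", engine="gpt-4o", logger=app_logger)  # constructor args assumed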
@@ -122,7 +134,7 @@ class chatgpt(BaseLLM):
         """
         Add a message to the conversation
         """
-        # print("role", role, "function_name", function_name, "message", message)
+        # self.logger.info(f"role: {role}, function_name: {function_name}, message: {message}")
         if convo_id not in self.conversation:
             self.reset(convo_id=convo_id)
         if function_name == "" and message:
@@ -175,16 +187,13 @@ class chatgpt(BaseLLM):
                 self.conversation[convo_id].append({"role": "assistant", "content": "我已经执行过这个工具了,接下来我需要做什么?"})
 
             else:
-                print('\033[31m')
-                print("error: add_to_conversation message is None or empty")
-                print("role", role, "function_name", function_name, "message", message)
-                print('\033[0m')
+                self.logger.error(f"error: add_to_conversation message is None or empty, role: {role}, function_name: {function_name}, message: {message}")
 
         conversation_len = len(self.conversation[convo_id]) - 1
         message_index = 0
         # if self.print_log:
         #     replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(self.conversation[convo_id])))
-        #     print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
+        #     self.logger.info(json.dumps(replaced_text, indent=4, ensure_ascii=False))
         while message_index < conversation_len:
             if self.conversation[convo_id][message_index]["role"] == self.conversation[convo_id][message_index + 1]["role"]:
                 if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content") \
@@ -247,7 +256,7 @@ class chatgpt(BaseLLM):
                 mess = self.conversation[convo_id].pop(1)
                 string_mess = json.dumps(mess, ensure_ascii=False)
                 self.current_tokens[convo_id] -= len(string_mess) / 4
-                print("Truncate message:", mess)
+                self.logger.info(f"Truncate message: {mess}")
             else:
                 break
 
@@ -327,7 +336,7 @@ class chatgpt(BaseLLM):
             request_data["tools"] = tools_request_body
             request_data["tool_choice"] = "auto"
 
-        # print("request_data", json.dumps(request_data, indent=4, ensure_ascii=False))
+        # self.logger.info(f"request_data: {json.dumps(request_data, indent=4, ensure_ascii=False)}")
 
         # 调用核心模块的 prepare_request_payload 函数
         url, headers, json_post_body, engine_type = await prepare_request_payload(provider, request_data)
@@ -388,7 +397,7 @@ class chatgpt(BaseLLM):
                 else:
                     return str(line)
             except:
-                print("json.loads error:", repr(line))
+                self.logger.error(f"json.loads error: {repr(line)}")
                 return None
 
         resp = json.loads(line) if isinstance(line, str) else line
@@ -446,21 +455,56 @@ class chatgpt(BaseLLM):
             yield chunk
 
         if self.print_log:
-            print("\n\rtotal_tokens", total_tokens)
+            self.logger.info(f"total_tokens: {total_tokens}")
 
         if response_role is None:
             response_role = "assistant"
 
+        missing_required_params = []
+
         if self.use_plugins == True:
             full_response = full_response.replace("<tool_code>", "").replace("</tool_code>", "")
             function_parameter = parse_function_xml(full_response)
             if function_parameter:
                 invalid_tools = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") not in self.plugins.keys()]
                 function_parameter = [tool_dict for tool_dict in function_parameter if tool_dict.get("function_name", "") in self.plugins.keys()]
+
+                # Check for missing required parameters
+                valid_function_parameters = []
+                for tool_dict in function_parameter:
+                    tool_name = tool_dict.get("function_name")
+                    # tool_name must be in registry.tools, because it is in self.plugins which is from registry.tools
+                    func = registry.tools.get(tool_name)
+                    if not func:
+                        continue
+
+                    sig = inspect.signature(func)
+                    provided_params = tool_dict.get("parameter", {})
+                    # Ensure provided_params is a dictionary
+                    if not isinstance(provided_params, dict):
+                        self.logger.warning(f"Parameters for {tool_name} are not a dict: {provided_params}. Skipping.")
+                        continue
+
+                    missing_required_params = []
+                    for param in sig.parameters.values():
+                        # Check if the parameter has no default value and is not in the provided parameters
+                        if param.default is inspect.Parameter.empty and param.name not in provided_params:
+                            missing_required_params.append(param.name)
+
+                    if not missing_required_params:
+                        valid_function_parameters.append(tool_dict)
+                    else:
+                        if self.print_log:
+                            self.logger.warning(
+                                f"Skipping tool call for '{tool_name}' due to missing required parameters: {missing_required_params}"
+                            )
+                        missing_required_params.append(f"Error: {tool_name} missing required parameters: {missing_required_params}")
+                function_parameter = valid_function_parameters
+
                 if self.print_log and invalid_tools:
-                    print("invalid_tools", invalid_tools)
-                    print("function_parameter", function_parameter)
-                    print("full_response", full_response)
+                    self.logger.error(f"invalid_tools: {invalid_tools}")
+                    self.logger.error(f"function_parameter: {function_parameter}")
+                    self.logger.error(f"full_response: {full_response}")
                 if function_parameter:
                     need_function_call = True
                     if isinstance(self.conversation[convo_id][-1]["content"], str) and \
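Illustrative note (not part of the package diff): the new validation above compares the parsed tool arguments against the plugin function's signature and skips calls that lack a required (default-less) parameter. A standalone sketch of the same check with a hypothetical helper:

import inspect

def find_missing_required(func, provided_params):
    # A parameter counts as required when it has no default value;
    # anything required but absent from the parsed tool call is reported.
    sig = inspect.signature(func)
    return [
        param.name
        for param in sig.parameters.values()
        if param.default is inspect.Parameter.empty and param.name not in provided_params
    ]

def write_to_file(path, content, mode='w', newline=False):
    ...

print(find_missing_required(write_to_file, {"path": "notes.md"}))
# ['content'] -> the tool call is skipped and an error string is collected
print(find_missing_required(write_to_file, {"path": "notes.md", "content": "hi"}))
# []          -> the tool call is allowed through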
@@ -470,14 +514,14 @@ class chatgpt(BaseLLM):
             else:
                 need_function_call = False
                 if self.print_log:
-                    print("Failed to parse function_parameter full_response", full_response)
+                    self.logger.error(f"Failed to parse function_parameter full_response: {full_response}")
                 full_response = ""
 
         # 处理函数调用
         if need_function_call and self.use_plugins == True:
             if self.print_log:
-                print("function_parameter", function_parameter)
-                print("function_full_response", function_full_response)
+                self.logger.info(f"function_parameter: {function_parameter}")
+                self.logger.info(f"function_full_response: {function_full_response}")
 
             function_response = ""
             # 定义处理单个工具调用的辅助函数
@@ -496,7 +540,7 @@ class chatgpt(BaseLLM):
                 if function_call_max_tokens <= 0:
                     function_call_max_tokens = int(self.truncate_limit / 2)
                 if self.print_log:
-                    print(f"\033[32m function_call {tool_name}, max token: {function_call_max_tokens} \033[0m")
+                    self.logger.info(f"function_call {tool_name}, max token: {function_call_max_tokens}")
 
                 # 处理函数调用结果
                 if is_async:
@@ -529,7 +573,7 @@ class chatgpt(BaseLLM):
                     if function_full_response:
                         function_parameter = parse_continuous_json(function_full_response, function_call_name)
                 except Exception as e:
-                    print(f"解析JSON失败: {e}")
+                    self.logger.error(f"解析JSON失败: {e}")
                     # 保持原始工具调用
                     tool_calls = [{
                         'function_name': function_call_name,
@@ -579,6 +623,8 @@ class chatgpt(BaseLLM):
 
             # 合并所有工具响应
             function_response = "\n\n".join(all_responses).strip()
+            if missing_required_params:
+                function_response += "\n\n" + "\n\n".join(missing_required_params)
 
             # 使用第一个工具的名称和参数作为历史记录
             function_call_name = tool_calls[0]['function_name']
@@ -668,27 +714,56 @@ class chatgpt(BaseLLM):
 
         # 打印日志
         if self.print_log:
-            print("api_url", kwargs.get('api_url', self.api_url.chat_url), url)
-            print("api_key", kwargs.get('api_key', self.api_key))
+            self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
+            self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
 
         # 发送请求并处理响应
        for _ in range(3):
            if self.print_log:
                replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-                print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
+                replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
+                self.logger.info(f"Request Body:\n{replaced_text_str}")
 
            try:
                # 改进处理方式,创建一个内部异步函数来处理异步调用
                async def process_async():
                    # 异步调用 fetch_response_stream
-                    async_generator = fetch_response_stream(
-                        self.aclient,
-                        url,
-                        headers,
-                        json_post,
-                        engine_type,
-                        model or self.engine,
-                    )
+                    # self.logger.info("--------------------------------")
+                    # self.logger.info(prompt)
+                    # self.logger.info(parse_function_xml(prompt))
+                    # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
+                    # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
+                    # self.logger.info("--------------------------------")
+                    if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
+                        tmp_response = {
+                            "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
+                            "object": "chat.completion.chunk",
+                            "created": 1754588695,
+                            "model": "gemini-2.5-flash",
+                            "choices": [
+                                {
+                                    "index": 0,
+                                    "delta": {
+                                        "role": "assistant",
+                                        "content": prompt
+                                    },
+                                    "finish_reason": "stop"
+                                }
+                            ],
+                            "system_fingerprint": "fp_d576307f90"
+                        }
+                        async def _mock_response_generator():
+                            yield f"data: {json.dumps(tmp_response)}\n\n"
+                        async_generator = _mock_response_generator()
+                    else:
+                        async_generator = fetch_response_stream(
+                            self.aclient,
+                            url,
+                            headers,
+                            json_post,
+                            engine_type,
+                            model or self.engine,
+                        )
                    # 异步处理响应流
                    async for chunk in self._process_stream_response(
                        async_generator,
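Illustrative note (not part of the package diff): the branch above (and its duplicate in the synchronous path below) short-circuits when the prompt consists solely of tool-call XML, echoing it back as a single fake SSE chunk instead of calling the upstream API. A hedged sketch of the predicate and of the mock chunk shape, with the parse/convert helpers passed in rather than imported:

import json

def is_pure_tool_call(prompt, parse_function_xml, convert_functions_to_xml):
    # True when the prompt round-trips unchanged through the XML tool-call parser,
    # i.e. it contains nothing but tool-call markup.
    return bool(
        prompt
        and "</" in prompt
        and "<instructions>" not in prompt
        and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt
    )

def mock_sse_chunk(prompt, model="gemini-2.5-flash"):
    # Mirrors the tmp_response dict in the diff: one chat.completion.chunk that
    # returns the prompt verbatim and finishes immediately.
    chunk = {
        "object": "chat.completion.chunk",
        "model": model,
        "choices": [{
            "index": 0,
            "delta": {"role": "assistant", "content": prompt},
            "finish_reason": "stop",
        }],
    }
    return f"data: {json.dumps(chunk)}\n\n"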
@@ -709,15 +784,15 @@ class chatgpt(BaseLLM):
                # 将异步函数转换为同步生成器
                return async_generator_to_sync(process_async())
            except ConnectionError:
-                print("连接错误,请检查服务器状态或网络连接。")
+                self.logger.error("连接错误,请检查服务器状态或网络连接。")
                return
            except requests.exceptions.ReadTimeout:
-                print("请求超时,请检查网络连接或增加超时时间。")
+                self.logger.error("请求超时,请检查网络连接或增加超时时间。")
                return
            except httpx.RemoteProtocolError:
                continue
            except Exception as e:
-                print(f"发生了未预料的错误:{e}")
+                self.logger.error(f"发生了未预料的错误:{e}")
                if "Invalid URL" in str(e):
                    e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
                raise Exception(f"{e}")
@@ -755,28 +830,54 @@ class chatgpt(BaseLLM):
 
         # 打印日志
         if self.print_log:
-            # print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
-            # print("api_url", kwargs.get('api_url', self.api_url.chat_url))
-            print("api_url", url)
-            # print("headers", headers)
-            print("api_key", kwargs.get('api_key', self.api_key))
+            self.logger.info(f"api_url: {url}")
+            self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
 
         # 发送请求并处理响应
        for _ in range(3):
            if self.print_log:
                replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-                print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
+                replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
+                self.logger.info(f"Request Body:\n{replaced_text_str}")
 
            try:
                # 使用fetch_response_stream处理响应
-                generator = fetch_response_stream(
-                    self.aclient,
-                    url,
-                    headers,
-                    json_post,
-                    engine_type,
-                    model or self.engine,
-                )
+                # self.logger.info("--------------------------------")
+                # self.logger.info(prompt)
+                # self.logger.info(parse_function_xml(prompt))
+                # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
+                # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
+                # self.logger.info("--------------------------------")
+                if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
+                    tmp_response = {
+                        "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
+                        "object": "chat.completion.chunk",
+                        "created": 1754588695,
+                        "model": "gemini-2.5-flash",
+                        "choices": [
+                            {
+                                "index": 0,
+                                "delta": {
+                                    "role": "assistant",
+                                    "content": prompt
+                                },
+                                "finish_reason": "stop"
+                            }
+                        ],
+                        "system_fingerprint": "fp_d576307f90"
+                    }
+                    async def _mock_response_generator():
+                        yield f"data: {json.dumps(tmp_response)}\n\n"
+                    generator = _mock_response_generator()
+                else:
+                    generator = fetch_response_stream(
+                        self.aclient,
+                        url,
+                        headers,
+                        json_post,
+                        engine_type,
+                        model or self.engine,
+                    )
                # if isinstance(chunk, dict) and "error" in chunk:
                #     # 处理错误响应
                #     if chunk["status_code"] in (400, 422, 503):
@@ -810,9 +911,9 @@ class chatgpt(BaseLLM):
            except httpx.RemoteProtocolError:
                continue
            except Exception as e:
-                print(f"发生了未预料的错误:{e}")
+                self.logger.error(f"发生了未预料的错误:{e}")
                import traceback
-                traceback.print_exc()
+                self.logger.error(traceback.format_exc())
                if "Invalid URL" in str(e):
                    e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
                raise Exception(f"{e}")
@@ -979,7 +1080,7 @@ class chatgpt(BaseLLM):
                return json_post, True, None
 
        except Exception as e:
-            print(f"处理响应错误时出现异常: {e}")
+            self.logger.error(f"处理响应错误时出现异常: {e}")
            return json_post, False, str(e)
 
    def _handle_response_error_sync(self, response, json_post):
{aient-1.1.55 → aient-1.1.56}/src/aient/plugins/write_file.py
@@ -4,7 +4,7 @@ from ..utils.scripts import unescape_html
 import os
 
 @register_tool()
-def write_to_file(path, content, mode='w'):
+def write_to_file(path, content, mode='w', newline=False):
     """
 ## write_to_file
 Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file.
@@ -12,6 +12,7 @@ Parameters:
 - path: (required) The path of the file to write to (relative to the current working directory ${args.cwd})
 - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file.
 - mode: (optional) The mode to write to the file. Default is 'w'. 'w' for write, 'a' for append.
+- newline: (optional) Whether to add a newline before the content. Default is False.
 Usage:
 <write_to_file>
 <path>File path here</path>
@@ -19,6 +20,7 @@ Usage:
 Your file content here
 </content>
 <mode>w</mode>
+<newline>False</newline>
 </write_to_file>
 
 Example: Requesting to write to frontend-config.json
@@ -51,6 +53,9 @@ Example: Requesting to write to frontend-config.json
     if content.startswith("---\n") and (path.endswith(".md") or path.endswith(".txt")):
         content = "\n" + content
 
+    if newline:
+        content = '\n' + content
+
     # 写入文件
     try:
         with open(path, mode, encoding='utf-8') as file:
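Illustrative note (not part of the package diff): a hypothetical usage of the extended tool, assuming @register_tool() returns the wrapped function and the module path matches the file layout above. The new newline flag is mainly useful with mode='a' so appended text starts on its own line:

from aient.plugins.write_file import write_to_file  # assumed import path

write_to_file("notes.md", "# Daily log")                            # create or overwrite
write_to_file("notes.md", "second entry", mode="a", newline=True)   # append on a fresh line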
{aient-1.1.55 → aient-1.1.56/src/aient.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.1.55
+Version: 1.1.56
 Summary: Aient: The Awakening of Agent.
 Description-Content-Type: text/markdown
 License-File: LICENSE
{aient-1.1.55 → aient-1.1.56}/test/test_API.py
@@ -1,6 +1,6 @@
 def replace_with_asterisk(string, start=15, end=40):
     return string[:start] + '*' * (end - start) + string[end:]
 
-original_string = "sk-zIuWeeuWY8vNCVhhHCXLroNmA6QhBxnv0ARMFcODVQwwqGRg"
+original_string = "sk-bvgiugvigycycyrfctdyxdxrts"
 result = replace_with_asterisk(original_string)
 print(result)