auto-coder 0.1.365__py3-none-any.whl → 0.1.366__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/METADATA +1 -1
- {auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/RECORD +12 -12
- autocoder/agent/base_agentic/base_agent.py +23 -8
- autocoder/common/file_checkpoint/manager.py +1 -0
- autocoder/common/utils_code_auto_generate.py +1 -1
- autocoder/common/v2/agent/agentic_edit.py +32 -14
- autocoder/events/event_manager_singleton.py +1 -2
- autocoder/version.py +1 -1
- {auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/RECORD
CHANGED

@@ -14,7 +14,7 @@ autocoder/command_parser.py,sha256=fx1g9E6GaM273lGTcJqaFQ-hoksS_Ik2glBMnVltPCE,1
 autocoder/lang.py,sha256=PFtATuOhHRnfpqHQkXr6p4C893JvpsgwTMif3l-GEi0,14321
 autocoder/models.py,sha256=Gu50IATQtZtgEir1PpCfwgH6o4ygw6XqqbQRj3lx5dU,13798
 autocoder/run_context.py,sha256=IUfSO6_gp2Wt1blFWAmOpN0b0nDrTTk4LmtCYUBIoro,1643
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=A7C7tSowjNmr_-5lcW2Fq9hyKMLQ4SiVmIfg6RE9CAM,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/agentic_edit.py,sha256=XsfePZ-t6M-uBSdG1VLZXk1goqXk2HPeJ_A8IYyBuWQ,58896
 autocoder/agent/agentic_edit_types.py,sha256=oFcDd_cxJ2yH9Ed1uTpD3BipudgoIEWDMPb5pAkq4gI,3288
@@ -47,7 +47,7 @@ autocoder/agent/base_agentic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
 autocoder/agent/base_agentic/agent_hub.py,sha256=3Al9rCKu-SRgAs1kvnMe4VIIErTIw85QTkhfFu5omO8,5605
 autocoder/agent/base_agentic/agentic_lang.py,sha256=UCq1NY9USaYJakTWc-3cv_MeHxAb6se1PI4lsSwGrPM,3657
 autocoder/agent/base_agentic/agentic_tool_display.py,sha256=UnAq8ovvpu88KLk19Ff0TW-Dq-k7YiRwZiIJgcYPwiY,6989
-autocoder/agent/base_agentic/base_agent.py,sha256=
+autocoder/agent/base_agentic/base_agent.py,sha256=H7dOBSrYbVhMF7VBJV6qtq2kSa1iF5pfNwqtHIEU9uw,86371
 autocoder/agent/base_agentic/default_tools.py,sha256=ggtJjysejfDWD6JoG4vBDFrnXFFPRm7JsVVG6XcKer8,33060
 autocoder/agent/base_agentic/test_base_agent.py,sha256=jok9f-DoEagzZRWjk-Zpy3gKw2ztZrsNzEc0XlvE7HU,2804
 autocoder/agent/base_agentic/tool_registry.py,sha256=YFnUXJ78y7g3pm3yGgrhZ-0mx-C1ctdcA0r_ljGiE6o,14292
@@ -141,7 +141,7 @@ autocoder/common/test_run_cmd.py,sha256=0piPrNnxTPS8vJRnsVH6-lgB5zeLaXSRY5pPH13H
 autocoder/common/text.py,sha256=KGRQq314GHBmY4MWG8ossRoQi1_DTotvhxchpn78c-k,1003
 autocoder/common/token_cost_caculate.py,sha256=MSWJtl7YpQSUt-gFQoqUcJMblyPqHXe2ZioiZOFkV80,10085
 autocoder/common/types.py,sha256=Cw_4RH-rGmAgQE-Ck69maMAMqlPCDA4Yj37QmuUY0mQ,713
-autocoder/common/utils_code_auto_generate.py,sha256=
+autocoder/common/utils_code_auto_generate.py,sha256=sqtLmVxRveRwqLwJ8UlDgwngwmh2AAo3vgl-I-ALcDQ,4597
 autocoder/common/conversations/__init__.py,sha256=xGZeOFrDsgg2fkPK1zmvYBDhAyX66FtgOcZaxhYKJXU,1338
 autocoder/common/conversations/compatibility.py,sha256=WuBXB4-dw5X9LUMsB16VWbihvRZQ1tT99m6zuBwDfqE,9606
 autocoder/common/conversations/conversation_manager.py,sha256=ZhuhfSdOTncqgy3nHPoEU7Cg0dCsSl-VPcvLbUlL2Tk,18295
@@ -153,7 +153,7 @@ autocoder/common/file_checkpoint/__init__.py,sha256=qwoM0tIU-IMr-zGVCMN8yZtmz0NW
 autocoder/common/file_checkpoint/backup.py,sha256=JO26vOG9k7d8b5jgT24PdccSrTuPqKghp1nz5cmjSiE,8813
 autocoder/common/file_checkpoint/conversation_checkpoint.py,sha256=SFSTjA0fF5rsHlYdLQ-Dr9dfDl5JihndhjeqhN3OuMY,6322
 autocoder/common/file_checkpoint/examples.py,sha256=HTik8E0ddvKjEPGwzizWJBHIP9URrWRyRUOKSjYRUG8,6272
-autocoder/common/file_checkpoint/manager.py,sha256=
+autocoder/common/file_checkpoint/manager.py,sha256=H1s1l8biFGpgrEHZK7Em59DqLSvSgguilSfWRXmRF7c,23769
 autocoder/common/file_checkpoint/models.py,sha256=dcZL2QGnklsa_BV_QY81fH-H5hYfhelXrH6GSrubMZo,4730
 autocoder/common/file_checkpoint/store.py,sha256=dgQe-1O_gPJ3QU6tHihGRp0G2jgD2IDxQ-w9zM6Yq54,12920
 autocoder/common/file_checkpoint/test_backup.py,sha256=Z9Y2RyGqxwKPNc7nW-2jtsMAYzqt0qZGzLoq3pn2zCI,8930
@@ -187,7 +187,7 @@ autocoder/common/v2/code_editblock_manager.py,sha256=DMwJw-FAM6VyaBQV3p4xespHpgZ
 autocoder/common/v2/code_manager.py,sha256=C403bS-f6urixwitlKHcml-J03hci-UyNwHJOqBiY6Q,9182
 autocoder/common/v2/code_strict_diff_manager.py,sha256=Bys7tFAq4G03R1zUZuxrszBTvP4QB96jIw2y5BDLyRM,9424
 autocoder/common/v2/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/common/v2/agent/agentic_edit.py,sha256=
+autocoder/common/v2/agent/agentic_edit.py,sha256=Y4eQwEHHmtt8oQNHzkYcFQW2l8aEDQ4V37qXQ4YCw0o,115955
 autocoder/common/v2/agent/agentic_edit_conversation.py,sha256=pFgWPWHKhZ4J9EcFmIdiGsrSolTZuYcH1qkgKdD8nwk,7726
 autocoder/common/v2/agent/agentic_edit_types.py,sha256=nEcZc2MOZ_fQLaJX-YDha_x9Iim22ao4tykYM2iIy4k,4908
 autocoder/common/v2/agent/agentic_tool_display.py,sha256=-a-JTQLc4q03E_rdIILKMI0B6DHN-5gcGlrqq-mBYK4,7239
@@ -235,7 +235,7 @@ autocoder/dispacher/actions/plugins/action_translate.py,sha256=GEn7dZA22jy5WyzIN
 autocoder/events/__init__.py,sha256=1x_juwr9Ows2RADDa2LyI4QlmPxOVOXZeLO1cht-slM,1443
 autocoder/events/event_content.py,sha256=eLHf5M1BifSqhzzEBgAWKn3JD5_z_1mWeNdZ53TpMqk,12240
 autocoder/events/event_manager.py,sha256=ObbvPfNrrhC85w5VvsnLS9oy92oHEwqMN08qGPReNNA,11884
-autocoder/events/event_manager_singleton.py,sha256=
+autocoder/events/event_manager_singleton.py,sha256=HOyDeiJGhLcC1yirHavtg-PG9faWXhQEIFrb0sZBtec,14621
 autocoder/events/event_store.py,sha256=y6tT3P-o3yhDptrKi-UmqI_ZBNg7v21FriI3f7lo_ME,12709
 autocoder/events/event_types.py,sha256=W_S6PTDIBdufcuPosgz64iITzQy79flL8s3hWB-vZ9o,3638
 autocoder/helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -354,9 +354,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=t902pKxQ5xM7zgIHiAOsTPLwxhE6VuvXAqPy751S7fg,14096
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
+auto_coder-0.1.366.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.366.dist-info/METADATA,sha256=Lr6o3S0iHyom02KXAn3fOC3uqCQDBDtbZpwGcq-_2qs,2775
+auto_coder-0.1.366.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.366.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.366.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.366.dist-info/RECORD,,
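The RECORD entries above use the standard wheel RECORD format: path, a sha256= digest (URL-safe base64 of the raw SHA-256, with the trailing "=" padding stripped), and the file size in bytes. The entries whose hashes changed are exactly the files listed as modified at the top of this diff. A minimal sketch of how such a line can be recomputed for a local file (the helper name and the example path are illustrative, not part of auto-coder):

```python
import base64
import hashlib


def record_entry(path: str) -> str:
    """Build a wheel-RECORD style line: <path>,sha256=<urlsafe b64 digest, unpadded>,<size>."""
    with open(path, "rb") as f:
        data = f.read()
    digest = hashlib.sha256(data).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{len(data)}"


if __name__ == "__main__":
    # Compare the output against the corresponding RECORD line of the installed wheel.
    print(record_entry("autocoder/version.py"))
```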
autocoder/agent/base_agentic/base_agent.py
CHANGED

@@ -794,10 +794,18 @@ class BaseAgent(ABC):
                 llm_response_gen)

             event_count = 0
+            mark_event_should_finish = False
             for event in parsed_events:
                 event_count += 1
                 logger.info(f"Processing event #{event_count}: {type(event).__name__}")
                 global_cancel.check_and_raise(token=self.args.event_file)
+
+                if mark_event_should_finish:
+                    if isinstance(event, TokenUsageEvent):
+                        logger.info("Yielding token usage event")
+                        yield event
+                    continue
+
                 if isinstance(event, (LLMOutputEvent, LLMThinkingEvent)):
                     assistant_buffer += event.text
                     logger.debug(f"Accumulated {len(assistant_buffer)} chars in assistant buffer")
@@ -903,7 +911,11 @@ class BaseAgent(ABC):
                     logger.info(
                         f"Added tool result to conversations for tool {type(tool_obj).__name__}")
                     logger.info(f"Breaking LLM cycle after executing tool: {tool_name}")
-
+
+                    # Only one tool call is allowed per interaction, so the remaining events are effectively unused. But if we
+                    # don't let the stream finish, we can't get the server-side token usage and billing, so this flag idles the loop until the stream completes and the final token usage and billing are received.
+                    mark_event_should_finish = True
+                    # break # After tool execution and result, break to start a new LLM cycle

                 elif isinstance(event, ErrorEvent):
                     logger.error(f"Error event occurred: {event.message}")
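The two hunks above change how the event loop ends after a tool call: instead of breaking out of the loop (the old break stays commented out), the new mark_event_should_finish flag keeps the loop consuming the parsed stream but discards everything except TokenUsageEvent, so the usage and billing figures that only arrive at the end of the stream are still surfaced. A self-contained sketch of that drain pattern, using simplified stand-in event classes rather than the real auto-coder types:

```python
from dataclasses import dataclass


@dataclass
class LLMOutputEvent:
    text: str


@dataclass
class ToolCallEvent:
    name: str


@dataclass
class TokenUsageEvent:
    usage: dict


def agent_loop(parsed_events):
    """Consume events; after the first tool call, idle-drain the stream but still surface token usage."""
    mark_event_should_finish = False
    for event in parsed_events:
        if mark_event_should_finish:
            # Only the trailing usage/billing information is still interesting.
            if isinstance(event, TokenUsageEvent):
                yield event
            continue

        if isinstance(event, ToolCallEvent):
            yield event                      # the single tool call of this interaction
            mark_event_should_finish = True  # keep reading, but ignore further output
        else:
            yield event


if __name__ == "__main__":
    events = [
        LLMOutputEvent(text="thinking..."),
        ToolCallEvent(name="read_file"),
        LLMOutputEvent(text="ignored tail"),
        TokenUsageEvent(usage={"input_tokens": 120, "output_tokens": 45}),
    ]
    for ev in agent_loop(iter(events)):
        print(ev)
```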
@@ -1022,13 +1034,15 @@ class BaseAgent(ABC):
                     f"Failed to parse tool XML for <{tool_tag}>: {e}\nXML:\n{tool_xml}")
                 return None

-
+        last_metadata = None
         for content_chunk, metadata in generator:
-            global_cancel.check_and_raise(token=self.args.event_file)
-            meta_holder.meta = metadata
+            global_cancel.check_and_raise(token=self.args.event_file)
             if not content_chunk:
+                last_metadata = metadata
                 continue
-
+
+            last_metadata = metadata
+            buffer += content_chunk

             while True:
                 # Check for transitions: thinking -> text, tool -> text, text -> thinking, text -> tool
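This hunk replaces the shared meta_holder.meta assignment with a local last_metadata that is refreshed on every chunk, including empty keep-alive chunks, so whatever metadata accompanies the final chunk is what eventually gets reported as token usage. A small stand-alone illustration of the same idea with a fake (content_chunk, metadata) generator (all names here are made up for the example):

```python
def fake_llm_stream():
    """Yield (content_chunk, metadata) pairs the way a streaming chat API might."""
    yield "Hel", {"output_tokens": 1}
    yield "", {"output_tokens": 1}                       # empty keep-alive chunk still carries metadata
    yield "lo", {"output_tokens": 2}
    yield "", {"output_tokens": 2, "input_tokens": 10}   # final chunk: cumulative usage


def collect(stream):
    """Accumulate text and remember the metadata of the last chunk seen."""
    buffer = ""
    last_metadata = None
    for content_chunk, metadata in stream:
        if not content_chunk:
            last_metadata = metadata
            continue
        last_metadata = metadata
        buffer += content_chunk
    return buffer, last_metadata


if __name__ == "__main__":
    text, usage = collect(fake_llm_stream())
    print(text)   # "Hello"
    print(usage)  # {'output_tokens': 2, 'input_tokens': 10}
```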
@@ -1138,9 +1152,7 @@ class BaseAgent(ABC):

                 # If no event was processed in this iteration, break inner loop
                 if not found_event:
-                    break
-
-                yield TokenUsageEvent(usage=meta_holder.meta)
+                    break

         # After generator exhausted, yield any remaining content
         if in_thinking_block:
@@ -1157,6 +1169,9 @@ class BaseAgent(ABC):
         elif buffer:
             # Yield remaining plain text
             yield LLMOutputEvent(text=buffer)
+
+        # This must be yielded last, to avoid cutting off information that belongs to other related events
+        yield TokenUsageEvent(usage=last_metadata)

     def run_with_events(self, request: AgentRequest):
         """
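The last hunk moves the TokenUsageEvent so it is yielded only after any remaining buffered output; per the translated comment, emitting it earlier could split information that belongs to other, still-related events. A tiny consumer sketch showing why that ordering is convenient (simplified event types, not the auto-coder classes):

```python
from dataclasses import dataclass
from typing import Iterable, Union


@dataclass
class LLMOutputEvent:
    text: str


@dataclass
class TokenUsageEvent:
    usage: dict


def render(events: Iterable[Union[LLMOutputEvent, TokenUsageEvent]]) -> None:
    """Print the streamed answer, then a one-line usage summary.

    This relies on TokenUsageEvent arriving after the last LLMOutputEvent,
    which is exactly the ordering the change above enforces."""
    for event in events:
        if isinstance(event, LLMOutputEvent):
            print(event.text, end="")
        elif isinstance(event, TokenUsageEvent):
            print(f"\n--- usage: {event.usage} ---")


if __name__ == "__main__":
    render([
        LLMOutputEvent(text="Hello, "),
        LLMOutputEvent(text="world."),
        TokenUsageEvent(usage={"input_tokens": 10, "output_tokens": 4}),
    ])
```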
autocoder/common/v2/agent/agentic_edit.py
CHANGED

@@ -868,23 +868,35 @@ class AgenticEdit:
             assistant_buffer = ""
             logger.info("Initializing stream chat with LLM")

-            ## Actually send the request to the LLM
+            # ## Actually send the request to the LLM
             llm_response_gen = stream_chat_with_continue(
                 llm=self.llm,
                 conversations=conversations,
                 llm_config={}, # Placeholder for future LLM configs
                 args=self.args
             )
+
+            # llm_response_gen = self.llm.stream_chat_oai(
+            #     conversations=conversations,
+            #     delta_mode=True
+            # )

             logger.info("Starting to parse LLM response stream")
             parsed_events = self.stream_and_parse_llm_response(
                 llm_response_gen)

             event_count = 0
+            mark_event_should_finish = False
             for event in parsed_events:
-                event_count += 1
-                # logger.info(f"Processing event #{event_count}: {type(event).__name__}")
                 global_cancel.check_and_raise(token=self.args.event_file)
+                event_count += 1
+
+                if mark_event_should_finish:
+                    if isinstance(event, TokenUsageEvent):
+                        logger.info("Yielding token usage event")
+                        yield event
+                    continue
+
                 if isinstance(event, (LLMOutputEvent, LLMThinkingEvent)):
                     assistant_buffer += event.text
                     logger.debug(f"Accumulated {len(assistant_buffer)} chars in assistant buffer")
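In this hunk the active request path stays on stream_chat_with_continue (defined in autocoder/common/utils_code_auto_generate.py, which is also touched in this release), while a direct self.llm.stream_chat_oai(conversations=..., delta_mode=True) call is kept only as a commented-out alternative. The sketch below is only a guess at the general "continue when the output is truncated" idea suggested by the helper's name, not its actual implementation; in particular, the finish_reason key and the follow-up "continue" prompt are assumptions:

```python
def stream_chat_with_continue_sketch(llm, conversations, max_rounds=3):
    """Hypothetical wrapper: re-ask the model to continue while the previous round
    stopped because of an output-length limit, yielding one merged (chunk, metadata) stream."""
    convo = list(conversations)
    for _ in range(max_rounds):
        round_text = ""
        finish_reason = None
        # Assumed interface, mirroring the commented-out call shown in the diff.
        for content_chunk, metadata in llm.stream_chat_oai(conversations=convo, delta_mode=True):
            round_text += content_chunk or ""
            finish_reason = (metadata or {}).get("finish_reason")  # assumption: metadata carries finish_reason
            yield content_chunk, metadata
        if finish_reason != "length":
            break
        # Feed the partial answer back and ask for the rest of it.
        convo.append({"role": "assistant", "content": round_text})
        convo.append({"role": "user", "content": "continue"})
```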
@@ -1005,7 +1017,11 @@ class AgenticEdit:
                     logger.info(
                         f"Added tool result to conversations for tool {type(tool_obj).__name__}")
                     logger.info(f"Breaking LLM cycle after executing tool: {tool_name}")
-
+
+                    # Only one tool call is allowed per interaction, so the remaining events are effectively unused. But if we
+                    # don't let the stream finish, we can't get the server-side token usage and billing, so this flag idles the loop until the stream completes and the final token usage and billing are received.
+                    mark_event_should_finish=True
+                    # break # After tool execution and result, break to start a new LLM cycle

                 elif isinstance(event, ErrorEvent):
                     logger.error(f"Error event occurred: {event.message}")
@@ -1013,10 +1029,10 @@ class AgenticEdit:
                     # Optionally stop the process on parsing errors
                     # logger.error("Stopping analyze loop due to parsing error.")
                     # return
-
                 elif isinstance(event, TokenUsageEvent):
                     logger.info("Yielding token usage event")
-                    yield event
+                    yield event
+

             if not tool_executed:
                 # No tool executed in this LLM response cycle
@@ -1136,14 +1152,16 @@ class AgenticEdit:
                 logger.exception(
                     f"Failed to parse tool XML for <{tool_tag}>: {e}\nXML:\n{tool_xml}")
                 return None
-
-
-        for content_chunk, metadata in generator:
-            global_cancel.check_and_raise(token=self.args.event_file)
-            meta_holder.meta = metadata
+
+        last_metadata = None
+        for content_chunk, metadata in generator:
+            global_cancel.check_and_raise(token=self.args.event_file)
             if not content_chunk:
+                last_metadata = metadata
                 continue
-
+
+            last_metadata = metadata
+            buffer += content_chunk

             while True:
                 # Check for transitions: thinking -> text, tool -> text, text -> thinking, text -> tool
@@ -1271,8 +1289,8 @@ class AgenticEdit:
             # Yield remaining plain text
             yield LLMOutputEvent(text=buffer)

-        # This must be yielded last, to avoid cutting off information that belongs to other related events
-        yield TokenUsageEvent(usage=
+        # This must be yielded last, to avoid cutting off information that belongs to other related events
+        yield TokenUsageEvent(usage=last_metadata)


     def apply_pre_changes(self):
autocoder/events/event_manager_singleton.py
CHANGED

@@ -147,8 +147,7 @@ class EventManagerSingleton:
         # Exclude the default events.jsonl file
         event_files = [f for f in event_files if os.path.basename(f) != "events.jsonl"]

-        if len(event_files) <= cls._max_event_files:
-            logger.info(f"Number of event files ({len(event_files)}) does not exceed the maximum to keep ({cls._max_event_files}); no cleanup needed")
+        if len(event_files) <= cls._max_event_files:
            return

         # Parse the timestamp in the file name, format: uuid_YYYYMMDD-HHMMSS.jsonl

autocoder/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "0.1.365"
+__version__ = "0.1.366"
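Returning to the event_manager_singleton.py hunk above: the only change is dropping the log line on the early-return path, while the surrounding retention logic stays the same, collect the *.jsonl event files (excluding events.jsonl), return if there are no more than _max_event_files, and otherwise use the uuid_YYYYMMDD-HHMMSS.jsonl timestamp in each file name to decide which old files to remove. A stand-alone sketch of that kind of cleanup, assuming the policy is "keep the newest N" (the directory argument, the limit, and the fallback behaviour are made up for the example):

```python
import os
import re
from datetime import datetime
from glob import glob

# Matches names like <uuid>_20240511-093015.jsonl and captures the timestamp part.
TIMESTAMP_RE = re.compile(r"_(\d{8}-\d{6})\.jsonl$")


def cleanup_event_files(events_dir: str, max_event_files: int = 50) -> None:
    """Keep the newest `max_event_files` event files, deleting older ones by filename timestamp."""
    event_files = [
        f for f in glob(os.path.join(events_dir, "*.jsonl"))
        if os.path.basename(f) != "events.jsonl"
    ]
    if len(event_files) <= max_event_files:
        return

    def file_time(path: str) -> datetime:
        match = TIMESTAMP_RE.search(os.path.basename(path))
        if match:
            return datetime.strptime(match.group(1), "%Y%m%d-%H%M%S")
        return datetime.fromtimestamp(os.path.getmtime(path))  # fallback for unexpected names

    event_files.sort(key=file_time, reverse=True)  # newest first
    for old_file in event_files[max_event_files:]:
        os.remove(old_file)
```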
{auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/LICENSE
File without changes
{auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/WHEEL
File without changes
{auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/entry_points.txt
File without changes
{auto_coder-0.1.365.dist-info → auto_coder-0.1.366.dist-info}/top_level.txt
File without changes