auto-coder 0.1.348__py3-none-any.whl → 0.1.350__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic. Click here for more details.
- {auto_coder-0.1.348.dist-info → auto_coder-0.1.350.dist-info}/METADATA +1 -1
- {auto_coder-0.1.348.dist-info → auto_coder-0.1.350.dist-info}/RECORD +35 -26
- autocoder/auto_coder_runner.py +14 -10
- autocoder/chat_auto_coder_lang.py +5 -3
- autocoder/common/model_speed_tester.py +392 -0
- autocoder/common/printer.py +7 -8
- autocoder/common/run_cmd.py +247 -0
- autocoder/common/test_run_cmd.py +110 -0
- autocoder/common/v2/agent/agentic_edit.py +61 -11
- autocoder/common/v2/agent/agentic_edit_conversation.py +9 -0
- autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py +21 -36
- autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +4 -7
- autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +2 -5
- autocoder/helper/rag_doc_creator.py +141 -0
- autocoder/ignorefiles/__init__.py +4 -0
- autocoder/ignorefiles/ignore_file_utils.py +63 -0
- autocoder/ignorefiles/test_ignore_file_utils.py +91 -0
- autocoder/models.py +48 -8
- autocoder/rag/cache/byzer_storage_cache.py +10 -4
- autocoder/rag/cache/file_monitor_cache.py +27 -24
- autocoder/rag/cache/local_byzer_storage_cache.py +11 -5
- autocoder/rag/cache/local_duckdb_storage_cache.py +203 -128
- autocoder/rag/cache/simple_cache.py +56 -37
- autocoder/rag/loaders/filter_utils.py +106 -0
- autocoder/rag/loaders/image_loader.py +45 -23
- autocoder/rag/loaders/pdf_loader.py +3 -3
- autocoder/rag/loaders/test_image_loader.py +209 -0
- autocoder/rag/qa_conversation_strategy.py +3 -5
- autocoder/rag/utils.py +20 -9
- autocoder/utils/_markitdown.py +35 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.348.dist-info → auto_coder-0.1.350.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.348.dist-info → auto_coder-0.1.350.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.348.dist-info → auto_coder-0.1.350.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.348.dist-info → auto_coder-0.1.350.dist-info}/top_level.txt +0 -0
|
@@ -4,17 +4,17 @@ autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,
|
|
|
4
4
|
autocoder/auto_coder_rag.py,sha256=NesRm7sIJrRQL1xxm_lbMtM7gi-KrYv9f26RfBuloZE,35386
|
|
5
5
|
autocoder/auto_coder_rag_client_mcp.py,sha256=QRxUbjc6A8UmDMQ8lXgZkjgqtq3lgKYeatJbDY6rSo0,6270
|
|
6
6
|
autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
|
|
7
|
-
autocoder/auto_coder_runner.py,sha256=
|
|
7
|
+
autocoder/auto_coder_runner.py,sha256=aV5QqUK6NJrshvazMJkAJPQEiwq2tXRWWeVbOiB8neg,112193
|
|
8
8
|
autocoder/auto_coder_server.py,sha256=bLORGEclcVdbBVfM140JCI8WtdrU0jbgqdJIVVupiEU,20578
|
|
9
9
|
autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
|
|
10
10
|
autocoder/chat_auto_coder.py,sha256=CthuvdjVjTQOVv-zREsl8OCsZHPSP9OQcIgHULrW2Ro,25842
|
|
11
|
-
autocoder/chat_auto_coder_lang.py,sha256=
|
|
11
|
+
autocoder/chat_auto_coder_lang.py,sha256=LecXAvbNFVURwXFF1QOs-n04vto37QwUvoeDMeFrLUk,22182
|
|
12
12
|
autocoder/command_args.py,sha256=HxflngkYtTrV17Vfgk6lyUyiG68jP2ftSc7FYr9AXwY,30585
|
|
13
13
|
autocoder/command_parser.py,sha256=fx1g9E6GaM273lGTcJqaFQ-hoksS_Ik2glBMnVltPCE,10013
|
|
14
14
|
autocoder/lang.py,sha256=PFtATuOhHRnfpqHQkXr6p4C893JvpsgwTMif3l-GEi0,14321
|
|
15
|
-
autocoder/models.py,sha256=
|
|
15
|
+
autocoder/models.py,sha256=4szjN42LnAaiYZC2ID6zKy7w3snoeJjUpBB-QPDafbg,13022
|
|
16
16
|
autocoder/run_context.py,sha256=IUfSO6_gp2Wt1blFWAmOpN0b0nDrTTk4LmtCYUBIoro,1643
|
|
17
|
-
autocoder/version.py,sha256=
|
|
17
|
+
autocoder/version.py,sha256=4GdaKb2yxVbVMcOZO75H8eKOPFNgcD3T7wcoWCwtJjs,23
|
|
18
18
|
autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
19
19
|
autocoder/agent/agentic_edit.py,sha256=XsfePZ-t6M-uBSdG1VLZXk1goqXk2HPeJ_A8IYyBuWQ,58896
|
|
20
20
|
autocoder/agent/agentic_edit_types.py,sha256=oFcDd_cxJ2yH9Ed1uTpD3BipudgoIEWDMPb5pAkq4gI,3288
|
|
@@ -93,10 +93,12 @@ autocoder/common/mcp_server_types.py,sha256=ijGnMID7Egq3oOn2t7_BJj7JUisDwhUyClZC
|
|
|
93
93
|
autocoder/common/mcp_tools.py,sha256=YdEhDzRnwAr2J3D-23ExIQFWbrNO-EUpIxg179qs9Sw,12666
|
|
94
94
|
autocoder/common/memory_manager.py,sha256=Xx6Yv0ULxVfcFfmD36hdHFFhxCgRAs-5fTd0fLHJrpQ,3773
|
|
95
95
|
autocoder/common/model_speed_test.py,sha256=U48xUUpOnbwUal1cdij4YAn_H2PD2pNaqrMHaYtQRfI,15200
|
|
96
|
+
autocoder/common/model_speed_tester.py,sha256=U48xUUpOnbwUal1cdij4YAn_H2PD2pNaqrMHaYtQRfI,15200
|
|
96
97
|
autocoder/common/openai_content.py,sha256=M_V_UyHrqNVWjgrYvxfAupZw2I0Nr3iilYv6SxSvfLA,8091
|
|
97
|
-
autocoder/common/printer.py,sha256=
|
|
98
|
+
autocoder/common/printer.py,sha256=Xs6xM_BCSA-0wvaLUWLd_f23gB_ghFk_1HqiJX3dX1c,1987
|
|
98
99
|
autocoder/common/recall_validation.py,sha256=Avt9Q9dX3kG6Pf2zsdlOHmsjd-OeSj7U1PFBDp_Cve0,1700
|
|
99
100
|
autocoder/common/result_manager.py,sha256=nBcFRj5reBC7vp13M91f4B8iPW8B8OehayHlUdeAt1g,3776
|
|
101
|
+
autocoder/common/run_cmd.py,sha256=2VrJpeqooasUoc-WKVrvFfesmRR55kOpPmmYgpQrKVc,8283
|
|
100
102
|
autocoder/common/screenshots.py,sha256=_gA-z1HxGjPShBrtgkdideq58MG6rqFB2qMUJKjrycs,3769
|
|
101
103
|
autocoder/common/search.py,sha256=245iPFgWhMldoUK3CqCP89ltaxZiNPK73evoG6Fp1h8,16518
|
|
102
104
|
autocoder/common/search_replace.py,sha256=GphFkc57Hb673CAwmbiocqTbw8vrV7TrZxtOhD0332g,22147
|
|
@@ -104,6 +106,7 @@ autocoder/common/shells.py,sha256=elminFpNosnV0hsEUcsugDxlGO8NfH96uah-8bkaBvA,19
|
|
|
104
106
|
autocoder/common/stats_panel.py,sha256=wGl9O45pjVVDxhNumLv4_NfLYSlUP_18Tw4hcJSjw50,4596
|
|
105
107
|
autocoder/common/stream_out_type.py,sha256=B9lBzCK3aWJq86KvNw6duNfTEDYb_3ZiPRGxGhe6JKU,765
|
|
106
108
|
autocoder/common/sys_prompt.py,sha256=JlexfjZt554faqbgkCmzOJqYUzDHfbnxly5ugFfHfEE,26403
|
|
109
|
+
autocoder/common/test_run_cmd.py,sha256=0piPrNnxTPS8vJRnsVH6-lgB5zeLaXSRY5pPH13HJhc,3470
|
|
107
110
|
autocoder/common/text.py,sha256=KGRQq314GHBmY4MWG8ossRoQi1_DTotvhxchpn78c-k,1003
|
|
108
111
|
autocoder/common/token_cost_caculate.py,sha256=MSWJtl7YpQSUt-gFQoqUcJMblyPqHXe2ZioiZOFkV80,10085
|
|
109
112
|
autocoder/common/types.py,sha256=Cw_4RH-rGmAgQE-Ck69maMAMqlPCDA4Yj37QmuUY0mQ,713
|
|
@@ -126,8 +129,8 @@ autocoder/common/v2/code_editblock_manager.py,sha256=G0CIuV9Ki0FqMLnpA8nBT4pnkCN
|
|
|
126
129
|
autocoder/common/v2/code_manager.py,sha256=C403bS-f6urixwitlKHcml-J03hci-UyNwHJOqBiY6Q,9182
|
|
127
130
|
autocoder/common/v2/code_strict_diff_manager.py,sha256=v-J1kDyLg7tLGg_6_lbO9S4fNkx7M_L8Xr2G7fPptiU,9347
|
|
128
131
|
autocoder/common/v2/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
129
|
-
autocoder/common/v2/agent/agentic_edit.py,sha256=
|
|
130
|
-
autocoder/common/v2/agent/agentic_edit_conversation.py,sha256=
|
|
132
|
+
autocoder/common/v2/agent/agentic_edit.py,sha256=e2ekrknA34CNpx4HrLRTK8rsKVp_bYu4rdO_rpvFGvc,98209
|
|
133
|
+
autocoder/common/v2/agent/agentic_edit_conversation.py,sha256=pFgWPWHKhZ4J9EcFmIdiGsrSolTZuYcH1qkgKdD8nwk,7726
|
|
131
134
|
autocoder/common/v2/agent/agentic_edit_types.py,sha256=VJMrictg6hJ3mC45VgQGRd43DyDUPDUvPV1Rf3z72NI,4776
|
|
132
135
|
autocoder/common/v2/agent/agentic_tool_display.py,sha256=WKirt-2V346KLnbHgH3NVJiK3xvriD9oaCWj2IdvzLU,7309
|
|
133
136
|
autocoder/common/v2/agent/ignore_utils.py,sha256=gnUchRzKMLbUm_jvnKL-r-K9MWKPtt-6iiuzijY7Es0,1717
|
|
@@ -135,14 +138,14 @@ autocoder/common/v2/agent/agentic_edit_tools/__init__.py,sha256=RbPZZcZg_VnGssL5
|
|
|
135
138
|
autocoder/common/v2/agent/agentic_edit_tools/ask_followup_question_tool_resolver.py,sha256=bwtf4m9N82TCP3piK5UglJk1FVFFm7ZX59XerA2qxko,3131
|
|
136
139
|
autocoder/common/v2/agent/agentic_edit_tools/attempt_completion_tool_resolver.py,sha256=82ZGKeRBSDKeead_XVBW4FxpiE-5dS7tBOk_3RZ6B5s,1511
|
|
137
140
|
autocoder/common/v2/agent/agentic_edit_tools/base_tool_resolver.py,sha256=Zid2m1uZd-2wVFGc_n_KAViXZyNjbdLSpI5n7ut1RUQ,1036
|
|
138
|
-
autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py,sha256=
|
|
141
|
+
autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py,sha256=sX00xzczfmyW6yPG3nMm0xO8p-WARQTiD4jcoUiTxsg,3844
|
|
139
142
|
autocoder/common/v2/agent/agentic_edit_tools/list_code_definition_names_tool_resolver.py,sha256=8QoMsADUDWliqiDt_dpguz31403syB8eeW0Pcw-qfb8,3842
|
|
140
|
-
autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py,sha256=
|
|
143
|
+
autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py,sha256=nIEgdWdTLa7nLVSMTeoRx1u5Gmh9JGt8MXsZHPk2l2A,5468
|
|
141
144
|
autocoder/common/v2/agent/agentic_edit_tools/list_package_info_tool_resolver.py,sha256=dIdV12VuczHpHuHgx2B1j_3BZYc9PL0jfHCuBk9ryk8,2005
|
|
142
145
|
autocoder/common/v2/agent/agentic_edit_tools/plan_mode_respond_tool_resolver.py,sha256=lGT4_QYJK6Fa9f6HVSGo0cSsGK7qCsDYgJGUowNxPzk,1499
|
|
143
146
|
autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py,sha256=9Bh0KVbL0qiIqwChlb77biiBiETQ3zekxGe5Fj7hXAg,2800
|
|
144
147
|
autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py,sha256=lpD4fCbVR8GTrynqXON69IjM94nPy3nuUL62Ashm5O4,7988
|
|
145
|
-
autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py,sha256=
|
|
148
|
+
autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py,sha256=mKztNjR5dmbyJ2aPcMkOuf5krhYPWbNRvIGY8Qp8dWU,5502
|
|
146
149
|
autocoder/common/v2/agent/agentic_edit_tools/use_mcp_tool_resolver.py,sha256=wM2Xy4bcnD0TSLEmcM8rvvyyWenN5_KQnJMO6hJ8lTE,1716
|
|
147
150
|
autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py,sha256=UO4SrkDek3WDlRdlHH022W1roSNMdMcipJqDxRBlheM,3044
|
|
148
151
|
autocoder/compilers/__init__.py,sha256=C0HOms70QA747XD0uZEMmGtRFcIPenohyqECNStv0Bw,1647
|
|
@@ -176,6 +179,10 @@ autocoder/events/event_store.py,sha256=y6tT3P-o3yhDptrKi-UmqI_ZBNg7v21FriI3f7lo_
|
|
|
176
179
|
autocoder/events/event_types.py,sha256=W_S6PTDIBdufcuPosgz64iITzQy79flL8s3hWB-vZ9o,3638
|
|
177
180
|
autocoder/helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
178
181
|
autocoder/helper/project_creator.py,sha256=sgXrZyAaGc84aUT7K7d7N1ztT-mSNGoLnsT-uKMUvVw,21472
|
|
182
|
+
autocoder/helper/rag_doc_creator.py,sha256=A3lB_jr1KU4bxLbBTX9-nxyylwDirxSi1NXmbPTnp90,4386
|
|
183
|
+
autocoder/ignorefiles/__init__.py,sha256=P0hq7Avu1IeXBYEkPBZLsJhFzhzyktUWTqaRIXiAFLY,75
|
|
184
|
+
autocoder/ignorefiles/ignore_file_utils.py,sha256=atJ_LEhRn-3NamBFl0Y9hJPG0cEt3nL9lVGHBweEOW0,1782
|
|
185
|
+
autocoder/ignorefiles/test_ignore_file_utils.py,sha256=961_5ilCgzyo09Luj457A4694OzZDggmQEoiAkldMcU,3104
|
|
179
186
|
autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
180
187
|
autocoder/index/entry.py,sha256=JDONxqd8v201JwlxxKraCIKdOJa0cFhCCx74eHqWiu4,15210
|
|
181
188
|
autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
|
|
@@ -216,7 +223,7 @@ autocoder/rag/document_retriever.py,sha256=rFwbAuHTvEFJq16HQNlmRLyJp2ddn2RNFslw_
|
|
|
216
223
|
autocoder/rag/lang.py,sha256=HvcMeu6jReEJOGxyLMn4rwBoD-myFwmykS3VLceBJLs,3364
|
|
217
224
|
autocoder/rag/llm_wrapper.py,sha256=Ht5GF5yJtrztoliujsZzx_ooWZmHkd5xLZKcGEiicZw,4303
|
|
218
225
|
autocoder/rag/long_context_rag.py,sha256=syPIxO_TQJpBgjZ0taF-G7xVGvkNjKWL65KTI-sy4io,42234
|
|
219
|
-
autocoder/rag/qa_conversation_strategy.py,sha256=
|
|
226
|
+
autocoder/rag/qa_conversation_strategy.py,sha256=8kHbxc7RJQ1abCNH8psYDpDI7scR040y5GNtCkqIRY4,11707
|
|
220
227
|
autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
|
|
221
228
|
autocoder/rag/rag_entry.py,sha256=6TKtErZ0Us9XSV6HgRKXA6yR3SiZGPHpynOKSaR1wgE,2463
|
|
222
229
|
autocoder/rag/raw_rag.py,sha256=BOr0YGf3umjqXOIDVO1LXQ0bIHx8hzBdiubND2ezyxc,2946
|
|
@@ -229,24 +236,26 @@ autocoder/rag/token_counter.py,sha256=C-Lwc4oIjJpZDEqp9WLHGOe6hb4yhrdJpMtkrtp_1q
|
|
|
229
236
|
autocoder/rag/token_limiter.py,sha256=3VgJF4may3ESyATmBIiOe05oc3VsidJcJTJ5EhoSvH8,18854
|
|
230
237
|
autocoder/rag/token_limiter_utils.py,sha256=FATNEXBnFJy8IK3PWNt1pspIv8wuTgy3F_ACNvqoc4I,404
|
|
231
238
|
autocoder/rag/types.py,sha256=WPgLpUTwbk0BAikyDOc0NOEwV5k73myF38zWdOuYdC4,2499
|
|
232
|
-
autocoder/rag/utils.py,sha256=
|
|
239
|
+
autocoder/rag/utils.py,sha256=q9zvjQkX7-gVzrxmHdHXGbGpYDDpLGsm5vAt1IatZaA,5431
|
|
233
240
|
autocoder/rag/variable_holder.py,sha256=PFvBjFcR7-fNDD4Vcsc8CpH2Te057vcpwJMxtrfUgKI,75
|
|
234
241
|
autocoder/rag/cache/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
235
242
|
autocoder/rag/cache/base_cache.py,sha256=EaYYYbclMBvnlOUoM7qonnluwZX5oSvUjdvGvFun8_8,742
|
|
236
|
-
autocoder/rag/cache/byzer_storage_cache.py,sha256=
|
|
243
|
+
autocoder/rag/cache/byzer_storage_cache.py,sha256=jq3drQo5r21NO03_wB1P_kSFcaWVrNIAGWgBndkaS68,28367
|
|
237
244
|
autocoder/rag/cache/cache_result_merge.py,sha256=VnTdbT2OMBmWl_83bqds97d9_M33IhPNX8tF7KH2GMM,10556
|
|
238
245
|
autocoder/rag/cache/failed_files_utils.py,sha256=kITguXANLC3EEJy5JoKzNXrtwvTkmZT-ANPwcno42Ck,1183
|
|
239
|
-
autocoder/rag/cache/file_monitor_cache.py,sha256=
|
|
240
|
-
autocoder/rag/cache/local_byzer_storage_cache.py,sha256=
|
|
241
|
-
autocoder/rag/cache/local_duckdb_storage_cache.py,sha256=
|
|
246
|
+
autocoder/rag/cache/file_monitor_cache.py,sha256=lwNrm8epdA3ubc3X3q_BCU1zr_Ul5gEOaM5X5ICeeeQ,9580
|
|
247
|
+
autocoder/rag/cache/local_byzer_storage_cache.py,sha256=KtJimtBxsX2YC6OtznZ3tzp32zW6XjzEtF78EAEqlDY,31187
|
|
248
|
+
autocoder/rag/cache/local_duckdb_storage_cache.py,sha256=Kh0K9uu0JvU8u_kOfTzjHbsPmeK4b9dDoEyTLXV4-cE,35695
|
|
242
249
|
autocoder/rag/cache/rag_file_meta.py,sha256=RQ3n4wfkHlB-1ljS3sFSi8ijbsUPeIqBSgjmmbRuwRI,20521
|
|
243
|
-
autocoder/rag/cache/simple_cache.py,sha256=
|
|
250
|
+
autocoder/rag/cache/simple_cache.py,sha256=j4la869WzJeGUN3YLujjIqarKWS0NHuVPQT1zcGsAro,16747
|
|
244
251
|
autocoder/rag/loaders/__init__.py,sha256=EQHEZ5Cmz-mGP2SllUTvcIbYCnF7W149dNpNItfs0yE,304
|
|
245
252
|
autocoder/rag/loaders/docx_loader.py,sha256=ZswPqiiLngUEpzLhNNm1nmwEYV7ZHFEfIoXoG7c5GDU,614
|
|
246
253
|
autocoder/rag/loaders/excel_loader.py,sha256=Ue8YB1z_kBs8SjIPuBskyM08Q1JiONs_BJZPrzi59oo,896
|
|
247
|
-
autocoder/rag/loaders/
|
|
248
|
-
autocoder/rag/loaders/
|
|
254
|
+
autocoder/rag/loaders/filter_utils.py,sha256=asi8eJCltIxWTPuFGD8JU3Lnx17vgrZ0TnbLwsHszp4,3466
|
|
255
|
+
autocoder/rag/loaders/image_loader.py,sha256=FlEQMGEQEwFHeUKWfIBJBGglG2N7wFW6JF2HmO6AcH4,21945
|
|
256
|
+
autocoder/rag/loaders/pdf_loader.py,sha256=9cl4EAiz4TQaIxmlZnoagK7weTpOBNpWsq_DzvGkRdc,763
|
|
249
257
|
autocoder/rag/loaders/ppt_loader.py,sha256=7VEYc-bqgK8VHCoGC3DIUcqbpda-E5jQF9lYLqP256I,1681
|
|
258
|
+
autocoder/rag/loaders/test_image_loader.py,sha256=oy_j8xkFgPZ0vhz6czTVPh7IwdVWkgh5gE-UA2xCATQ,5753
|
|
250
259
|
autocoder/rag/stream_event/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
251
260
|
autocoder/rag/stream_event/event_writer.py,sha256=l7kq_LnDDE8E5dZ-73C7J2MgzSL7WrozdXk0eV-k55Q,409
|
|
252
261
|
autocoder/rag/stream_event/types.py,sha256=rtLwOE8rShmi1dJdxyBpAV5ZjLBGG9vptMiSzMxGuIA,318
|
|
@@ -257,7 +266,7 @@ autocoder/shadows/shadow_manager.py,sha256=_DINnvD3oPaHopagL3B3PL0FFfEz7U6DYSEvE
|
|
|
257
266
|
autocoder/suffixproject/__init__.py,sha256=Rew-M9W4pgO9cvw9UCdrc6QVCPdBhVcIpPBnJxrLJ3M,10374
|
|
258
267
|
autocoder/tsproject/__init__.py,sha256=e_TWVyXQQxYKsXqdQZuFVqNCQLdtBVNJRTs0fgLXVdA,11055
|
|
259
268
|
autocoder/utils/__init__.py,sha256=W47ac6IOZhNR1rdbho9fvhHnPI_N1i4oMcZOwxLelbU,1123
|
|
260
|
-
autocoder/utils/_markitdown.py,sha256=
|
|
269
|
+
autocoder/utils/_markitdown.py,sha256=ZaVksHrFUIkNFhS09_KaRUZFYUq_TQa_eK_Xf9Rw9nk,48957
|
|
261
270
|
autocoder/utils/auto_project_type.py,sha256=9_-wE9aavjbPiNSUVKxttJAdu5i5fu-zHyPYHr5XtWk,4422
|
|
262
271
|
autocoder/utils/coder.py,sha256=rK8e0svQBe0NOP26dIGToUXgha_hUDgxlWoC_p_r7oc,5698
|
|
263
272
|
autocoder/utils/conversation_store.py,sha256=esd9zLarKYe0ZsYqjjwHc_ksmVQDDEhVt-Ejul2oyys,1178
|
|
@@ -280,9 +289,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
|
280
289
|
autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
281
290
|
autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=KW0mlmcHlStXi8-_6fXZ2-ifeJ5mgP0OV7DQFzCtIsw,14008
|
|
282
291
|
autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
283
|
-
auto_coder-0.1.
|
|
284
|
-
auto_coder-0.1.
|
|
285
|
-
auto_coder-0.1.
|
|
286
|
-
auto_coder-0.1.
|
|
287
|
-
auto_coder-0.1.
|
|
288
|
-
auto_coder-0.1.
|
|
292
|
+
auto_coder-0.1.350.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
|
|
293
|
+
auto_coder-0.1.350.dist-info/METADATA,sha256=8fjnFqbazsF3HoJi_YEJc5Joz2RSlWV9CN_5a_h_xzw,2728
|
|
294
|
+
auto_coder-0.1.350.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
|
|
295
|
+
auto_coder-0.1.350.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
|
|
296
|
+
auto_coder-0.1.350.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
|
|
297
|
+
auto_coder-0.1.350.dist-info/RECORD,,
|
autocoder/auto_coder_runner.py
CHANGED
|
@@ -2019,9 +2019,9 @@ def manage_models(query: str):
|
|
|
2019
2019
|
/models /add_model name=xxx base_url=xxx ... - Add model with custom params
|
|
2020
2020
|
/models /remove <name> - Remove model by name
|
|
2021
2021
|
"""
|
|
2022
|
-
printer = Printer()
|
|
2023
2022
|
console = Console()
|
|
2024
|
-
|
|
2023
|
+
printer = Printer(console=console)
|
|
2024
|
+
|
|
2025
2025
|
product_mode = memory.get("product_mode", "lite")
|
|
2026
2026
|
if product_mode != "lite":
|
|
2027
2027
|
printer.print_in_terminal("models_lite_only", style="red")
|
|
@@ -2246,7 +2246,7 @@ def manage_models(query: str):
|
|
|
2246
2246
|
printer.print_in_terminal("models_speed_usage", style="red")
|
|
2247
2247
|
|
|
2248
2248
|
elif subcmd == "/speed-test":
|
|
2249
|
-
from autocoder.common.
|
|
2249
|
+
from autocoder.common.model_speed_tester import render_speed_test_in_terminal
|
|
2250
2250
|
test_rounds = 1 # 默认测试轮数
|
|
2251
2251
|
|
|
2252
2252
|
enable_long_context = False
|
|
@@ -2274,7 +2274,7 @@ def manage_models(query: str):
|
|
|
2274
2274
|
|
|
2275
2275
|
elif subcmd == "/add":
|
|
2276
2276
|
# Support both simplified and legacy formats
|
|
2277
|
-
args = query.strip().split(" ")
|
|
2277
|
+
args = query.strip().split(" ")
|
|
2278
2278
|
if len(args) == 2:
|
|
2279
2279
|
# Simplified: /models /add <name> <api_key>
|
|
2280
2280
|
name, api_key = args[0], args[1]
|
|
@@ -2295,9 +2295,10 @@ def manage_models(query: str):
|
|
|
2295
2295
|
}
|
|
2296
2296
|
})
|
|
2297
2297
|
printer.print_in_terminal("models_add_failed", style="red", name=name)
|
|
2298
|
-
else:
|
|
2299
|
-
|
|
2300
|
-
|
|
2298
|
+
else:
|
|
2299
|
+
models_list = "\n".join([m["name"] for m in models_module.default_models_list])
|
|
2300
|
+
printer.print_in_terminal("models_add_usage", style="red", models=models_list)
|
|
2301
|
+
result_manager.add_result(content=printer.get_message_from_key_with_format("models_add_usage",models=models_list),meta={
|
|
2301
2302
|
"action": "models",
|
|
2302
2303
|
"input": {
|
|
2303
2304
|
"query": query
|
|
@@ -2828,8 +2829,11 @@ def auto_command(query: str,extra_args: Dict[str,Any]={}):
|
|
|
2828
2829
|
current_files = memory.get("current_files",{}).get("files",[])
|
|
2829
2830
|
sources = []
|
|
2830
2831
|
for file in current_files:
|
|
2831
|
-
|
|
2832
|
-
|
|
2832
|
+
try:
|
|
2833
|
+
with open(file,"r",encoding="utf-8") as f:
|
|
2834
|
+
sources.append(SourceCode(module_name=file,source_code=f.read()))
|
|
2835
|
+
except Exception as e:
|
|
2836
|
+
global_logger.error(f"Failed to read file {file}: {e}")
|
|
2833
2837
|
|
|
2834
2838
|
llm = get_single_llm(args.code_model or args.model,product_mode=args.product_mode)
|
|
2835
2839
|
conversation_history = extra_args.get("conversations",[])
|
|
@@ -2895,4 +2899,4 @@ def auto_command(query: str,extra_args: Dict[str,Any]={}):
|
|
|
2895
2899
|
title=printer.get_message_from_key_with_format("auto_command_reasoning_title"),
|
|
2896
2900
|
border_style="blue",
|
|
2897
2901
|
padding=(1, 2)
|
|
2898
|
-
))
|
|
2902
|
+
))
|
|
@@ -387,8 +387,8 @@ MESSAGES = {
|
|
|
387
387
|
"zh": "添加模型 '{{name}}' 失败。在默认模型中未找到该模型。"
|
|
388
388
|
},
|
|
389
389
|
"models_add_usage": {
|
|
390
|
-
"en": "Usage: /models /add <name> <api_key>
|
|
391
|
-
"zh": "用法: /models /add <name> <api_key>
|
|
390
|
+
"en": "Usage: /models /add <name> <api_key> \n Available models: \n{{models}}",
|
|
391
|
+
"zh": "用法: /models /add <name> <api_key> \n 可用模型: \n{{models}}"
|
|
392
392
|
},
|
|
393
393
|
"models_add_model_params": {
|
|
394
394
|
"en": "Please provide parameters in key=value format",
|
|
@@ -582,7 +582,9 @@ def get_system_language():
|
|
|
582
582
|
|
|
583
583
|
def get_message(key):
|
|
584
584
|
lang = get_system_language()
|
|
585
|
-
|
|
585
|
+
if key in MESSAGES:
|
|
586
|
+
return MESSAGES[key].get(lang, MESSAGES[key].get("en", ""))
|
|
587
|
+
return ""
|
|
586
588
|
|
|
587
589
|
|
|
588
590
|
def get_message_with_format(msg_key: str, **kwargs):
|
|
@@ -0,0 +1,392 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import byzerllm
|
|
3
|
+
from typing import Dict, Any, List, Optional
|
|
4
|
+
from rich.console import Console
|
|
5
|
+
from rich.table import Table
|
|
6
|
+
from rich.panel import Panel
|
|
7
|
+
from autocoder.common.printer import Printer
|
|
8
|
+
from autocoder import models as models_module
|
|
9
|
+
from autocoder.utils.llms import get_single_llm
|
|
10
|
+
import byzerllm
|
|
11
|
+
import pkg_resources
|
|
12
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
13
|
+
from typing import Dict, List, Tuple
|
|
14
|
+
from pydantic import BaseModel
|
|
15
|
+
|
|
16
|
+
class ModelSpeedTestResult(BaseModel):
|
|
17
|
+
model_name: str
|
|
18
|
+
tokens_per_second: float
|
|
19
|
+
first_token_time: float
|
|
20
|
+
input_tokens_count: float
|
|
21
|
+
generated_tokens_count: float
|
|
22
|
+
input_tokens_cost: float
|
|
23
|
+
generated_tokens_cost: float
|
|
24
|
+
status: str
|
|
25
|
+
error: Optional[str] = None
|
|
26
|
+
|
|
27
|
+
class SpeedTestResults(BaseModel):
|
|
28
|
+
results: List[ModelSpeedTestResult]
|
|
29
|
+
|
|
30
|
+
byzerllm_content = ""
|
|
31
|
+
try:
|
|
32
|
+
byzerllm_conten_path = pkg_resources.resource_filename(
|
|
33
|
+
"autocoder", "data/byzerllm.md"
|
|
34
|
+
)
|
|
35
|
+
with open(byzerllm_conten_path, "r",encoding="utf-8") as f:
|
|
36
|
+
byzerllm_content = f.read()
|
|
37
|
+
except FileNotFoundError:
|
|
38
|
+
pass
|
|
39
|
+
|
|
40
|
+
@byzerllm.prompt()
|
|
41
|
+
def long_context_prompt() -> str:
|
|
42
|
+
'''
|
|
43
|
+
下面是我们提供的一份文档:
|
|
44
|
+
<document>
|
|
45
|
+
{{ content }}
|
|
46
|
+
</document>
|
|
47
|
+
|
|
48
|
+
请根据上述文档,实现用户的需求:
|
|
49
|
+
|
|
50
|
+
<query>
|
|
51
|
+
我想开发一个翻译程序,使用prompt 函数实现。
|
|
52
|
+
</query>
|
|
53
|
+
'''
|
|
54
|
+
return {
|
|
55
|
+
"content": byzerllm_content
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
@byzerllm.prompt()
|
|
59
|
+
def short_context_prompt() -> str:
|
|
60
|
+
'''
|
|
61
|
+
Hello, can you help me test the response speed?
|
|
62
|
+
'''
|
|
63
|
+
return {}
|
|
64
|
+
|
|
65
|
+
def test_model_speed(model_name: str,
|
|
66
|
+
product_mode: str,
|
|
67
|
+
test_rounds: int = 3,
|
|
68
|
+
enable_long_context: bool = False
|
|
69
|
+
) -> Dict[str, Any]:
|
|
70
|
+
from autocoder.models import get_model_by_name
|
|
71
|
+
"""
|
|
72
|
+
测试单个模型的速度
|
|
73
|
+
|
|
74
|
+
Args:
|
|
75
|
+
model_name: 模型名称
|
|
76
|
+
product_mode: 产品模式 (lite/pro)
|
|
77
|
+
test_rounds: 测试轮数
|
|
78
|
+
|
|
79
|
+
Returns:
|
|
80
|
+
Dict包含测试结果:
|
|
81
|
+
- avg_time: 平均响应时间
|
|
82
|
+
- min_time: 最小响应时间
|
|
83
|
+
- max_time: 最大响应时间
|
|
84
|
+
- first_token_time: 首token时间
|
|
85
|
+
- success: 是否测试成功
|
|
86
|
+
- error: 错误信息(如果有)
|
|
87
|
+
"""
|
|
88
|
+
try:
|
|
89
|
+
llm = get_single_llm(model_name, product_mode)
|
|
90
|
+
model_info = get_model_by_name(model_name)
|
|
91
|
+
|
|
92
|
+
times = []
|
|
93
|
+
first_token_times = []
|
|
94
|
+
tokens_per_seconds = []
|
|
95
|
+
input_tokens_counts = []
|
|
96
|
+
generated_tokens_counts = []
|
|
97
|
+
|
|
98
|
+
input_tokens_costs = []
|
|
99
|
+
generated_tokens_costs = []
|
|
100
|
+
|
|
101
|
+
input_tokens_cost_per_m = model_info.get("input_price", 0.0) / 1000000
|
|
102
|
+
output_tokens_cost_per_m = model_info.get("output_price", 0.0) / 1000000
|
|
103
|
+
|
|
104
|
+
test_query = short_context_prompt.prompt()
|
|
105
|
+
if enable_long_context:
|
|
106
|
+
test_query = long_context_prompt.prompt()
|
|
107
|
+
|
|
108
|
+
content = ""
|
|
109
|
+
for _ in range(test_rounds):
|
|
110
|
+
start_time = time.time()
|
|
111
|
+
first_token_received = False
|
|
112
|
+
first_token_time = None
|
|
113
|
+
last_meta = None
|
|
114
|
+
input_tokens_count = 0
|
|
115
|
+
generated_tokens_count = 0
|
|
116
|
+
input_tokens_cost = 0
|
|
117
|
+
generated_tokens_cost = 0
|
|
118
|
+
for chunk,meta in llm.stream_chat_oai(conversations=[{
|
|
119
|
+
"role": "user",
|
|
120
|
+
"content": test_query
|
|
121
|
+
}],delta_mode=True):
|
|
122
|
+
content += chunk
|
|
123
|
+
last_meta = meta
|
|
124
|
+
current_time = time.time()
|
|
125
|
+
if not first_token_received:
|
|
126
|
+
first_token_time = current_time - start_time
|
|
127
|
+
first_token_received = True
|
|
128
|
+
first_token_times.append(first_token_time)
|
|
129
|
+
|
|
130
|
+
end_time = time.time()
|
|
131
|
+
generated_tokens_count = 0
|
|
132
|
+
if last_meta:
|
|
133
|
+
generated_tokens_count = last_meta.generated_tokens_count
|
|
134
|
+
input_tokens_count = last_meta.input_tokens_count
|
|
135
|
+
input_tokens_cost = input_tokens_count * input_tokens_cost_per_m
|
|
136
|
+
generated_tokens_cost = generated_tokens_count * output_tokens_cost_per_m
|
|
137
|
+
|
|
138
|
+
input_tokens_costs.append(input_tokens_cost)
|
|
139
|
+
generated_tokens_costs.append(generated_tokens_cost)
|
|
140
|
+
generated_tokens_counts.append(generated_tokens_count)
|
|
141
|
+
input_tokens_counts.append(input_tokens_count)
|
|
142
|
+
|
|
143
|
+
tokens_per_seconds.append(generated_tokens_count / (end_time - start_time))
|
|
144
|
+
times.append(end_time - start_time)
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
avg_time = sum(times) / len(times)
|
|
148
|
+
return {
|
|
149
|
+
"tokens_per_second": sum(tokens_per_seconds) / len(tokens_per_seconds),
|
|
150
|
+
"avg_time": avg_time,
|
|
151
|
+
"min_time": min(times),
|
|
152
|
+
"max_time": max(times),
|
|
153
|
+
"first_token_time": sum(first_token_times) / len(first_token_times),
|
|
154
|
+
"input_tokens_count": sum(input_tokens_counts) / len(input_tokens_counts),
|
|
155
|
+
"generated_tokens_count": sum(generated_tokens_counts) / len(generated_tokens_counts),
|
|
156
|
+
"success": True,
|
|
157
|
+
"error": None,
|
|
158
|
+
"input_tokens_cost": sum(input_tokens_costs) / len(input_tokens_costs),
|
|
159
|
+
"generated_tokens_cost": sum(generated_tokens_costs) / len(generated_tokens_costs)
|
|
160
|
+
}
|
|
161
|
+
except Exception as e:
|
|
162
|
+
return {
|
|
163
|
+
"tokens_per_second": 0,
|
|
164
|
+
"avg_time": 0,
|
|
165
|
+
"min_time": 0,
|
|
166
|
+
"max_time": 0,
|
|
167
|
+
"first_token_time": 0,
|
|
168
|
+
"input_tokens_count": 0,
|
|
169
|
+
"generated_tokens_count": 0,
|
|
170
|
+
"success": False,
|
|
171
|
+
"error": str(e),
|
|
172
|
+
"input_tokens_cost": 0.0,
|
|
173
|
+
"generated_tokens_cost": 0.0
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
def test_model_speed_wrapper(args: Tuple[str, str, int, bool]) -> Tuple[str, Dict[str, Any]]:
|
|
177
|
+
"""
|
|
178
|
+
包装测试函数以适应线程池调用
|
|
179
|
+
|
|
180
|
+
Args:
|
|
181
|
+
args: (model_name, product_mode, test_rounds)的元组
|
|
182
|
+
|
|
183
|
+
Returns:
|
|
184
|
+
(model_name, test_results)的元组
|
|
185
|
+
"""
|
|
186
|
+
model_name, product_mode, test_rounds,enable_long_context = args
|
|
187
|
+
results = test_model_speed(model_name, product_mode, test_rounds,enable_long_context)
|
|
188
|
+
return (model_name, results)
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def run_speed_test(product_mode: str, test_rounds: int = 3, max_workers: Optional[int] = None, enable_long_context: bool = False) -> SpeedTestResults:
|
|
192
|
+
"""
|
|
193
|
+
运行所有已激活模型的速度测试
|
|
194
|
+
|
|
195
|
+
Args:
|
|
196
|
+
product_mode: 产品模式 (lite/pro)
|
|
197
|
+
test_rounds: 每个模型测试的轮数
|
|
198
|
+
max_workers: 最大线程数,默认为None(ThreadPoolExecutor会自动设置)
|
|
199
|
+
enable_long_context: 是否启用长文本上下文测试
|
|
200
|
+
|
|
201
|
+
Returns:
|
|
202
|
+
SpeedTestResults: 包含所有模型测试结果的pydantic模型
|
|
203
|
+
"""
|
|
204
|
+
# 获取所有模型
|
|
205
|
+
models_data = models_module.load_models()
|
|
206
|
+
active_models = [m for m in models_data if "api_key" in m] if product_mode == "lite" else models_data
|
|
207
|
+
|
|
208
|
+
if not active_models:
|
|
209
|
+
return SpeedTestResults(results=[])
|
|
210
|
+
|
|
211
|
+
# 准备测试参数
|
|
212
|
+
test_args = [(model["name"], product_mode, test_rounds, enable_long_context) for model in active_models]
|
|
213
|
+
|
|
214
|
+
# 存储结果用于排序
|
|
215
|
+
results_list = []
|
|
216
|
+
|
|
217
|
+
# 使用线程池并发测试
|
|
218
|
+
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
|
219
|
+
# 提交所有测试任务并获取future对象
|
|
220
|
+
future_to_model = {executor.submit(test_model_speed_wrapper, args): args[0]
|
|
221
|
+
for args in test_args}
|
|
222
|
+
|
|
223
|
+
# 收集结果
|
|
224
|
+
for future in future_to_model:
|
|
225
|
+
model_name = future_to_model[future]
|
|
226
|
+
|
|
227
|
+
try:
|
|
228
|
+
_, results = future.result()
|
|
229
|
+
|
|
230
|
+
if results["success"]:
|
|
231
|
+
status = "✓"
|
|
232
|
+
results_list.append((
|
|
233
|
+
results['tokens_per_second'],
|
|
234
|
+
ModelSpeedTestResult(
|
|
235
|
+
model_name=model_name,
|
|
236
|
+
tokens_per_second=results['tokens_per_second'],
|
|
237
|
+
first_token_time=results['first_token_time'],
|
|
238
|
+
input_tokens_count=results['input_tokens_count'],
|
|
239
|
+
generated_tokens_count=results['generated_tokens_count'],
|
|
240
|
+
status=status,
|
|
241
|
+
input_tokens_cost=results['input_tokens_cost'],
|
|
242
|
+
generated_tokens_cost=results['generated_tokens_cost'],
|
|
243
|
+
)
|
|
244
|
+
))
|
|
245
|
+
try:
|
|
246
|
+
# 更新模型的平均速度
|
|
247
|
+
models_module.update_model_speed(model_name, results['tokens_per_second'])
|
|
248
|
+
except Exception:
|
|
249
|
+
pass
|
|
250
|
+
else:
|
|
251
|
+
results_list.append((
|
|
252
|
+
0,
|
|
253
|
+
ModelSpeedTestResult(
|
|
254
|
+
model_name=model_name,
|
|
255
|
+
tokens_per_second=0,
|
|
256
|
+
first_token_time=0,
|
|
257
|
+
input_tokens_count=0,
|
|
258
|
+
generated_tokens_count=0,
|
|
259
|
+
status=f"✗ {results['error']}",
|
|
260
|
+
error=results['error'],
|
|
261
|
+
input_tokens_cost=0.0,
|
|
262
|
+
generated_tokens_cost=0.0
|
|
263
|
+
)
|
|
264
|
+
))
|
|
265
|
+
except Exception as e:
|
|
266
|
+
results_list.append((
|
|
267
|
+
0,
|
|
268
|
+
ModelSpeedTestResult(
|
|
269
|
+
model_name=model_name,
|
|
270
|
+
tokens_per_second=0,
|
|
271
|
+
first_token_time=0,
|
|
272
|
+
input_tokens_count=0,
|
|
273
|
+
generated_tokens_count=0,
|
|
274
|
+
status=f"✗ {str(e)}",
|
|
275
|
+
error=str(e),
|
|
276
|
+
input_tokens_cost=0.0,
|
|
277
|
+
generated_tokens_cost=0.0
|
|
278
|
+
)
|
|
279
|
+
))
|
|
280
|
+
|
|
281
|
+
# 按速度排序
|
|
282
|
+
results_list.sort(key=lambda x: x[0], reverse=True)
|
|
283
|
+
|
|
284
|
+
return SpeedTestResults(results=[result[1] for result in results_list])
|
|
285
|
+
|
|
286
|
+
def render_speed_test_in_terminal(product_mode: str, test_rounds: int = 3, max_workers: Optional[int] = None, enable_long_context: bool = False) -> None:
    """
    Run a speed test for every active model and render the results as a table.

    Args:
        product_mode: Product mode ("lite"/"pro"). In "lite" mode only models
            that carry an "api_key" entry are considered active.
        test_rounds: Number of test rounds per model.
        max_workers: Maximum number of worker threads; None lets
            ThreadPoolExecutor choose its default.
        enable_long_context: Passed through to the per-model test wrapper
            (presumably selects a long-context prompt — confirm with
            test_model_speed_wrapper).
    """
    # Function-local import keeps this fix self-contained; as_completed
    # yields futures in finish order so progress reporting is accurate.
    from concurrent.futures import as_completed

    printer = Printer()
    console = Console()

    # Collect the models to test.
    models_data = models_module.load_models()
    active_models = [m for m in models_data if "api_key" in m] if product_mode == "lite" else models_data

    if not active_models:
        printer.print_in_terminal("models_no_active", style="yellow")
        return

    # Result table skeleton.
    table = Table(
        title=printer.get_message_from_key("models_speed_test_results"),
        show_header=True,
        header_style="bold magenta",
        show_lines=True
    )

    table.add_column("Model", style="cyan", width=30)
    table.add_column("Tokens/s", style="green", width=15)
    table.add_column("First Token(s)", style="magenta", width=15)
    table.add_column("Input Tokens", style="magenta", width=15)
    table.add_column("Generated Tokens", style="magenta", width=15)
    table.add_column("Input Tokens Cost", style="yellow", width=15)
    table.add_column("Generated Tokens Cost", style="yellow", width=15)
    table.add_column("Status", style="red", width=20)

    # One argument tuple per model for the worker function.
    test_args = [(model["name"], product_mode, test_rounds, enable_long_context) for model in active_models]

    # (tokens_per_second, model_name, results_dict) triples, sorted later.
    results_list = []

    def _failure_entry(status_message: str) -> dict:
        # Uniform zeroed result row for a failed test; `status` carries the error.
        return {
            "tokens_per_second": 0,
            "avg_time": 0,
            "input_tokens_count": 0,
            "generated_tokens_count": 0,
            "min_time": 0,
            "max_time": 0,
            "first_token_time": 0,
            "input_tokens_cost": 0.0,
            "generated_tokens_cost": 0.0,
            "status": status_message,
        }

    # Run all model tests concurrently.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        printer.print_in_terminal("models_testing_start", style="yellow")

        # Submit all test tasks; map each future back to its model name.
        future_to_model = {executor.submit(test_model_speed_wrapper, args): args[0]
                           for args in test_args}

        # Collect results as they finish.
        completed = 0
        total = len(future_to_model)
        # BUGFIX: iterate in completion order (as_completed) instead of
        # submission order; previously a slow first model stalled progress
        # reporting even when other models had already finished.
        for future in as_completed(future_to_model):
            completed += 1
            printer.print_in_terminal("models_testing_progress", style="yellow", completed=completed, total=total)
            model_name = future_to_model[future]
            printer.print_in_terminal("models_testing", style="yellow", name=model_name)

            try:
                _, results = future.result()

                if results["success"]:
                    results['status'] = "✓"
                    results_list.append((
                        results['tokens_per_second'],
                        model_name,
                        results
                    ))
                    try:
                        # Persist the model's measured average speed;
                        # best-effort only, failures are intentionally ignored.
                        models_module.update_model_speed(model_name, results['tokens_per_second'])
                    except Exception:
                        pass
                else:
                    results_list.append((
                        0,
                        model_name,
                        _failure_entry(f"✗ ({results['error']})")
                    ))
            except Exception as e:
                results_list.append((
                    0,
                    model_name,
                    _failure_entry(f"✗ ({str(e)})")
                ))

    # Sort fastest-first.
    results_list.sort(key=lambda x: x[0], reverse=True)

    # Render the sorted results.
    for tokens_per_second, model_name, results in results_list:
        table.add_row(
            model_name,
            f"{tokens_per_second:.2f}",
            f"{results['first_token_time']:.2f}",
            f"{results['input_tokens_count']}",
            f"{results['generated_tokens_count']}",
            f"{results['input_tokens_cost']:.4f}",
            f"{results['generated_tokens_cost']:.4f}",
            results['status']
        )

    console.print(Panel(table, border_style="blue"))
|