auto-coder 0.1.213__py3-none-any.whl → 0.1.215__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in a supported public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of auto-coder might be problematic.

{auto_coder-0.1.213.dist-info → auto_coder-0.1.215.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.213
+Version: 0.1.215
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.143
+Requires-Dist: byzerllm[saas] >=0.1.144
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython
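
The only dependency change is the byzerllm[saas] floor moving from 0.1.143 to 0.1.144. A minimal sketch for verifying what pip actually resolved after upgrading (importlib.metadata is standard library; this assumes the packaging library is installed):

from importlib.metadata import version
from packaging.version import Version

# The updated METADATA requires byzerllm[saas] >= 0.1.144; Version() gives
# correct semantic ordering, unlike a plain string comparison.
assert Version(version("byzerllm")) >= Version("0.1.144")
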
{auto_coder-0.1.213.dist-info → auto_coder-0.1.215.dist-info}/RECORD CHANGED
@@ -4,11 +4,11 @@ autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,
 autocoder/auto_coder_rag.py,sha256=hU7NmCUIM__918p1RbGFlJKPVEqdr4NXjD6b1dBgEVU,21743
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=WhaS6uiG5lIBrnif_Sa_M4aTBMTiVn8-eGhlDWjtdBc,91548
-autocoder/chat_auto_coder_lang.py,sha256=zU9VRY-l80fZnLJ0Op8A3wq27UhQHh9WcpSYU4SmnqU,8708
+autocoder/chat_auto_coder.py,sha256=xg8o5Zh3Uhc9Cq4TeR2bCcyU7LiwSf8X0izyvQZyi8c,90725
+autocoder/chat_auto_coder_lang.py,sha256=ReWukXKVvuzVvpbYk5O9kc1ev7XNmAv3DnuQhmpLmnc,8717
 autocoder/command_args.py,sha256=BpMbPceBzjCftPB0yOVsSmTmt61xS3gtc1WGKtcDHQs,30449
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
-autocoder/version.py,sha256=V-hdG0k1-v5-WK9KQgzySDX22RDvdwG4vsIdj8042Cg,24
+autocoder/version.py,sha256=IihUZg8cv9NkoKSCxqfNNkFfexYf5wMkv_LXnxQC11Q,24
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -50,7 +50,7 @@ autocoder/common/mcp_server.py,sha256=QZRr3visIiRUcCfQqVuGngPh9yYlAq0BWijHLpbFoh
 autocoder/common/mcp_tools.py,sha256=dun9P9txSxSvDe7z3EUXTsV8Gx1RXMAJpsSPqI-D4ok,29546
 autocoder/common/recall_validation.py,sha256=Avt9Q9dX3kG6Pf2zsdlOHmsjd-OeSj7U1PFBDp_Cve0,1700
 autocoder/common/screenshots.py,sha256=_gA-z1HxGjPShBrtgkdideq58MG6rqFB2qMUJKjrycs,3769
-autocoder/common/search.py,sha256=_ZX03ph89rDPGMY1OrfqaDfxsDR-flh6YEHixherjwM,16616
+autocoder/common/search.py,sha256=245iPFgWhMldoUK3CqCP89ltaxZiNPK73evoG6Fp1h8,16518
 autocoder/common/search_replace.py,sha256=GphFkc57Hb673CAwmbiocqTbw8vrV7TrZxtOhD0332g,22147
 autocoder/common/sys_prompt.py,sha256=JlexfjZt554faqbgkCmzOJqYUzDHfbnxly5ugFfHfEE,26403
 autocoder/common/text.py,sha256=KGRQq314GHBmY4MWG8ossRoQi1_DTotvhxchpn78c-k,1003
@@ -119,9 +119,9 @@ autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1
 autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
 autocoder/utils/rest.py,sha256=HawagAap3wMIDROGhY1730zSZrJR_EycODAA5qOj83c,8807
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
-auto_coder-0.1.213.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.213.dist-info/METADATA,sha256=7JrELvSZVoZAD3daumw7vNKywrDnYZaqtw8QkuFczEE,2590
-auto_coder-0.1.213.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.213.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.213.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.213.dist-info/RECORD,,
+auto_coder-0.1.215.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.215.dist-info/METADATA,sha256=sgkxUJzd2NzfmzIMHy0IUQ26s5voTnyDMNuU6ri6OrU,2590
+auto_coder-0.1.215.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.215.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.215.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.215.dist-info/RECORD,,
autocoder/chat_auto_coder.py CHANGED
@@ -314,54 +314,29 @@ def initialize_system():
     except subprocess.CalledProcessError:
         print_status(get_message("model_error"), "error")
 
-    # If deepseek_chat is not available, prompt user to choose a provider
-    print_status(get_message("model_not_available"), "warning")
-    choice = radiolist_dialog(
-        title=get_message("provider_selection"),
-        text=get_message("provider_selection"),
-        values=[
-            ("1", "硅基流动(https://siliconflow.cn)"),
-            ("2", "Deepseek官方(https://www.deepseek.com/)"),
-        ],
-    ).run()
-
-    if choice is None:
-        print_status(get_message("no_provider"), "error")
-        return
-
+    # If deepseek_chat is not available
+    print_status(get_message("model_not_available"), "warning")
     api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
-
-    if choice == "1":
-        print_status(get_message("deploying_model").format("硅基流动"), "")
-        deploy_cmd = [
-            "easy-byzerllm",
-            "deploy",
-            "deepseek-ai/deepseek-v2-chat",
-            "--token",
-            api_key,
-            "--alias",
-            "deepseek_chat",
-        ]
-    else:
-        print_status(get_message("deploying_model").format("Deepseek官方"), "")
-        deploy_cmd = [
-            "byzerllm",
-            "deploy",
-            "--pretrained_model_type",
-            "saas/openai",
-            "--cpus_per_worker",
-            "0.001",
-            "--gpus_per_worker",
-            "0",
-            "--worker_concurrency",
-            "1000",
-            "--num_workers",
-            "1",
-            "--infer_params",
-            f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
-            "--model",
-            "deepseek_chat",
-        ]
+
+    print_status(get_message("deploying_model").format("Deepseek官方"), "")
+    deploy_cmd = [
+        "byzerllm",
+        "deploy",
+        "--pretrained_model_type",
+        "saas/openai",
+        "--cpus_per_worker",
+        "0.001",
+        "--gpus_per_worker",
+        "0",
+        "--worker_concurrency",
+        "1000",
+        "--num_workers",
+        "1",
+        "--infer_params",
+        f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
+        "--model",
+        "deepseek_chat",
+    ]
 
     try:
         subprocess.run(deploy_cmd, check=True)
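
The rewritten initialize_system drops the radiolist_dialog provider picker and always deploys deepseek_chat through the byzerllm CLI. A minimal sketch of using the model once that deploy command has run, assuming a reachable Ray cluster (connect_cluster, ByzerLLM, and setup_default_model_name are byzerllm's standard client entry points; the prompt text is illustrative):

import byzerllm

# Assumes the `byzerllm deploy ... --model deepseek_chat` step above succeeded.
byzerllm.connect_cluster()
llm = byzerllm.ByzerLLM()
llm.setup_default_model_name("deepseek_chat")

# chat_oai mirrors the call style used elsewhere in this package (see the
# search.py hunk below); r[0].output holds the model's reply text.
r = llm.chat_oai(conversations=[{"role": "user", "content": "hello"}])
print(r[0].output)
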
autocoder/chat_auto_coder_lang.py CHANGED
@@ -20,10 +20,10 @@ MESSAGES = {
     "model_available": "deepseek_chat model is available.",
     "model_timeout": "Command timed out. deepseek_chat model might not be available.",
     "model_error": "Error occurred while checking deepseek_chat model.",
-    "model_not_available": "deepseek_chat model is not available. Please choose a provider:",
+    "model_not_available": "deepseek_chat model is not available.",
     "provider_selection": "Select a provider for deepseek_chat model:",
     "no_provider": "No provider selected. Exiting initialization.",
-    "enter_api_key": "Please enter your API key: ",
+    "enter_api_key": "Please enter your API key(https://www.deepseek.com/): ",
     "deploying_model": "Deploying deepseek_chat model using {}...",
     "deploy_complete": "Deployment completed.",
     "deploy_fail": "Deployment failed. Please try again or deploy manually.",
@@ -86,10 +86,10 @@ MESSAGES = {
     "model_available": "deepseek_chat模型可用。",
     "model_timeout": "命令超时。deepseek_chat模型可能不可用。",
     "model_error": "检查deepseek_chat模型时出错。",
-    "model_not_available": "deepseek_chat模型不可用。请选择一个提供商:",
+    "model_not_available": "deepseek_chat模型不可用。",
     "provider_selection": "为deepseek_chat模型选择一个提供商:",
     "no_provider": "未选择提供商。退出初始化。",
-    "enter_api_key": "请输入您的API密钥:",
+    "enter_api_key": "请输入您的API密钥(https://www.deepseek.com/):",
     "deploying_model": "正在使用{}部署deepseek_chat模型...",
     "deploy_complete": "部署完成。",
     "deploy_fail": "部署失败。请重试或手动部署。",
autocoder/common/search.py CHANGED
@@ -6,7 +6,7 @@ from pydantic import BaseModel,Field
 import requests
 from enum import Enum
 import byzerllm
-from langchain_core.prompts import PromptTemplate
+from byzerllm.utils import format_str_jinja2
 from autocoder.utils.rest import HttpDoc
 
 # Search engine related. You don't really need to change this.
@@ -51,10 +51,7 @@ def llm_rerank(llm:byzerllm.ByzerLLM,query:str,docs:List[str],top_k:int=1):
         "{context_str}\n"
         "Question: {query_str}\n"
         "Answer:\n"
-    )
-    DEFAULT_CHOICE_SELECT_PROMPT = PromptTemplate.from_template(
-        DEFAULT_CHOICE_SELECT_PROMPT_TMPL
-    )
+    )
 
     context_str = ""
     for i,metric in enumerate(docs):
@@ -64,7 +61,7 @@ def llm_rerank(llm:byzerllm.ByzerLLM,query:str,docs:List[str],top_k:int=1):
 
     r = llm.chat_oai(conversations=[{
         "role": "user",
-        "content": DEFAULT_CHOICE_SELECT_PROMPT.format(context_str=context_str,query_str=query_str)
+        "content": format_str_jinja2(DEFAULT_CHOICE_SELECT_PROMPT_TMPL,context_str=context_str,query_str=query_str)
     }])
 
     r = llm.chat_oai(conversations=[{
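
The rerank prompt is now rendered with byzerllm's format_str_jinja2 instead of langchain's PromptTemplate. Its implementation is not shown in this diff; a plausible stand-in, assuming it renders a Jinja2 template string with keyword arguments:

from jinja2 import Template

def format_str_jinja2(template: str, **kwargs) -> str:
    # Assumed behavior of byzerllm.utils.format_str_jinja2: render the string
    # as a Jinja2 template. Note that Jinja2 substitutes {{ name }} placeholders,
    # so single-brace {name} markers like those in the template above pass
    # through unchanged.
    return Template(template).render(**kwargs)

print(format_str_jinja2("Question: {{ query_str }}", query_str="What changed?"))
# Question: What changed?
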
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.213"
+__version__ = "0.1.215"
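
A quick check of which release is installed, using the __version__ attribute this module defines:

from autocoder.version import __version__

print(__version__)  # prints "0.1.215" once the new wheel is installed
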