auto-coder 0.1.275__py3-none-any.whl → 0.1.277__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic; see the associated advisory for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: auto-coder
3
- Version: 0.1.275
3
+ Version: 0.1.277
4
4
  Summary: AutoCoder: AutoCoder
5
5
  Author: allwefantasy
6
6
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -4,7 +4,7 @@ autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,
4
4
  autocoder/auto_coder_rag.py,sha256=mX-szIG9T7Mzwoc4QwKp_GyYBcVf6dfsNJnKzYHHl6U,30329
5
5
  autocoder/auto_coder_rag_client_mcp.py,sha256=QRxUbjc6A8UmDMQ8lXgZkjgqtq3lgKYeatJbDY6rSo0,6270
6
6
  autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
7
- autocoder/auto_coder_runner.py,sha256=1M74XhbmmJg0z5rC_TjjlPaqyEPfg2z6N7mLN7z8qFw,100773
7
+ autocoder/auto_coder_runner.py,sha256=w-4MCKhOFaoABcDfVoZoonF59UyRso3kghimQYLz3NA,100851
8
8
  autocoder/auto_coder_server.py,sha256=6YQweNEKUrGAZ3yPvw8_qlNZJYLVSVUXGrn1K6udLts,20413
9
9
  autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
10
10
  autocoder/chat_auto_coder.py,sha256=G7_CIgDOTdGGPzRQDo0hEOh5p8A36oJQaYJc514xBkk,16842
@@ -12,7 +12,7 @@ autocoder/chat_auto_coder_lang.py,sha256=ShOQVOnMA-WlT-fB9OrOer-xQkbcWxJGl-WMPuZ
12
12
  autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
13
13
  autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
14
14
  autocoder/models.py,sha256=PlG1tKHSHwB57cKLOl5gTl5yTzFUDzCgeHPJU3N9F6Q,9106
15
- autocoder/version.py,sha256=zCL4o733jNf4GPlcthnHlKI-WRYh8ZEhNOgsrud1D_E,23
15
+ autocoder/version.py,sha256=aQmPFbK421hxX_q_qH6lzGAzBJ-yTN3E_wgJvqVGg9k,23
16
16
  autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
17
  autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
18
18
  autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -111,7 +111,7 @@ autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7
111
111
  autocoder/rag/doc_filter.py,sha256=yEXaBw1XJH57Gtvk4-RFQtd5eawA6SBjzxeRZrIsQew,11623
112
112
  autocoder/rag/document_retriever.py,sha256=5oThtxukGuRFF96o3pHKsk306a8diXbhgSrbqyU2BvM,8894
113
113
  autocoder/rag/llm_wrapper.py,sha256=wf56ofQNOaBkLhnoxK9VoVnHWD0gsj0pP8mUBfS92RI,2737
114
- autocoder/rag/long_context_rag.py,sha256=CzPC-ct6PVIKBkHsKon4s92YXmi8jZOlGgcquOwWQlI,31802
114
+ autocoder/rag/long_context_rag.py,sha256=qFlNmbgQnstCSCb0SxfkMEYtZRr8p6YEc6u0jpve4Q0,32002
115
115
  autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
116
116
  autocoder/rag/rag_entry.py,sha256=6TKtErZ0Us9XSV6HgRKXA6yR3SiZGPHpynOKSaR1wgE,2463
117
117
  autocoder/rag/raw_rag.py,sha256=BOr0YGf3umjqXOIDVO1LXQ0bIHx8hzBdiubND2ezyxc,2946
@@ -165,9 +165,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
165
165
  autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
166
166
  autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=lkJ_A-sYU36JMzjFWkk3pR6uos8oZHYt9GPsPe_CPAo,11766
167
167
  autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
168
- auto_coder-0.1.275.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
169
- auto_coder-0.1.275.dist-info/METADATA,sha256=jbvivKV7U2ukHABw1YawGuUFGastAiiG06xblNioQ5w,2643
170
- auto_coder-0.1.275.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
171
- auto_coder-0.1.275.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
172
- auto_coder-0.1.275.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
173
- auto_coder-0.1.275.dist-info/RECORD,,
168
+ auto_coder-0.1.277.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
169
+ auto_coder-0.1.277.dist-info/METADATA,sha256=m2MjOLFknaEjczW5V_NfTL4jj7bikJe0jbn_tuYRfdQ,2643
170
+ auto_coder-0.1.277.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
171
+ auto_coder-0.1.277.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
172
+ auto_coder-0.1.277.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
173
+ auto_coder-0.1.277.dist-info/RECORD,,
@@ -571,7 +571,10 @@ def load_memory():
571
571
  memory_path = os.path.join(base_persist_dir, "memory.json")
572
572
  if os.path.exists(memory_path):
573
573
  with open(memory_path, "r", encoding="utf-8") as f:
574
- memory = json.load(f)
574
+ _memory = json.load(f)
575
+ # clear memory
576
+ memory.clear()
577
+ memory.update(_memory)
575
578
  completer.update_current_files(memory["current_files"]["files"])
576
579
 
577
580
  def get_memory():
@@ -71,15 +71,21 @@ class LongContextRAG:
71
71
  tokenizer_path: Optional[str] = None,
72
72
  ) -> None:
73
73
  self.llm = llm
74
- self.args = args
75
- if args.product_mode == "pro":
76
- self.index_model = byzerllm.ByzerLLM()
77
- self.index_model.setup_default_model_name(
78
- args.index_model or self.llm.default_model_name
79
- )
80
- else:
81
- self.index_model = self.llm
74
+ self.recall_llm = self.llm
75
+ self.chunk_llm = self.llm
76
+ self.qa_llm = self.llm
77
+
78
+ if self.llm.get_sub_client("qa_model"):
79
+ self.qa_llm = self.llm.get_sub_client("qa_model")
80
+
81
+ if self.llm.get_sub_client("recall_model"):
82
+ self.recall_llm = self.llm.get_sub_client("recall_model")
82
83
 
84
+ if self.llm.get_sub_client("chunk_model"):
85
+ self.chunk_llm = self.llm.get_sub_client("chunk_model")
86
+
87
+ self.args = args
88
+
83
89
  self.path = path
84
90
  self.relevant_score = self.args.rag_doc_filter_relevance or 5
85
91
 
@@ -162,7 +168,7 @@ class LongContextRAG:
162
168
  )
163
169
 
164
170
  self.doc_filter = DocFilter(
165
- self.index_model, self.args, on_ray=self.on_ray, path=self.path
171
+ self.llm, self.args, on_ray=self.on_ray, path=self.path
166
172
  )
167
173
 
168
174
  doc_num = 0
@@ -459,22 +465,23 @@ class LongContextRAG:
459
465
 
460
466
  logger.info(f"Query: {query} only_contexts: {only_contexts}")
461
467
  start_time = time.time()
468
+
462
469
 
463
470
  rag_stat = RAGStat(
464
471
  recall_stat=RecallStat(
465
472
  total_input_tokens=0,
466
473
  total_generated_tokens=0,
467
- model_name=self.llm.default_model_name,
474
+ model_name=self.recall_llm.default_model_name,
468
475
  ),
469
476
  chunk_stat=ChunkStat(
470
477
  total_input_tokens=0,
471
478
  total_generated_tokens=0,
472
- model_name=self.llm.default_model_name,
479
+ model_name=self.chunk_llm.default_model_name,
473
480
  ),
474
481
  answer_stat=AnswerStat(
475
482
  total_input_tokens=0,
476
483
  total_generated_tokens=0,
477
- model_name=self.llm.default_model_name,
484
+ model_name=self.qa_llm.default_model_name,
478
485
  ),
479
486
  )
480
487
 
@@ -624,7 +631,7 @@ class LongContextRAG:
624
631
 
625
632
  # 记录令牌统计
626
633
  request_tokens = sum([doc.tokens for doc in relevant_docs])
627
- target_model = model or self.llm.default_model_name
634
+ target_model = target_llm.default_model_name
628
635
  logger.info(
629
636
  f"=== LLM Request ===\n"
630
637
  f" * Target model: {target_model}\n"
autocoder/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.1.275"
1
+ __version__ = "0.1.277"