auto-coder 0.1.187__py3-none-any.whl → 0.1.189__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/METADATA +2 -2
- {auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/RECORD +10 -10
- autocoder/auto_coder_rag.py +6 -0
- autocoder/common/__init__.py +1 -0
- autocoder/rag/long_context_rag.py +2 -1
- autocoder/version.py +1 -1
- {auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.187
+Version: 0.1.189
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence

@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.
+Requires-Dist: byzerllm[saas] >=0.1.138
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython
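The only functional change in METADATA is the dependency floor: byzerllm[saas] must now be at least 0.1.138 (the previous lower bound is truncated in this diff view). A quick way to check whether an existing environment already satisfies the new constraint is sketched below; the `packaging` and `importlib.metadata` calls are standard, but treating "byzerllm" as the installed distribution name is an assumption taken from the Requires-Dist line.

```python
# Sketch: check whether the locally installed byzerllm meets the new ">=0.1.138"
# floor declared by auto-coder 0.1.189. Assumes the distribution is installed
# under the name "byzerllm" (as written in the Requires-Dist line above).
from importlib.metadata import version, PackageNotFoundError
from packaging.specifiers import SpecifierSet
from packaging.version import Version

REQUIRED = SpecifierSet(">=0.1.138")

try:
    installed = Version(version("byzerllm"))
    print(f"byzerllm {installed}: {'OK' if installed in REQUIRED else 'needs upgrade'}")
except PackageNotFoundError:
    print("byzerllm is not installed")
```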
{auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/RECORD CHANGED

@@ -1,13 +1,13 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/auto_coder.py,sha256=tSNXFMJSrffagFi4egZJp8XZH9trSCwQjOdXKyHVqwo,37106
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
-autocoder/auto_coder_rag.py,sha256=
+autocoder/auto_coder_rag.py,sha256=GFPp02FE8x14pmmRxIpbkdx5ll-zYyPK3SACjhSeZ8A,16578
 autocoder/auto_coder_server.py,sha256=qRY88mkBnqSGFDcwYE5gwpe2WPhIw1nEH6LdbjCQhQk,20306
 autocoder/chat_auto_coder.py,sha256=1jCx-J83mj_8JnojYSTfPjYide-thbmsFbr12E_kgcQ,81773
 autocoder/chat_auto_coder_lang.py,sha256=QYtu5gWEQmWKVovR_qUZ8plySZarNFX_Onk-1vN9IiA,8524
 autocoder/command_args.py,sha256=ftWw6HnFUZPiQPt1oV-SfpHQe69XN3knaFy1lpROBcU,26854
 autocoder/lang.py,sha256=e-07rYTgimpxS8sm-AxKSmH4kKQX4N05YFHJBg9trVs,12598
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=nsvyU8Y0k8OLDacF-I4MlHVaJQan-Qhexb6JJR0ZyT4,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_tool.py,sha256=DBzip-P_T6ZtT2eHexPcusmKYD0h7ufzp7TLwXAY10E,11554
 autocoder/agent/coder.py,sha256=dnITYHqkcOip8zV4lywbkYNH9w7Q3qyYaUArJ4WPrTs,866

@@ -17,7 +17,7 @@ autocoder/agent/project_reader.py,sha256=-MWRqsr7O4mvU0PIpAhOUBb29htZAvA37pa_GeE
 autocoder/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/JupyterClient.py,sha256=O-wi6pXeAEYhAY24kDa0BINrLYvKS6rKyWe98pDClS0,2816
 autocoder/common/ShellClient.py,sha256=fM1q8t_XMSbLBl2zkCNC2J9xuyKN3eXzGm6hHhqL2WY,2286
-autocoder/common/__init__.py,sha256=
+autocoder/common/__init__.py,sha256=CceR1pHKhPPl-y_vHgjf-p8pe4xgeOI_CTjkUrTN2hM,10510
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089

@@ -62,7 +62,7 @@ autocoder/rag/api_server.py,sha256=zokIlDJlk7ucRorSLQm80uICO1mecfmn4J2zVqEBskE,6
 autocoder/rag/doc_filter.py,sha256=Ha0Yae_G_hF72YzvrO7NoDZcG18K4hRcqGAEqfrIwAs,9330
 autocoder/rag/document_retriever.py,sha256=_jCbCEX0I-5UPWuHocESaWHatQcv1r_DqA0yOfOAiZ0,9092
 autocoder/rag/llm_wrapper.py,sha256=xRbTBpLUH43Ah5jplL8WWWU-kjKfNgEJoUntLGBq5F4,2484
-autocoder/rag/long_context_rag.py,sha256=
+autocoder/rag/long_context_rag.py,sha256=tBWJVW4djvPxikOxOUm4nR_bdor1Lfmoig4-XYZ2xDc,22242
 autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
 autocoder/rag/rag_entry.py,sha256=V1RJ8RGqM30DNPmzymv64rZjNRGWn6kfc8sRy_LECg0,2451
 autocoder/rag/raw_rag.py,sha256=yS2Ur6kG0IRjhCj2_VonwxjY_xls_E62jO5Gz5j2nqE,2952

@@ -101,9 +101,9 @@ autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1
 autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
 autocoder/utils/rest.py,sha256=3tXA8KZG6jKz_tddHNLGx77Icee88WcUeesfNsgPno4,8790
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
+auto_coder-0.1.189.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.189.dist-info/METADATA,sha256=283-6tH5ORiOAVCGzI-j5QXMvamNveZnIxo6UhGzHfE,2352
+auto_coder-0.1.189.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.189.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.189.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.189.dist-info/RECORD,,
autocoder/auto_coder_rag.py CHANGED

@@ -261,6 +261,12 @@ def main(input_args: Optional[List[str]] = None):
         action="store_true",
         help="Enable slow inference without deep thought",
     )
+    serve_parser.add_argument(
+        "--inference_compute_precision",
+        type=int,
+        default=6,
+        help="The precision of the inference compute",
+    )
 
     serve_parser.add_argument(
         "--enable_hybrid_index",
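The hunk above adds a new `--inference_compute_precision` option (an integer, defaulting to 6) to the serve subcommand's argument parser. A self-contained sketch of how such a flag parses is shown below; the `serve` subcommand name is taken from the `serve_parser` variable in the diff, while the `auto-coder.rag` program name is an assumption for illustration, not the project's actual CLI wiring.

```python
# Standalone sketch (not the project's real CLI setup) of the new flag's behavior.
import argparse

parser = argparse.ArgumentParser(prog="auto-coder.rag")  # program name assumed
subparsers = parser.add_subparsers(dest="command")
serve_parser = subparsers.add_parser("serve", help="Start the RAG server")
serve_parser.add_argument(
    "--inference_compute_precision",
    type=int,
    default=6,  # same default the diff introduces
    help="The precision of the inference compute",
)

# Explicit value is converted to int by argparse; omitting the flag yields 6.
args = parser.parse_args(["serve", "--inference_compute_precision", "8"])
print(args.inference_compute_precision)  # -> 8
print(parser.parse_args(["serve"]).inference_compute_precision)  # -> 6
```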
autocoder/common/__init__.py CHANGED

@@ -317,6 +317,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     disable_inference_enhance: Optional[bool] = False
     inference_deep_thought: Optional[bool] = False
     inference_slow_without_deep_thought: Optional[bool] = False
+    inference_compute_precision: int = 6
     without_contexts: Optional[bool] = False
 
     class Config:
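On the configuration side, `AutoCoderArgs` gains an `inference_compute_precision` field with a default of 6. The minimal pydantic sketch below covers only the fields touched in this diff (it is not the real, much larger `AutoCoderArgs` model) and shows how the default and an explicit override behave:

```python
# Minimal sketch of the new setting, assuming pydantic-style config like AutoCoderArgs.
from typing import Optional
import pydantic

class RagArgsSketch(pydantic.BaseModel):  # stand-in, not the real AutoCoderArgs
    disable_inference_enhance: Optional[bool] = False
    inference_deep_thought: Optional[bool] = False
    inference_compute_precision: int = 6  # new field added in 0.1.189

print(RagArgsSketch().inference_compute_precision)                             # -> 6 (default)
print(RagArgsSketch(inference_compute_precision=8).inference_compute_precision)  # -> 8
```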
autocoder/rag/long_context_rag.py CHANGED

@@ -524,11 +524,12 @@ class LongContextRAG:
                 f"Start to send to model {target_model} with {request_tokens} tokens"
             )
 
-            if LLMComputeEngine is not None:
+            if LLMComputeEngine is not None and not self.args.disable_inference_enhance:
                 llm_compute_engine = LLMComputeEngine(
                     llm=self.llm,
                     inference_enhance=not self.args.disable_inference_enhance,
                     inference_deep_thought=self.args.inference_deep_thought,
+                    precision=self.args.inference_compute_precision,
                     debug=False,
                 )
                 new_conversations = llm_compute_engine.process_conversation(
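Two things change in `LongContextRAG`: the compute engine is now skipped entirely when `disable_inference_enhance` is set (previously it was still constructed, just with `inference_enhance=False`), and the new `inference_compute_precision` value is passed through as `precision`. The behavioral sketch below uses a stand-in engine class; the real `LLMComputeEngine` is an external component that also receives the `llm` instance and a `debug` flag, which are omitted here.

```python
# Sketch of the revised guard: build the engine only when the class is available
# AND inference enhancement is not disabled, threading precision through.
# FakeComputeEngine is a stand-in; the real LLMComputeEngine takes more parameters.
from dataclasses import dataclass
from types import SimpleNamespace
from typing import Optional, Type

@dataclass
class FakeComputeEngine:
    inference_enhance: bool
    inference_deep_thought: bool
    precision: int

def build_engine(engine_cls: Optional[Type[FakeComputeEngine]], args) -> Optional[FakeComputeEngine]:
    if engine_cls is not None and not args.disable_inference_enhance:
        return engine_cls(
            inference_enhance=not args.disable_inference_enhance,
            inference_deep_thought=args.inference_deep_thought,
            precision=args.inference_compute_precision,  # new pass-through in 0.1.189
        )
    return None  # enhancement disabled or engine unavailable: fall back to plain inference

args = SimpleNamespace(disable_inference_enhance=False,
                       inference_deep_thought=False,
                       inference_compute_precision=6)
print(build_engine(FakeComputeEngine, args))  # engine with precision=6

args_disabled = SimpleNamespace(disable_inference_enhance=True,
                                inference_deep_thought=False,
                                inference_compute_precision=6)
print(build_engine(FakeComputeEngine, args_disabled))  # -> None, engine skipped
```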
autocoder/version.py CHANGED

@@ -1 +1 @@
-__version__ = "0.1.187"
+__version__ = "0.1.189"
{auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/LICENSE: file without changes
{auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/WHEEL: file without changes
{auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/entry_points.txt: file without changes
{auto_coder-0.1.187.dist-info → auto_coder-0.1.189.dist-info}/top_level.txt: file without changes