auto-coder 0.1.190-py3-none-any.whl → 0.1.192-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic (see the registry listing for details).
- {auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/METADATA +2 -2
- {auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/RECORD +15 -12
- autocoder/auto_coder_rag.py +1 -1
- autocoder/common/__init__.py +1 -1
- autocoder/rag/api_server.py +42 -27
- autocoder/rag/llm_wrapper.py +4 -3
- autocoder/rag/long_context_rag.py +29 -1
- autocoder/rag/stream_event/__init__.py +0 -0
- autocoder/rag/stream_event/event_writer.py +12 -0
- autocoder/rag/stream_event/types.py +16 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.190
+Version: 0.1.192
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.
+Requires-Dist: byzerllm[saas] >=0.1.139
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython
{auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/auto_coder.py,sha256=tSNXFMJSrffagFi4egZJp8XZH9trSCwQjOdXKyHVqwo,37106
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
-autocoder/auto_coder_rag.py,sha256=
+autocoder/auto_coder_rag.py,sha256=H75N9rk5rB1u80oDWsPS_DYc2djQcFLOpjklmlGmag0,16579
 autocoder/auto_coder_server.py,sha256=qRY88mkBnqSGFDcwYE5gwpe2WPhIw1nEH6LdbjCQhQk,20306
 autocoder/chat_auto_coder.py,sha256=1jCx-J83mj_8JnojYSTfPjYide-thbmsFbr12E_kgcQ,81773
 autocoder/chat_auto_coder_lang.py,sha256=QYtu5gWEQmWKVovR_qUZ8plySZarNFX_Onk-1vN9IiA,8524
 autocoder/command_args.py,sha256=ftWw6HnFUZPiQPt1oV-SfpHQe69XN3knaFy1lpROBcU,26854
 autocoder/lang.py,sha256=e-07rYTgimpxS8sm-AxKSmH4kKQX4N05YFHJBg9trVs,12598
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=_O_6jlMqTIlOgOmhHKPoMTN-W0CigzKhwb-eIbBGdIQ,24
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_tool.py,sha256=DBzip-P_T6ZtT2eHexPcusmKYD0h7ufzp7TLwXAY10E,11554
 autocoder/agent/coder.py,sha256=dnITYHqkcOip8zV4lywbkYNH9w7Q3qyYaUArJ4WPrTs,866
@@ -17,7 +17,7 @@ autocoder/agent/project_reader.py,sha256=-MWRqsr7O4mvU0PIpAhOUBb29htZAvA37pa_GeE
 autocoder/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/JupyterClient.py,sha256=O-wi6pXeAEYhAY24kDa0BINrLYvKS6rKyWe98pDClS0,2816
 autocoder/common/ShellClient.py,sha256=fM1q8t_XMSbLBl2zkCNC2J9xuyKN3eXzGm6hHhqL2WY,2286
-autocoder/common/__init__.py,sha256=
+autocoder/common/__init__.py,sha256=7K74EPvyNyZehp5jA4NlMPpmdAkqMPcphcRnkuYYRMg,10511
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
@@ -58,11 +58,11 @@ autocoder/index/index.py,sha256=6uakPXThpDWxAyOAP-7AbMuXaXJJkBKctL5RkNWGdGw,2248
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/pyproject/__init__.py,sha256=-2-ImQVw6e3NQZQOyDlHEP5b4xVs5ur2G5izB-JCa-A,13160
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/rag/api_server.py,sha256=
+autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7288
 autocoder/rag/doc_filter.py,sha256=Ha0Yae_G_hF72YzvrO7NoDZcG18K4hRcqGAEqfrIwAs,9330
 autocoder/rag/document_retriever.py,sha256=_jCbCEX0I-5UPWuHocESaWHatQcv1r_DqA0yOfOAiZ0,9092
-autocoder/rag/llm_wrapper.py,sha256=
-autocoder/rag/long_context_rag.py,sha256=
+autocoder/rag/llm_wrapper.py,sha256=sbDxCANiZyWb_ocqNgqu2oy3c2t8orPNRGleEs-Uwl8,2649
+autocoder/rag/long_context_rag.py,sha256=B9lBeiBF7p5RRRFV8Wv_JtqNTED7hWuBwY766ZPQI1c,23749
 autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
 autocoder/rag/rag_entry.py,sha256=V1RJ8RGqM30DNPmzymv64rZjNRGWn6kfc8sRy_LECg0,2451
 autocoder/rag/raw_rag.py,sha256=yS2Ur6kG0IRjhCj2_VonwxjY_xls_E62jO5Gz5j2nqE,2952
@@ -85,6 +85,9 @@ autocoder/rag/loaders/docx_loader.py,sha256=g6Ta8rMUbfgwB8N1qiajhyO6wpaWl7zygAZi
 autocoder/rag/loaders/excel_loader.py,sha256=Ue8YB1z_kBs8SjIPuBskyM08Q1JiONs_BJZPrzi59oo,896
 autocoder/rag/loaders/pdf_loader.py,sha256=CGfXOja7QZ7mHN-U5MsTiVMFzjP322rTj3dkYlVKKVU,264
 autocoder/rag/loaders/ppt_loader.py,sha256=7VEYc-bqgK8VHCoGC3DIUcqbpda-E5jQF9lYLqP256I,1681
+autocoder/rag/stream_event/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+autocoder/rag/stream_event/event_writer.py,sha256=l7kq_LnDDE8E5dZ-73C7J2MgzSL7WrozdXk0eV-k55Q,409
+autocoder/rag/stream_event/types.py,sha256=rtLwOE8rShmi1dJdxyBpAV5ZjLBGG9vptMiSzMxGuIA,318
 autocoder/regex_project/__init__.py,sha256=EBZeCL5ORyD_9_5u_UuG4s7XtpXOu0y1sWDmxWFtufE,6781
 autocoder/regexproject/__init__.py,sha256=ThuvVFdpw1EgWv4aIRkhg3ZclKPxMVharUKWppFpQ8o,8436
 autocoder/suffixproject/__init__.py,sha256=EaQoumMzZ2COxMiI_GnL3SG4LGzRj0Qw7UpqLfNLCw8,9823
@@ -101,9 +104,9 @@ autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1
 autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
 autocoder/utils/rest.py,sha256=3tXA8KZG6jKz_tddHNLGx77Icee88WcUeesfNsgPno4,8790
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
-auto_coder-0.1.190.dist-info/LICENSE,sha256=
-auto_coder-0.1.190.dist-info/METADATA,sha256=
-auto_coder-0.1.190.dist-info/WHEEL,sha256=
-auto_coder-0.1.190.dist-info/entry_points.txt,sha256=
-auto_coder-0.1.190.dist-info/top_level.txt,sha256=
-auto_coder-0.1.190.dist-info/RECORD,,
+auto_coder-0.1.192.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.192.dist-info/METADATA,sha256=BBKDDDbjH7IKFXSuveBZsDjn3soimiXniATh-8-_Rlk,2352
+auto_coder-0.1.192.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.192.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.192.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.192.dist-info/RECORD,,
autocoder/auto_coder_rag.py
CHANGED
autocoder/common/__init__.py
CHANGED
@@ -317,7 +317,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     disable_inference_enhance: Optional[bool] = False
     inference_deep_thought: Optional[bool] = False
     inference_slow_without_deep_thought: Optional[bool] = False
-    inference_compute_precision: int =
+    inference_compute_precision: int = 64
     without_contexts: Optional[bool] = False

     class Config:
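The hunk above pins inference_compute_precision on the AutoCoderArgs pydantic model to a default of 64. A minimal sketch of how the new default behaves; the fields are copied from the hunk, while the two print lines are illustrative and not part of the package:

import pydantic
from typing import Optional

class AutoCoderArgs(pydantic.BaseModel):
    # trimmed to the fields visible in the hunk above
    inference_slow_without_deep_thought: Optional[bool] = False
    inference_compute_precision: int = 64
    without_contexts: Optional[bool] = False

print(AutoCoderArgs().inference_compute_precision)  # 64 when not overridden
print(AutoCoderArgs(inference_compute_precision=32).inference_compute_precision)  # 32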
autocoder/rag/api_server.py
CHANGED
@@ -19,10 +19,10 @@ from byzerllm.utils.client.entrypoints.openai.protocol import (
     ChatCompletionRequest,
     ErrorResponse,
     CompletionRequest,
-
-
-
-
+    EmbeddingCompletionRequest,
+    EmbeddingResponse,
+    EmbeddingResponseData,
+    UsageInfo,
 )
 from pydantic import BaseModel
 from typing import List,Optional
@@ -122,34 +122,49 @@ async def create_chat_completion(


 @router_app.post("/v1/embeddings")
-async def embed(body:
-    """
-
+async def embed(body: EmbeddingCompletionRequest):
+    """Generate embeddings for given input text.
+
+    Args:
+        body: The embedding request containing input text and parameters.
+
     Returns:
-
+        EmbeddingResponse with embeddings and usage statistics.
     """
     embedding_id = f"embed-{random_uuid()}"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # Handle both string and list inputs
+    inputs = body.input if isinstance(body.input, list) else [body.input]
+
+    # Generate embeddings for each input
+    results_list = []
+    for text in inputs:
+        result = llm_client.emb(body.model, request=LLMRequest(instruction=text))
+        results_list.extend(result)
+
+    # Build response data
+    data = [
+        EmbeddingResponseData(
+            embedding=result.output,
+            index=i,
+            object="embedding"
+        )
+        for i, result in enumerate(results_list)
+    ]
+
+    # Calculate token usage (simplified)
+    token_count = sum(len(str(input).split()) for input in inputs)
+
+    return EmbeddingResponse(
+        data=data,
         model=body.model,
-
-
-
+        object="list",
+        usage=UsageInfo(
+            prompt_tokens=token_count,
+            total_tokens=token_count
        ),
+        created=int(time.time()),
+        id=embedding_id
    )

 class ServerArgs(BaseModel):
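The new /v1/embeddings route mirrors the OpenAI embeddings response shape (data, object, usage). A minimal client sketch, assuming the RAG server is reachable at 127.0.0.1:8000 and serves an embedding model named "emb"; both values are assumptions, not from the diff:

import requests

resp = requests.post(
    "http://127.0.0.1:8000/v1/embeddings",
    json={"model": "emb", "input": ["hello world", "auto-coder rag"]},
)
resp.raise_for_status()
body = resp.json()
for item in body["data"]:
    print(item["index"], len(item["embedding"]))
print(body["usage"])  # token counts from the whitespace-split approximation

Note that the handler accepts either a single string or a list as input, and that usage is a whitespace-split word count rather than a real tokenizer count.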
autocoder/rag/llm_wrapper.py
CHANGED
@@ -9,6 +9,7 @@ from byzerllm.utils.client import LLMResponse
 from byzerllm.utils.types import SingleOutputMeta
 from autocoder.rag.simple_rag import SimpleRAG
 from loguru import logger
+from byzerllm.utils.langutil import asyncfy_with_semaphore

 class LLWrapper:

@@ -31,7 +32,7 @@ class LLWrapper:
         model:Optional[str] = None,
         role_mapping=None,llm_config:Dict[str,Any]={}
     )->Union[List[LLMResponse],List[LLMFunctionCallResponse],List[LLMClassResponse]]:
-        res,contexts = self.rag.stream_chat_oai(conversations)
+        res,contexts = self.rag.stream_chat_oai(conversations,llm_config=llm_config)
         s = "".join(res)
         return [LLMResponse(output=s,metadata={},input="")]

@@ -40,7 +41,7 @@ class LLWrapper:
         role_mapping=None,
         delta_mode=False,
         llm_config:Dict[str,Any]={}):
-        res,contexts = self.rag.stream_chat_oai(conversations)
+        res,contexts = self.rag.stream_chat_oai(conversations,llm_config=llm_config)
         for t in res:
             yield (t,SingleOutputMeta(0,0))

@@ -49,7 +50,7 @@
         role_mapping=None,
         delta_mode=False,
         llm_config:Dict[str,Any]={}):
-        res,contexts = self.rag.stream_chat_oai(conversations)
+        res,contexts = await asyncfy_with_semaphore(lambda: self.rag.stream_chat_oai(conversations,llm_config=llm_config))()
         for t in res:
             yield (t,SingleOutputMeta(0,0))
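The last hunk is the notable one: the async path now routes the blocking stream_chat_oai call through byzerllm's asyncfy_with_semaphore so it can be awaited without stalling the event loop. A rough stand-alone sketch of the same pattern; asyncio.to_thread stands in for asyncfy_with_semaphore here, and blocking_chat is a placeholder, so this is not byzerllm's implementation:

import asyncio
import time

def blocking_chat():
    # placeholder for self.rag.stream_chat_oai(conversations, llm_config=llm_config)
    time.sleep(0.1)
    return iter(["hello ", "world"]), None

async def main():
    # shape used in the diff: res, contexts = await asyncfy_with_semaphore(lambda: ...)()
    res, contexts = await asyncio.to_thread(blocking_chat)
    for t in res:
        print(t)

asyncio.run(main())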
autocoder/rag/long_context_rag.py
CHANGED
@@ -29,9 +29,14 @@ from autocoder.rag.token_counter import RemoteTokenCounter, TokenCounter
 from autocoder.rag.token_limiter import TokenLimiter
 from tokenizers import Tokenizer
 from autocoder.rag.variable_holder import VariableHolder
+from importlib.metadata import version
+from autocoder.rag.stream_event import event_writer

-try:
+try:
     from autocoder_pro.rag.llm_compute import LLMComputeEngine
+    pro_version = version("auto-coder-pro")
+    autocoder_version = version("auto-coder")
+    logger.warning(f"auto-coder-pro({pro_version}) plugin is enabled in auto-coder.rag({autocoder_version})")
 except ImportError:
     logger.warning("Please install auto-coder-pro to enhance llm compute ability")
     LLMComputeEngine = None
@@ -342,6 +347,29 @@ class LongContextRAG:
                 delta_mode=True,
             )
             return (chunk[0] for chunk in chunks), context
+
+        try:
+            request_params = json.loads(query)
+            if "request_id" in request_params:
+                request_id = request_params["request_id"]
+                index = request_params["index"]
+
+                file_path = event_writer.get_event_file_path(request_id)
+                logger.info(f"Get events for request_id: {request_id} index: {index} file_path: {file_path}")
+                events = []
+                if not os.path.exists(file_path):
+                    return [],context
+
+                with open(file_path, "r") as f:
+                    for line in f:
+                        event = json.loads(line)
+                        if event["index"] >= index:
+                            events.append(event)
+                return [json.dumps({
+                    "events": [event for event in events],
+                },ensure_ascii=False)], context
+        except json.JSONDecodeError:
+            pass

         if self.args.without_contexts and LLMComputeEngine is not None:
             llm_compute_engine = LLMComputeEngine(
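The second hunk turns stream_chat_oai into an event-replay endpoint as well: when the incoming query parses as JSON and contains a request_id, it returns the buffered events whose index is at or past the requested one instead of running retrieval. A self-contained mirror of that replay logic, useful for reading the same JSONL buffer directly; the request id is made up:

import json
import os

def read_events(request_id: str, index: int, base_path: str = "events"):
    # client-side mirror of the replay branch in the hunk above
    file_path = os.path.join(base_path, f"{request_id}.jsonl")
    if not os.path.exists(file_path):
        return []
    events = []
    with open(file_path, "r") as f:
        for line in f:
            event = json.loads(line)
            if event["index"] >= index:
                events.append(event)
    return events

print(read_events("req-123", 0))  # [] until events/req-123.jsonl exists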
autocoder/rag/stream_event/__init__.py
ADDED
File without changes
autocoder/rag/stream_event/event_writer.py
ADDED
@@ -0,0 +1,12 @@
+import os
+from autocoder.rag.stream_event.types import Event
+
+def write_event(event: Event,base_path: str="events"):
+    os.makedirs(base_path, exist_ok=True)
+    with open(f"{base_path}/{event.request_id}.jsonl", "a") as f:
+        f.write(event.model_dump_json() + "\n")
+
+def get_event_file_path(request_id: str,base_path: str="events") -> str:
+    return f"{base_path}/{request_id}.jsonl"
+
+
autocoder/rag/stream_event/types.py
ADDED
@@ -0,0 +1,16 @@
+from typing import List, Dict, Any
+from enum import Enum
+from pydantic import BaseModel
+
+class EventType(Enum):
+    START = "start"
+    THOUGHT = "thought"
+    CHUNK = "chunk"
+    DONE = "done"
+    ERROR = "error"
+
+class Event(BaseModel):
+    request_id: str
+    event_type: EventType
+    content: str
+    index: int
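Taken together, the two new modules give a tiny append-only event log: write_event appends one pydantic-serialized JSON object per line to events/<request_id>.jsonl, and get_event_file_path locates that file. A short usage sketch using only the functions and models shown above; the request id and content are made up:

from autocoder.rag.stream_event import event_writer
from autocoder.rag.stream_event.types import Event, EventType

event_writer.write_event(Event(
    request_id="req-123",
    event_type=EventType.CHUNK,
    content="partial answer",
    index=0,
))
print(event_writer.get_event_file_path("req-123"))  # events/req-123.jsonl

Each line in the file then looks roughly like {"request_id":"req-123","event_type":"chunk","content":"partial answer","index":0}, since pydantic's model_dump_json serializes the EventType enum by value.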
autocoder/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.1.190"
+__version__ = "0.1.192"
{auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/LICENSE
File without changes
{auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/WHEEL
File without changes
{auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/entry_points.txt
File without changes
{auto_coder-0.1.190.dist-info → auto_coder-0.1.192.dist-info}/top_level.txt
File without changes