auto-coder 0.1.292-py3-none-any.whl → 0.1.293-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

auto_coder-0.1.293.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.292
+Version: 0.1.293
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
auto_coder-0.1.293.dist-info/RECORD CHANGED
@@ -5,14 +5,14 @@ autocoder/auto_coder_rag.py,sha256=yhwRh_TJZyBxBCmUusZ8h5guU42i0Z6UJ10mT0FH3Rc,3
 autocoder/auto_coder_rag_client_mcp.py,sha256=QRxUbjc6A8UmDMQ8lXgZkjgqtq3lgKYeatJbDY6rSo0,6270
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_runner.py,sha256=w-4MCKhOFaoABcDfVoZoonF59UyRso3kghimQYLz3NA,100851
-autocoder/auto_coder_server.py,sha256=6YQweNEKUrGAZ3yPvw8_qlNZJYLVSVUXGrn1K6udLts,20413
+autocoder/auto_coder_server.py,sha256=E3Z829TPSooRSNhuh3_x9yaZi0f5G0Lm0ntoZhjGaoQ,20576
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
 autocoder/chat_auto_coder.py,sha256=z_Kqd7CAecuNMa77kJn7iko2zTdko-4-o72a58H-_s8,24655
 autocoder/chat_auto_coder_lang.py,sha256=CjsiJsUaWr-TJBCDDlDNnFpCDTd-itJhd9aid9DKlp8,20542
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=AyoZ-Pzy0oyYUmWCxOIRiOImsqboSfRET7LO9-UOuxI,11172
-autocoder/version.py,sha256=sjI-QR-PLjX5n0ZliSvh27lLvGZWTK4_QJQ5TCemqkU,23
+autocoder/version.py,sha256=uJLvEc9fkxd409iL_wj7Xexi0uD8yIeEHv4m5yx5T6E,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -66,7 +66,7 @@ autocoder/common/image_to_page.py,sha256=yWiTJQ49Lm3j0FngiJhQ9u7qayqE_bOGb8Rk0Tm
 autocoder/common/index_import_export.py,sha256=h758AYY1df6JMTKUXYmMkSgxItfymDt82XT7O-ygEuw,4565
 autocoder/common/interpreter.py,sha256=62-dIakOunYB4yjmX8SHC0Gdy2h8NtxdgbpdqRZJ5vk,2833
 autocoder/common/llm_rerank.py,sha256=FbvtCzaR661Mt2wn0qsuiEL1Y3puD6jeIJS4zg_e7Bs,3260
-autocoder/common/mcp_hub.py,sha256=RPp7bnW6ij2EmBJMg2a5TN3U9G4oX_gH_vQKsIg7t40,14934
+autocoder/common/mcp_hub.py,sha256=ymy580rkv8kFx2zwQFpMg03s9K8KWsJP3dkfjoYbWSU,16573
 autocoder/common/mcp_server.py,sha256=gKaQDQWeRZgHtR9UnuxHVgVbo0acrT9qA1kwtgDpHZU,16551
 autocoder/common/mcp_tools.py,sha256=KsLvRrB6pvmebqd-lDaSH6IBJR0AIxWRE-dtCEG_w9k,12485
 autocoder/common/memory_manager.py,sha256=2ZjYG7BPyvbYalZBF6AM_G5e10Qkw_zrqtD4Zd7GSsQ,3663
@@ -84,6 +84,7 @@ autocoder/common/text.py,sha256=KGRQq314GHBmY4MWG8ossRoQi1_DTotvhxchpn78c-k,1003
 autocoder/common/types.py,sha256=PXTETrsTvhLE49jqAeUKGySvxBN9pjeyCgRHLDYdd9U,664
 autocoder/common/utils_code_auto_generate.py,sha256=oiBjdCgdcQErfhMozFdHxkU84WmDo2euBA86yezha-g,3597
 autocoder/common/mcp_servers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+autocoder/common/mcp_servers/mcp_server_gpt4o_mini_search.py,sha256=z-c3zq0YT7wK2XK2t-tDxdXFTUtCFDfvyGTYaYwRgtM,5661
 autocoder/common/mcp_servers/mcp_server_perplexity.py,sha256=IXTyMpd1CQcBLzVinA-_OIOHoNmbzvuW6pXIadaKHJE,5533
 autocoder/data/byzerllm.md,sha256=SGCMpEaUQ0ysPxQsgzyyp5sgvEr8dZsxEGAfVcPBIq0,47741
 autocoder/data/tokenizer.json,sha256=7Lb5_DaYlDRvBRH0B0ynXO5c1fOwbQLxujX805-OEh0,7847602
@@ -115,13 +116,13 @@ autocoder/privacy/__init__.py,sha256=LnIVvGu_K66zCE-yhN_-dPO8R80pQyedCsXJ7wRqQaI
 autocoder/privacy/model_filter.py,sha256=-N9ZvxxDKpxU7hkn-tKv-QHyXjvkCopUaKgvJwTOGQs,3369
 autocoder/pyproject/__init__.py,sha256=ms-A_pocgGv0oZPEW8JAdXi7G-VSVhkQ6CnWFe535Ec,14477
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/rag/api_server.py,sha256=6daDd5tF_Z69cogl-nz-8ogvtKn-BNUdnnpXXxFK0uo,9419
+autocoder/rag/api_server.py,sha256=gsk450_B-qGtBwJ1niG9-QFJAG0RGr2s2KdiMrzzbyQ,9582
 autocoder/rag/conversation_to_queries.py,sha256=xwmErn4WbdADnhK1me-h_6fV3KYrl_y1qPNQl1aoI6o,4810
 autocoder/rag/doc_filter.py,sha256=UduVO2mlrngwJICrefjDJTYfdmQ4GcRXrfWDQ7xXksk,14206
 autocoder/rag/document_retriever.py,sha256=5BDqKVJqLPScEnua5S5suXhWuCaALIfPf5obXeJoWfs,8461
 autocoder/rag/lang.py,sha256=_jmUtxZDG1fmF4b2mhMJbYS1YQDb2ZE8nyAn5_vrvjA,3350
 autocoder/rag/llm_wrapper.py,sha256=Ht5GF5yJtrztoliujsZzx_ooWZmHkd5xLZKcGEiicZw,4303
-autocoder/rag/long_context_rag.py,sha256=THQakGbrr-kOn8Mu4PdJDMiiPq02FNZxZZUM8Du2YCw,41848
+autocoder/rag/long_context_rag.py,sha256=6rqq0pvYe9N4TvyLwd2OB21ZUrPC4FfxZuks0weAz4A,41935
 autocoder/rag/qa_conversation_strategy.py,sha256=_BFdgit2KkUkW_82jE67QLYS_d8BsGhU1pG73YhHJgE,5744
 autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
 autocoder/rag/rag_entry.py,sha256=6TKtErZ0Us9XSV6HgRKXA6yR3SiZGPHpynOKSaR1wgE,2463
@@ -182,9 +183,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=lkJ_A-sYU36JMzjFWkk3pR6uos8oZHYt9GPsPe_CPAo,11766
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.292.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.292.dist-info/METADATA,sha256=MaOhzuRjPEeOIq-Myi-tG7yv_BEHkjTLVtjgsq62PpA,2665
-auto_coder-0.1.292.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.292.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.292.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.292.dist-info/RECORD,,
+auto_coder-0.1.293.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.293.dist-info/METADATA,sha256=wM3jXJXkDW9JVdM0Oy1EVukWKunNk2NKrPjRn658wK4,2665
+auto_coder-0.1.293.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.293.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.293.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.293.dist-info/RECORD,,
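
Note: each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the unpadded urlsafe base64 of the file's SHA-256 (PEP 376). A sketch for recomputing one entry from an unpacked wheel; the path is just one of the rows above:

    import base64
    import hashlib

    path = "autocoder/version.py"  # example row from the RECORD above
    data = open(path, "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    print(f"{path},sha256={digest.decode()},{len(data)}")
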
autocoder/auto_coder_server.py CHANGED
@@ -34,6 +34,14 @@ import sys
 import io
 from autocoder.utils.log_capture import LogCapture
 
+# If dotenv is available, use it
+if os.path.exists(".env"):
+    try:
+        from dotenv import load_dotenv
+        load_dotenv()
+    except ImportError:
+        pass
+
 def convert_yaml_config_to_str(yaml_config):
     yaml_content = yaml.safe_dump(
         yaml_config,
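
Note: this hunk (and the matching one added to autocoder/rag/api_server.py further down) loads a local .env file only when python-dotenv happens to be installed, and silently skips it otherwise. A minimal sketch of the effect, with a placeholder key (OPENAI_API_KEY is the variable the new search server reads):

    import os
    from dotenv import load_dotenv  # pip install python-dotenv

    # With a .env in the working directory containing, e.g.:
    #   OPENAI_API_KEY=sk-placeholder
    load_dotenv()  # no-op when .env is absent
    print(os.environ.get("OPENAI_API_KEY"))  # value is now visible to the process
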
autocoder/common/mcp_hub.py CHANGED
@@ -2,8 +2,12 @@ import os
 import json
 import asyncio
 import aiohttp
+import importlib
+import pkgutil
+import re
+import inspect
 from datetime import datetime, timedelta
-from typing import Dict, List, Optional, Any, Set, Optional
+from typing import Dict, List, Optional, Any, Set, Optional, Tuple
 from pathlib import Path
 from pydantic import BaseModel, Field
 
@@ -62,23 +66,53 @@ class McpConnection:
         self.session = session
 
 
-MCP_PERPLEXITY_SERVER = '''
-{
-    "perplexity": {
-        "command": "python",
-        "args": [
-            "-m", "autocoder.common.mcp_servers.mcp_server_perplexity"
-        ],
-        "env": {
-            "PERPLEXITY_API_KEY": "{{PERPLEXITY_API_KEY}}"
-        }
-    }
-}
-'''
-
-MCP_BUILD_IN_SERVERS = {
-    "perplexity": json.loads(MCP_PERPLEXITY_SERVER)["perplexity"]
-}
+def _generate_server_configs() -> Tuple[Dict[str, Any], Dict[str, str]]:
+    """
+    Scan the autocoder.common.mcp_servers directory for mcp_server_*.py files
+    and generate server configurations.
+
+    Returns:
+        Tuple of (built-in servers dict, JSON templates dict)
+    """
+    servers = {}
+    templates = {}
+
+    try:
+        package_name = "autocoder.common.mcp_servers"
+        package = importlib.import_module(package_name)
+
+        # Find all modules in the package
+        for _, name, _ in pkgutil.iter_modules(package.__path__, package.__name__ + "."):
+            # Only process modules that start with "mcp_server_"
+            base_name = name.split(".")[-1]
+            if base_name.startswith("mcp_server_"):
+                # Generate a friendly server name
+                friendly_name = base_name[11:]
+
+                # Create env dictionary with placeholders
+                env_dict = {}
+
+                # Create server configuration
+                config = {
+                    "command": "python",
+                    "args": ["-m", name],
+                    "env": env_dict
+                }
+
+                # Store in dictionaries
+                servers[friendly_name] = config
+                templates[friendly_name] = json.dumps({friendly_name: config}, indent=4)
+
+                logger.info(f"Detected MCP server: {friendly_name}")
+
+    except Exception as e:
+        logger.error(f"Error generating server configs: {e}")
+
+    return servers, templates
+
+
+# Automatically generate server configurations
+MCP_BUILD_IN_SERVERS, MCP_SERVER_TEMPLATES = _generate_server_configs()
 
 
 class McpHub:
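
Note: unlike the removed perplexity template, which hard-coded a {{PERPLEXITY_API_KEY}} placeholder in env, the generated configs ship an empty env dict, so API keys now have to come from the process environment (which the new .env loading supports). Based on the scanning code above, the two bundled modules should produce configurations equivalent to this sketch (derived by hand, not captured output):

    # Expected shape of MCP_BUILD_IN_SERVERS once the scan finds the two
    # bundled modules; the "mcp_server_" prefix (11 chars) is stripped.
    {
        "perplexity": {
            "command": "python",
            "args": ["-m", "autocoder.common.mcp_servers.mcp_server_perplexity"],
            "env": {},
        },
        "gpt4o_mini_search": {
            "command": "python",
            "args": ["-m", "autocoder.common.mcp_servers.mcp_server_gpt4o_mini_search"],
            "env": {},
        },
    }
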
@@ -422,3 +456,10 @@ class McpHub:
         """
         for name in list(self.connections.keys()):
             await self.delete_connection(name)
+
+    @classmethod
+    def get_server_templates(cls) -> Dict[str, str]:
+        """
+        Get all available server templates as JSON strings
+        """
+        return MCP_SERVER_TEMPLATES
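
Note: a minimal usage sketch for the new classmethod, assuming McpHub is imported from autocoder.common.mcp_hub as listed in the RECORD above:

    from autocoder.common.mcp_hub import McpHub

    # Each value is the pretty-printed JSON template generated above,
    # keyed by the friendly server name ("perplexity", "gpt4o_mini_search")
    for name, template in McpHub.get_server_templates().items():
        print(name)
        print(template)
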
autocoder/common/mcp_servers/mcp_server_gpt4o_mini_search.py ADDED
@@ -0,0 +1,153 @@
+from os import getenv
+from textwrap import dedent
+import sys
+
+import mcp.server.stdio
+import mcp.types as types
+from mcp.server import NotificationOptions, Server
+from mcp.server.models import InitializationOptions
+import json
+from openai import OpenAI
+
+OPENAI_API_KEY = getenv("OPENAI_API_KEY")
+# Check if API key is empty or None
+if not OPENAI_API_KEY:
+    print("Error: OPENAI_API_KEY environment variable is not set. Please set it before running this server.", file=sys.stderr)
+    sys.exit(1)
+
+OPENAI_API_BASE_URL = getenv(
+    "OPENAI_API_BASE_URL", "https://api.openai.com/v1")
+
+server = Server("mcp-server-gpt4o-mini-search")
+
+client = OpenAI(
+    api_key=OPENAI_API_KEY,
+    base_url=OPENAI_API_BASE_URL
+)
+
+
+@server.list_tools()
+async def handle_list_tools() -> list[types.Tool]:
+    return [
+        types.Tool(
+            name="gpt4o_mini_search",
+            description=dedent(
+                """
+                GPT-4o mini with search enables agents to gather information from the internet
+                in real-time, providing up-to-date answers with source citations.
+                This tool is ideal for fact-checking, research, and accessing current information
+                that might not be in the model's training data.
+
+                The search-enhanced responses include relevant web sources to support the information
+                provided, making it useful for obtaining verified and recent information.
+
+                [Response structure]
+                - id: A unique identifier for the response
+                - model: The model used (gpt-4o-mini-search-preview)
+                - object: The object type ("chat.completion")
+                - created: The Unix timestamp when the completion was created
+                - choices[]: The list of completion choices generated
+                - usage: Usage statistics for the completion request
+                """
+            ),
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "system_message": {
+                        "type": "string",
+                        "description": "Optional custom system message. If not provided, a default search-optimized system message will be used.",
+                    },
+                    "messages": {
+                        "type": "array",
+                        "description": "A list of messages comprising the conversation so far (excluding system message which is handled separately).",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "content": {
+                                    "type": "string",
+                                    "description": "The contents of the message in this turn of conversation.",
+                                },
+                                "role": {
+                                    "type": "string",
+                                    "description": "The role of the speaker in this turn of conversation.",
+                                    "enum": ["user", "assistant"],
+                                },
+                            },
+                            "required": ["content", "role"],
+                        },
+                    },
+                },
+                "required": ["messages"],
+            },
+        )
+    ]
+
+
+@server.call_tool()
+async def handle_call_tool(
+    name: str, arguments: dict
+) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
+    if name != "gpt4o_mini_search":
+        raise ValueError(f"Unknown tool: {name}")
+
+    # Extract user messages
+    user_messages = arguments.get("messages", [])
+
+    # Define default system message if not provided
+    default_system_message = (
+        "You are a professional search assistant. You need to:\n"
+        "1. Provide clearly formatted information based on the user's query\n"
+        "2. Embed links using the [title](URL) format\n"
+        "3. Attach the source after each piece of information\n"
+        "4. Separate different results with '---'\n"
+        "5. Cite directly in the text; do not use numbered references\n"
+        "6. Make sure to provide complete URLs"
+    )
+
+    # Use custom system message if provided, otherwise use default
+    system_message = arguments.get("system_message", default_system_message)
+
+    # Prepare full message list with system message first
+    full_messages = [{"role": "system", "content": system_message}]
+    full_messages.extend(user_messages)
+
+    try:
+        # Make the API call using the OpenAI SDK (module-level client)
+        completion = client.chat.completions.create(
+            model="gpt-4o-mini-search-preview",
+            messages=full_messages
+        )
+
+        # Extract content from response
+        content = completion.choices[0].message.content
+
+    except Exception as e:
+        raise RuntimeError(f"API error: {str(e)}")
+
+    return [types.TextContent(
+        type="text",
+        text=content,
+    )]
+
+
+async def main():
+    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+        await server.run(
+            read_stream,
+            write_stream,
+            InitializationOptions(
+                server_name="mcp-server-gpt4o-mini-search",
+                server_version="0.1.0",
+                capabilities=server.get_capabilities(
+                    notification_options=NotificationOptions(
+                        tools_changed=True),
+                    experimental_capabilities={},
+                ),
+            ),
+        )
+
+if __name__ == "__main__":
+    import asyncio
+    asyncio.run(main())
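
Note: the module exits immediately unless OPENAI_API_KEY is set, so a manual launch mirrors what the generated hub config runs; a sketch with placeholder values (the server then speaks MCP over stdio and blocks until its client closes the stream):

    import os
    import subprocess

    env = dict(os.environ)
    env["OPENAI_API_KEY"] = "sk-placeholder"  # placeholder; required at startup
    env["OPENAI_API_BASE_URL"] = "https://api.openai.com/v1"  # default shown above
    subprocess.run(
        ["python", "-m", "autocoder.common.mcp_servers.mcp_server_gpt4o_mini_search"],
        env=env,
    )
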
autocoder/rag/api_server.py CHANGED
@@ -31,6 +31,14 @@ from byzerllm.utils.client.entrypoints.openai.protocol import (
 from pydantic import BaseModel
 from typing import List,Optional
 
+# If dotenv is available, use it
+if os.path.exists(".env"):
+    try:
+        from dotenv import load_dotenv
+        load_dotenv()
+    except ImportError:
+        pass
+
 logger = init_logger(__name__)
 
 llm_client: ByzerLLM = None
autocoder/rag/long_context_rag.py CHANGED
@@ -210,22 +210,22 @@ class LongContextRAG:
 
         avg_tokens = statistics.mean(token_counts) if token_counts else 0
         median_tokens = statistics.median(token_counts) if token_counts else 0
-
-        logger.info(
-            "RAG Configuration:\n"
-            f"  Total docs: {doc_num}\n"
-            f"  Total tokens: {token_num}\n"
-            f"  Tokenizer path: {self.tokenizer_path}\n"
-            f"  Relevant score: {self.relevant_score}\n"
-            f"  Token limit: {self.token_limit}\n"
-            f"  Full text limit: {self.full_text_limit}\n"
-            f"  Segment limit: {self.segment_limit}\n"
-            f"  Buff limit: {self.buff_limit}\n"
-            f"  Max doc tokens: {max(token_counts) if token_counts else 0}\n"
-            f"  Min doc tokens: {min(token_counts) if token_counts else 0}\n"
-            f"  Avg doc tokens: {avg_tokens:.2f}\n"
-            f"  Median doc tokens: {median_tokens:.2f}\n"
-        )
+        if not self.client:
+            logger.info(
+                "RAG Configuration:\n"
+                f"  Total docs: {doc_num}\n"
+                f"  Total tokens: {token_num}\n"
+                f"  Tokenizer path: {self.tokenizer_path}\n"
+                f"  Relevant score: {self.relevant_score}\n"
+                f"  Token limit: {self.token_limit}\n"
+                f"  Full text limit: {self.full_text_limit}\n"
+                f"  Segment limit: {self.segment_limit}\n"
+                f"  Buff limit: {self.buff_limit}\n"
+                f"  Max doc tokens: {max(token_counts) if token_counts else 0}\n"
+                f"  Min doc tokens: {min(token_counts) if token_counts else 0}\n"
+                f"  Avg doc tokens: {avg_tokens:.2f}\n"
+                f"  Median doc tokens: {median_tokens:.2f}\n"
+            )
 
     def count_tokens(self, text: str) -> int:
         if self.tokenizer is None:
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.292"
+__version__ = "0.1.293"