jarvis-ai-assistant 0.1.101__py3-none-any.whl → 0.1.103__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of jarvis-ai-assistant might be problematic.

Files changed (54)
  1. jarvis/__init__.py +1 -1
  2. jarvis/agent.py +140 -140
  3. jarvis/jarvis_code_agent/code_agent.py +234 -0
  4. jarvis/{jarvis_coder → jarvis_code_agent}/file_select.py +16 -17
  5. jarvis/jarvis_code_agent/patch.py +118 -0
  6. jarvis/jarvis_code_agent/relevant_files.py +66 -0
  7. jarvis/jarvis_codebase/main.py +32 -29
  8. jarvis/jarvis_platform/main.py +5 -3
  9. jarvis/jarvis_rag/main.py +11 -15
  10. jarvis/jarvis_smart_shell/main.py +2 -2
  11. jarvis/models/ai8.py +1 -0
  12. jarvis/models/kimi.py +36 -30
  13. jarvis/models/ollama.py +17 -11
  14. jarvis/models/openai.py +15 -12
  15. jarvis/models/oyi.py +22 -7
  16. jarvis/models/registry.py +1 -25
  17. jarvis/tools/__init__.py +0 -6
  18. jarvis/tools/ask_codebase.py +99 -0
  19. jarvis/tools/ask_user.py +1 -9
  20. jarvis/tools/chdir.py +1 -1
  21. jarvis/tools/code_review.py +163 -0
  22. jarvis/tools/create_code_sub_agent.py +19 -45
  23. jarvis/tools/create_code_test_agent.py +115 -0
  24. jarvis/tools/create_ctags_agent.py +176 -0
  25. jarvis/tools/create_sub_agent.py +2 -2
  26. jarvis/tools/execute_shell.py +2 -2
  27. jarvis/tools/file_operation.py +2 -2
  28. jarvis/tools/find_in_codebase.py +108 -0
  29. jarvis/tools/git_commiter.py +68 -0
  30. jarvis/tools/methodology.py +3 -3
  31. jarvis/tools/rag.py +6 -3
  32. jarvis/tools/read_code.py +147 -0
  33. jarvis/tools/read_webpage.py +1 -1
  34. jarvis/tools/registry.py +92 -68
  35. jarvis/tools/search.py +8 -6
  36. jarvis/tools/select_code_files.py +4 -4
  37. jarvis/utils.py +270 -95
  38. {jarvis_ai_assistant-0.1.101.dist-info → jarvis_ai_assistant-0.1.103.dist-info}/METADATA +9 -5
  39. jarvis_ai_assistant-0.1.103.dist-info/RECORD +51 -0
  40. {jarvis_ai_assistant-0.1.101.dist-info → jarvis_ai_assistant-0.1.103.dist-info}/entry_points.txt +4 -2
  41. jarvis/jarvis_code_agent/main.py +0 -202
  42. jarvis/jarvis_coder/__init__.py +0 -0
  43. jarvis/jarvis_coder/git_utils.py +0 -123
  44. jarvis/jarvis_coder/main.py +0 -241
  45. jarvis/jarvis_coder/patch_handler.py +0 -340
  46. jarvis/jarvis_coder/plan_generator.py +0 -145
  47. jarvis/tools/execute_code_modification.py +0 -70
  48. jarvis/tools/find_files.py +0 -119
  49. jarvis/tools/generate_tool.py +0 -174
  50. jarvis/tools/thinker.py +0 -151
  51. jarvis_ai_assistant-0.1.101.dist-info/RECORD +0 -51
  52. {jarvis_ai_assistant-0.1.101.dist-info → jarvis_ai_assistant-0.1.103.dist-info}/LICENSE +0 -0
  53. {jarvis_ai_assistant-0.1.101.dist-info → jarvis_ai_assistant-0.1.103.dist-info}/WHEEL +0 -0
  54. {jarvis_ai_assistant-0.1.101.dist-info → jarvis_ai_assistant-0.1.103.dist-info}/top_level.txt +0 -0
jarvis/models/ollama.py CHANGED
@@ -25,18 +25,24 @@ class OllamaPlatform(BasePlatform):
             available_models = [model["name"] for model in response.json().get("models", [])]
 
             if not available_models:
-                PrettyOutput.print("\nNeed to download Ollama model first to use:", OutputType.INFO)
-                PrettyOutput.print("1. Install Ollama: https://ollama.ai", OutputType.INFO)
-                PrettyOutput.print("2. Download model:", OutputType.INFO)
-                PrettyOutput.print(f" ollama pull {self.model_name}", OutputType.INFO)
+                message = (
+                    "Need to download Ollama model first to use:\n"
+                    "1. Install Ollama: https://ollama.ai\n"
+                    "2. Download model:\n"
+                    f" ollama pull {self.model_name}"
+                )
+                PrettyOutput.print(message, OutputType.INFO)
                 PrettyOutput.print("Ollama has no available models", OutputType.WARNING)
 
         except requests.exceptions.ConnectionError:
-            PrettyOutput.print("\nOllama service is not started or cannot be connected", OutputType.WARNING)
-            PrettyOutput.print("Please ensure that you have:", OutputType.INFO)
-            PrettyOutput.print("1. Installed Ollama: https://ollama.ai", OutputType.INFO)
-            PrettyOutput.print("2. Started Ollama service", OutputType.INFO)
-            PrettyOutput.print("3. Service address configured correctly (default: http://localhost:11434)", OutputType.INFO)
+            message = (
+                "Ollama service is not started or cannot be connected\n"
+                "Please ensure that you have:\n"
+                "1. Installed Ollama: https://ollama.ai\n"
+                "2. Started Ollama service\n"
+                "3. Service address configured correctly (default: http://localhost:11434)"
+            )
+            PrettyOutput.print(message, OutputType.WARNING)
 
 
         self.messages = []
@@ -136,10 +142,10 @@ if __name__ == "__main__":
     ollama = OllamaPlatform()
     while True:
         try:
-            message = get_single_line_input("\nInput question (Ctrl+C to exit)")
+            message = get_single_line_input("Input question (Ctrl+C to exit)")
             ollama.chat_until_success(message)
         except KeyboardInterrupt:
-            print("\nGoodbye!")
+            print("Goodbye!")
            break
        except Exception as e:
            PrettyOutput.print(f"Program exited with an exception: {str(e)}", OutputType.ERROR)
jarvis/models/openai.py CHANGED
@@ -19,18 +19,21 @@ class OpenAIModel(BasePlatform):
         self.system_message = ""
         self.api_key = os.getenv("OPENAI_API_KEY")
         if not self.api_key:
-            PrettyOutput.print("\nNeed to set the following environment variables to use OpenAI model:", OutputType.INFO)
-            PrettyOutput.print(" OPENAI_API_KEY: API key", OutputType.INFO)
-            PrettyOutput.print(" • OPENAI_API_BASE: (optional) API base address, default using https://api.openai.com/v1", OutputType.INFO)
-            PrettyOutput.print("\nYou can set them in the following ways:", OutputType.INFO)
-            PrettyOutput.print("1. Create or edit ~/.jarvis/env file:", OutputType.INFO)
-            PrettyOutput.print(" OPENAI_API_KEY=your_api_key", OutputType.INFO)
-            PrettyOutput.print(" OPENAI_API_BASE=your_api_base", OutputType.INFO)
-            PrettyOutput.print(" OPENAI_MODEL_NAME=your_model_name", OutputType.INFO)
-            PrettyOutput.print("\n2. Or set the environment variables directly:", OutputType.INFO)
-            PrettyOutput.print(" export OPENAI_API_KEY=your_api_key", OutputType.INFO)
-            PrettyOutput.print(" export OPENAI_API_BASE=your_api_base", OutputType.INFO)
-            PrettyOutput.print(" export OPENAI_MODEL_NAME=your_model_name", OutputType.INFO)
+            message = (
+                "Need to set the following environment variables to use OpenAI model:\n"
+                " • OPENAI_API_KEY: API key\n"
+                " OPENAI_API_BASE: (optional) API base address, default using https://api.openai.com/v1\n"
+                "You can set them in the following ways:\n"
+                "1. Create or edit ~/.jarvis/env file:\n"
+                " OPENAI_API_KEY=your_api_key\n"
+                " OPENAI_API_BASE=your_api_base\n"
+                " OPENAI_MODEL_NAME=your_model_name\n"
+                "2. Or set the environment variables directly:\n"
+                " export OPENAI_API_KEY=your_api_key\n"
+                " export OPENAI_API_BASE=your_api_base\n"
+                " export OPENAI_MODEL_NAME=your_model_name"
+            )
+            PrettyOutput.print(message, OutputType.INFO)
             PrettyOutput.print("OPENAI_API_KEY is not set", OutputType.WARNING)
 
         self.base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
jarvis/models/oyi.py CHANGED
@@ -14,6 +14,7 @@ class OyiModel(BasePlatform):
 
     def get_model_list(self) -> List[Tuple[str, str]]:
         """Get model list"""
+        self.get_available_models()
         return [(name,info['desc']) for name,info in self.models.items()]
 
     def __init__(self):
@@ -65,7 +66,7 @@ class OyiModel(BasePlatform):
                 "requestMsgCount": 65536,
                 "temperature": 0.8,
                 "speechVoice": "Alloy",
-                "max_tokens": get_max_context_length(),
+                "max_tokens": 8192,
                 "chatPluginIds": []
             })
         }
@@ -170,19 +171,33 @@ class OyiModel(BasePlatform):
             # Get the response content
             response = requests.post(
                 f"{self.BASE_URL}/chatapi/chat/message/{message_id}",
-                headers=headers
+                headers=headers,
+                stream=True
             )
 
             if response.status_code == 200:
-                if not self.suppress_output:
-                    PrettyOutput.print(response.text, OutputType.SYSTEM)
-                self.messages.append({"role": "assistant", "content": response.text})
-                return response.text
+                full_response = ""
+                bin = b""
+                for chunk in response.iter_content(decode_unicode=True):
+                    if chunk:
+                        bin += chunk
+                        try:
+                            text = bin.decode('utf-8')
+                        except UnicodeDecodeError:
+                            continue
+                        if not self.suppress_output:
+                            PrettyOutput.print_stream(text)
+                        full_response += text
+                        bin = b""
+
+                PrettyOutput.print_stream_end()
+
+                self.messages.append({"role": "assistant", "content": full_response})
+                return full_response
             else:
                 error_msg = f"Get response failed: {response.status_code}"
                 PrettyOutput.print(error_msg, OutputType.ERROR)
                 raise Exception(error_msg)
-
        except Exception as e:
            PrettyOutput.print(f"Chat failed: {str(e)}", OutputType.ERROR)
            raise e
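
The new streaming handler in jarvis/models/oyi.py buffers raw bytes and retries the UTF-8 decode, so a multi-byte character split across two chunks is held back until the full sequence has arrived. A minimal standalone sketch of the same pattern, independent of requests (the chunk source `iter_chunks` is a hypothetical placeholder, not part of the package):

    def stream_decode(iter_chunks):
        """Yield decoded text pieces from an iterator of raw byte chunks."""
        buf = b""
        for chunk in iter_chunks:
            buf += chunk
            try:
                text = buf.decode("utf-8")   # fails while a character is still incomplete
            except UnicodeDecodeError:
                continue                     # keep buffering until the sequence finishes
            buf = b""                        # flushed; start a fresh buffer
            yield text
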
jarvis/models/registry.py CHANGED
@@ -211,31 +211,7 @@ class PlatformRegistry:
         except Exception as e:
             PrettyOutput.print(f"Create platform failed: {str(e)}", OutputType.ERROR)
             return None
-
-    def use_platforms(self, platform_names: List[str]):
-        """Restrict available platforms to the specified list
-
-        Args:
-            platform_names: List of platform names to use
-        """
-        self.platforms = {
-            name: cls
-            for name, cls in self.platforms.items()
-            if name in platform_names
-        }
 
-    def dont_use_platforms(self, platform_names: List[str]):
-        """Restrict available platforms by excluding the specified list
-
-        Args:
-            platform_names: List of platform names to exclude
-        """
-        self.platforms = {
-            name: cls
-            for name, cls in self.platforms.items()
-            if name not in platform_names
-        }
     def get_available_platforms(self) -> List[str]:
         """Get available platform list"""
-        return list(self.platforms.keys())
-
+        return list(self.platforms.keys())
jarvis/tools/__init__.py CHANGED
@@ -1,6 +0,0 @@
-from .registry import ToolRegistry
-
-__all__ = [
-    'ToolRegistry',
-]
-
jarvis/tools/ask_codebase.py ADDED
@@ -0,0 +1,99 @@
+from typing import Dict, Any
+from jarvis.utils import OutputType, PrettyOutput, dont_use_local_model, find_git_root
+from jarvis.jarvis_codebase.main import CodeBase
+
+class AskCodebaseTool:
+    """Tool for intelligent codebase querying and analysis using CodeBase"""
+
+    name = "ask_codebase"
+    description = "Ask questions about the codebase and get detailed analysis"
+    parameters = {
+        "type": "object",
+        "properties": {
+            "question": {
+                "type": "string",
+                "description": "Question about the codebase"
+            },
+            "top_k": {
+                "type": "integer",
+                "description": "Number of most relevant files to analyze (optional)",
+                "default": 20
+            }
+        },
+        "required": ["question"]
+    }
+
+    @staticmethod
+    def check() -> bool:
+        return not dont_use_local_model()
+
+    def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
+        """Execute codebase analysis using CodeBase
+
+        Args:
+            args: Dictionary containing:
+                - question: The question to answer
+                - top_k: Optional number of files to analyze
+
+        Returns:
+            Dict containing:
+                - success: Boolean indicating success
+                - stdout: Analysis result
+                - stderr: Error message if any
+        """
+        try:
+            question = args["question"]
+            top_k = args.get("top_k", 20)
+
+            PrettyOutput.print(f"Analyzing codebase for question: {question}", OutputType.INFO)
+
+            # Create new CodeBase instance
+            git_root = find_git_root()
+            codebase = CodeBase(git_root)
+
+            # Use ask_codebase method
+            response = codebase.ask_codebase(question, top_k)
+
+            return {
+                "success": True,
+                "stdout": response,
+                "stderr": ""
+            }
+
+        except Exception as e:
+            error_msg = f"Failed to analyze codebase: {str(e)}"
+            PrettyOutput.print(error_msg, OutputType.ERROR)
+            return {
+                "success": False,
+                "stdout": "",
+                "stderr": error_msg
+            }
+
+
+def main():
+    """Command line interface for the tool"""
+    import argparse
+
+    parser = argparse.ArgumentParser(description='Ask questions about the codebase')
+    parser.add_argument('question', help='Question about the codebase')
+    parser.add_argument('--top-k', type=int, help='Number of files to analyze', default=20)
+
+    args = parser.parse_args()
+
+    tool = AskCodebaseTool()
+    result = tool.execute({
+        "question": args.question,
+        "top_k": args.top_k
+    })
+
+    if result["success"]:
+        print(result["stdout"])
+    else:
+        PrettyOutput.print(result["stderr"], OutputType.ERROR)
+        return 1
+
+    return 0
+
+
+if __name__ == "__main__":
+    main()
jarvis/tools/ask_user.py CHANGED
@@ -30,19 +30,11 @@ class AskUserTool:
         question = args["question"]
 
         # Display the question
-        PrettyOutput.print("\nQuestion:", OutputType.SYSTEM)
-        PrettyOutput.print(question, OutputType.SYSTEM)
+        PrettyOutput.print(f"Question: {question}", OutputType.SYSTEM)
 
         # Get user input
         user_response = get_multiline_input("Please enter your answer (input empty line to end)")
 
-        if user_response == "__interrupt__":
-            return {
-                "success": False,
-                "stdout": "",
-                "stderr": "User canceled input"
-            }
-
        return {
            "success": True,
            "stdout": user_response,
jarvis/tools/chdir.py CHANGED
@@ -31,7 +31,7 @@ class ChdirTool:
             - error: Error message on failure
         """
         try:
-            path = os.path.expanduser(args["path"])  # Expand paths such as ~
+            path = os.path.expanduser(args["path"].strip())  # Expand paths such as ~
            path = os.path.abspath(path)  # Convert to an absolute path
 
            # Check whether the directory exists
jarvis/tools/code_review.py ADDED
@@ -0,0 +1,163 @@
+from typing import Dict, Any
+import subprocess
+import yaml
+from jarvis.models.registry import PlatformRegistry
+from jarvis.tools.registry import ToolRegistry
+from jarvis.utils import OutputType, PrettyOutput, init_env, find_git_root
+from jarvis.agent import Agent
+
+class CodeReviewTool:
+    name = "code_review"
+    description = "Autonomous code review agent for commit analysis"
+    parameters = {
+        "type": "object",
+        "properties": {
+            "commit_sha": {
+                "type": "string",
+                "description": "Target commit SHA to analyze"
+            },
+            "requirement_desc": {
+                "type": "string",
+                "description": "Development goal to verify"
+            }
+        },
+        "required": ["commit_sha", "requirement_desc"]
+    }
+
+    def __init__(self):
+        init_env()
+        self.repo_root = find_git_root()
+
+    def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            commit_sha = args["commit_sha"].strip()
+            requirement = args["requirement_desc"].strip()
+
+            system_prompt = """You are an autonomous code review expert. Perform in-depth analysis following these guidelines:
+
+REVIEW FOCUS AREAS:
+1. Requirement Alignment:
+- Verify implementation matches original requirements
+- Check for missing functionality
+- Identify over-implementation
+
+2. Code Quality:
+- Code readability and structure
+- Proper error handling
+- Code duplication
+- Adherence to style guides
+- Meaningful variable/method names
+
+3. Security:
+- Input validation
+- Authentication/Authorization checks
+- Sensitive data handling
+- Potential injection vulnerabilities
+- Secure communication practices
+
+4. Testing:
+- Test coverage for new code
+- Edge case handling
+- Test readability and maintainability
+- Missing test scenarios
+
+5. Performance:
+- Algorithm efficiency
+- Unnecessary resource consumption
+- Proper caching mechanisms
+- Database query optimization
+
+6. Maintainability:
+- Documentation quality
+- Logging and monitoring
+- Configuration management
+- Technical debt indicators
+
+7. Operational Considerations:
+- Backward compatibility
+- Migration script safety
+- Environment-specific configurations
+- Deployment impacts
+
+REVIEW PROCESS:
+1. Retrieve full commit context using git commands
+2. Analyze code changes line-by-line
+3. Cross-reference with project standards
+4. Verify test coverage adequacy
+5. Check documentation updates
+6. Generate prioritized findings
+
+OUTPUT REQUIREMENTS:
+- Categorize issues by severity (Critical/Major/Minor)
+- Reference specific code locations
+- Provide concrete examples
+- Suggest actionable improvements
+- Highlight security risks clearly
+- Separate technical debt from blockers"""
+
+            summary_prompt = """Please generate a concise summary report of the code review, format as yaml:
+<REPORT>
+- file: xxxx.py
+location: [start_line_number, end_line_number]
+description:
+severity:
+suggestion:
+</REPORT>
+
+Please describe in concise bullet points, highlighting important information.
+"""
+
+            tool_registry = ToolRegistry()
+            tool_registry.use_tools(["execute_shell", "read_code", "ask_user", "ask_codebase", "find_in_codebase", "create_ctags_agent"])
+            tool_registry.dont_use_tools(["code_review"])
+
+            review_agent = Agent(
+                name="Code Review Agent",
+                platform=PlatformRegistry().get_thinking_platform(),
+                system_prompt=system_prompt,
+                is_sub_agent=True,
+                tool_registry=tool_registry,
+                summary_prompt=summary_prompt,
+                auto_complete=True
+            )
+
+            result = review_agent.run(
+                f"Analyze commit {commit_sha} for requirement: {requirement}"
+            )
+
+            return {
+                "success": True,
+                "stdout": {"report": result},
+                "stderr": ""
+            }
+
+        except Exception as e:
+            return {
+                "success": False,
+                "stdout": {},
+                "stderr": f"Review failed: {str(e)}"
+            }
+
+def main():
+    """CLI entry point"""
+    import argparse
+
+    parser = argparse.ArgumentParser(description='Autonomous code review tool')
+    parser.add_argument('--commit', required=True)
+    parser.add_argument('--requirement', required=True)
+    args = parser.parse_args()
+
+    tool = CodeReviewTool()
+    result = tool.execute({
+        "commit_sha": args.commit,
+        "requirement_desc": args.requirement
+    })
+
+    if result["success"]:
+        PrettyOutput.print("Autonomous Review Result:", OutputType.INFO)
+        print(yaml.dump(result["stdout"], allow_unicode=True))
+    else:
+        PrettyOutput.print(result["stderr"], OutputType.ERROR)
+
+if __name__ == "__main__":
+    main()
jarvis/tools/create_code_sub_agent.py CHANGED
@@ -1,56 +1,30 @@
-from typing import Dict, Any
 
-from jarvis.agent import Agent
-from jarvis.utils import OutputType, PrettyOutput
-from jarvis.jarvis_code_agent.main import system_prompt
+
+
+from typing import Any, Dict
+from jarvis.jarvis_code_agent.code_agent import CodeAgent
 
 
 class CodeSubAgentTool:
     name = "create_code_sub_agent"
-    description = "Create a sub-agent to handle specific code development subtasks"
+    description = "Create a sub-agent to handle the code modification"
     parameters = {
         "type": "object",
         "properties": {
-            "name": {
-                "type": "string",
-                "description": "The name of the sub-agent"
-            },
-            "subtask": {
+            "requirement": {
                 "type": "string",
-                "description": "The specific code development subtask to complete"
-            },
-        },
-        "required": ["subtask", "name"]
+                "description": "The requirement of the sub-agent"
+            }
+        }
     }
-
+
     def execute(self, args: Dict) -> Dict[str, Any]:
-        """Execute code development subtask"""
-        try:
-            subtask = args["subtask"]
-            name = args["name"]
-
-            PrettyOutput.print(f"Creating code sub-agent {name} for subtask: {subtask}", OutputType.INFO)
-
-            # Create sub-agent
-            sub_agent = Agent(
-                system_prompt=system_prompt,
-                name=name,
-                is_sub_agent=True
-            )
-
-            # Execute subtask
-            result = sub_agent.run(subtask)
-
-            return {
-                "success": True,
-                "stdout": f"Code Development Subtask Results:\n\n{result}",
-                "stderr": ""
-            }
-
-        except Exception as e:
-            PrettyOutput.print(str(e), OutputType.ERROR)
-            return {
-                "success": False,
-                "stdout": "",
-                "stderr": f"Failed to execute code development subtask: {str(e)}"
-            }
+        """Execute the sub-agent"""
+        requirement = args["requirement"]
+        agent = CodeAgent()
+        output = agent.run(requirement)
+        return {
+            "success": True,
+            "stdout": output,
+            "stderr": ""
+        }
jarvis/tools/create_code_test_agent.py ADDED
@@ -0,0 +1,115 @@
+from typing import Dict, Any
+from jarvis.agent import Agent
+from jarvis.tools.registry import ToolRegistry
+import subprocess
+
+class TestAgentTool:
+    name = "create_code_test_agent"
+    description = "Create testing agent for specific commit analysis"
+    parameters = {
+        "type": "object",
+        "properties": {
+            "name": {
+                "type": "string",
+                "description": "Identifier for the test agent"
+            },
+            "test_scope": {
+                "type": "string",
+                "enum": ["unit", "integration", "e2e"],
+                "description": "Testing focus area"
+            },
+            "commit_sha": {
+                "type": "string",
+                "description": "Commit SHA to analyze"
+            }
+        },
+        "required": ["name", "test_scope", "commit_sha"]
+    }
+
+    def execute(self, args: Dict) -> Dict[str, Any]:
+        """Execute commit-focused testing"""
+        try:
+            if not self._is_valid_commit(args["commit_sha"]):
+                return {
+                    "success": False,
+                    "stdout": "",
+                    "stderr": f"Invalid commit SHA: {args['commit_sha']}"
+                }
+
+            tool_registry = ToolRegistry()
+            tool_registry.dont_use_tools(["create_code_test_agent"])
+
+            test_agent = Agent(
+                system_prompt=self._build_system_prompt(args),
+                name=f"TestAgent({args['name']})",
+                is_sub_agent=True,
+                tool_registry=tool_registry
+            )
+
+            result = test_agent.run(
+                f"Analyze and test changes in commit {args['commit_sha'].strip()}"
+            )
+
+            return {
+                "success": True,
+                "stdout": result,
+                "stderr": ""
+            }
+        except Exception as e:
+            return {
+                "success": False,
+                "stdout": "",
+                "stderr": f"Commit testing failed: {str(e)}"
+            }
+
+    def _is_valid_commit(self, commit_sha: str) -> bool:
+        """Validate commit exists in repository"""
+        try:
+            cmd = f"git cat-file -t {commit_sha}"
+            result = subprocess.run(
+                cmd.split(),
+                capture_output=True,
+                text=True,
+                check=True
+            )
+            return "commit" in result.stdout
+        except subprocess.CalledProcessError:
+            return False
+
+    def _build_system_prompt(self, args: Dict) -> str:
+        return """You are a Commit Testing Specialist. Follow this protocol:
+
+【Testing Protocol】
+1. Commit Analysis:
+- Analyze code changes in target commit
+- Identify modified components
+- Assess change impact scope
+
+2. Test Strategy:
+- Determine required test types
+- Verify backward compatibility
+- Check interface contracts
+
+3. Test Execution:
+- Execute relevant test suites
+- Compare pre/post-commit behavior
+- Validate cross-component interactions
+
+4. Reporting:
+- List affected modules
+- Risk assessment matrix
+- Performance impact analysis
+- Security implications
+
+【Output Requirements】
+- Test coverage analysis
+- Behavioral change summary
+- Critical issues prioritized
+- Actionable recommendations
+
+【Key Principles】
+1. Focus on delta changes
+2. Maintain test isolation
+3. Preserve historical baselines
+4. Automate verification steps
+5. Document test evidence"""