PikoAi 0.1.23__tar.gz → 0.1.25__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pikoai-0.1.23/Src/PikoAi.egg-info → pikoai-0.1.25}/PKG-INFO +1 -1
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Agents/Executor/executor.py +66 -1
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Agents/Executor/prompts.py +21 -10
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/python_executor.py +0 -17
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/OpenCopilot.py +5 -1
- {pikoai-0.1.23 → pikoai-0.1.25/Src/PikoAi.egg-info}/PKG-INFO +1 -1
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/PikoAi.egg-info/SOURCES.txt +1 -9
- pikoai-0.1.25/Src/PikoAi.egg-info/dependency_links.txt +1 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/tool_manager.py +10 -6
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/web_loader.py +7 -1
- pikoai-0.1.25/Src/Utils/executor_utils.py +32 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Utils/ter_interface.py +69 -13
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/llm_interface/llm.py +1 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/setup.py +1 -1
- pikoai-0.1.23/Src/Env/__init__.py +0 -5
- pikoai-0.1.23/Src/Env/base_executor.py +0 -24
- pikoai-0.1.23/Src/Env/env.py +0 -5
- pikoai-0.1.23/Src/Env/js_executor.py +0 -63
- pikoai-0.1.23/Src/Utils/executor_utils.py +0 -15
- pikoai-0.1.23/test/test.py +0 -30
- pikoai-0.1.23/test/test_file_task.py +0 -210
- pikoai-0.1.23/test/test_opencopilot_file_integration.py +0 -187
- pikoai-0.1.23/test/testjs.py +0 -30
- pikoai-0.1.23/test/testscript.py +0 -29
- {pikoai-0.1.23 → pikoai-0.1.25}/LICENSE +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/README.md +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Agents/Executor/__init__.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Agents/__init__.py +0 -0
- /pikoai-0.1.23/Src/PikoAi.egg-info/dependency_links.txt → /pikoai-0.1.25/Src/Env/__init__.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/base_env.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/shell.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/tests/__init__.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/tests/test_python_executor.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/tests/test_shell_executor.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/PikoAi.egg-info/entry_points.txt +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/PikoAi.egg-info/requires.txt +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/PikoAi.egg-info/top_level.txt +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/__init__.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/file_task.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/system_details.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/tool_dir.json +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/userinp.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/web_search.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/Utils/__init__.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/cli.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/Src/llm_interface/__init__.py +0 -0
- {pikoai-0.1.23 → pikoai-0.1.25}/setup.cfg +0 -0
{pikoai-0.1.23 → pikoai-0.1.25}/Src/Agents/Executor/executor.py
@@ -4,6 +4,8 @@
 import os
 import sys
 import time
+import logging
+import json
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
 from Utils.ter_interface import TerminalInterface
 from Utils.executor_utils import parse_tool_call
@@ -30,11 +32,28 @@ class executor:
         self.user_prompt = user_prompt
         self.max_iter = max_iter
         self.rate_limiter = RateLimiter(wait_time=3.0, max_retries=3)
+
+        # Load environment configuration
+        self.environment = self.load_environment_config()
+
+        # Setup logging if in development environment
+        if self.environment == "development":
+            self.setup_logging()
+
         self.executor_prompt_init()  # Update system_prompt
         # self.python_executor = python_executor.PythonExecutor() # Initialize PythonExecutor
         # self.shell_executor = ShellExecutor() # Initialize ShellExecutor
         self.message = [
             {"role": "system", "content": self.system_prompt},
+            {"role":"user","content":"Hi"},
+            {"role":"assistant","content":"""```json
+{
+    "tool_name": "get_user_input",
+    "input": {
+        "query": "Hi,im your terminal assistant. How can I help you?"
+    }
+}
+```"""},
             {"role": "user", "content": self.user_prompt}
         ]
         self.terminal = TerminalInterface()
@@ -74,6 +93,10 @@ class executor:

         response = self.llm.chat(self.message)  # LiteLLMInterface.chat() returns the full response string

+        # Log response in development environment
+        if self.environment == "development":
+            self.logger.info(f"LLM Response: {response}")
+
         # Streaming is handled within LiteLLMInterface.chat()
         # and TerminalInterface.process_markdown_chunk()
         if response.strip():
@@ -144,6 +167,48 @@ class executor:
         # result = exec_env.execute(code)
         # return result

+    def load_environment_config(self):
+        """Load environment configuration from config.json"""
+        try:
+            config_path = os.path.join(os.path.dirname(__file__), '../../../config.json')
+            with open(config_path, "r") as config_file:
+                config = json.load(config_file)
+            return config.get("environment", "production")
+        except Exception as e:
+
+            return "production"
+
+    def setup_logging(self):
+        """Setup logging for development environment"""
+        try:
+            # Create logs directory if it doesn't exist
+            logs_dir = os.path.join(os.path.dirname(__file__), '../../../logs')
+            os.makedirs(logs_dir, exist_ok=True)
+
+            # Create a specific logger for executor responses
+            self.logger = logging.getLogger('executor_responses')
+            self.logger.setLevel(logging.INFO)
+
+            # Prevent propagation to parent loggers (this stops console output)
+            self.logger.propagate = False
+
+            # Remove any existing handlers to avoid duplicates
+            for handler in self.logger.handlers[:]:
+                self.logger.removeHandler(handler)
+
+            # Add file handler only (no console output)
+            log_file = os.path.join(logs_dir, 'executor_responses.log')
+            file_handler = logging.FileHandler(log_file)
+            file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+            self.logger.addHandler(file_handler)
+
+            # Suppress LiteLLM's verbose logging
+            logging.getLogger('litellm').setLevel(logging.WARNING)
+
+            self.logger.info("Development logging enabled for executor responses")
+        except Exception as e:
+            print(f"Warning: Could not setup logging: {e}")
+
 if __name__ == "__main__":
     # e1 = executor("") # Commenting out example usage for now as it might need adjustment
     # user_prompt = input("Please enter your prompt: ")
@@ -160,4 +225,4 @@ if __name__ == "__main__":
     # e1.message.append({"role": "user", "content": user_prompt})
     # # e1.message.append({"role":"user","content":e1.system_prompt})
     # e1.run()
-    pass
+    pass
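For context on the new environment switch above: `load_environment_config()` reads an `environment` key from a `config.json` three directories above the executor and falls back to `"production"` on any failure, and only `"development"` turns on the file-only logger in `setup_logging()`. A minimal standalone sketch of that lookup (the key name and fallback come from the diff; the sample value is illustrative):

```python
# config.json (illustrative) placed at the project root:
# {"environment": "development"}

import json
import os

config_path = os.path.join(os.path.dirname(__file__), "../../../config.json")  # same relative path as in the diff
try:
    with open(config_path, "r") as config_file:
        environment = json.load(config_file).get("environment", "production")
except Exception:
    environment = "production"  # any read/parse failure silently falls back to production

print(environment)  # "development" would enable the executor_responses.log file handler
```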
{pikoai-0.1.23 → pikoai-0.1.25}/Src/Agents/Executor/prompts.py
@@ -23,22 +23,22 @@ Your primary objective is to accomplish the user's goal by performing step-by-st

 You must break down the user's goal into smaller steps and perform one action at a time. After each action, carefully evaluate the output to determine the next step.

-
-- **Tool Call**: Use when a specific tool can help with the current step. Format
-
-
+## Action Guidelines:
+- **Tool Call**: Use when a specific tool can help with the current step. Format your tool calls as a JSON object within a markdown code block starting with ```json.
+like this :
+```json
 {{
     "tool_name": "name_of_tool",
     "input": {{
-        "key": "value" //Replace 'key' with the actual parameter name for the tool
+        "key": "value" // Replace 'key' with the actual parameter name for the tool
     }}
 }}
-
-
-- **Direct Response**: Provide a direct answer if the task doesn't require tool calling
+```
+Ensure your entire response for a tool call is *only* this markdown code block if a tool call is being made.
+- **Direct Response**: Provide a direct answer if the task doesn't require tool calling. If providing a direct response, do not use the markdown JSON code block.


-These are the
+These are the points that you learned from the mistakes you made earlier :
 - When given a data file and asked to understand data/do data analysis/ data visualisation or similar stuff
 do not use file reader and read the whole data. Only use python code to do the analysis
 - This is a standard Python environment, not a python notebook or a repl. previous execution
@@ -56,7 +56,18 @@ These are the things that you learned from the mistakes you made earlier :
 - Continue performing actions until the user's goal is fully achieved. Only then, include 'TASK_DONE' in your response.
 - Do not end the task immediately after a tool call without evaluating its output.
 - The best way to give output is to save it open the file using shell commands.
-- The tool call in json format
+- The tool call in json format must be done between the delimiters <<TOOL_CALL>> and <<END_TOOL_CALL>>. This is non-negotiable.
+
+for e.g. User: what is the latest news on Ai.
+your response should be :

+```json
+{{
+    "tool_name": "web_search",
+    "input": {{
+        "query": "latest news"
+    }}
+}}
+```

 """
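A quick sanity check of the format the revised prompt asks for: the assistant's whole reply should be a single ```json fenced block carrying `tool_name` and `input`. A hedged sketch that builds such a reply (the tool name and query are only examples; note the doubled braces in the prompt template render as single braces in the model's output):

```python
import json

tool_call = {"tool_name": "web_search", "input": {"query": "latest news on AI"}}
# What the model is expected to emit for a tool call, and nothing else:
response = "```json\n" + json.dumps(tool_call, indent=2) + "\n```"
print(response)
```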
{pikoai-0.1.23 → pikoai-0.1.25}/Src/Env/python_executor.py
@@ -1,20 +1,3 @@
-# from .base_executor import BaseExecutor
-
-# class PythonExecutor():
-#     def execute(self, code: str) -> str:
-#         """Executes Python code and returns the result or an error message."""
-
-# # if not self.validate_code(code):
-# #     return "Code validation failed: Unsafe code detected."
-
-#         local_vars = {}
-#         try:
-#             exec(code, {}, local_vars)  # Execute code in an isolated environment
-#             return local_vars.get("output", "Code executed successfully.")
-#         except Exception as e:
-# #             return self.handle_error(e)
-#             print("error in running python code", e)
-
 import subprocess
 import tempfile
 import os
{pikoai-0.1.23 → pikoai-0.1.25}/Src/OpenCopilot.py
@@ -257,7 +257,11 @@ Examples:
             print("\nGoodbye!")
         except Exception as e:
             print_formatted_text(FormattedText([
-                ('class:error', f"Failed to start OpenCopilot: {e}")
+                ('class:error', f"Failed to start OpenCopilot: {e.__class__.__name__}: {e}")
+            ]))
+            import traceback
+            print_formatted_text(FormattedText([
+                ('class:error', f"Error occurred at: {traceback.format_exc()}")
             ]))

     def run_task(self, user_prompt, max_iter=10):
{pikoai-0.1.23 → pikoai-0.1.25}/Src/PikoAi.egg-info/SOURCES.txt
@@ -9,9 +9,6 @@ Src/Agents/Executor/executor.py
 Src/Agents/Executor/prompts.py
 Src/Env/__init__.py
 Src/Env/base_env.py
-Src/Env/base_executor.py
-Src/Env/env.py
-Src/Env/js_executor.py
 Src/Env/python_executor.py
 Src/Env/shell.py
 Src/Env/tests/__init__.py
@@ -35,9 +32,4 @@ Src/Utils/__init__.py
 Src/Utils/executor_utils.py
 Src/Utils/ter_interface.py
 Src/llm_interface/__init__.py
-Src/llm_interface/llm.py
-test/test.py
-test/test_file_task.py
-test/test_opencopilot_file_integration.py
-test/testjs.py
-test/testscript.py
+Src/llm_interface/llm.py
pikoai-0.1.25/Src/PikoAi.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@
+
{pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/tool_manager.py
@@ -9,10 +9,12 @@ from Tools.system_details import get_os_details, get_datetime, get_memory_usage,
 from Tools.userinp import get_user_input
 from Env.python_executor import PythonExecutor
 from Env.shell import ShellExecutor
+from Utils.ter_interface import TerminalInterface

 #need to transform it into map of dictionary
 #name : [function : xyz,description : blah bah]

+terminal = TerminalInterface()



@@ -20,9 +22,9 @@ def execute_python_code_tool(code: str) -> str:
     """
     Prompts for confirmation, then executes the given Python code and returns a formatted result string.
     """
-
+    terminal.code_log(code)
+    user_confirmation = input(f"Do you want to execute this Python code snippet?\n(y/n): ")
     if user_confirmation.lower() != 'y':
-        print("Python code execution skipped by the user.")
         return "User chose not to execute the Python code."
     executor = PythonExecutor()
     result = executor.execute(code)
@@ -48,9 +50,9 @@ def execute_shell_command_tool(command: str) -> str:
     """
     Prompts for confirmation, then executes the given shell command and returns a formatted result string.
     """
-
+    terminal.code_log(command)
+    user_confirmation = input(f"Do you want to execute the shell command? (y/n): ")
     if user_confirmation.lower() != 'y':
-        print("Shell command execution skipped by the user.")
         return "User chose not to execute the shell command."
     executor = ShellExecutor()
     result = executor.execute(command)
@@ -73,11 +75,13 @@ def call_tool(tool_name, tool_input):
         tool_name (str): Name of the tool to call
         tool_input (dict): Input parameters for the tool
     """
+
     if tool_name in tools_function_map:
         # Pass the tool_input dictionary as kwargs to the tool function
         return tools_function_map[tool_name](**tool_input)
-    else:
-
+    else: raise ValueError(f"This tool is invalid. Please check the tools available in the tool directory")
+
+

 tools_function_map = {
     "web_loader": load_data,
{pikoai-0.1.23 → pikoai-0.1.25}/Src/Tools/web_loader.py
@@ -125,7 +125,7 @@ def load_data(**kwargs):
     }
     content = ""
     try:
-        response = session.get(url, headers=headers, timeout=
+        response = session.get(url, headers=headers, timeout=3)
         response.raise_for_status()
         data = response.content
         # Check content type
@@ -138,6 +138,12 @@ def load_data(**kwargs):
             # Extract text from each page and combine it
             content = "\n".join([page.extract_text() for page in pdf.pages if page.extract_text()])

+    except requests.exceptions.Timeout:
+        logging.error(f"Timeout error loading data from {url}: The webpage didn't load in time")
+        content = f"Error: The webpage at {url} didn't load in time. Please try again later or check other urls for the task. use web_search tool to find other urls."
+    except requests.exceptions.RequestException as e:
+        logging.error(f"Request error loading data from {url}: {e}")
+        content = f"Error: Failed to load webpage at {url}. {str(e)}"
     except Exception as e:
         logging.error(f"Error loading data from {url}: {e}")
         content = ""
pikoai-0.1.25/Src/Utils/executor_utils.py
ADDED
@@ -0,0 +1,32 @@
+import json
+from typing import Optional
+
+import re
+
+def parse_tool_call(response: str) -> Optional[dict]:
+    """
+    Parses a tool call from the response, expecting it in a markdown JSON code block.
+    Example:
+    ```json
+    {
+        "tool_name": "tool_name",
+        "input": {"arg": "value"}
+    }
+    ```
+    """
+    # Regex to find ```json ... ``` blocks
+    # It captures the content within the fences.
+    # re.DOTALL allows '.' to match newlines, which is crucial for multi-line JSON.
+    match = re.search(r"```json\s*([\s\S]+?)\s*```", response, re.DOTALL)
+
+    if match:
+        json_str = match.group(1).strip()
+        try:
+            tool_call = json.loads(json_str)
+            # Basic validation for the expected structure
+            if isinstance(tool_call, dict) and "tool_name" in tool_call and "input" in tool_call:
+                return tool_call
+        except json.JSONDecodeError:
+            # Invalid JSON within the markdown block
+            return None
+    return None
{pikoai-0.1.23 → pikoai-0.1.25}/Src/Utils/ter_interface.py
@@ -5,6 +5,8 @@ from rich.panel import Panel
 from rich.text import Text
 from rich.markdown import Markdown
 from rich.syntax import Syntax
+from yaspin import yaspin
+import time

 import json

@@ -36,12 +38,7 @@ class TerminalInterface:
         elif not isinstance(message, str):
             message = str(message)

-
-        # panel = Panel(
-        #     Text(message, style="orange"),
-        #     title=f"[bold green]{tool_name} Output[/bold green]",
-        #     border_style="green"
-        # )
+
         panel = Panel(
             Text(message, style="blue"),
             title=f"[bold green]{tool_name} Output[/bold green]",
@@ -49,6 +46,17 @@ class TerminalInterface:
         )
         self.console.print(panel)

+    def code_log(self, code: str):
+        """
+        Print a code snippet in a formatted panel.
+        """
+        panel = Panel(
+            Syntax(code, "python", theme="monokai", line_numbers=True),
+            title=f"[bold green]Code Snippet[/bold green]",
+            border_style="green"
+        )
+        self.console.print(panel)
+
     def process_markdown_chunk(self, chunk):
         """
         Process a chunk of markdown text, handling tool calls and regular markdown.
@@ -61,15 +69,19 @@ class TerminalInterface:
             line_stripped = line.strip()

             # Handle tool call opening delimiter - be more flexible with whitespace
-            if "<<TOOL_CALL>>" in line_stripped:
+            if "```json" in line_stripped:
                 self.inside_tool_call = True
                 self.tool_call_buffer = ""
-                self.console.print("[bold cyan]Tool Call:[/bold cyan]")
+                # self.console.print("[bold cyan]Tool Call:[/bold cyan]")
+                self.spinner = yaspin(text="Tool Call...", color="yellow")
+                self.spinner.start()

             # Handle tool call closing delimiter - be more flexible with whitespace
-            elif "<<END_TOOL_CALL>>" in line_stripped:
-
-
+            elif "```" in line_stripped and self.inside_tool_call:
+                if hasattr(self, 'spinner'):
+                    self.spinner.stop()
+                    delattr(self, 'spinner')
+                self._display_tool_call_content()
                 self.console.print("[bold cyan]--------------------------------[/bold cyan]")
                 self.inside_tool_call = False
                 self.tool_call_buffer = ""
@@ -82,14 +94,58 @@ class TerminalInterface:
             else:
                 self.console.print(Markdown(line))

+    def _display_tool_call_content(self):
+        """
+        Parse and display tool call JSON content in a simple key-value format.
+        """
+        try:
+            # Try to parse the JSON content
+            json_content = json.loads(self.tool_call_buffer.strip())
+
+            # Check if tool_name is execute_python_code or execute_shell_command
+            if 'tool_name' in json_content and json_content['tool_name'] in ['execute_python_code', 'execute_shell_command']:
+                return
+
+            # Build content for the panel
+            panel_content = ""
+            for key, value in json_content.items():
+                if isinstance(value, dict):
+                    panel_content += f"{key}:\n"
+                    for sub_key, sub_value in value.items():
+                        panel_content += f"  {sub_key}: {sub_value}\n"
+                else:
+                    panel_content += f"{key}: {value}\n"
+
+            # Create and display panel
+            panel = Panel(
+                panel_content.strip(),
+                title="[yellow]Tool Call[/yellow]",
+                border_style="blue"
+            )
+            self.console.print(panel)
+        except json.JSONDecodeError:
+            # If JSON parsing fails, display the raw content in a panel
+            panel = Panel(
+                self.tool_call_buffer.strip(),
+                title="[bold red]Raw Tool Call Content[/bold red]",
+                border_style="red"
+            )
+            self.console.print(panel)
+
     def flush_markdown(self):
         """
         Flush any remaining markdown content in the buffer.
         """
         if hasattr(self, 'inside_tool_call') and self.inside_tool_call:
             # Handle case where tool call is not properly terminated
-
-
+
+            if hasattr(self, 'spinner'):
+                self.spinner.stop()
+                delattr(self, 'spinner')
+
+            if self.tool_call_buffer.strip():
+                self._display_tool_call_content()
+                self.console.print("[bold cyan]--------------------------------[/bold cyan]")
             self.inside_tool_call = False
         elif self.buffer:
             if "TASK_DONE" in self.buffer:
pikoai-0.1.23/Src/Env/base_executor.py
DELETED
@@ -1,24 +0,0 @@
-# mostly won't need this. there will be only one base env
-
-from abc import ABC, abstractmethod
-
-class BaseExecutor(ABC):
-    pass
-    # def __init__(self):
-    #     pass
-
-    # @abstractmethod
-    # def execute(self, code: str):
-    #     """Executes the given code and returns the output or error."""
-    #     pass
-
-    # def validate_code(self, code: str) -> bool:
-    #     """Basic validation to ensure code is safe to execute. Override for custom rules."""
-    #     # Implement basic checks (e.g., preventing dangerous commands)
-    #     if "import os" in code or "import sys" in code:
-    #         return False  # Disallow potentially unsafe imports for security
-    #     return True
-
-    # def handle_error(self, error: Exception) -> str:
-    #     """Handles errors during execution and returns a standardized error message."""
-    #     return f"Execution failed: {str(error)}"
pikoai-0.1.23/Src/Env/env.py
DELETED

pikoai-0.1.23/Src/Env/js_executor.py
DELETED
@@ -1,63 +0,0 @@
-import subprocess
-import sys
-
-class JavaScriptExecutor():
-    def __init__(self):
-        super().__init__()
-        self.node_installed = self.check_node_installed()
-
-    def check_node_installed(self) -> bool:
-        """Checks if Node.js is installed on the system."""
-        try:
-            subprocess.run(["node", "-v"], capture_output=True, check=True)
-            return True
-        except subprocess.CalledProcessError:
-            return False
-        except FileNotFoundError:
-            return False
-
-    def install_node(self) -> bool:
-        """Attempts to install Node.js based on the operating system."""
-        try:
-            if sys.platform.startswith("linux"):
-                # Try to install Node.js using apt-get (for Debian/Ubuntu-based systems)
-                subprocess.run(["sudo", "apt-get", "update"], check=True)
-                subprocess.run(["sudo", "apt-get", "install", "-y", "nodejs"], check=True)
-            elif sys.platform == "darwin":
-                # Try to install Node.js using Homebrew on macOS
-                subprocess.run(["brew", "install", "node"], check=True)
-            elif sys.platform == "win32":
-                # Check if Chocolatey is installed, and install Node.js
-                subprocess.run(["choco", "install", "nodejs", "-y"], check=True)
-            else:
-                return False  # Unsupported OS for automatic installation
-            return True
-        except subprocess.CalledProcessError:
-            return False  # Installation failed
-
-    def execute(self, code: str) -> str:
-        """Executes JavaScript code using Node.js and returns the result or an error message."""
-        # Check if Node.js is installed, attempt installation if not
-        if not self.node_installed:
-            if not self.install_node():
-                return "Node.js is not installed, and automatic installation failed. Please install Node.js manually."
-
-            # Recheck after attempted installation
-            self.node_installed = self.check_node_installed()
-            if not self.node_installed:
-                return "Node.js is required but not installed. Please install Node.js manually."
-
-        # Proceed with code execution if Node.js is available
-        # if not self.validate_code(code):
-        #     return "Code validation failed: Unsafe code detected."
-
-        try:
-            result = subprocess.run(
-                ["node", "-e", code],
-                capture_output=True,
-                text=True,
-                check=True
-            )
-            return result.stdout if result.stdout else "Code executed successfully."
-        except subprocess.CalledProcessError as e:
-            print(e)
pikoai-0.1.23/Src/Utils/executor_utils.py
DELETED
@@ -1,15 +0,0 @@
-import json
-from typing import Optional
-
-def parse_tool_call(response: str) -> Optional[dict]:
-    """
-    Parses a tool call from the response.
-    """
-    if "<<TOOL_CALL>>" in response and "<<END_TOOL_CALL>>" in response:
-        tool_call_str = response.split("<<TOOL_CALL>>")[1].split("<<END_TOOL_CALL>>")[0].strip()
-        try:
-            tool_call = json.loads(tool_call_str)
-            return tool_call
-        except json.JSONDecodeError:
-            return None
-    return None
pikoai-0.1.23/test/test.py
DELETED
@@ -1,30 +0,0 @@
-import sys
-import os
-
-# Add the Src directory to the Python path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../Src')))
-
-from Env.python_executor import PythonExecutor
-
-def read_test_script(filename):
-    """Read the content of a test script file"""
-    with open(filename, 'r') as file:
-        return file.read()
-
-def main():
-    # Initialize the Python executor
-    executor = PythonExecutor()
-
-    # Get the full path to testscript1.txt
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-    test_script_path = os.path.join(script_dir, 'testscript2.txt')
-
-    # Read the test script
-    code = read_test_script(test_script_path)
-
-    # Execute the code
-    result = executor.execute(code)
-    print(result)
-
-if __name__ == "__main__":
-    main()
pikoai-0.1.23/test/test_file_task.py
DELETED
@@ -1,210 +0,0 @@
-import unittest
-import os
-import sys
-import shutil
-
-# Add Src directory to sys.path to allow direct import of file_reader
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../Src')))
-from Tools.file_task import file_reader
-
-class TestFileReader(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.assets_dir = os.path.join(os.path.dirname(__file__), "assets")
-        # Ensure assets_dir exists from the previous step, or re-create if needed by tests
-        if not os.path.exists(cls.assets_dir):
-            os.makedirs(cls.assets_dir)
-        # Re-create files if they are missing for some reason (e.g. if run standalone without prior step)
-        with open(os.path.join(cls.assets_dir, "test_sample.txt"), "w") as f:
-            f.write("This is a test TXT file.")
-        with open(os.path.join(cls.assets_dir, "test_sample.unknown_ext"), "w") as f:
-            f.write("This is an unknown extension test.")
-        with open(os.path.join(cls.assets_dir, "corrupted.pdf"), "w") as f:
-            f.write("This is not a PDF.")
-        with open(os.path.join(cls.assets_dir, "corrupted.docx"), "w") as f:
-            f.write("This is not a DOCX.")
-        with open(os.path.join(cls.assets_dir, "fake.pdf"), "w") as f:
-            f.write("This is a text file named fake.pdf.")
-        with open(os.path.join(cls.assets_dir, "fake.docx"), "w") as f:
-            f.write("This is a text file named fake.docx.")
-        open(os.path.join(cls.assets_dir, "empty.pdf"), "w").close()
-        open(os.path.join(cls.assets_dir, "empty.docx"), "w").close()
-        open(os.path.join(cls.assets_dir, "encrypted.pdf"), "w").close()  # Simulate with empty file
-
-        # For home directory restriction testing
-        cls.test_home_dir = os.path.join(cls.assets_dir, "home_dir_test")
-        cls.sensitive_config_file_path = os.path.join(cls.test_home_dir, ".gitconfig")  # Test with a file
-        cls.sensitive_ssh_dir_path = os.path.join(cls.test_home_dir, ".ssh")  # Test with a dir
-
-        os.makedirs(cls.test_home_dir, exist_ok=True)
-        os.makedirs(cls.sensitive_ssh_dir_path, exist_ok=True)
-
-        with open(cls.sensitive_config_file_path, "w") as f:
-            f.write("sensitive test content")
-
-        with open(os.path.join(cls.sensitive_ssh_dir_path, "config"), "w") as f:
-            f.write("ssh test content")
-
-    @classmethod
-    def tearDownClass(cls):
-        # Clean up the assets directory after all tests are done
-        # shutil.rmtree(cls.assets_dir) # Commented out to inspect files if needed
-        pass
-
-    def test_read_txt_successfully(self):
-        file_path = os.path.join(self.assets_dir, "test_sample.txt")
-        result = file_reader(file_path=file_path)
-        self.assertTrue(result["success"])
-        self.assertEqual(result["output"].strip(), "This is a test TXT file.")
-
-    def test_read_unknown_extension_as_text(self):
-        file_path = os.path.join(self.assets_dir, "test_sample.unknown_ext")
-        result = file_reader(file_path=file_path)
-        self.assertTrue(result["success"])
-        self.assertEqual(result["output"].strip(), "This is an unknown extension test.")
-
-    # Security - Forbidden Paths
-    def test_block_unix_forbidden_paths(self):
-        # Using a path that is guaranteed to be resolved by abspath and start with /etc/
-        # The security check for forbidden paths happens before os.path.isfile.
-        if os.name != "nt":  # Only run this on non-Windows where /etc/ is common
-            # Path can be non-existent, check is on the string path
-            result = file_reader(file_path="/etc/some_non_existent_file_for_test")
-            self.assertFalse(result["success"])
-            self.assertIn("Access to system or restricted directory", result["output"])
-            self.assertIn("is not allowed", result["output"])
-        else:
-            self.skipTest("UNIX forbidden path test skipped on Windows.")
-
-    @unittest.skipIf(os.name != "nt", "Windows forbidden path test only applicable on Windows or needs refined path handling for x-platform testing.")
-    def test_block_windows_forbidden_paths(self):
-        # This test relies on the current OS being Windows or the file_reader's
-        # forbidden path logic correctly normalizing and matching Windows-style paths
-        # even when os.path.abspath produces a POSIX-style absolute path.
-        # As identified, the current file_reader might not do this robustly across OSes.
-        path_to_test = "C:\\Windows\\System.ini"
-        result = file_reader(file_path=path_to_test)
-        self.assertFalse(result["success"])
-        self.assertIn("Access to system or restricted directory", result["output"])
-        self.assertIn("is not allowed", result["output"])
-
-    @unittest.skipIf(os.name == "nt", "Test not applicable on Windows for POSIX sensitive paths")
-    def test_block_sensitive_home_file(self):
-        # Test reading a sensitive file like .gitconfig from the mocked home
-        original_expanduser = os.path.expanduser
-        try:
-            os.path.expanduser = lambda path: self.test_home_dir if path == "~" else original_expanduser(path)
-            # file_reader is expected to form path like "user_home/.gitconfig"
-            # We pass the absolute path to our test .gitconfig file
-            result = file_reader(file_path=self.sensitive_config_file_path)
-            self.assertFalse(result["success"])
-            self.assertIn("Access to sensitive user configuration file", result["output"])
-        finally:
-            os.path.expanduser = original_expanduser
-
-    @unittest.skipIf(os.name == "nt", "Test not applicable on Windows for POSIX sensitive paths")
-    def test_block_sensitive_home_directory_access(self):
-        # Test reading a file within a sensitive directory like .ssh/config from the mocked home
-        original_expanduser = os.path.expanduser
-        try:
-            os.path.expanduser = lambda path: self.test_home_dir if path == "~" else original_expanduser(path)
-            # Path to a file within the .ssh directory
-            path_inside_sensitive_dir = os.path.join(self.sensitive_ssh_dir_path, "config")
-            result = file_reader(file_path=path_inside_sensitive_dir)
-            self.assertFalse(result["success"])
-            self.assertIn("Access to files within sensitive user directory", result["output"])  # Updated assertion
-        finally:
-            os.path.expanduser = original_expanduser
-
-
-    def test_path_traversal_attempt(self):
-        # Construct a path that tries to traverse up and then into /etc/
-        # Current working directory for tests is likely /app/Test/ or /app/
-        # If /app, then ../../../../../etc/hosts becomes /etc/hosts
-        # If /app/Test, then ../../../../../../etc/hosts becomes /etc/hosts
-        # os.path.abspath will resolve this.
-        malicious_path = os.path.join(self.assets_dir, "..", "..", "..", "..", "..", "..", "etc", "hosts")
-        # malicious_path = "../../../../../../../etc/hosts" # Simpler relative path
-        result = file_reader(file_path=malicious_path)
-        self.assertFalse(result["success"])
-        # The exact error message depends on whether /etc/ is in forbidden_dirs
-        # or if the path resolution itself is problematic for other reasons before that.
-        # Given current logic, it should be caught by forbidden_dirs.
-        self.assertIn("Access to system or restricted directory", result["output"])
-
-    # Error Handling Tests
-    def test_read_non_existent_file(self):
-        result = file_reader(file_path="non_existent_file.txt")
-        self.assertFalse(result["success"])
-        self.assertIn("does not exist", result["output"])
-
-    def test_read_corrupted_pdf(self):  # Actually a text file named .pdf
-        file_path = os.path.join(self.assets_dir, "corrupted.pdf")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        self.assertIn("Could not read PDF file", result["output"])  # From PyPDF2Errors.PdfReadError
-
-    def test_read_corrupted_docx(self):  # Actually a text file named .docx
-        file_path = os.path.join(self.assets_dir, "corrupted.docx")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        # Error: File '...' is not a valid DOCX file, is corrupted, or is not a compatible OOXML package.
-        self.assertIn("not a valid DOCX file", result["output"])  # From DocxOpcExceptions.PackageNotFoundError
-
-    def test_read_empty_pdf_as_corrupted(self):  # An empty file is not a valid PDF
-        file_path = os.path.join(self.assets_dir, "empty.pdf")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        self.assertIn("Could not read PDF file", result["output"])
-
-
-    def test_read_empty_docx_as_corrupted(self):  # An empty file is not a valid DOCX
-        file_path = os.path.join(self.assets_dir, "empty.docx")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        self.assertIn("not a valid DOCX file", result["output"])
-
-    def test_read_encrypted_pdf_simulated(self):
-        # Current file_reader checks `reader.is_encrypted`.
-        # An empty file (encrypted.pdf) will fail before this check, likely as a PdfReadError.
-        file_path = os.path.join(self.assets_dir, "encrypted.pdf")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        # Expecting it to be caught by PdfReadError or similar, not the specific "encrypted" message unless it's a specially crafted PDF.
-        self.assertTrue(
-            "Could not read PDF file" in result["output"] or "encrypted" in result["output"]
-        )
-
-
-    def test_read_file_with_wrong_extension_pdf(self):  # text file named fake.pdf
-        file_path = os.path.join(self.assets_dir, "fake.pdf")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        self.assertIn("Could not read PDF file", result["output"])
-
-    def test_read_file_with_wrong_extension_docx(self):  # text file named fake.docx
-        file_path = os.path.join(self.assets_dir, "fake.docx")
-        result = file_reader(file_path=file_path)
-        self.assertFalse(result["success"])
-        self.assertIn("not a valid DOCX file", result["output"])
-
-    # Skipped tests for actual PDF/DOCX content due to generation limitations
-    @unittest.skip("Skipping PDF content test: Cannot generate actual PDF with known content with available tools.")
-    def test_read_pdf_successfully(self):
-        pass
-
-    @unittest.skip("Skipping DOCX content test: Cannot generate actual DOCX with known content with available tools.")
-    def test_read_docx_successfully(self):
-        pass
-
-    @unittest.skip("Skipping MacOS specific forbidden path for now.")
-    def test_block_macos_forbidden_paths(self):
-        # path_to_test = "/System/Library/Kernels"
-        # result = file_reader(file_path=path_to_test)
-        # self.assertFalse(result["success"])
-        # self.assertIn("Access to system or restricted directory", result["output"])
-        pass
-
-if __name__ == '__main__':
-    unittest.main(argv=['first-arg-is-ignored'], exit=False)
pikoai-0.1.23/test/test_opencopilot_file_integration.py
DELETED
@@ -1,187 +0,0 @@
-import unittest
-from unittest.mock import patch, MagicMock, call
-import os
-
-# Assuming OpenCopilot.py is in Src directory and uses:
-# from Tools.file_task import file_reader
-# from prompt_toolkit.shortcuts import print_formatted_text
-# So we patch them in Src.OpenCopilot context
-from Src.OpenCopilot import OpenCopilot, FormattedText
-
-class TestExtractFilesAndProcessPrompt(unittest.TestCase):
-
-    def setUp(self):
-        self.copilot = OpenCopilot()
-        # Default behavior for path functions to simplify tests
-        # These can be overridden in specific tests if needed
-        self.abspath_patcher = patch('os.path.abspath', side_effect=lambda x: '/abs/' + os.path.basename(x))
-        self.expanduser_patcher = patch('os.path.expanduser', side_effect=lambda x: x.replace('~', '/user/home'))
-
-        self.mock_abspath = self.abspath_patcher.start()
-        self.mock_expanduser = self.expanduser_patcher.start()
-
-        self.addCleanup(self.abspath_patcher.stop)
-        self.addCleanup(self.expanduser_patcher.stop)
-
-    @patch('Src.OpenCopilot.print_formatted_text')
-    @patch('Src.OpenCopilot.file_reader')
-    @patch('os.path.isfile')
-    @patch('os.path.exists')
-    def test_successful_text_file_load(self, mock_exists, mock_isfile, mock_file_reader, mock_print_formatted_text):
-        mock_exists.return_value = True
-        mock_isfile.return_value = True
-        mock_file_reader.return_value = {"success": True, "output": "Text content"}
-
-        user_input = "@test.txt explain this"
-        # Path seen by file_reader will be /abs/test.txt due to mock_abspath
-        expected_file_path_in_prompt = '/abs/test.txt'
-
-        expected_final_prompt = f"=== Content of file: {expected_file_path_in_prompt} ===\nText content\n=== End of file: {expected_file_path_in_prompt} ===\n\nexplain this"
-
-        result_prompt = self.copilot.extract_files_and_process_prompt(user_input)
-
-        mock_file_reader.assert_called_once_with(file_path=expected_file_path_in_prompt)
-
-        # Check that a success message was printed for the loaded file
-        # The first call to print_formatted_text is for loading, the second (if any) for total files.
-        args, _ = mock_print_formatted_text.call_args_list[0]
-        self.assertIsInstance(args[0], FormattedText)
-        self.assertEqual(args[0][0], ('class:success', f"✓ Loaded file: {expected_file_path_in_prompt}"))
-
-        self.assertEqual(expected_final_prompt.strip(), result_prompt.strip())
-
-    @patch('Src.OpenCopilot.print_formatted_text')
-    @patch('Src.OpenCopilot.file_reader')
-    @patch('os.path.isfile')
-    @patch('os.path.exists')
-    def test_file_read_error_from_file_reader(self, mock_exists, mock_isfile, mock_file_reader, mock_print_formatted_text):
-        mock_exists.return_value = True
-        mock_isfile.return_value = True
-        mock_file_reader.return_value = {"success": False, "output": "Specific read error"}
-
-        user_input = "@error.txt explain this"
-        expected_file_path_in_error = '/abs/error.txt'
-        # If file_reader fails, @file directive is NOT removed from prompt currently
-        expected_final_prompt = "@error.txt explain this"
-
-        result_prompt = self.copilot.extract_files_and_process_prompt(user_input)
-
-        mock_file_reader.assert_called_once_with(file_path=expected_file_path_in_error)
-
-        args, _ = mock_print_formatted_text.call_args_list[0]  # Error message for the specific file
-        self.assertIsInstance(args[0], FormattedText)
-        self.assertEqual(args[0][0], ('class:error', f"✗ Error reading file {expected_file_path_in_error}: Specific read error"))
-
-        self.assertEqual(expected_final_prompt.strip(), result_prompt.strip())
-
-    @patch('Src.OpenCopilot.print_formatted_text')
-    @patch('Src.OpenCopilot.file_reader')
-    @patch('os.path.isfile')  # Still need to patch isfile even if not called
-    @patch('os.path.exists')
-    def test_non_existent_file(self, mock_exists, mock_isfile, mock_file_reader, mock_print_formatted_text):
-        mock_exists.return_value = False
-        # mock_isfile should not be called if mock_exists is False
-
-        user_input = "@nonexistent.txt explain this"
-        # Path that os.path.exists will check
-        path_checked = '/abs/nonexistent.txt'
-        expected_final_prompt = "@nonexistent.txt explain this"  # Unchanged as per current logic
-
-        result_prompt = self.copilot.extract_files_and_process_prompt(user_input)
-
-        mock_exists.assert_called_once_with(path_checked)
-        mock_isfile.assert_not_called()  # Crucial check
-        mock_file_reader.assert_not_called()
-
-        args, _ = mock_print_formatted_text.call_args_list[0]  # Warning message
-        self.assertIsInstance(args[0], FormattedText)
-        self.assertEqual(args[0][0], ('class:warning', f"⚠ Path not found: {path_checked}"))
-
-        self.assertEqual(expected_final_prompt.strip(), result_prompt.strip())
-
-    @patch('Src.OpenCopilot.print_formatted_text')
-    @patch('Src.OpenCopilot.file_reader')
-    @patch('os.path.isfile')
-    @patch('os.path.exists')
-    def test_directory_path(self, mock_exists, mock_isfile, mock_file_reader, mock_print_formatted_text):
-        mock_exists.return_value = True
-        mock_isfile.return_value = False  # This indicates it's a directory (or not a file)
-
-        user_input = "@testdir explain this"
-        # Path that will be processed
-        processed_path = '/abs/testdir'
-        # In the prompt, @testdir is replaced by its absolute path
-        expected_final_prompt = f"{processed_path} explain this"
-
-        result_prompt = self.copilot.extract_files_and_process_prompt(user_input)
-
-        mock_exists.assert_called_once_with(processed_path)
-        mock_isfile.assert_called_once_with(processed_path)
-        mock_file_reader.assert_not_called()
-
-        args, _ = mock_print_formatted_text.call_args_list[0]  # Directory message
-        self.assertIsInstance(args[0], FormattedText)
-        self.assertEqual(args[0][0], ('class:success', f"✓ Added directory path: {processed_path}"))
-
-        self.assertEqual(expected_final_prompt.strip(), result_prompt.strip())
-
-    @patch('Src.OpenCopilot.print_formatted_text')
-    @patch('Src.OpenCopilot.file_reader')
-    @patch('os.path.isfile')
-    @patch('os.path.exists')
-    def test_multiple_files_success_and_error(self, mock_exists, mock_isfile, mock_file_reader, mock_print_formatted_text):
-        # All paths will be prefixed with /abs/ by the mock_abspath
-        path_file1 = '/abs/file1.txt'
-        path_error_file = '/abs/error.txt'
-        path_file2 = '/abs/file2.txt'
-
-        mock_exists.return_value = True  # For all files
-        mock_isfile.return_value = True  # For all files
-
-        def file_reader_side_effect(file_path):
-            if file_path == path_file1:
-                return {"success": True, "output": "content1"}
-            elif file_path == path_error_file:
-                return {"success": False, "output": "read error"}
-            elif file_path == path_file2:
-                return {"success": True, "output": "content2"}
-            return {}  # Should not happen
-        mock_file_reader.side_effect = file_reader_side_effect
-
-        user_input = "@file1.txt @error.txt @file2.txt go"
-
-        # Each block in file_contents already ends with a newline.
-        # So "\n".join will put an additional newline between them.
-        # Then another "\n" is added before the processed_prompt.
-        block1_content = f"=== Content of file: {path_file1} ===\ncontent1\n=== End of file: {path_file1} ===\n"
-        block2_content = f"=== Content of file: {path_file2} ===\ncontent2\n=== End of file: {path_file2} ===\n"
-        remaining_prompt = "@error.txt go"  # After successful removals and stripping
-
-        expected_final_prompt = block1_content + "\n" + block2_content + "\n" + remaining_prompt
-
-        result_prompt = self.copilot.extract_files_and_process_prompt(user_input)
-
-        # Check file_reader calls
-        mock_file_reader.assert_any_call(file_path=path_file1)
-        mock_file_reader.assert_any_call(file_path=path_error_file)
-        mock_file_reader.assert_any_call(file_path=path_file2)
-        self.assertEqual(mock_file_reader.call_count, 3)
-
-        # Check print_formatted_text calls
-        # Call 1: Success for file1.txt
-        args1, _ = mock_print_formatted_text.call_args_list[0]
-        self.assertEqual(args1[0][0], ('class:success', f"✓ Loaded file: {path_file1}"))
-        # Call 2: Error for error.txt
-        args2, _ = mock_print_formatted_text.call_args_list[1]
-        self.assertEqual(args2[0][0], ('class:error', f"✗ Error reading file {path_error_file}: read error"))
-        # Call 3: Success for file2.txt
-        args3, _ = mock_print_formatted_text.call_args_list[2]
-        self.assertEqual(args3[0][0], ('class:success', f"✓ Loaded file: {path_file2}"))
-        # Call 4: Summary message "Loaded 2 file(s) into context"
-        args4, _ = mock_print_formatted_text.call_args_list[3]
-        self.assertEqual(args4[0][0], ('class:info', f"📁 Loaded 2 file(s) into context"))
-
-        self.assertEqual(expected_final_prompt.strip(), result_prompt.strip())
-
-if __name__ == '__main__':
-    unittest.main()
pikoai-0.1.23/test/testjs.py
DELETED
@@ -1,30 +0,0 @@
-import sys
-import os
-
-# Add the Src directory to the Python path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../Src')))
-
-from Env.js_executor import JavaScriptExecutor  # Assuming js_executor handles Node.js execution
-
-def read_test_script(filename):
-    """Read the content of a test script file"""
-    with open(filename, 'r') as file:
-        return file.read()
-
-def main():
-    # Initialize the JavaScript executor
-    executor = JavaScriptExecutor()
-
-    # Get the full path to testscript2.js
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-    test_script_path = os.path.join(script_dir, 'testscriptjs.txt')
-
-    # Read the test script
-    code = read_test_script(test_script_path)
-
-    # Execute the JavaScript code
-    result = executor.execute(code)
-    print(result)
-
-if __name__ == "__main__":
-    main()
pikoai-0.1.23/test/testscript.py
DELETED
@@ -1,29 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-
-# Generate a range of x values from 0 to 10
-x = np.linspace(0, 10, 1000)
-
-# Calculate the corresponding y values using the formula for a circle
-y = np.sqrt(1 - x**2)
-
-# Create a figure and axis
-fig, ax = plt.subplots()
-
-# Plot the upper and lower halves of the circle
-ax.plot(x, y, label='Upper half')
-ax.plot(x, -y, label='Lower half')
-
-# Set the aspect ratio of the plot to be equal so the circle appears as a circle
-ax.set_aspect('equal')
-
-# Set the title and labels
-ax.set_title('Graphical Representation of Pi')
-ax.set_xlabel('x')
-ax.set_ylabel('y')
-
-# Add a legend
-ax.legend()
-
-# Show the plot
-plt.show()
/pikoai-0.1.23/Src/PikoAi.egg-info/dependency_links.txt → /pikoai-0.1.25/Src/Env/__init__.py
RENAMED
File without changes