PikoAi 0.1.12__tar.gz → 0.1.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {pikoai-0.1.12/Src/PikoAi.egg-info → pikoai-0.1.14}/PKG-INFO +1 -1
  2. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Agents/Executor/executor.py +9 -18
  3. pikoai-0.1.14/Src/Agents/Executor/prompts.py +58 -0
  4. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/python_executor.py +9 -14
  5. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/shell.py +9 -14
  6. {pikoai-0.1.12 → pikoai-0.1.14}/Src/OpenCopilot.py +3 -3
  7. {pikoai-0.1.12 → pikoai-0.1.14/Src/PikoAi.egg-info}/PKG-INFO +1 -1
  8. pikoai-0.1.14/Src/Tools/web_search.py +67 -0
  9. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Utils/ter_interface.py +6 -56
  10. {pikoai-0.1.12 → pikoai-0.1.14}/setup.py +1 -1
  11. pikoai-0.1.12/Src/Agents/Executor/prompts.py +0 -83
  12. pikoai-0.1.12/Src/Tools/web_search.py +0 -30
  13. {pikoai-0.1.12 → pikoai-0.1.14}/LICENSE +0 -0
  14. {pikoai-0.1.12 → pikoai-0.1.14}/README.md +0 -0
  15. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Agents/Executor/__init__.py +0 -0
  16. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Agents/__init__.py +0 -0
  17. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/__init__.py +0 -0
  18. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/base_env.py +0 -0
  19. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/base_executor.py +0 -0
  20. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/env.py +0 -0
  21. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/js_executor.py +0 -0
  22. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/tests/__init__.py +0 -0
  23. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/tests/test_python_executor.py +0 -0
  24. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/tests/test_shell_executor.py +0 -0
  25. {pikoai-0.1.12 → pikoai-0.1.14}/Src/PikoAi.egg-info/SOURCES.txt +0 -0
  26. {pikoai-0.1.12 → pikoai-0.1.14}/Src/PikoAi.egg-info/dependency_links.txt +0 -0
  27. {pikoai-0.1.12 → pikoai-0.1.14}/Src/PikoAi.egg-info/entry_points.txt +0 -0
  28. {pikoai-0.1.12 → pikoai-0.1.14}/Src/PikoAi.egg-info/requires.txt +0 -0
  29. {pikoai-0.1.12 → pikoai-0.1.14}/Src/PikoAi.egg-info/top_level.txt +0 -0
  30. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/__init__.py +0 -0
  31. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/file_task.py +0 -0
  32. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/system_details.py +0 -0
  33. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/tool_dir.json +0 -0
  34. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/tool_manager.py +0 -0
  35. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/userinp.py +0 -0
  36. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Tools/web_loader.py +0 -0
  37. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Utils/__init__.py +0 -0
  38. {pikoai-0.1.12 → pikoai-0.1.14}/Src/Utils/executor_utils.py +0 -0
  39. {pikoai-0.1.12 → pikoai-0.1.14}/Src/cli.py +0 -0
  40. {pikoai-0.1.12 → pikoai-0.1.14}/Src/llm_interface/__init__.py +0 -0
  41. {pikoai-0.1.12 → pikoai-0.1.14}/Src/llm_interface/llm.py +0 -0
  42. {pikoai-0.1.12 → pikoai-0.1.14}/setup.cfg +0 -0
  43. {pikoai-0.1.12 → pikoai-0.1.14}/test/test.py +0 -0
  44. {pikoai-0.1.12 → pikoai-0.1.14}/test/test_file_task.py +0 -0
  45. {pikoai-0.1.12 → pikoai-0.1.14}/test/test_opencopilot_file_integration.py +0 -0
  46. {pikoai-0.1.12 → pikoai-0.1.14}/test/testjs.py +0 -0
  47. {pikoai-0.1.12 → pikoai-0.1.14}/test/testscript.py +0 -0

{pikoai-0.1.12/Src/PikoAi.egg-info → pikoai-0.1.14}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: PikoAi
- Version: 0.1.12
+ Version: 0.1.14
  Summary: An AI-powered task automation tool
  Home-page: https://github.com/nihaaaar22/OS-Assistant
  Author: Nihar S

{pikoai-0.1.12 → pikoai-0.1.14}/Src/Agents/Executor/executor.py
@@ -7,7 +7,7 @@ import time
  sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
  from Utils.ter_interface import TerminalInterface
  from Utils.executor_utils import parse_tool_call
- from Agents.Executor.prompts import get_system_prompt, get_task_prompt # Import prompts
+ from Agents.Executor.prompts import get_executor_prompt # Import prompts

  from typing import Optional
  from mistralai.models.sdkerror import SDKError # This might be an issue if LiteLLM doesn't use SDKError
@@ -22,7 +22,7 @@ from llm_interface.llm import LiteLLMInterface # Import LiteLLMInterface
  from Tools import tool_manager

  class RateLimiter:
- def __init__(self, wait_time: float = 5.0, max_retries: int = 3):
+ def __init__(self, wait_time: float = 1.0, max_retries: int = 3):
  self.wait_time = wait_time
  self.max_retries = max_retries
  self.last_call_time = None
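
Only the constructor appears in the hunk above. For context, a limiter with this shape (wait_time between calls, max_retries on failure) is typically used to space out LLM requests roughly as in the sketch below; the wait_if_needed method and its body are assumptions for illustration, not code from the package.

```python
# Illustrative sketch of how a wait_time/max_retries limiter is commonly applied;
# the wait_if_needed() method below is an assumption, not PikoAi's actual code.
import time


class RateLimiterSketch:
    def __init__(self, wait_time: float = 1.0, max_retries: int = 3):
        self.wait_time = wait_time        # minimum gap between calls, in seconds
        self.max_retries = max_retries    # how many times a failed call may be retried
        self.last_call_time = None

    def wait_if_needed(self) -> None:
        """Sleep just long enough that consecutive calls are wait_time apart."""
        if self.last_call_time is not None:
            elapsed = time.time() - self.last_call_time
            if elapsed < self.wait_time:
                time.sleep(self.wait_time - elapsed)
        self.last_call_time = time.time()


# Usage: call wait_if_needed() right before each LLM request.
# limiter = RateLimiterSketch(wait_time=1.0)
# limiter.wait_if_needed()
```
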
@@ -44,7 +44,7 @@ class executor:
  # self.shell_executor = ShellExecutor() # Initialize ShellExecutor
  self.message = [
  {"role": "system", "content": self.system_prompt},
- {"role": "user", "content": self.task_prompt}
+ {"role": "user", "content": self.user_prompt}
  ]
  self.terminal = TerminalInterface()
  self.initialize_llm()
@@ -71,8 +71,7 @@ class executor:
  config = json.load(config_file)
  working_dir = config.get("working_directory", "")

- self.system_prompt = get_system_prompt(self.user_prompt, working_dir, tools_details)
- self.task_prompt = get_task_prompt()
+ self.system_prompt = get_executor_prompt(working_dir, tools_details)

  def run_inference(self):
  retries = 0
@@ -84,7 +83,8 @@ class executor:

  # Streaming is handled within LiteLLMInterface.chat()
  # and TerminalInterface.process_markdown_chunk()
- self.message.append({"role": "assistant", "content": response})
+ if response.strip():
+ self.message.append({"role": "assistant", "content": response})
  return response

  except Exception as e: # Catching generic Exception as LiteLLM maps to OpenAI exceptions
@@ -112,11 +112,6 @@ class executor:
  self.run_task()

  def run_task(self):
- # Remove tools_details parameter since it's in the prompt
- task_message = self.task_prompt
-
- self.message.append({"role": "user", "content": task_message})
-
  iteration = 0
  task_done = False

@@ -128,13 +123,13 @@ class executor:
  if tool_call:
  tool_name = tool_call['tool_name']
  tool_input = tool_call['input']
- print(f"\nIdentified tool call: {tool_name} with input {tool_input}")
+

  # Call the tool and append the result (no confirmation or special logic)
  try:
  tool_output_result = tool_manager.call_tool(tool_name, tool_input)
- self.terminal.tool_output_log(tool_output_result, tool_name)
- print(tool_output_result)
+ if tool_name not in ['execute_python_code', 'execute_shell_command']:
+ self.terminal.tool_output_log(tool_output_result, tool_name)
  self.message.append({"role": "user", "content": tool_output_result})
  except ValueError as e:
  error_msg = str(e)
@@ -142,10 +137,6 @@ class executor:
  self.message.append({"role": "user", "content": f"Tool Error: {error_msg}"})

  else: # Not a tool call, could be a direct response or requires clarification
- # This part handles responses that are not formatted as tool calls.
- # It might be a final answer, a question, or just conversational text.
- # The existing logic for TASK_DONE or asking for next step handles this.
- # No specific code/shell parsing here anymore as they are tools.
  pass # Explicitly pass if no tool call and no old code/shell logic.

  # Check if task is done

pikoai-0.1.14/Src/Agents/Executor/prompts.py (new file)
@@ -0,0 +1,58 @@
+ # This file contains the prompts used by the Executor agent.
+
+ import platform
+
+ def get_executor_prompt(working_dir: str, tools_details: str) -> str:
+ """
+ Returns the main executor prompt.
+ """
+ os_name = platform.system()
+ # tools_details is passed to the LLM but not directly included in this prompt string.
+ return f"""You are a terminal-based operating system assistant designed to help users achieve their goals.
+
+ This is important information about the environment:
+ Working Directory: {working_dir}
+ Operating System: {os_name}
+
+ You have access to the following tools:
+ {tools_details}
+
+ Your primary objective is to accomplish the user's goal by performing step-by-step actions. These actions can include:
+ 1. Calling a tool
+ 2. Providing a direct response
+
+ You must break down the user's goal into smaller steps and perform one action at a time. After each action, carefully evaluate the output to determine the next step.
+
+ ### Action Guidelines:
+ - **Tool Call**: Use when a specific tool can help with the current step. Format:
+ <<TOOL_CALL>>
+ {{
+ "tool_name": "name_of_tool",
+ "input": {{
+ "key": "value" //Replace 'key' with the actual parameter name for the tool
+ }}
+ }}
+ <<END_TOOL_CALL>>
+ - **Code Execution**: Write Python code when no tool is suitable or when custom logic is needed.
+ the code written will be executed immediately and not saved.
+ - **Direct Response**: Provide a direct answer if the task doesn't require tools or code.
+
+
+ These are the things that you learned from the mistakes you made earlier :
+ - When given a data file and asked to understand data/do data analysis/ data visualisation or similar stuff
+ do not use file reader and read the whole data. Only use python code to do the analysis
+ - This is a standard Python environment, not a python notebook or a repl. previous execution
+ context is not preserved between executions.
+ - Don't execute dangerous commands like rm -rf * or access sensitive files
+ - If you are stuck, have tried to fix an issue (e.g., a linter error) multiple times (e.g., 3 times) without success, or need clarification, ask the USER for input. Explain the situation clearly.
+ - Upon creating anything (like a new project, website, data analysis png) always show the output.You can do this by executing shell commands.
+ - the python/shell code execution in tool call will be executed immediately and output will be shown. it wont be saved.
+
+
+ ** Important **
+ - Perform only one action per step (either a single tool call or a single code execution).
+ - Always evaluate the output of each action before deciding the next step.
+ - Continue performing actions until the user's goal is fully achieved. Only then, include 'TASK_DONE' in your response if that is the required signal for completion.
+ - Do not end the task immediately after a tool call or code execution without evaluating its output.
+
+ """

{pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/python_executor.py
@@ -58,7 +58,7 @@ try:
  except Exception as e:
  print(f"Error: {{str(e)}}")
  """
-
+

  try:
  # Execute the code in a subprocess
@@ -75,19 +75,14 @@ except Exception as e:
  stderr_data = []
  start_time = time.time()

- # First read all stdout
- for line in self.process.stdout:
- # Check for timeout
- if time.time() - start_time > 30:
- self.process.kill()
- return {
- 'success': False,
- 'output': 'Execution timed out after 30 seconds',
- 'error': 'Timeout error'
- }
-
- stdout_data.append(line)
- print(line, end='', flush=True) # Print in real-time
+ # Read stdout character by character
+ while True:
+ char = self.process.stdout.read(1)
+ if char == '' and self.process.poll() is not None:
+ break # Process ended and no more output
+ if char:
+ stdout_data.append(char)
+ print(char, end='', flush=True) # Print in real-time, no extra newline

  # Then read all stderr
  for line in self.process.stderr:
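
Note that the rewritten stdout loop above no longer performs the 30-second timeout check that the removed line-based loop enforced. The sketch below shows the same character-by-character streaming pattern as a self-contained function, with a timeout retained purely for illustration; it is not the package's PythonExecutor or ShellExecutor code.

```python
# Self-contained sketch of char-by-char subprocess streaming with a timeout;
# illustrative only, not the package's executor implementation.
import subprocess
import time


def stream_subprocess(cmd, timeout: float = 30.0) -> dict:
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    chunks = []
    start = time.time()
    while True:
        # The timeout is only checked between reads; read(1) itself can block.
        if time.time() - start > timeout:
            proc.kill()
            return {"success": False, "output": "".join(chunks), "error": "Timeout error"}
        ch = proc.stdout.read(1)
        if ch == "" and proc.poll() is not None:
            break  # process exited and stdout is drained
        if ch:
            chunks.append(ch)
            print(ch, end="", flush=True)  # echo output in real time
    stderr_text = proc.stderr.read()
    return {"success": proc.returncode == 0, "output": "".join(chunks), "error": stderr_text}


# Example:
# stream_subprocess(["python3", "-c", "print('hello')"])
```
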

{pikoai-0.1.12 → pikoai-0.1.14}/Src/Env/shell.py
@@ -114,21 +114,16 @@ class ShellExecutor(BaseEnv):
  stderr_data = []
  start_time = time.time()

- # First read all stdout
- for line in self.process.stdout:
- # Check for timeout
- if time.time() - start_time > 30:
- self.process.kill()
- return {
- 'success': False,
- 'output': 'Execution timed out after 30 seconds',
- 'error': 'Timeout error'
- }
-
- stdout_data.append(line)
- print(line, end='', flush=True) # Print in real-time
+ # First read all stdout character by character
+ while True:
+ char = self.process.stdout.read(1)
+ if char == '' and self.process.poll() is not None:
+ break # Process ended and no more output
+ if char:
+ stdout_data.append(char)
+ print(char, end='', flush=True) # Print in real-time, no extra newline

- # Then read all stderr
+ # Then read all stderr (can keep line-by-line or do char-by-char similarly)
  for line in self.process.stderr:
  # Check for timeout
  if time.time() - start_time > 30:

{pikoai-0.1.12 → pikoai-0.1.14}/Src/OpenCopilot.py
@@ -206,12 +206,12 @@ Examples:

  try:
  # Get initial prompt
- user_input = self.session.prompt(HTML("<b>Please enter your prompt: </b>"))
+ user_input = self.session.prompt(HTML("<b>Piko></b>"))

  # Handle special commands
  if user_input.lower() == 'help':
  self.display_help()
- user_input = self.session.prompt(HTML("<b>Please enter your prompt: </b>"))
+ user_input = self.session.prompt(HTML("<b>Piko></b>"))
  elif user_input.lower() == 'quit':
  print("Goodbye!")
  return
@@ -227,7 +227,7 @@ Examples:
  # Continue conversation loop
  while True:
  try:
- user_input = self.session.prompt(HTML("<b>\nPlease enter your prompt (or 'quit' to exit): </b>"))
+ user_input = self.session.prompt(HTML("<b>\nPiko></b>"))

  # Handle special commands
  if user_input.lower() == 'quit':

{pikoai-0.1.12 → pikoai-0.1.14/Src/PikoAi.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: PikoAi
- Version: 0.1.12
+ Version: 0.1.14
  Summary: An AI-powered task automation tool
  Home-page: https://github.com/nihaaaar22/OS-Assistant
  Author: Nihar S

pikoai-0.1.14/Src/Tools/web_search.py (new file)
@@ -0,0 +1,67 @@
+ import os
+ from duckduckgo_search import DDGS
+ from serpapi import SerpApiClient
+
+ def web_search(max_results: int = 10, **kwargs) -> str:
+ """
+ Performs a DuckDuckGo web search based on your query (think a Google search) then returns the top search results.
+ If DuckDuckGo search fails, it falls back to SerpAPI.
+
+ Args:
+ query (str): The search query to perform.
+ max_results (int, optional): Maximum number of results to return. Defaults to 10.
+ **kwargs: Additional keyword arguments to pass to DDGS.
+
+ Returns:
+ str: Formatted string containing search results.
+
+ Raises:
+ ImportError: If the duckduckgo_search or serpapi package is not installed.
+ Exception: If no results are found for the given query via both DuckDuckGo and SerpAPI, or if the SerpAPI key is not found.
+
+ Note:
+ For SerpAPI fallback, the SERPAPI_API_KEY environment variable must be set.
+ """
+ try:
+ ddgs_instance = DDGS()
+ except ImportError as e:
+ raise ImportError("You must install package `duckduckgo_search` to run this function: for instance run `pip install duckduckgo-search`.") from e
+
+ try:
+ query = kwargs['query']
+ results = ddgs_instance.text(query, max_results=max_results)
+ if len(results) == 0:
+ raise Exception("No results found via DuckDuckGo.")
+
+ postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results]
+
+ return "## Search Results (via DuckDuckGo)\n\n" + "\n\n".join(postprocessed_results)
+
+ except Exception as e:
+ # print(f"DuckDuckGo search failed: {e}. Falling back to SerpAPI.")
+ # If the exception was the specific DDGS ImportError, we re-raise it directly if it wasn't caught above.
+ # However, the structure above should prevent it from reaching here.
+ # The primary purpose of this block is to catch runtime errors from ddgs.text or the "No results" exception.
+
+ api_key = os.environ.get("SERPAPI_API_KEY")
+ if not api_key:
+ raise Exception("SerpAPI key not found. Please set the SERPAPI_API_KEY environment variable.")
+
+ try:
+ client = SerpApiClient({"api_key": api_key})
+ except ImportError as serp_e:
+ raise ImportError("You must install package `serpapi` to run this function: for instance run `pip install google-search-results`.") from serp_e
+
+ search_params = {
+ "engine": "google",
+ "q": query,
+ "num": max_results # SerpAPI uses 'num' for number of results
+ }
+ serp_results = client.search(search_params)
+
+ if "organic_results" in serp_results and serp_results["organic_results"]:
+ organic_results = serp_results["organic_results"]
+ postprocessed_results = [f"[{result['title']}]({result['link']})\n{result.get('snippet', '')}" for result in organic_results]
+ return "## Search Results (via SerpAPI)\n\n" + "\n\n".join(postprocessed_results)
+ else:
+ raise Exception(f"No results found via DuckDuckGo or SerpAPI! Original error: {e}")

{pikoai-0.1.12 → pikoai-0.1.14}/Src/Utils/ter_interface.py
@@ -51,72 +51,32 @@ class TerminalInterface:

  def process_markdown_chunk(self, chunk):
  """
- Process a chunk of markdown text, handling code blocks, shell commands, tool calls, and regular markdown.
+ Process a chunk of markdown text, handling tool calls and regular markdown.
  Args:
  chunk (str): A piece of markdown text to process
  """
- # Initialize tracking attributes if they don't exist yet
-
-
  self.buffer += chunk
  while "\n" in self.buffer:
  line, self.buffer = self.buffer.split("\n", 1)
  line_stripped = line.strip()
-
- # Handle code blocks
- if line_stripped.startswith("<<CODE>>"):
- if self.inside_code_block:
- # Closing code block
- self.console.print(Syntax(self.code_buffer, "python", theme="bw", line_numbers=False))
- self.inside_code_block = False
- self.code_buffer = ""
- else:
- # Opening code block
- self.inside_code_block = True
- self.code_lang = line_stripped[8:].strip() or "python" # default lang
-
- # Handle shell command blocks
- elif line_stripped.startswith("<<SHELL_COMMAND>>"):
- self.inside_shell_command = True
- self.shell_command_buffer = ""
- # Print a styled header for shell commands
- self.console.print("[bold yellow]Shell Command:[/bold yellow]")
-
- elif line_stripped.startswith("<<END_SHELL_COMMAND>>"):
- if self.inside_shell_command:
- # Closing shell command block
- self.console.print(Syntax(self.shell_command_buffer.strip(), "bash", theme="monokai", line_numbers=False))
- self.inside_shell_command = False
- self.shell_command_buffer = ""
-
+
  # Handle tool call opening delimiter - be more flexible with whitespace
- elif "<<TOOL_CALL>>" in line_stripped:
+ if "<<TOOL_CALL>>" in line_stripped:
  self.inside_tool_call = True
  self.tool_call_buffer = ""
- # Print a styled header for tool calls
  self.console.print("[bold cyan]Tool Call:[/bold cyan]")
-
+
  # Handle tool call closing delimiter - be more flexible with whitespace
  elif "<<END_TOOL_CALL>>" in line_stripped:
  self.console.print(Syntax('{"status": "end_tool_call"}', "json", theme="monokai", line_numbers=False))
  self.console.print("[bold cyan]--------------------------------[/bold cyan]")
  self.inside_tool_call = False
  self.tool_call_buffer = ""
-
- # Handle content inside code blocks
- elif self.inside_code_block:
- self.code_buffer += line + "\n"
-
- # Handle content inside shell command blocks
- elif self.inside_shell_command:
- self.shell_command_buffer += line + "\n"
-
+
  # Handle content inside tool calls
  elif self.inside_tool_call:
  self.tool_call_buffer += line + "\n"
- # Print the line with styling as it comes in

-
  # Regular markdown content
  else:
  self.console.print(Markdown(line))
@@ -125,14 +85,7 @@ class TerminalInterface:
  """
  Flush any remaining markdown content in the buffer.
  """
- if self.inside_code_block:
- self.console.print(Syntax(self.code_buffer, "python", theme="bw", line_numbers=False))
- self.inside_code_block = False
- elif self.inside_shell_command:
- self.console.print(Syntax(self.shell_command_buffer.strip(), "bash", theme="monokai", line_numbers=False))
-
- self.inside_shell_command = False
- elif hasattr(self, 'inside_tool_call') and self.inside_tool_call:
+ if hasattr(self, 'inside_tool_call') and self.inside_tool_call:
  # Handle case where tool call is not properly terminated
  self.console.print(Syntax(self.tool_call_buffer.strip(), "json", theme="monokai", line_numbers=False))
  self.console.print("[bold cyan]End Tool Call (forced)[/bold cyan]")
@@ -142,10 +95,7 @@ class TerminalInterface:
  self.console.print("━" * 80) # Print a solid line
  else:
  self.console.print(Markdown(self.buffer))
-
  self.buffer = ""
- self.code_buffer = ""
- self.shell_command_buffer = ""
  if hasattr(self, 'tool_call_buffer'):
  self.tool_call_buffer = ""


{pikoai-0.1.12 → pikoai-0.1.14}/setup.py
@@ -3,7 +3,7 @@ from pathlib import Path

  setup(
  name="PikoAi",
- version="0.1.12",
+ version="0.1.14",
  packages=find_packages(where="Src"),
  py_modules=["cli", "OpenCopilot"],
  package_dir={"": "Src"},

pikoai-0.1.12/Src/Agents/Executor/prompts.py (deleted)
@@ -1,83 +0,0 @@
- # This file contains the prompts used by the Executor agent.
-
- import platform
-
- def get_system_prompt(user_prompt: str, working_dir: str, tools_details: str) -> str:
- """
- Returns the system prompt for the Executor agent.
- """
- os_name = platform.system()
- return f"""You are a terminal-based operating system assistant designed to help users achieve their goals by executing tasks provided in text format. The current user goal is: {user_prompt}.
-
- Working Directory: {working_dir}
- Operating System: {os_name}
-
- You have access to the following tools:
- {tools_details}
-
- Your primary objective is to accomplish the user's goal by performing step-by-step actions. These actions can include:
- 1. Calling a tool
- 2. Providing a direct response
-
- You must break down the user's goal into smaller steps and perform one action at a time. After each action, carefully evaluate the output to determine the next step.
-
- ### Action Guidelines:
- - **Tool Call**: Use when a specific tool can help with the current step. Format:
- <<TOOL_CALL>>
- {{
- "tool_name": "name_of_tool",
- "input": {{
- "key": "value" //Replace 'key' with the actual parameter name for the tool
- }}
- }}
- <<END_TOOL_CALL>>
- This includes executing Python code and shell commands:
- `execute_python_code`: {{"code": "your_python_code_here"}}
- `execute_shell_command`: {{"command": "your_shell_command_here"}}
- - **Direct Response**: Provide a direct answer if the task doesn't require tools or code.
-
- ### Important Notes:
- - Perform only one action per step.
- - Always evaluate the output of each action before deciding the next step.
- - Continue performing actions until the user's goal is fully achieved. Only then, include 'TASK_DONE' in your response.
- - Do not end the task immediately after a tool call or code execution without evaluating its output.
-
- Now, carefully plan your approach and start with the first step to achieve the user's goal.
- """
-
- def get_task_prompt() -> str:
- """
- Returns the task prompt for the Executor agent.
- """
- return """
- Following are the things that you must read carefully and remember:
-
- - For tool calls, use:
- <<TOOL_CALL>>
- {
- "tool_name": "name_of_tool",
- "input": {
- "key": "value" // Use the correct parameter name for each tool
- }
- }
- <<END_TOOL_CALL>>
- Remember that executing Python code and shell commands is now done through specific tool calls (`execute_python_code` and `execute_shell_command`).
-
- After each action, always evaluate the output to decide your next step. Only include 'TASK_DONE'
- When the entire task is completed. Do not end the task immediately after a tool call or code execution without
- checking its output.
- You can only execute a single tool call or code execution at a time, then check its ouput
- then proceed with the next call
- Use the working directory as the current directory for all file operations unless otherwise specified.
-
-
-
- These are the things that you learn't from the mistakes you made earlier :
-
- - When given a data file and asked to understand data/do data analysis/ data visualisation or similar stuff
- do not use file reader and read the whole data. Only use python code to do the analysis
- - This is a standard Python environment, not a python notebook or a repl. previous execution
- context is not preserved between executions.
- - You have a get_user_input tool to ask user more context before, in between or after tasks
-
- """

pikoai-0.1.12/Src/Tools/web_search.py (deleted)
@@ -1,30 +0,0 @@
- from duckduckgo_search import DDGS
-
- def web_search(max_results: int = 10, **kwargs) -> str:
- """
- Performs a DuckDuckGo web search based on your query (think a Google search) then returns the top search results.
-
- Args:
- query (str): The search query to perform.
- max_results (int, optional): Maximum number of results to return. Defaults to 10.
- **kwargs: Additional keyword arguments to pass to DDGS.
-
- Returns:
- str: Formatted string containing search results.
-
- Raises:
- ImportError: If the duckduckgo_search package is not installed.
- Exception: If no results are found for the given query.
- """
- try:
- ddgs = DDGS()
- except ImportError as e:
- raise ImportError("You must install package `duckduckgo_search` to run this function: for instance run `pip install duckduckgo-search`."
- ) from e
- query = kwargs['query']
- results = ddgs.text(query, max_results=max_results)
- if len(results) == 0:
- raise Exception("No results found! Try a less restrictive/shorter query.")
-
- postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results]
- return "## Search Results\n\n" + "\n\n".join(postprocessed_results)