PikoAi 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Agents/Executor/executor.py +7 -7
- OpenCopilot.py +256 -0
- Tools/tool_manager.py +6 -5
- cli.py +300 -0
- llm_interface/llm.py +1 -1
- {pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/METADATA +1 -1
- {pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/RECORD +11 -9
- {pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/top_level.txt +2 -0
- {pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/WHEEL +0 -0
- {pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/entry_points.txt +0 -0
- {pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/licenses/LICENSE +0 -0
Agents/Executor/executor.py
CHANGED

```diff
@@ -5,19 +5,19 @@ import os
 import sys
 import time
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
-from …
-from …
-from …
+from Utils.ter_interface import TerminalInterface
+from Utils.executor_utils import parse_tool_call, parse_code, parse_shell_command
+from Agents.Executor.prompts import get_system_prompt, get_task_prompt  # Import prompts

 from typing import Optional
 from mistralai.models.sdkerror import SDKError  # This might be an issue if LiteLLM doesn't use SDKError
 # LiteLLM maps exceptions to OpenAI exceptions.
 # We'll keep it for now and see if errors arise during testing.
-from …
-from …
-from …
+from Env import python_executor
+from Env.shell import ShellExecutor  # Import ShellExecutor
+from llm_interface.llm import LiteLLMInterface  # Import LiteLLMInterface

-from …
+from Tools import tool_manager

 class RateLimiter:
     def __init__(self, wait_time: float = 5.0, max_retries: int = 3):
```

(Removed lines are truncated to `from …` because the source diff view cut them off; the old import paths are not recoverable from this diff.)
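The only part of `RateLimiter` visible in this hunk is its constructor signature. As a reading aid, here is a minimal sketch of how such a limiter could wrap a rate-limited LLM call; everything beyond the `wait_time`/`max_retries` signature (the `call_with_retry` helper and its retry loop) is an assumption, not the package's actual implementation:

```python
import time

class RateLimiter:
    # Only this constructor signature appears in the diff; the body is assumed.
    def __init__(self, wait_time: float = 5.0, max_retries: int = 3):
        self.wait_time = wait_time
        self.max_retries = max_retries

    def call_with_retry(self, fn, *args, **kwargs):
        """Hypothetical helper: retry fn, sleeping wait_time seconds between attempts."""
        for attempt in range(self.max_retries):
            try:
                return fn(*args, **kwargs)
            except Exception:
                if attempt == self.max_retries - 1:
                    raise  # Give up after max_retries failed attempts.
                time.sleep(self.wait_time)
```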
OpenCopilot.py
ADDED
@@ -0,0 +1,256 @@

```python
import os
import sys
import json
import re
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.shortcuts import print_formatted_text
from prompt_toolkit.formatted_text import FormattedText

# Add the parent directory to the path to enable imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))

from Agents.Executor.executor import executor

# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))

class FilePathCompleter(Completer):
    def get_completions(self, document, complete_event):
        text_before_cursor = document.text_before_cursor
        path_match = self._find_at_path(text_before_cursor)
        if not path_match:
            return
        current_path = path_match.group(1)
        # Only consider files/dirs in current working directory
        # Support completion for subdirectories and deeper paths
        # Split current_path into directory and base name
        dir_path, base_name = os.path.split(current_path)
        if not dir_path:
            dir_path = os.getcwd()
        else:
            # Expand user home and make absolute
            dir_path = os.path.expanduser(dir_path)
            if not os.path.isabs(dir_path):
                dir_path = os.path.abspath(dir_path)
        try:
            items = os.listdir(dir_path)
            matching_items = [item for item in items if item.lower().startswith(base_name.lower())]
            matching_items.sort(key=lambda x: (not os.path.isdir(os.path.join(dir_path, x)), x.lower()))
            for item in matching_items:
                yield self._create_completion(text_before_cursor, dir_path, item)
        except (OSError, PermissionError):
            pass

    def _find_at_path(self, text):
        """Find the last @ symbol after whitespace and the path after it"""
        return re.search(r'@([^@\s]*)$', text)

    def _create_completion(self, text_before_cursor, dir_path, item):
        """Create a Completion object for a given item in the current directory."""
        full_path = os.path.join(dir_path, item)
        # Only the completed path after '@' should be inserted
        if os.path.isdir(full_path):
            completion_text = f"{item}/"
            display_text = f"{item}/ (directory)"
        else:
            _, ext = os.path.splitext(item)
            completion_text = f"{item}"
            if ext:
                display_text = f"{item} ({ext[1:]} file)"
            else:
                display_text = f"{item}"
        if '/' in text_before_cursor[text_before_cursor.rindex('@'):]:
            start_position = -(len(text_before_cursor) - text_before_cursor.rindex('/') - 1)
        else:
            start_position = -(len(text_before_cursor) - text_before_cursor.rindex('@') - 1)
        return Completion(
            text=completion_text,
            start_position=start_position,
            display=display_text
        )

class OpenCopilot:
    def __init__(self):
        self.e1 = None  # Initialize as None, will be set in run()
        self.session = PromptSession(completer=FilePathCompleter())

    def extract_files_and_process_prompt(self, user_input):
        """Extract file paths from @ commands and process the prompt."""
        # Find all @file patterns
        file_patterns = re.findall(r'@(\S+)', user_input)
        file_contents = []
        processed_prompt = user_input

        for file_path in file_patterns:
            # Expand user home directory if needed
            expanded_path = os.path.expanduser(file_path)

            # Convert to absolute path if it's relative
            if not os.path.isabs(expanded_path):
                expanded_path = os.path.abspath(expanded_path)

            if os.path.exists(expanded_path):
                if os.path.isfile(expanded_path):
                    try:
                        with open(expanded_path, 'r', encoding='utf-8') as f:
                            content = f.read()

                        # Add file content with clear formatting
                        file_contents.append(f"=== Content of file: {expanded_path} ===\n{content}\n=== End of file: {expanded_path} ===\n")
                        print(f"=== Content of file: {expanded_path} ===\n{content}\n=== End of file: {expanded_path} ===\n")
                        # Remove the @file pattern from the processed prompt
                        processed_prompt = processed_prompt.replace(f"@{file_path}", "")

                        print_formatted_text(FormattedText([
                            ('class:success', f"✓ Loaded file: {expanded_path}")
                        ]))

                    except Exception as e:
                        print_formatted_text(FormattedText([
                            ('class:error', f"✗ Error reading file {expanded_path}: {str(e)}")
                        ]))
                else:
                    # For directories, just append the path to the processed prompt
                    processed_prompt = processed_prompt.replace(f"@{file_path}", expanded_path)
                    print_formatted_text(FormattedText([
                        ('class:success', f"✓ Added directory path: {expanded_path}")
                    ]))
            else:
                print_formatted_text(FormattedText([
                    ('class:warning', f"⚠ Path not found: {expanded_path}")
                ]))

        # Combine file contents with the processed prompt: file contents come first, then the user prompt
        if file_contents:
            final_prompt = "\n".join(file_contents) + "\n" + processed_prompt.strip()
            print_formatted_text(FormattedText([
                ('class:info', f"📁 Loaded {len(file_contents)} file(s) into context")
            ]))
        else:
            final_prompt = processed_prompt.strip()

        return final_prompt

    def display_help(self):
        """Display help information about available commands."""
        help_text = """
🚀 TaskAutomator OpenCopilot Help

Available Commands:
  @<file_path> - Include file content in your prompt
    Example: @config.json analyze this configuration
    Supports: relative paths, absolute paths, ~ for home directory
    Multiple files: @file1.py @file2.txt compare these files

  quit - Exit the application
  help - Show this help message

File Path Completion:
  - Type @ followed by a file path
  - Use arrow keys to navigate suggestions
  - Press Tab or Enter to autocomplete
  - Supports directories (shows with /) and files
  - Case-insensitive matching

Examples:
  @src/main.py explain this code
  @~/documents/data.csv @analysis.py analyze this data using this script
  @config.json @logs/error.log debug the issue in these files
"""
        print_formatted_text(FormattedText([('class:info', help_text)]))

    def run(self):
        """Main conversation loop with enhanced @ command support."""
        print_formatted_text(FormattedText([
            ('class:title', '🚀 TaskAutomator OpenCopilot'),
            ('class:subtitle', '\nType "help" for available commands or start with your prompt.\nUse @<file_path> to include files in your context.\n')
        ]))

        try:
            # Get initial prompt
            user_input = self.session.prompt(HTML("<b>Please enter your prompt: </b>"))

            # Handle special commands
            if user_input.lower() == 'help':
                self.display_help()
                user_input = self.session.prompt(HTML("<b>Please enter your prompt: </b>"))
            elif user_input.lower() == 'quit':
                print("Goodbye!")
                return

            # Process the initial prompt
            final_prompt = self.extract_files_and_process_prompt(user_input)

            # Initialize executor with the processed prompt
            self.e1 = executor(final_prompt)
            self.e1.executor_prompt_init()
            self.e1.run()

            # Continue conversation loop
            while True:
                try:
                    user_input = self.session.prompt(HTML("<b>\nPlease enter your prompt (or 'quit' to exit): </b>"))

                    # Handle special commands
                    if user_input.lower() == 'quit':
                        print("Goodbye!")
                        break
                    elif user_input.lower() == 'help':
                        self.display_help()
                        continue

                    # Process the prompt and extract files
                    final_prompt = self.extract_files_and_process_prompt(user_input)

                    # Add to conversation
                    self.e1.message.append({"role": "user", "content": final_prompt})
                    self.e1.run()

                except KeyboardInterrupt:
                    print("\nGoodbye!")
                    break
                except Exception as e:
                    print_formatted_text(FormattedText([
                        ('class:error', f"An error occurred: {e}")
                    ]))
                    continue

        except KeyboardInterrupt:
            print("\nGoodbye!")
        except Exception as e:
            print_formatted_text(FormattedText([
                ('class:error', f"Failed to start OpenCopilot: {e}")
            ]))

    def run_task(self, user_prompt, max_iter=10):
        """One-shot task execution with @ command support."""
        # Process @ commands in the prompt
        final_prompt = self.extract_files_and_process_prompt(user_prompt)

        e1 = executor(final_prompt, max_iter=max_iter)
        e1.executor_prompt_init()
        e1.run()

    @staticmethod
    def list_available_tools():
        """List all available tools."""
        try:
            tool_dir_path = os.path.join(os.path.dirname(__file__), 'Tools/tool_dir.json')
            with open(tool_dir_path, 'r') as f:
                tools = json.load(f)
            return tools
        except FileNotFoundError:
            print("Tools directory not found.")
            return {}
        except json.JSONDecodeError:
            print("Error reading tools configuration.")
            return {}

# To run the copilot
if __name__ == "__main__":
    copilot = OpenCopilot()
    copilot.run()
```
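To make the new `@` syntax concrete, here is a usage sketch of `extract_files_and_process_prompt` as defined above; the `notes.txt` filename is hypothetical, while the `===` framing and the token removal follow directly from the code:

```python
# Hypothetical session: assumes notes.txt exists in the current working directory.
copilot = OpenCopilot()
final_prompt = copilot.extract_files_and_process_prompt("summarize @notes.txt briefly")

# final_prompt now begins with the framed file content:
#   === Content of file: /abs/path/to/notes.txt ===
#   ...contents of notes.txt...
#   === End of file: /abs/path/to/notes.txt ===
# followed by the user text with the @notes.txt token stripped out:
#   summarize  briefly
```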
Tools/tool_manager.py
CHANGED

```diff
@@ -1,11 +1,12 @@
 import sys
 import os
+import json
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
-from …
-from …
-from …
-from …
-from …
+from Tools.web_loader import load_data
+from Tools.web_search import web_search
+from Tools.file_task import file_reader, file_maker, file_writer, directory_maker
+from Tools.system_details import get_os_details, get_datetime, get_memory_usage, get_cpu_info
+from Tools.userinp import get_user_input

 #need to transform it into map of dictionary
 #name : [function : xyz,description : blah bah]
```
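The trailing comments sketch a planned registry keyed by tool name. The diff does not show that map, so the following is only a guess at the shape those comments suggest, built from the imported tool functions; the descriptions are placeholders:

```python
# Hypothetical shape of the registry described by the comments above; the
# actual contents of tool_manager.py are not shown in this diff.
tools_function_map = {
    "web_search": {
        "function": web_search,
        "description": "Search the web for a query",  # placeholder
    },
    "file_reader": {
        "function": file_reader,
        "description": "Read the contents of a file",  # placeholder
    },
}

# Dispatch by name:
result = tools_function_map["web_search"]["function"]("example query")
```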
cli.py
ADDED
@@ -0,0 +1,300 @@

```python
import click
import json
import os
import inquirer
import shutil
from OpenCopilot import OpenCopilot
from dotenv import load_dotenv

# Define available models for each provider using litellm compatible strings
AVAILABLE_MODELS = {
    "openai": [
        "openai/gpt-3.5-turbo",
        "openai/gpt-4",
        "openai/gpt-4-turbo-preview",
        "openai/gpt-4o"
    ],
    "mistral": [
        "mistral/mistral-tiny",
        "mistral/mistral-small",
        "mistral/mistral-medium",
        "mistral/mistral-large-latest"
    ],
    "groq": [
        "groq/llama2-70b-4096",
        "groq/mixtral-8x7b-32768",
        "groq/gemma-7b-it"
    ],
    "anthropic": [
        "anthropic/claude-3-opus-20240229",
        "anthropic/claude-3-sonnet-20240229",
        "anthropic/claude-3-haiku-20240307"
    ]
}

# Define API key environment variables for each provider (matching litellm conventions)
API_KEYS = {
    "openai": "OPENAI_API_KEY",
    "mistral": "MISTRAL_API_KEY",
    "groq": "GROQ_API_KEY",
    "anthropic": "ANTHROPIC_API_KEY"
}

def get_provider_from_model_name(model_name: str) -> str:
    """Extracts the provider from a litellm model string (e.g., 'openai/gpt-4o' -> 'openai')."""
    if not model_name or '/' not in model_name:
        # Fallback or error handling if model_name is not in expected format.
        # For now, try to return the model_name itself if it doesn't contain '/',
        # as it might be a provider name already or an old format.
        # This case should ideally be handled based on how robust the system needs to be.
        print(f"Warning: Model name '{model_name}' may not be in 'provider/model' format. Attempting to use as provider.")
        return model_name
    return model_name.split('/')[0]

def clear_terminal():
    """Clear the terminal screen"""
    os.system('cls' if os.name == 'nt' else 'clear')

def ensure_api_key(provider):
    """Ensure API key exists for the given provider"""
    env_path = os.path.join(os.path.dirname(__file__), '../.env')
    env_var = API_KEYS.get(provider)
    if not env_var:
        raise ValueError(f"Unknown provider: {provider}")

    # Force reload of .env file
    if os.path.exists(env_path):
        load_dotenv(env_path, override=True)

    # Check if API key exists in environment
    api_key = os.getenv(env_var)

    if not api_key:
        # Ask for API key
        questions = [
            inquirer.Text('api_key',
                          message=f"Enter your {provider.upper()} API key",
                          validate=lambda _, x: len(x.strip()) > 0
                          )
        ]
        api_key = inquirer.prompt(questions)['api_key']
        clear_terminal()

        # Save to .env file
        if not os.path.exists(env_path):
            with open(env_path, 'w') as f:
                f.write(f"{env_var}={api_key}\n")
        else:
            # Read existing .env file
            with open(env_path, 'r') as f:
                lines = f.readlines()

            # Check if key already exists
            key_exists = False
            for i, line in enumerate(lines):
                if line.strip().startswith(f"{env_var}=") or line.strip().startswith(f"#{env_var}="):
                    lines[i] = f"{env_var}={api_key}\n"
                    key_exists = True
                    break

            # If key doesn't exist, append it
            if not key_exists:
                lines.append(f"{env_var}={api_key}\n")

            # Write back to .env file
            with open(env_path, 'w') as f:
                f.writelines(lines)

        # Reload environment with new key
        load_dotenv(env_path, override=True)

    return api_key

def ensure_config_exists():
    """Ensure config.json exists and has required fields"""
    config_path = os.path.join(os.path.dirname(__file__), '../config.json')
    config = None

    if not os.path.exists(config_path):
        # Copy config from example if it doesn't exist
        example_path = os.path.join(os.path.dirname(__file__), '../config.example.json')
        if os.path.exists(example_path):
            shutil.copy2(example_path, config_path)
            with open(config_path, 'r') as f:
                config = json.load(f)
        else:
            # Create a default config if example is missing
            config = {
                "working_directory": os.getcwd(),
                "llm_provider": None,  # Will store the provider part, e.g., "openai"
                "model_name": None     # Will store the full litellm string, e.g., "openai/gpt-4o"
            }
    else:
        # Read existing config
        with open(config_path, 'r') as f:
            try:
                config = json.load(f)
            except json.JSONDecodeError:
                print("Error reading config.json. File might be corrupted. Re-creating default.")
                config = {
                    "working_directory": os.getcwd(),
                    "llm_provider": None,
                    "model_name": None
                }

    # Ensure 'working_directory' exists, default if not
    if "working_directory" not in config or not config["working_directory"]:
        config["working_directory"] = os.getcwd()

    # Check if model configuration is needed
    if not config.get("model_name") or not config.get("llm_provider"):
        print("LLM provider or model not configured.")
        questions = [
            inquirer.List('provider_key',
                          message="Select LLM Provider",
                          choices=list(AVAILABLE_MODELS.keys())  # User selects "openai", "mistral", etc.
                          )
        ]
        selected_provider_key = inquirer.prompt(questions)['provider_key']
        clear_terminal()

        # Ensure API key exists for the selected provider
        ensure_api_key(selected_provider_key)  # Uses "openai", "mistral", etc.

        questions = [
            inquirer.List('model_name_full',
                          message=f"Select {selected_provider_key} Model",
                          choices=AVAILABLE_MODELS[selected_provider_key]  # Shows "openai/gpt-3.5-turbo", etc.
                          )
        ]
        selected_model_name_full = inquirer.prompt(questions)['model_name_full']
        clear_terminal()

        config["llm_provider"] = selected_provider_key    # Store "openai"
        config["model_name"] = selected_model_name_full   # Store "openai/gpt-4o"

        with open(config_path, 'w') as f:
            json.dump(config, f, indent=4)
        print(f"Configuration saved: Provider '{selected_provider_key}', Model '{selected_model_name_full}'")

    else:
        # Config exists, ensure API key for the stored provider.
        # llm_provider should already be the provider part, e.g., "openai".
        # If old config only had model_name, try to parse provider from it.
        provider_to_check = config.get("llm_provider")
        if not provider_to_check and config.get("model_name"):
            provider_to_check = get_provider_from_model_name(config["model_name"])
            # Optionally, update config if llm_provider was missing
            if provider_to_check != config.get("llm_provider"):  # Check if it's different or was None
                config["llm_provider"] = provider_to_check
                with open(config_path, 'w') as f:
                    json.dump(config, f, indent=4)

        if provider_to_check:
            ensure_api_key(provider_to_check)
        else:
            # This case should ideally be handled by the initial setup logic
            print("Warning: Could not determine LLM provider from config to ensure API key.")

    # Create config file if it was created from scratch without example
    if not os.path.exists(config_path):
        with open(config_path, 'w') as f:
            json.dump(config, f, indent=4)

    return config_path

@click.group(invoke_without_command=True)
@click.option('--task', '-t', help='The task to automate')
@click.option('--max-iter', '-m', default=10, help='Maximum number of iterations for the task')
@click.option('--change-model', is_flag=True, help='Change the LLM provider and model')
@click.pass_context
def cli(ctx, task, max_iter, change_model):
    """TaskAutomator - Your AI Task Automation Tool"""
    # Ensure config exists and has required fields
    config_path = ensure_config_exists()
    clear_terminal()

    # If change-model flag is set, update the model
    if change_model:
        with open(config_path, 'r') as f:
            config = json.load(f)

        print("Current configuration: Provider: {}, Model: {}".format(config.get("llm_provider"), config.get("model_name")))
        questions = [
            inquirer.List('provider_key',
                          message="Select LLM Provider",
                          choices=list(AVAILABLE_MODELS.keys())  # User selects "openai", "mistral", etc.
                          )
        ]
        selected_provider_key = inquirer.prompt(questions)['provider_key']
        clear_terminal()

        # Ensure API key exists for the selected provider
        ensure_api_key(selected_provider_key)

        questions = [
            inquirer.List('model_name_full',
                          message=f"Select {selected_provider_key} Model",
                          choices=AVAILABLE_MODELS[selected_provider_key]  # Shows "openai/gpt-3.5-turbo", etc.
                          )
        ]
        selected_model_name_full = inquirer.prompt(questions)['model_name_full']
        clear_terminal()

        config["llm_provider"] = selected_provider_key    # Store "openai"
        config["model_name"] = selected_model_name_full   # Store "openai/gpt-4o"

        # Ensure working_directory is preserved or set
        if "working_directory" not in config or not config["working_directory"]:
            config["working_directory"] = os.getcwd()

        with open(config_path, 'w') as f:
            json.dump(config, f, indent=4)

        click.echo(f"Model changed to {selected_model_name_full}")
        return

    # Ensure API key for the configured model before running OpenCopilot.
    # This is a bit redundant if ensure_config_exists already did it, but good for safety.
    with open(config_path, 'r') as f:
        config = json.load(f)

    current_provider = config.get("llm_provider")
    if not current_provider and config.get("model_name"):  # If llm_provider is missing, try to derive it
        current_provider = get_provider_from_model_name(config["model_name"])

    if current_provider:
        ensure_api_key(current_provider)
    else:
        click.echo("Error: LLM provider not configured. Please run with --change-model to set it up.", err=True)
        return

    copilot = OpenCopilot()
    if ctx.invoked_subcommand is None:
        if task:
            copilot.run_task(user_prompt=task, max_iter=max_iter)
        else:
            copilot.run()

@cli.command('list-tools')
def list_tools():
    """List all available automation tools"""
    tools = OpenCopilot.list_available_tools()
    click.echo("Available Tools:")
    for tool in tools:
        click.echo(f"- {tool['name']}: {tool['summary']}")
        if tool.get('arguments'):
            click.echo(f"  Arguments: {tool['arguments']}")

@cli.command('list-models')
def list_models():
    """List all available LLM providers and their models (litellm compatible)"""
    click.echo("Available LLM Providers and Models (litellm compatible):")
    for provider_key, model_list in AVAILABLE_MODELS.items():
        click.echo(f"\n{provider_key.upper()}:")  # provider_key is "openai", "mistral", etc.
        for model_name_full in model_list:  # model_name_full is "openai/gpt-4o", etc.
            click.echo(f"  - {model_name_full}")

if __name__ == '__main__':
    cli()
```
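The provider/model convention that `cli.py` relies on can be checked directly against `get_provider_from_model_name` as written above:

```python
# Grounded in the function defined in cli.py above.
assert get_provider_from_model_name("openai/gpt-4o") == "openai"
assert get_provider_from_model_name("anthropic/claude-3-haiku-20240307") == "anthropic"

# A bare name without '/' prints a warning and is returned unchanged, so an
# old-style config value like "mistral" still resolves to a provider key:
assert get_provider_from_model_name("mistral") == "mistral"
```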
llm_interface/llm.py
CHANGED

```diff
@@ -10,7 +10,7 @@ import litellm  # Added import for litellm

 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))

-from …
+from Utils.ter_interface import TerminalInterface


 # Load environment variables from .env file
```
{pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/RECORD
CHANGED

```diff
@@ -1,6 +1,8 @@
+OpenCopilot.py,sha256=sey9mzHUtn07aBpG8HyKjOXImoq3OAvNZo1bpXA8XEk,10691
+cli.py,sha256=o2V_DETYXEiYm6617Xg8rDcAgb0ofmojPvdlOpNHwMY,11809
 Agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 Agents/Executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Agents/Executor/executor.py,sha256=…
+Agents/Executor/executor.py,sha256=8p226vZF8MUowWtfIuIcjL3U-2dVfJV4pfQvv47iLnQ,12488
 Agents/Executor/prompts.py,sha256=wLS3lPAYWjeKF02LzJ8vP5bZ2VQrMJUd4A7rBfl6qSQ,3846
 Env/__init__.py,sha256=KLe7UcNV5L395SxhMwbYGyu7KPrSNaoV_9QJo3mLop0,196
 Env/base_env.py,sha256=ORM6U5qwj7cTuSHFtSmCSsE0cl6pZ28D97CEyyFnucI,1323
@@ -12,15 +14,15 @@ Env/shell.py,sha256=gr6czmeuSWtB3xSA9TZN7wnK2BENOuA9zjNttwbxztU,1877
 Tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 Tools/file_task.py,sha256=VUhWq_G-SWvGahQo8PG7TOpElHUW3BGLUabrTdJS89o,12151
 Tools/system_details.py,sha256=7-mTm3CG4NoatHcvcosalEgEcpWlNsCsZ7kuS3y_EmY,2262
-Tools/tool_manager.py,sha256=…
+Tools/tool_manager.py,sha256=0i3bd_VxhbpWKLzyfSeYyv_33Z6HmvQDBUxPUxjLYlU,1736
 Tools/userinp.py,sha256=vUhEj3y1W1_ZFHqo2xQwvqDyeOg3VsisSKTI0EurUH8,1205
 Tools/web_loader.py,sha256=PyZk2g7WngZT0tCLs9Danx20dYspnaZwy4rlVE9Sx_4,5054
 Tools/web_search.py,sha256=4EGq1VZqfDgG-_yXTd4_Ha1iEUcR-szdlgRV7oFPru4,1259
 llm_interface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_interface/llm.py,sha256=…
-pikoai-0.1.…
-pikoai-0.1.…
-pikoai-0.1.…
-pikoai-0.1.…
-pikoai-0.1.…
-pikoai-0.1.…
+llm_interface/llm.py,sha256=tI_KDOW14QLWowA7bB3GPe2qjlk0sjS5fBavs9XD1fo,5185
+pikoai-0.1.2.dist-info/licenses/LICENSE,sha256=cELUVOboOAderKFp8bdtcM5VyJi61YH1oDbRhOuoQZw,1067
+pikoai-0.1.2.dist-info/METADATA,sha256=8m0dTg6SbBwPFM-cOtlKgQ4uRm_8-zqRYqk55kYESFk,2961
+pikoai-0.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pikoai-0.1.2.dist-info/entry_points.txt,sha256=QVeDO6N3nO3UScMb2ksusQWPgcVn86vXosgL-8gu6fo,33
+pikoai-0.1.2.dist-info/top_level.txt,sha256=_xQTtTA77f_GF7zdtD6C3gMyPP8GqRZvuhOSTVvSePU,47
+pikoai-0.1.2.dist-info/RECORD,,
```
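Each RECORD entry has the form `path,sha256=<digest>,size`, where the digest is the urlsafe-base64 encoding of the file's raw SHA-256 hash with `=` padding stripped, per the wheel format. A small sketch for reproducing an entry locally:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Compute the sha256 field of a wheel RECORD entry for a file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    # Wheel RECORDs use urlsafe base64 with the trailing '=' padding removed.
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. record_hash("cli.py") should reproduce the value listed above when run
# against the exact cli.py shipped in the 0.1.2 wheel.
```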
{pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/WHEEL
File without changes

{pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/entry_points.txt
File without changes

{pikoai-0.1.0.dist-info → pikoai-0.1.2.dist-info}/licenses/LICENSE
File without changes