PikoAi 0.1.17__py3-none-any.whl → 0.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Agents/Executor/executor.py +3 -3
- Utils/ter_interface.py +2 -1
- cli.py +53 -18
- llm_interface/llm.py +28 -2
- {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/METADATA +1 -1
- {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/RECORD +10 -10
- {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/WHEEL +0 -0
- {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/entry_points.txt +0 -0
- {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/licenses/LICENSE +0 -0
- {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/top_level.txt +0 -0
Agents/Executor/executor.py
CHANGED
@@ -9,7 +9,7 @@ from Utils.ter_interface import TerminalInterface
 from Utils.executor_utils import parse_tool_call
 from Agents.Executor.prompts import get_executor_prompt # Import prompts
 
-from llm_interface.llm import LiteLLMInterface # Import
+from llm_interface.llm import LiteLLMInterface # Import LiteLLMInterfacea
 from Tools import tool_manager
 
 class RateLimiter:
@@ -29,7 +29,7 @@ class executor:
     def __init__(self, user_prompt, max_iter=10):
         self.user_prompt = user_prompt
         self.max_iter = max_iter
-        self.rate_limiter = RateLimiter(wait_time=
+        self.rate_limiter = RateLimiter(wait_time=3.0, max_retries=3)
         self.executor_prompt_init() # Update system_prompt
         # self.python_executor = python_executor.PythonExecutor() # Initialize PythonExecutor
         # self.shell_executor = ShellExecutor() # Initialize ShellExecutor
@@ -80,7 +80,7 @@ class executor:
 
            except Exception as e: # Catching generic Exception as LiteLLM maps to OpenAI exceptions
                # Check if the error message contains "429" for rate limiting
-               if
+               if retries < self.rate_limiter.max_retries:
                    retries += 1
                    print(f"\nRate limit error detected. Waiting {self.rate_limiter.wait_time} seconds before retry {retries}/{self.rate_limiter.max_retries}")
                    time.sleep(self.rate_limiter.wait_time)
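For context, here is a minimal sketch of the retry pattern these executor.py hunks imply, assuming a plain RateLimiter that only holds wait_time and max_retries and a hypothetical call_with_retries helper; the actual RateLimiter internals and the executor's run loop are not shown in this diff.

# Minimal sketch of the retry pattern the executor.py hunks imply.
# RateLimiter(wait_time=3.0, max_retries=3) and the retry/sleep/print steps
# come from the diff; everything else here is assumed for illustration.
import time

class RateLimiter:
    def __init__(self, wait_time=3.0, max_retries=3):
        self.wait_time = wait_time
        self.max_retries = max_retries

def call_with_retries(call, rate_limiter):
    retries = 0
    while True:
        try:
            return call()
        except Exception as e:
            # Retry transient failures (e.g. HTTP 429 rate limits) until the budget is spent
            if retries < rate_limiter.max_retries:
                retries += 1
                print(f"\nRate limit error detected. Waiting {rate_limiter.wait_time} seconds "
                      f"before retry {retries}/{rate_limiter.max_retries}")
                time.sleep(rate_limiter.wait_time)
            else:
                raise

The change itself is small: the wait time is now an explicit 3.0 seconds and the retry guard compares against the limiter's max_retries before sleeping and retrying.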
Utils/ter_interface.py
CHANGED
@@ -68,7 +68,8 @@ class TerminalInterface:
 
            # Handle tool call closing delimiter - be more flexible with whitespace
            elif "<<END_TOOL_CALL>>" in line_stripped:
-               self.console.print(
+               self.console.print(self.tool_call_buffer)
+               # self.console.print(Syntax('{"status": "end_tool_call"}', "json", theme="monokai", line_numbers=False))
                self.console.print("[bold cyan]--------------------------------[/bold cyan]")
                self.inside_tool_call = False
                self.tool_call_buffer = ""
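A hypothetical sketch of the streaming handler this ter_interface.py hunk modifies: when the closing delimiter arrives, the accumulated tool_call_buffer is now printed directly instead of a canned JSON status line. Only the elif branch mirrors the diff; the opening <<TOOL_CALL>> delimiter name and the surrounding class scaffolding are assumptions.

# Assumed scaffolding around the changed elif branch (rich is already a
# dependency of the terminal interface, per the console markup in the diff).
from rich.console import Console

class ToolCallPrinter:
    def __init__(self):
        self.console = Console()
        self.inside_tool_call = False
        self.tool_call_buffer = ""

    def handle_line(self, line):
        line_stripped = line.strip()
        if "<<TOOL_CALL>>" in line_stripped:          # assumed opening delimiter
            self.inside_tool_call = True
        elif "<<END_TOOL_CALL>>" in line_stripped:
            # New behaviour from the diff: print the raw buffered tool call text
            self.console.print(self.tool_call_buffer)
            self.console.print("[bold cyan]--------------------------------[/bold cyan]")
            self.inside_tool_call = False
            self.tool_call_buffer = ""
        elif self.inside_tool_call:
            self.tool_call_buffer += line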
cli.py
CHANGED
@@ -32,6 +32,10 @@ AVAILABLE_MODELS = {
         "anthropic/claude-3-opus-20240229",
         "anthropic/claude-3-sonnet-20240229",
         "anthropic/claude-3-haiku-20240307"
+    ],
+    "gemini": [
+        "gemini/gemini-2.0-flash",
+        "gemini/gemini-2.5-flash-preview-05-20"
     ]
 }
 
@@ -40,7 +44,8 @@ API_KEYS = {
     "openai": "OPENAI_API_KEY",
     "mistral": "MISTRAL_API_KEY",
     "groq": "GROQ_API_KEY",
-    "anthropic": "ANTHROPIC_API_KEY"
+    "anthropic": "ANTHROPIC_API_KEY",
+    "gemini": "GEMINI_API_KEY"
 }
 
 # --- Utility Functions ---
@@ -72,7 +77,6 @@ def load_config(config_path: str) -> dict:
     with open(config_path, 'r') as f:
         try:
             config = json.load(f)
-            config["working_directory"] = os.getcwd()
         except json.JSONDecodeError:
             print("Error reading config.json. File might be corrupted. Re-creating default.")
             config = { "working_directory": os.getcwd(), "llm_provider": None, "model_name": None }
@@ -148,13 +152,20 @@ def update_model_config(config_path: str, provider_key: str, model_name_full: st
 
 # --- CLI Commands ---
 
-@click.group(invoke_without_command=True)
-@click.option("--task", "-t", help="The task to automate")
-@click.option("--max-iter", "-m", default=10, help="Maximum number of iterations for the task")
-@click.option("--change-model", is_flag=True, help="Change the LLM provider and model")
+@click.group(invoke_without_command=True, help="TaskAutomator – Your AI Task Automation Tool\n\nThis tool helps automate tasks using AI. You can run tasks directly or use various commands to manage settings and tools.")
+@click.option("--task", "-t", help="The task to automate (e.g., 'create a python script that sorts files by date')")
+@click.option("--max-iter", "-m", default=10, help="Maximum number of iterations for the task (default: 10)")
+@click.option("--change-model", is_flag=True, help="Change the LLM provider and model before running the task")
 @click.pass_context
 def cli(ctx, task, max_iter, change_model):
-    """TaskAutomator – Your AI Task Automation Tool
+    """TaskAutomator – Your AI Task Automation Tool
+
+    This tool helps automate tasks using AI. You can:
+    - Run tasks directly with --task
+    - Change AI models with --change-model
+    - Manage API keys with set-api-key and set-serp-key
+    - List available tools and models
+    """
     config_path = os.path.join(os.path.dirname(__file__), '../config.json')
     config = load_config(config_path)
     save_config(config_path, config)
@@ -185,9 +196,13 @@ def cli(ctx, task, max_iter, change_model):
     else:
         copilot.run()
 
-@cli.command("list-tools")
+@cli.command("list-tools", help="List all available automation tools and their descriptions")
 def list_tools():
-    """List all available automation tools.
+    """List all available automation tools and their descriptions.
+
+    This command shows all tools that can be used by the AI to automate tasks,
+    including what each tool does and what arguments it accepts.
+    """
     tools = OpenCopilot.list_available_tools()
     click.echo("Available Tools:")
     for tool in tools:
@@ -195,20 +210,32 @@ def list_tools():
         if tool.get("arguments"):
             click.echo(f"  Arguments: {tool['arguments']}")
 
-@cli.command("list-models")
+@cli.command("list-models", help="List all available LLM providers and their models")
 def list_models():
-    """List all available LLM providers and their models
+    """List all available LLM providers and their models.
+
+    Shows all supported AI models that can be used for task automation,
+    organized by provider (OpenAI, Mistral, Groq, Anthropic).
+    """
     click.echo("Available LLM Providers and Models (litellm compatible):")
     for (provider_key, model_list) in AVAILABLE_MODELS.items():
         click.echo(f"\n{provider_key.upper()}:")
         for model_name_full in model_list:
             click.echo(f"  - {model_name_full}")
 
-@cli.command("set-api-key")
-@click.option("--provider", "-p", type=click.Choice(list(AVAILABLE_MODELS.keys())), help="The LLM provider to set API key for")
-@click.option("--key", "-k", help="The API key to set (if not provided, will prompt for it)")
+@cli.command("set-api-key", help="Set or update API key for an LLM provider")
+@click.option("--provider", "-p", type=click.Choice(list(AVAILABLE_MODELS.keys())), help="The LLM provider to set API key for (e.g., openai, mistral, groq, anthropic)")
+@click.option("--key", "-k", help="The API key to set (if not provided, will prompt for it securely)")
 def set_api_key(provider, key):
-    """Set or update API key for
+    """Set or update API key for an LLM provider.
+
+    This command allows you to set or update the API key for any supported LLM provider.
+    The key will be stored securely in your .env file.
+
+    Examples:
+        piko set-api-key --provider openai
+        piko set-api-key -p mistral -k your-key-here
+    """
     if not provider:
         questions = [ inquirer.List("provider_key", message="Select LLM Provider to update API key", choices=list(AVAILABLE_MODELS.keys())) ]
         provider = inquirer.prompt(questions)["provider_key"]
@@ -236,10 +263,18 @@ def set_api_key(provider, key):
         f.writelines(lines)
     click.echo(f"API key for {provider.upper()} has been updated successfully in {env_path}")
 
-@cli.command("set-serp-key")
-@click.option("--key", "-k", help="The SERP API key to set (if not provided, will prompt for it)")
+@cli.command("set-serp-key", help="Set or update the SERP API key for web search functionality")
+@click.option("--key", "-k", help="The SERP API key to set (if not provided, will prompt for it securely)")
 def set_serp_key(key):
-    """Set or update the SERP API key used for web search functionality.
+    """Set or update the SERP API key used for web search functionality.
+
+    This command sets the API key used for web search operations when DuckDuckGo
+    search is not available. The key will be stored securely in your .env file.
+
+    Examples:
+        piko set-serp-key
+        piko set-serp-key -k your-key-here
+    """
    if not key:
        questions = [ inquirer.Text("api_key", message="Enter your SERP API key", validate=lambda _, x: len(x.strip()) > 0) ]
        key = inquirer.prompt(questions)["api_key"]
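Illustrative only: a sketch of how the updated AVAILABLE_MODELS and API_KEYS tables, which now include gemini, can be used to map a litellm-style model name to its API-key environment variable. resolve_api_key is a hypothetical helper and the dictionaries below are trimmed copies, not the full tables from cli.py.

# Hypothetical lookup helper built on the provider tables added in this release.
import os

AVAILABLE_MODELS = {
    "anthropic": ["anthropic/claude-3-haiku-20240307"],
    "gemini": ["gemini/gemini-2.0-flash", "gemini/gemini-2.5-flash-preview-05-20"],
}
API_KEYS = {"anthropic": "ANTHROPIC_API_KEY", "gemini": "GEMINI_API_KEY"}

def resolve_api_key(model_name_full):
    # litellm-style model names are "<provider>/<model>"
    provider = model_name_full.split("/", 1)[0]
    env_var = API_KEYS[provider]
    return env_var, os.getenv(env_var)

print(resolve_api_key("gemini/gemini-2.0-flash"))  # ('GEMINI_API_KEY', None or the configured key)

Because the set-api-key command builds its --provider choices from AVAILABLE_MODELS.keys(), adding the gemini entry also makes something like "piko set-api-key -p gemini" a valid invocation, following the pattern shown in the command's own docstring examples.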
llm_interface/llm.py
CHANGED
@@ -7,11 +7,26 @@ import os
 import sys
 import json
 import litellm # Added import for litellm
+import logging
+from datetime import datetime
 
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
 
 from Utils.ter_interface import TerminalInterface
 
+# Set up logging
+log_dir = os.path.join(os.path.dirname(__file__), '../../logs')
+os.makedirs(log_dir, exist_ok=True)
+log_file = os.path.join(log_dir, 'llm_responses.log')
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(message)s',
+    handlers=[
+        logging.FileHandler(log_file)  # Only log to file, removed StreamHandler
+    ]
+)
 
 # Load environment variables from .env file
 load_dotenv()
@@ -30,6 +45,15 @@ class LiteLLMInterface:
     def __init__(self):
         self.terminal = TerminalInterface()
         self.model_name = self.load_config()
+        logging.info(f"\n{'='*50}\nNew Session - Using model: {self.model_name}\n{'='*50}")
+
+    def log_response(self, response_content):
+        """Log only the LLM response in a readable format."""
+        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        log_entry = f"\n{'='*50}\nTimestamp: {timestamp}\nModel: {self.model_name}\n\n"
+        log_entry += f"Response:\n{response_content}\n"
+        log_entry += f"{'='*50}\n"
+        logging.info(log_entry)
 
     def load_config(self):
         config_path = os.path.join(os.path.dirname(__file__), '../../config.json')
@@ -60,10 +84,12 @@ class LiteLLMInterface:
                    response_content += content
 
            self.terminal.flush_markdown()
+           # Log only the response after successful completion
+           self.log_response(response_content)
            return response_content
        except Exception as e:
-           #
-
+           # Log the error
+           logging.error(f"\n{'='*50}\nError occurred:\nModel: {self.model_name}\nError: {str(e)}\n{'='*50}")
            print(f"An error occurred during the API call: {e}")
            self.terminal.flush_markdown() # Ensure terminal is flushed even on error
            raise # Re-raise the exception to be caught by the executor
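A standalone sketch of the logging pattern these llm.py hunks add: a file-only logger writing to logs/llm_responses.log plus a response formatter mirroring log_response. The standalone framing (paths relative to the script, the sample call at the end) is for illustration only and is not part of the package.

# File-only logging setup and response formatter, mirroring the llm.py hunks.
import logging
import os
from datetime import datetime

log_dir = os.path.join(os.path.dirname(__file__), "logs")
os.makedirs(log_dir, exist_ok=True)
log_file = os.path.join(log_dir, "llm_responses.log")

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(message)s",
    handlers=[logging.FileHandler(log_file)],  # file only, no StreamHandler
)

def log_response(model_name, response_content):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"\n{'='*50}\nTimestamp: {timestamp}\nModel: {model_name}\n\n"
    entry += f"Response:\n{response_content}\n{'='*50}\n"
    logging.info(entry)

log_response("gemini/gemini-2.0-flash", "Hello from the model.")  # sample entry

One design note: logging.basicConfig configures the root logger for the whole process, so INFO-level messages from other modules will also land in llm_responses.log once this module is imported.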
{pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 OpenCopilot.py,sha256=kPTs0-ly84h4dM7AmBlK4uwst5Sj2AM6UAlE3okkD8U,12157
-cli.py,sha256=
+cli.py,sha256=2UvmH74pcBFFezI0WHNyWTHMYasIM5NGnrUX6wsdveM,12945
 Agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 Agents/Executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Agents/Executor/executor.py,sha256=
+Agents/Executor/executor.py,sha256=BzSYkT4aPW1yDLSNXNr9WEWZEcs1becEYYlop-eB8s8,6999
 Agents/Executor/prompts.py,sha256=pGY4uXNGYiw_TnTUsRjrVsWc9CV657q3916eui0oulU,2688
 Env/__init__.py,sha256=KLe7UcNV5L395SxhMwbYGyu7KPrSNaoV_9QJo3mLop0,196
 Env/base_env.py,sha256=K4PoWwPXn3pKeu7_-JOlUuyNbyYQ9itMhQybFOm-3K4,1563
@@ -24,12 +24,12 @@ Tools/web_loader.py,sha256=_oP48uwveTaCKU7G5ju2zsJGTcZd1ScXTKOvHDFtZJU,4564
 Tools/web_search.py,sha256=12_VhwJGXmn3oUNhTbQ5ENFG964t9DWkfCz3UtlxrbM,2261
 Utils/__init__.py,sha256=oukU0ufroPRd8_N8d2xiFes9CTxSaw4NA6p2nS1kkSg,16
 Utils/executor_utils.py,sha256=WwK3TKgw_hG_crg7ijRaqfidYnnNXYbbs37vKZRYK-0,491
-Utils/ter_interface.py,sha256=
+Utils/ter_interface.py,sha256=8Oe5818MAYC21SfUxtfnV9HQFcQ49z8Q030jjPqNP_g,3889
 llm_interface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_interface/llm.py,sha256=
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
+llm_interface/llm.py,sha256=TEKKgRfZGtk_UgBfqqkdWKHr4NS3jOOk9di0S3RA7c0,6216
+pikoai-0.1.18.dist-info/licenses/LICENSE,sha256=cELUVOboOAderKFp8bdtcM5VyJi61YH1oDbRhOuoQZw,1067
+pikoai-0.1.18.dist-info/METADATA,sha256=BM1IQxmZ4D7p07EfgLAuUGhpLvAT3W9oPw9WTV8Lee0,2962
+pikoai-0.1.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pikoai-0.1.18.dist-info/entry_points.txt,sha256=xjZnheDymNDnQ0o84R0jZKEITrhNbzQWN-AhqfA_d6s,50
+pikoai-0.1.18.dist-info/top_level.txt,sha256=hWzBNE7UQsuNcENIOksGcJED08k3ZGRRn2X5jnStICU,53
+pikoai-0.1.18.dist-info/RECORD,,
File without changes: {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/WHEEL
File without changes: {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/entry_points.txt
File without changes: {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/licenses/LICENSE
File without changes: {pikoai-0.1.17.dist-info → pikoai-0.1.18.dist-info}/top_level.txt