PikoAi 0.1.18__py3-none-any.whl → 0.1.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Agents/Executor/executor.py +2 -2
- Agents/Executor/prompts.py +5 -2
- llm_interface/llm.py +4 -30
- {pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/METADATA +1 -1
- {pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/RECORD +9 -9
- {pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/WHEEL +0 -0
- {pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/entry_points.txt +0 -0
- {pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/licenses/LICENSE +0 -0
- {pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/top_level.txt +0 -0
Agents/Executor/executor.py
CHANGED
```diff
@@ -26,7 +26,7 @@ class RateLimiter:
         self.last_call_time = time.time()
 
 class executor:
-    def __init__(self, user_prompt, max_iter=
+    def __init__(self, user_prompt, max_iter=30):
         self.user_prompt = user_prompt
         self.max_iter = max_iter
         self.rate_limiter = RateLimiter(wait_time=3.0, max_retries=3)
@@ -130,7 +130,7 @@ class executor:
                 task_done = True
 
             else:
-                self.message.append({"role": "user", "content": "
+                self.message.append({"role": "user", "content": "Continue with the task if not complete.Else simply output TASK_DONE. "})
             iteration += 1
 
         if not task_done:
```
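For orientation, the constructor above wires a `RateLimiter(wait_time=3.0, max_retries=3)` into the executor, and the diff only shows the tail of `RateLimiter`. The following is a minimal sketch of how such a helper typically works; everything except `wait_time`, `max_retries`, and `last_call_time` is assumed for illustration rather than taken from the package source.

```python
import time

class RateLimiter:
    """Sketch: space calls out by `wait_time` seconds, allow up to `max_retries` attempts."""

    def __init__(self, wait_time=3.0, max_retries=3):
        self.wait_time = wait_time        # minimum seconds between calls
        self.max_retries = max_retries    # attempts before giving up
        self.last_call_time = 0.0         # matches the field set in the hunk above

    def wait_if_needed(self):
        # Hypothetical helper: sleep until `wait_time` has elapsed since the last call.
        elapsed = time.time() - self.last_call_time
        if elapsed < self.wait_time:
            time.sleep(self.wait_time - elapsed)
        self.last_call_time = time.time()
```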
Agents/Executor/prompts.py
CHANGED
```diff
@@ -42,15 +42,18 @@ These are the things that you learned from the mistakes you made earlier :
 - This is a standard Python environment, not a python notebook or a repl. previous execution
 context is not preserved between executions.
 - Don't execute dangerous commands like rm -rf * or access sensitive files
-- If you are stuck, have tried to fix an issue (e.g., a linter error) multiple times (e.g., 3 times) without success, or need clarification, ask the USER for input.
+- If you are stuck, have tried to fix an issue (e.g., a linter error) multiple times (e.g., 3 times) without success, or need clarification, ask the USER for input.
 - Upon creating anything (like a new project, website, data analysis png) always show the output.You can do this by executing shell commands.
 - the python/shell code execution through tool call will be executed immediately and output will be shown. it wont be saved.
+- When asked to do research, use the web_search and web_loader tools to do in depth research. Use multiple iterations and get information from multiple sources. Analyse data and provide insights.
 
 
 ** Important **
 - You can only perform one tool call at a time.
 - Always evaluate the output of the tool call before deciding the next step.
-- Continue performing actions until the user's goal is fully achieved. Only then, include 'TASK_DONE' in your response
+- Continue performing actions until the user's goal is fully achieved. Only then, include 'TASK_DONE' in your response.
 - Do not end the task immediately after a tool call without evaluating its output.
+- The best way to give output is to save it open the file using shell commands.
+
 
 """
```
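Together with the executor.py change above, the prompt now spells out a simple loop contract: keep acting until the goal is met, then emit 'TASK_DONE', otherwise the executor nudges the model to continue. A minimal sketch of a loop that consumes this contract, reusing the `max_iter`, `task_done`, and nudge message from the executor.py hunk; the `ask_llm` callable and the loop shape itself are assumptions for illustration, not the package's actual method.

```python
def run_until_done(ask_llm, messages, max_iter=30):
    """Sketch of the iteration loop the prompt rules feed into.

    `ask_llm` stands in for the real LLM call; only the TASK_DONE check and
    the nudge message mirror the executor.py hunk shown earlier.
    """
    task_done = False
    iteration = 0
    while iteration < max_iter and not task_done:
        reply = ask_llm(messages)          # one tool call / response per turn
        if "TASK_DONE" in reply:
            task_done = True
        else:
            messages.append({"role": "user",
                             "content": "Continue with the task if not complete."
                                        "Else simply output TASK_DONE. "})
        iteration += 1
    return task_done
```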
llm_interface/llm.py
CHANGED
```diff
@@ -7,26 +7,11 @@ import os
 import sys
 import json
 import litellm # Added import for litellm
-import logging
-from datetime import datetime
 
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
 
 from Utils.ter_interface import TerminalInterface
 
-# Set up logging
-log_dir = os.path.join(os.path.dirname(__file__), '../../logs')
-os.makedirs(log_dir, exist_ok=True)
-log_file = os.path.join(log_dir, 'llm_responses.log')
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(message)s',
-    handlers=[
-        logging.FileHandler(log_file) # Only log to file, removed StreamHandler
-    ]
-)
 
 # Load environment variables from .env file
 load_dotenv()
@@ -45,15 +30,6 @@ class LiteLLMInterface:
     def __init__(self):
         self.terminal = TerminalInterface()
         self.model_name = self.load_config()
-        logging.info(f"\n{'='*50}\nNew Session - Using model: {self.model_name}\n{'='*50}")
-
-    def log_response(self, response_content):
-        """Log only the LLM response in a readable format."""
-        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        log_entry = f"\n{'='*50}\nTimestamp: {timestamp}\nModel: {self.model_name}\n\n"
-        log_entry += f"Response:\n{response_content}\n"
-        log_entry += f"{'='*50}\n"
-        logging.info(log_entry)
 
     def load_config(self):
         config_path = os.path.join(os.path.dirname(__file__), '../../config.json')
@@ -84,15 +60,13 @@ class LiteLLMInterface:
                 response_content += content
 
             self.terminal.flush_markdown()
-            # Log only the response after successful completion
-            self.log_response(response_content)
             return response_content
         except Exception as e:
-            #
-
-
+            # litellm maps exceptions to OpenAI exceptions.
+            # The executor should catch these and handle them.
+
             self.terminal.flush_markdown() # Ensure terminal is flushed even on error
-            raise
+            raise
 
 
 # class MistralModel:
```
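The new comment states that litellm maps provider errors onto OpenAI-style exception classes and leaves handling to the executor. A minimal sketch of what that caller-side handling could look like; the retry policy and function name are assumptions, while `litellm.completion`, `litellm.RateLimitError`, and `litellm.APIConnectionError` are litellm's own names.

```python
import time
import litellm

def complete_with_retry(model, messages, retries=3, wait=3.0):
    """Sketch of caller-side handling for litellm's OpenAI-mapped exceptions."""
    for attempt in range(retries):
        try:
            return litellm.completion(model=model, messages=messages)
        except (litellm.RateLimitError, litellm.APIConnectionError):
            if attempt == retries - 1:
                raise              # out of attempts: surface the error
            time.sleep(wait)       # back off, then retry
```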
{pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/RECORD
CHANGED
```diff
@@ -2,8 +2,8 @@ OpenCopilot.py,sha256=kPTs0-ly84h4dM7AmBlK4uwst5Sj2AM6UAlE3okkD8U,12157
 cli.py,sha256=2UvmH74pcBFFezI0WHNyWTHMYasIM5NGnrUX6wsdveM,12945
 Agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 Agents/Executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Agents/Executor/executor.py,sha256=
-Agents/Executor/prompts.py,sha256=
+Agents/Executor/executor.py,sha256=BZdH3QEvv3xsAl7NXrIKBy5nMXTOvHOQ7ppM2YXrcPA,6975
+Agents/Executor/prompts.py,sha256=3GHNKArEe-pWucbZTgSeJ9yjk_HVhd6BapVpQNt_nEE,2890
 Env/__init__.py,sha256=KLe7UcNV5L395SxhMwbYGyu7KPrSNaoV_9QJo3mLop0,196
 Env/base_env.py,sha256=K4PoWwPXn3pKeu7_-JOlUuyNbyYQ9itMhQybFOm-3K4,1563
 Env/base_executor.py,sha256=awTwJ44CKWV4JO2KUHfHDX0p1Ujw55hlaL5oNYTEW9M,893
@@ -26,10 +26,10 @@ Utils/__init__.py,sha256=oukU0ufroPRd8_N8d2xiFes9CTxSaw4NA6p2nS1kkSg,16
 Utils/executor_utils.py,sha256=WwK3TKgw_hG_crg7ijRaqfidYnnNXYbbs37vKZRYK-0,491
 Utils/ter_interface.py,sha256=8Oe5818MAYC21SfUxtfnV9HQFcQ49z8Q030jjPqNP_g,3889
 llm_interface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_interface/llm.py,sha256=
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
-pikoai-0.1.
+llm_interface/llm.py,sha256=Am18eyEFYlgcackuYGz8_Y0L7lQ7SSUCpNAEjcyZaAU,5080
+pikoai-0.1.20.dist-info/licenses/LICENSE,sha256=cELUVOboOAderKFp8bdtcM5VyJi61YH1oDbRhOuoQZw,1067
+pikoai-0.1.20.dist-info/METADATA,sha256=tOpoh5fCs-_07akpneL4846eE61Dnh1eR1bmVdNBLIU,2962
+pikoai-0.1.20.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pikoai-0.1.20.dist-info/entry_points.txt,sha256=xjZnheDymNDnQ0o84R0jZKEITrhNbzQWN-AhqfA_d6s,50
+pikoai-0.1.20.dist-info/top_level.txt,sha256=hWzBNE7UQsuNcENIOksGcJED08k3ZGRRn2X5jnStICU,53
+pikoai-0.1.20.dist-info/RECORD,,
```
{pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/WHEEL
File without changes
{pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/entry_points.txt
File without changes
{pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/licenses/LICENSE
File without changes
{pikoai-0.1.18.dist-info → pikoai-0.1.20.dist-info}/top_level.txt
File without changes