nixagent 1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nixagent-1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 TechnicalHeist
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,10 @@
1
+ include README.md
2
+ include LICENSE
3
+ include requirements.txt
4
+ include *.py
5
+ recursive-include helper *.py
6
+ recursive-include agents *.py
7
+ global-exclude __pycache__
8
+ global-exclude *.py[co]
9
+ global-exclude .DS_Store
10
+ prune __test__
nixagent-1.0/PKG-INFO ADDED
@@ -0,0 +1,170 @@
1
+ Metadata-Version: 2.4
2
+ Name: nixagent
3
+ Version: 1.0
4
+ Summary: A sophisticated AI agent toolkit supporting multiple AI providers with tool calling capabilities.
5
+ Home-page: https://technicalheist.com
6
+ Author: TechnicalHeist
7
+ Author-email: TechnicalHeist <contact@technicalheist.com>
8
+ License: MIT
9
+ Project-URL: Homepage, https://technicalheist.com
10
+ Project-URL: Repository, https://github.com/technicalheist/local-agent-toolkit
11
+ Project-URL: Issues, https://github.com/technicalheist/local-agent-toolkit/issues
12
+ Keywords: ai,agent,toolkit,ollama,openai,tool-calling,automation
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Requires-Python: >=3.8
24
+ Description-Content-Type: text/markdown
25
+ License-File: LICENSE
26
+ Requires-Dist: python-dotenv>=0.19.0
27
+ Requires-Dist: requests>=2.25.0
28
+ Provides-Extra: dev
29
+ Requires-Dist: pytest>=6.0; extra == "dev"
30
+ Requires-Dist: pytest-cov>=2.0; extra == "dev"
31
+ Requires-Dist: black>=21.0; extra == "dev"
32
+ Requires-Dist: flake8>=3.8; extra == "dev"
33
+ Requires-Dist: build>=0.8.0; extra == "dev"
34
+ Requires-Dist: twine>=4.0.0; extra == "dev"
35
+ Dynamic: author
36
+ Dynamic: home-page
37
+ Dynamic: license-file
38
+ Dynamic: requires-python
39
+
40
+ # Local Agent Toolkit
41
+
42
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
43
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
44
+
45
+ A generic, multipurpose Local Agent Toolkit in Python. This framework is completely agnostic to specific use cases and architectures, serving as a robust foundation for building autonomous, collaborative AI agents that can manage their own context, interface with each other, and securely use external tools.
46
+
47
+ ## 🚀 Quick Start
48
+
49
+ ### Installation
50
+
51
+ ```bash
52
+ pip install -r requirements.txt
53
+ ```
54
+
55
+ ### Command Line Usage
56
+
57
+ First, set up your environment configuration by copying `.env.example` to `.env` and adding your API keys.
58
+
59
+ ```bash
60
+ # Ask a question directly
61
+ python app.py "What files are in the current directory?"
62
+
63
+ # Interactive mode
64
+ python app.py
65
+
66
+ # With custom settings
67
+ python app.py "Analyze the code structure" --no-save
68
+ ```
69
+
70
+ ### Python Library Usage
71
+
72
+ ```python
73
+ from local_agent_toolkit import Agent
74
+
75
+ # Initialize the core agent
76
+ agent = Agent(
77
+ name="MainAgent",
78
+ system_prompt="You are a highly capable AI assistant that uses available tools to accomplish goals."
79
+ )
80
+
81
+ result = agent.run(user_prompt="List all Python files in the project")
82
+ print(result)
83
+ ```
84
+
85
+ ## ✨ Features
86
+
87
+ - **🌐 Standardized API Interface**: Uses pure `requests` following the OpenAI native JSON structure. Compatible with OpenAI, Vertex, Local LLMs (via Ollama/vLLM), Groq, and more.
88
+ - **🤖 Autonomous Agents**: Agents maintain independent conversation histories and automatically delegate sub-tasks when needed.
89
+ - **🔌 Model Context Protocol (MCP)**: Dynamic tool extension through MCP servers configured in `mcp.json`.
90
+ - **🛠️ Rich Built-In Tools**: Deep system-level tools covering regex-based file searching, exact content mapping, disk manipulation, and secure subprocess execution.
91
+ - **🗣️ Inter-Agent Collaboration**: Support for multiple sub-agents operating concurrently under the same framework via `.register_collaborator(agent)`.
92
+
93
+ ## 📦 Project Structure
94
+
95
+ ```text
96
+ framework/
97
+ ├── app.py # Main CLI application
98
+ ├── local_agent_toolkit/ # Core Framework Mechanics
99
+ │ ├── __init__.py # Library exports
100
+ │ ├── agent.py # Core contextual autonomous Agent
101
+ │ ├── llm.py # Central HTTP-based LLM orchestration
102
+ │ ├── mcp.py # Model Context Protocol definition and bindings
103
+ │ └── tools/ # Default Native Tools
104
+ │ ├── __init__.py # Tool bindings & descriptions
105
+ │ ├── cmd.py # Subprocess shell extensions
106
+ │ └── fs.py # File system native operations
107
+ ├── mcp.json # Model Context Protocol Server mapping
108
+ ├── docs/ # Additional Documentation
109
+ ├── requirements.txt # Python dependencies
110
+ ├── .env # Operational mapping variables
111
+ └── README.md # This file
112
+ ```
113
+
114
+ ## ⚙️ Configuration
115
+
116
+ Create a `.env` file in your project root:
117
+
118
+ ```bash
119
+ # Core API Key for the provider (OpenAI, Gemini, Vertex, Groq, etc)
120
+ API_KEY=your_api_key_here
121
+
122
+ # Base URL for the OpenAI compatible endpoint
123
+ API_BASE_URL=https://api.openai.com/v1
124
+
125
+ # The model name to use for generating text
126
+ MODEL=gpt-4o
127
+
128
+ # Tool and Processing Configuration
129
+ MAX_ITERATIONS=25
130
+
131
+ # Logging Configuration
132
+ LOG_LEVEL=INFO
133
+ ```
134
+
135
+ ## 🔌 Using MCP Servers
136
+
137
+ Add server definitions to your `mcp.json` file in the root directory:
138
+
139
+ ```json
140
+ {
141
+ "mcpServers": {
142
+ "sqlite": {
143
+ "command": "uvx",
144
+ "args": ["mcp-server-sqlite", "--db-path", "./database.db"],
145
+ "active": true
146
+ }
147
+ }
148
+ }
149
+ ```
150
+
151
+ The framework's `MCPManager` automatically bootstraps all active MCP servers, parses their schemas, and loads their tools natively alongside standard tools upon Agent initialization.
152
+
153
+ ## 🤝 Collaborative Agents
154
+
155
+ Agents can securely establish communication networks.
156
+
157
+ ```python
158
+ from local_agent_toolkit import Agent
159
+
160
+ research_agent = Agent("Researcher", "You perform file system research.")
161
+ writer_agent = Agent("Writer", "You answer questions accurately.")
162
+
163
+ writer_agent.register_collaborator(research_agent)
164
+
165
+ writer_agent.run("Ask the Researcher to find all text files and read them to me.")
166
+ ```
167
+
168
+ ## 📄 License
169
+
170
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
nixagent-1.0/README.md ADDED
@@ -0,0 +1,131 @@
1
+ # Local Agent Toolkit
2
+
3
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
4
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5
+
6
+ A generic, multipurpose Local Agent Toolkit in Python. This framework is completely agnostic to specific use cases and architectures, serving as a robust foundation for building autonomous, collaborative AI agents that can manage their own context, interface with each other, and securely use external tools.
7
+
8
+ ## 🚀 Quick Start
9
+
10
+ ### Installation
11
+
12
+ ```bash
13
+ pip install -r requirements.txt
14
+ ```
15
+
16
+ ### Command Line Usage
17
+
18
+ First, set up your environment configuration by copying `.env.example` to `.env` and adding your API keys.
19
+
20
+ ```bash
21
+ # Ask a question directly
22
+ python app.py "What files are in the current directory?"
23
+
24
+ # Interactive mode
25
+ python app.py
26
+
27
+ # With custom settings
28
+ python app.py "Analyze the code structure" --no-save
29
+ ```
30
+
31
+ ### Python Library Usage
32
+
33
+ ```python
34
+ from local_agent_toolkit import Agent
35
+
36
+ # Initialize the core agent
37
+ agent = Agent(
38
+ name="MainAgent",
39
+ system_prompt="You are a highly capable AI assistant that uses available tools to accomplish goals."
40
+ )
41
+
42
+ result = agent.run(user_prompt="List all Python files in the project")
43
+ print(result)
44
+ ```
45
+
46
+ ## ✨ Features
47
+
48
+ - **🌐 Standardized API Interface**: Uses pure `requests` following the OpenAI native JSON structure. Compatible with OpenAI, Vertex, Local LLMs (via Ollama/vLLM), Groq, and more.
49
+ - **🤖 Autonomous Agents**: Agents maintain independent conversation histories and automatically delegate sub-tasks when needed.
50
+ - **🔌 Model Context Protocol (MCP)**: Dynamic tool extension through MCP servers configured in `mcp.json`.
51
+ - **🛠️ Rich Built-In Tools**: Deep system-level tools covering regex-based file searching, exact content mapping, disk manipulation, and secure subprocess execution.
52
+ - **🗣️ Inter-Agent Collaboration**: Support for multiple sub-agents operating concurrently under the same framework via `.register_collaborator(agent)`.
53
+
54
+ ## 📦 Project Structure
55
+
56
+ ```text
57
+ framework/
58
+ ├── app.py # Main CLI application
59
+ ├── local_agent_toolkit/ # Core Framework Mechanics
60
+ │ ├── __init__.py # Library exports
61
+ │ ├── agent.py # Core contextual autonomous Agent
62
+ │ ├── llm.py # Central HTTP-based LLM orchestration
63
+ │ ├── mcp.py # Model Context Protocol definition and bindings
64
+ │ └── tools/ # Default Native Tools
65
+ │ ├── __init__.py # Tool bindings & descriptions
66
+ │ ├── cmd.py # Subprocess shell extensions
67
+ │ └── fs.py # File system native operations
68
+ ├── mcp.json # Model Context Protocol Server mapping
69
+ ├── docs/ # Additional Documentation
70
+ ├── requirements.txt # Python dependencies
71
+ ├── .env # Operational mapping variables
72
+ └── README.md # This file
73
+ ```
74
+
75
+ ## ⚙️ Configuration
76
+
77
+ Create a `.env` file in your project root:
78
+
79
+ ```bash
80
+ # Core API Key for the provider (OpenAI, Gemini, Vertex, Groq, etc)
81
+ API_KEY=your_api_key_here
82
+
83
+ # Base URL for the OpenAI compatible endpoint
84
+ API_BASE_URL=https://api.openai.com/v1
85
+
86
+ # The model name to use for generating text
87
+ MODEL=gpt-4o
88
+
89
+ # Tool and Processing Configuration
90
+ MAX_ITERATIONS=25
91
+
92
+ # Logging Configuration
93
+ LOG_LEVEL=INFO
94
+ ```
95
+
96
+ ## 🔌 Using MCP Servers
97
+
98
+ Add server definitions to your `mcp.json` file in the root directory:
99
+
100
+ ```json
101
+ {
102
+ "mcpServers": {
103
+ "sqlite": {
104
+ "command": "uvx",
105
+ "args": ["mcp-server-sqlite", "--db-path", "./database.db"],
106
+ "active": true
107
+ }
108
+ }
109
+ }
110
+ ```
111
+
112
+ The framework's `MCPManager` automatically bootstraps all active MCP servers, parses their schemas, and loads their tools natively alongside standard tools upon Agent initialization.
113
+
114
+ ## 🤝 Collaborative Agents
115
+
116
+ Agents can securely establish communication networks.
117
+
118
+ ```python
119
+ from local_agent_toolkit import Agent
120
+
121
+ research_agent = Agent("Researcher", "You perform file system research.")
122
+ writer_agent = Agent("Writer", "You answer questions accurately.")
123
+
124
+ writer_agent.register_collaborator(research_agent)
125
+
126
+ writer_agent.run("Ask the Researcher to find all text files and read them to me.")
127
+ ```
128
+
129
+ ## 📄 License
130
+
131
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,42 @@
1
+ """
2
+ Local Agent Toolkit
3
+
4
+ A sophisticated AI agent toolkit supporting multiple AI providers with tool calling capabilities.
5
+
6
+ This package provides:
7
+ - AI agents that can use tools to perform tasks
8
+ - Support for Ollama and OpenAI models
9
+ - Command-line interface for interactive use
10
+ - Python library for programmatic use
11
+
12
+ Quick Start:
13
+ # Install the package
14
+ pip install local-agent-toolkit
15
+
16
+ # Use as command line tool
17
+ local-agent "What files are in the current directory?"
18
+
19
+ # Use as Python library
20
+ from local_agent_toolkit import run_agent_with_question
21
+ result, messages = run_agent_with_question("Your question here")
22
+
23
+ Environment Variables:
24
+ CURRENT_AGENT: Set to "OLLAMA" or "OPENAI" (default: "OLLAMA")
25
+ OLLAMA_MODEL: Ollama model name (default: "llama3.1")
26
+ OLLAMA_BASE_URL: Ollama server URL (default: "http://localhost:11434")
27
+ OPENAI_API_KEY: OpenAI API key (required for OpenAI agent)
28
+ """
29
+
30
+ from helper import run_agent_with_question, tools
31
+ from helper import OllamaAgent, OpenAIAgent
32
+
33
+ __version__ = "0.1.0"
34
+ __author__ = "TechnicalHeist"
35
+ __email__ = "contact@technicalheist.com"
36
+
37
+ __all__ = [
38
+ 'run_agent_with_question',
39
+ 'OllamaAgent',
40
+ 'OpenAIAgent',
41
+ 'tools'
42
+ ]
nixagent-1.0/app.py ADDED
@@ -0,0 +1,88 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ AI Agent Application
4
+
5
+ This application allows you to interact with an AI agent that can use various tools
6
+ to answer questions and perform tasks. It uses the new generic Agent framework.
7
+ """
8
+
9
+ from dotenv import load_dotenv
10
+ load_dotenv()
11
+
12
+ import sys
13
+ import argparse
14
+ import json
15
+ from nixagent import Agent
16
+
17
def main():
    """CLI entry point.

    Parses command-line arguments, builds the core Agent, and either answers a
    single question (when one is given positionally or via -q/--question) or
    drops into an interactive read-ask-print loop. Conversation history is
    written to ``--messages-file`` unless ``--no-save`` is set.
    """
    parser = argparse.ArgumentParser(
        description="AI Agent that can use tools to answer questions and perform tasks",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    parser.add_argument('question', nargs='?', help='The question to ask the agent')
    parser.add_argument('-q', '--question', dest='question_flag', help='Alternative to positional argument')
    parser.add_argument('--no-save', action='store_true', help='Do not save the conversation history')
    parser.add_argument('--messages-file', default='messages.json', help='Filename to save history')

    args = parser.parse_args()

    # Positional argument wins; fall back to the -q/--question flag.
    question = args.question or args.question_flag

    # Initialize the core agent
    agent = Agent(
        name="Main",
        system_prompt="You are a highly capable AI assistant that uses available tools to accomplish the user's goals. You must analyze the task carefully and execute the appropriate actions."
    )

    def _save_history():
        """Persist the agent's message history unless --no-save was given.

        ``default=str`` makes any non-JSON-serializable message values
        (e.g. objects returned by tools) round-trip as strings.
        """
        if args.no_save:
            return
        with open(args.messages_file, "w") as f:
            json.dump(agent.messages, f, indent=2, default=str)
        print(f"\n💾 Conversation saved to: {args.messages_file}")

    if not question:
        # Interactive mode: loop until the user quits (or sends EOF/Ctrl-C).
        print("🤖 AI Agent - Interactive Mode")
        print("=" * 50)
        print("Ask me anything! I can help you with various tasks using my tools.")
        print("Type 'quit', 'exit', or 'q' to end the session.")
        print()

        while True:
            try:
                question = input("❓ Your question: ").strip()
                # An empty line also ends the session.
                if question.lower() in ['quit', 'exit', 'q', '']:
                    print("👋 Goodbye!")
                    break

                print("\n🔄 Processing your request...")
                print("-" * 30)

                result = agent.run(user_prompt=question)
                print(f"\n✅ Final Result:\n{result}")

                _save_history()

                print("\n" + "=" * 50)

            except (KeyboardInterrupt, EOFError):
                print("\n\n👋 Goodbye!")
                break
            except Exception as e:
                # Keep the session alive on per-question failures.
                print(f"\n❌ Error: {e}")
    else:
        # One-shot mode: answer the single question and exit.
        print(f"🤖 AI Agent - Processing: {question}")
        print("=" * 50)

        try:
            result = agent.run(user_prompt=question)
            print(f"\n✅ Final Result:\n{result}")

            _save_history()

        except Exception as e:
            print(f"❌ Error: {e}")
            sys.exit(1)

if __name__ == "__main__":
    main()
@@ -0,0 +1,5 @@
1
"""Public package surface: the Agent class, the raw LLM caller, and MCP support."""

from .agent import Agent
from .llm import call_llm
from .mcp import MCPManager

__all__ = [
    "Agent",
    "call_llm",
    "MCPManager",
]
@@ -0,0 +1,132 @@
1
+ import os
2
+ import json
3
+ from typing import List, Dict, Any, Callable
4
+ from .llm import call_llm
5
+ from .tools import AVAILABLE_TOOLS, TOOL_DEFINITIONS
6
+ from .mcp import MCPManager
7
+
8
# Process-wide MCP manager, created lazily on first use.
_global_mcp_manager = None

def get_mcp_manager():
    """Return the shared MCPManager, building and activating it on first call.

    Subsequent calls reuse the same instance so MCP servers are only
    bootstrapped once per process.
    """
    global _global_mcp_manager
    if _global_mcp_manager is None:
        manager = MCPManager("mcp.json")
        manager.load_and_activate()
        _global_mcp_manager = manager
    return _global_mcp_manager
16
+
17
class Agent:
    """A tool-using LLM agent with its own conversation history.

    The agent repeatedly calls the LLM, executes any tool calls the model
    requests, appends the results to the conversation, and stops when the
    model replies without tool calls or the iteration limit is reached.
    """

    def __init__(self, name: str, system_prompt: str, model: str = None,
                 tools: dict = None, tool_defs: list = None):
        """Create an agent.

        Args:
            name: Display name used in log lines and collaborator tool names.
            system_prompt: System message that seeds the conversation.
            model: Model identifier; falls back to the MODEL env var, then "gpt-4o".
            tools: Optional mapping of tool name -> callable. Defaults to the
                built-in AVAILABLE_TOOLS.
            tool_defs: Optional list of OpenAI-style tool definitions.
                Defaults to the built-in TOOL_DEFINITIONS.
        """
        self.name = name
        self.system_prompt = system_prompt
        self.model = model or os.getenv("MODEL", "gpt-4o")
        self.messages = [{"role": "system", "content": system_prompt}]
        # BUGFIX: always copy caller-supplied containers. MCP registration
        # below and register_collaborator() mutate self.tools/self.tool_defs;
        # previously a caller's own dict/list was mutated in place.
        self.tools = dict(tools) if tools is not None else dict(AVAILABLE_TOOLS)
        self.tool_defs = list(tool_defs) if tool_defs is not None else list(TOOL_DEFINITIONS)

        # Load MCP tools and expose each one as a plain callable.
        mcp = get_mcp_manager()
        mcp_tools = mcp.get_all_tools()
        if mcp_tools:
            self.tool_defs.extend(mcp_tools)
            for mcp_tool in mcp_tools:
                mcp_name = mcp_tool["function"]["name"]

                # Factory function binds the current mcp_name; a bare lambda in
                # the loop would late-bind and every tool would call the last name.
                def make_mcp_caller(n):
                    return lambda **kwargs: mcp.call_tool(n, kwargs)

                self.tools[mcp_name] = make_mcp_caller(mcp_name)

        self.agents_in_network = {}

    def register_collaborator(self, agent_instance):
        """Allow this agent to delegate tasks to another agent.

        Registers the collaborator in the network and installs an
        ``ask_agent_<name>`` tool that forwards a message to it.
        """
        self.agents_in_network[agent_instance.name] = agent_instance

        # Tool that relays a message to the collaborator agent.
        def communicate_with_agent(message: str) -> str:
            # Run the sub-agent non-streaming with its own bounded iterations.
            return agent_instance.run(message, max_iterations=10, stream=False)

        tool_name = f"ask_agent_{agent_instance.name}"
        self.tools[tool_name] = communicate_with_agent
        self.tool_defs.append({
            "type": "function",
            "function": {
                "name": tool_name,
                "description": f"Ask the {agent_instance.name} agent to perform a task.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "message": {"type": "string", "description": "The task or question for the agent."}
                    },
                    "required": ["message"]
                }
            }
        })

    def run(self, user_prompt: str, max_iterations: int = 15, stream: bool = False):
        """Run the agent loop for one user prompt and return the final text.

        Args:
            user_prompt: The user's request; appended to the conversation.
            max_iterations: Upper bound on LLM round-trips.
            stream: Accepted for interface compatibility; requests are
                currently issued non-streaming regardless.

        Returns:
            The model's final text answer, or an error/limit message string.
        """
        self.messages.append({"role": "user", "content": user_prompt})

        for i in range(max_iterations):
            print(f"[{self.name}] Iteration {i+1}")
            try:
                # Standard (non-streaming) OpenAI-format request; streaming
                # parse over raw HTTP is intentionally not implemented here.
                response = call_llm(
                    messages=self.messages,
                    tools=self.tool_defs if self.tool_defs else None,
                    model=self.model,
                    stream=False
                )

                message = response['choices'][0]['message']

                # Keep the assistant turn in history before acting on it.
                self.messages.append(message)

                if not message.get("tool_calls"):
                    # BUGFIX: the API may send an explicit null content; coerce
                    # to "" so callers always receive a string.
                    return message.get("content") or ""

                for tool_call in message["tool_calls"]:
                    tool_name = tool_call["function"]["name"]
                    tool_args_str = tool_call["function"]["arguments"]
                    try:
                        tool_args = json.loads(tool_args_str)
                    except json.JSONDecodeError:
                        # Malformed arguments: call the tool with none rather
                        # than aborting the whole run.
                        tool_args = {}

                    if tool_name not in self.tools:
                        print(f"[{self.name}] Tool '{tool_name}' not found.")
                        self.messages.append({
                            "role": "tool",
                            "name": tool_name,
                            "content": f"Error: Tool '{tool_name}' not found.",
                            "tool_call_id": tool_call["id"]
                        })
                        continue

                    print(f"[{self.name}] Calling {tool_name}")
                    try:
                        tool_output = self.tools[tool_name](**tool_args)
                        self.messages.append({
                            "role": "tool",
                            "name": tool_name,
                            "content": str(tool_output),
                            "tool_call_id": tool_call["id"]
                        })
                    except Exception as e:
                        # Report the failure back to the model so it can adapt.
                        print(f"[{self.name}] Error executing tool '{tool_name}': {e}")
                        self.messages.append({
                            "role": "tool",
                            "name": tool_name,
                            "content": f"Error executing tool '{tool_name}': {e}",
                            "tool_call_id": tool_call["id"]
                        })

            except Exception as e:
                print(f"API error: {e}")
                return f"API error: {e}"

        return "Agent could not complete task within limits."
@@ -0,0 +1,50 @@
1
+ import os
2
+ import json
3
+ import requests
4
+ from typing import List, Dict, Any, Optional
5
+
6
+ def call_llm(messages: List[Dict], tools: Optional[List[Dict]] = None,
7
+ model: Optional[str] = None, api_base: Optional[str] = None,
8
+ api_key: Optional[str] = None, provider: str = "openai",
9
+ stream: bool = False) -> Any:
10
+ """
11
+ Execute LLM calls via raw HTTP requests.
12
+ Supports OpenAI standard format. Many platforms including Ollama support this format.
13
+ Vertex API support can be routed here as well.
14
+ """
15
+ api_key = api_key or os.getenv("API_KEY", "")
16
+ api_base = api_base or os.getenv("API_BASE_URL", "https://api.openai.com/v1")
17
+ model = model or os.getenv("MODEL", "gpt-4o")
18
+
19
+ headers = {
20
+ "Content-Type": "application/json"
21
+ }
22
+
23
+ if api_key:
24
+ headers["Authorization"] = f"Bearer {api_key}"
25
+
26
+ payload = {
27
+ "model": model,
28
+ "messages": messages,
29
+ "temperature": 1,
30
+ "top_p": 1,
31
+ "max_tokens": 4096,
32
+ "stream": stream
33
+ }
34
+
35
+ if tools:
36
+ payload["tools"] = tools
37
+
38
+ url = f"{api_base.rstrip('/')}/chat/completions"
39
+
40
+ # Custom logic for Vertex if provider == "vertex" might be added here,
41
+ # but the task states we standardize exclusively on the OpenAI format for all request structures.
42
+
43
+ if stream:
44
+ response = requests.post(url, headers=headers, json=payload, stream=True)
45
+ response.raise_for_status()
46
+ return response # Return the raw response for iterating lines
47
+ else:
48
+ response = requests.post(url, headers=headers, json=payload)
49
+ response.raise_for_status()
50
+ return response.json()