local-agent-toolkit 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. local_agent_toolkit-0.1.0/.env.example +41 -0
  2. local_agent_toolkit-0.1.0/.github/workflows/publish.yml +59 -0
  3. local_agent_toolkit-0.1.0/.gitignore +79 -0
  4. local_agent_toolkit-0.1.0/LICENSE +21 -0
  5. local_agent_toolkit-0.1.0/MANIFEST.in +10 -0
  6. local_agent_toolkit-0.1.0/PKG-INFO +416 -0
  7. local_agent_toolkit-0.1.0/README.md +375 -0
  8. local_agent_toolkit-0.1.0/__init__.py +42 -0
  9. local_agent_toolkit-0.1.0/agents/OllamaAgent.py +138 -0
  10. local_agent_toolkit-0.1.0/agents/OpenAIAgent.py +239 -0
  11. local_agent_toolkit-0.1.0/app.py +148 -0
  12. local_agent_toolkit-0.1.0/docs/ENV_MANAGEMENT.md +374 -0
  13. local_agent_toolkit-0.1.0/docs/PUBLISHING_GUIDE.md +214 -0
  14. local_agent_toolkit-0.1.0/helper/__init__.py +58 -0
  15. local_agent_toolkit-0.1.0/helper/agent.py +199 -0
  16. local_agent_toolkit-0.1.0/helper/tool_call.py +139 -0
  17. local_agent_toolkit-0.1.0/helper/tools.py +197 -0
  18. local_agent_toolkit-0.1.0/helper/tools_definition.py +145 -0
  19. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/PKG-INFO +416 -0
  20. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/SOURCES.txt +28 -0
  21. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/dependency_links.txt +1 -0
  22. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/entry_points.txt +2 -0
  23. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/not-zip-safe +1 -0
  24. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/requires.txt +12 -0
  25. local_agent_toolkit-0.1.0/local_agent_toolkit.egg-info/top_level.txt +3 -0
  26. local_agent_toolkit-0.1.0/publish.sh +100 -0
  27. local_agent_toolkit-0.1.0/pyproject.toml +61 -0
  28. local_agent_toolkit-0.1.0/requirements.txt +4 -0
  29. local_agent_toolkit-0.1.0/setup.cfg +4 -0
  30. local_agent_toolkit-0.1.0/setup.py +63 -0
@@ -0,0 +1,41 @@
1
+ # Local Agent Toolkit Environment Configuration
2
+ # Copy this file to .env and fill in your values
3
+
4
+ # Agent Configuration
5
+ # Choose which AI agent to use: OLLAMA or OPENAI
6
+ CURRENT_AGENT=OLLAMA
7
+
8
+ # Ollama Configuration
9
+ # Model name to use with Ollama (e.g., llama3.1, codellama, etc.)
10
+ OLLAMA_MODEL=llama3.1
11
+ # Ollama server URL
12
+ OLLAMA_BASE_URL=http://localhost:11434
13
+ OLLAMA_HOST=http://localhost:11434
14
+
15
+ # OpenAI Configuration
16
+ # Your OpenAI API key (required only if using OPENAI agent)
17
+ OPENAI_API_KEY=your_openai_api_key_here
18
+ # OpenAI API base URL (optional, for custom endpoints)
19
+ OPENAI_API_BASE=https://api.openai.com/v1
20
+ # OpenAI model to use (optional, defaults to gpt-4 in the code)
21
+ OPENAI_MODEL=gpt-4
22
+
23
+ # Legacy/Alternative API Configuration
24
+ API_BASE_URL=http://localhost:5000
25
+
26
+ # Tool and Processing Configuration
27
+ # Maximum iterations for tool calls to prevent infinite loops
28
+ MAX_ITERATIONS=25
29
+
30
+ # Directory for file operations (optional, defaults to current directory)
31
+ WORK_DIRECTORY=./
32
+
33
+ # Logging Configuration (optional)
34
+ LOG_LEVEL=INFO
35
+ LOG_FILE=agent.log
36
+
37
+ # Advanced Configuration (optional)
38
+ # Maximum tool call depth to prevent infinite loops
39
+ MAX_TOOL_CALLS=10
40
+ # Request timeout in seconds
41
+ REQUEST_TIMEOUT=30
@@ -0,0 +1,59 @@
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - 'v*'
7
+ release:
8
+ types: [published]
9
+ workflow_dispatch:
10
+
11
+ jobs:
12
+ build:
13
+ runs-on: ubuntu-latest
14
+
15
+ steps:
16
+ - uses: actions/checkout@v4
17
+ with:
18
+ fetch-depth: 0
19
+
20
+ - name: Set up Python
21
+ uses: actions/setup-python@v5
22
+ with:
23
+ python-version: '3.11'
24
+
25
+ - name: Install build dependencies
26
+ run: |
27
+ python -m pip install --upgrade pip
28
+ pip install build twine
29
+
30
+ - name: Build package
31
+ run: python -m build
32
+
33
+ - name: Check package
34
+ run: twine check dist/*
35
+
36
+ - name: Upload artifacts
37
+ uses: actions/upload-artifact@v4
38
+ with:
39
+ name: python-package-distributions
40
+ path: dist/
41
+
42
+ publish:
43
+ needs: build
44
+ runs-on: ubuntu-latest
45
+ environment: pypi
46
+ permissions:
47
+ id-token: write
48
+ if: startsWith(github.ref, 'refs/tags/v') || (github.event_name == 'release' && github.event.action == 'published')
49
+
50
+ steps:
51
+ - name: Download artifacts
52
+ uses: actions/download-artifact@v4
53
+ with:
54
+ name: python-package-distributions
55
+ path: dist/
56
+
57
+ - name: Publish to PyPI
58
+ uses: pypa/gh-action-pypi-publish@release/v1
59
+ # Note: No 'with' section for trusted publishing - that's the key!
@@ -0,0 +1,79 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ env/
12
+ build/
13
+ develop-eggs/
14
+ dist/
15
+ downloads/
16
+ eggs/
17
+ .eggs/
18
+ lib/
19
+ lib64/
20
+ parts/
21
+ sdist/
22
+ var/
23
+ *.egg-info/
24
+ .installed.cfg
25
+ *.egg
26
+ MANIFEST
27
+
28
+ # Virtual environments
29
+ .venv/
30
+ venv/
31
+ ENV/
32
+ env.bak/
33
+ venv.bak/
34
+
35
+ # PyInstaller
36
+ # Usually these files are written by a python script from a template
37
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
38
+ *.manifest
39
+ *.spec
40
+
41
+ # Installer logs
42
+ debug.log
43
+ pip-log.txt
44
+ pip-delete-this-directory.txt
45
+
46
+ # Unit test / coverage reports
47
+ htmlcov/
48
+ .tox/
49
+ .nox/
50
+ .coverage
51
+ .coverage.*
52
+ .cache
53
+ nosetests.xml
54
+ coverage.xml
55
+ *.cover
56
+ .hypothesis/
57
+ .pytest_cache/
58
+
59
+ # Jupyter Notebook
60
+ .ipynb_checkpoints
61
+
62
+ # pyenv
63
+ .python-version
64
+
65
+ # mypy
66
+ .mypy_cache/
67
+ .dmypy.json
68
+
69
+ # Pyre type checker
70
+ .pyre/
71
+
72
+ # VS Code
73
+ .vscode/
74
+
75
+ # Local env files
76
+ .env
77
+
78
+ # macOS
79
+ .DS_Store
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 TechnicalHeist
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,10 @@
1
+ include README.md
2
+ include LICENSE
3
+ include requirements.txt
4
+ include *.py
5
+ recursive-include helper *.py
6
+ recursive-include agents *.py
7
+ global-exclude __pycache__
8
+ global-exclude *.py[co]
9
+ global-exclude .DS_Store
10
+ prune __test__
@@ -0,0 +1,416 @@
1
+ Metadata-Version: 2.4
2
+ Name: local-agent-toolkit
3
+ Version: 0.1.0
4
+ Summary: A sophisticated AI agent toolkit supporting multiple AI providers with tool calling capabilities.
5
+ Home-page: https://technicalheist.com
6
+ Author: TechnicalHeist
7
+ Author-email: TechnicalHeist <contact@technicalheist.com>
8
+ License: MIT
9
+ Project-URL: Homepage, https://technicalheist.com
10
+ Project-URL: Repository, https://github.com/technicalheist/local-agent-toolkit
11
+ Project-URL: Issues, https://github.com/technicalheist/local-agent-toolkit/issues
12
+ Keywords: ai,agent,toolkit,ollama,openai,tool-calling,automation
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Requires-Python: >=3.8
24
+ Description-Content-Type: text/markdown
25
+ License-File: LICENSE
26
+ Requires-Dist: ollama>=0.1.0
27
+ Requires-Dist: python-dotenv>=0.19.0
28
+ Requires-Dist: requests>=2.25.0
29
+ Requires-Dist: openai>=1.0.0
30
+ Provides-Extra: dev
31
+ Requires-Dist: pytest>=6.0; extra == "dev"
32
+ Requires-Dist: pytest-cov>=2.0; extra == "dev"
33
+ Requires-Dist: black>=21.0; extra == "dev"
34
+ Requires-Dist: flake8>=3.8; extra == "dev"
35
+ Requires-Dist: build>=0.8.0; extra == "dev"
36
+ Requires-Dist: twine>=4.0.0; extra == "dev"
37
+ Dynamic: author
38
+ Dynamic: home-page
39
+ Dynamic: license-file
40
+ Dynamic: requires-python
41
+
42
+ # Local Agent Toolkit
43
+
44
+ [![PyPI version](https://badge.fury.io/py/local-agent-toolkit.svg)](https://badge.fury.io/py/local-agent-toolkit)
45
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
46
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
47
+
48
+ A sophisticated AI agent toolkit that can use various tools to answer questions and perform tasks. The toolkit supports multiple AI providers including Ollama and OpenAI, and can be used both as a command-line tool and as a Python library.
49
+
50
+ ## 🚀 Quick Start
51
+
52
+ ### Installation
53
+
54
+ ```bash
55
+ pip install local-agent-toolkit
56
+ ```
57
+
58
+ ### Command Line Usage
59
+
60
+ ```bash
61
+ # Ask a question directly
62
+ local-agent "What files are in the current directory?"
63
+
64
+ # Interactive mode
65
+ local-agent
66
+
67
+ # With custom settings
68
+ local-agent "Analyze the code structure" --no-save --no-stream
69
+ ```
70
+
71
+ ### Python Library Usage
72
+
73
+ ```python
74
+ from local_agent_toolkit import run_agent_with_question
75
+
76
+ # Simple usage
77
+ result, messages = run_agent_with_question("List all Python files in the project")
78
+ print(result)
79
+
80
+ # With custom options
81
+ result, messages = run_agent_with_question(
82
+ question="What's the weather like?",
83
+ save_messages=False,
84
+ stream=False
85
+ )
86
+ ```
87
+
88
+ ## ✨ Features
89
+
90
+ - **🤖 Multiple AI Agent Support**: Choose between Ollama and OpenAI agents
91
+ - **🛠️ Rich Tool Integration**: File operations, internet search, shell commands, and more
92
+ - **💻 Dual Interface**: Command-line tool and Python library
93
+ - **📝 Conversation History**: Automatic saving with customizable options
94
+ - **⚙️ Flexible Configuration**: Environment-based configuration for different AI providers
95
+ - **🔄 Streaming Support**: Real-time response streaming (Ollama)
96
+ - **🐍 Python 3.8+**: Compatible with modern Python versions
97
+
98
+ ## 📦 Installation Options
99
+
100
+ ### From PyPI (Recommended)
101
+ ```bash
102
+ pip install local-agent-toolkit
103
+ ```
104
+
105
+ ### From Source
106
+ ```bash
107
+ git clone https://github.com/technicalheist/local-agent-toolkit.git
108
+ cd local-agent-toolkit
109
+ pip install -e .
110
+ ```
111
+
112
+ ### Development Installation
113
+ ```bash
114
+ git clone https://github.com/technicalheist/local-agent-toolkit.git
115
+ cd local-agent-toolkit
116
+ pip install -e ".[dev]"
117
+ ```
118
+
119
+ ## Project Structure
120
+
121
+ ```
122
+ agent/
123
+ ├── app.py # Main application entry point
124
+ ├── agents/ # AI agent implementations
125
+ │ ├── OllamaAgent.py # Ollama-based agent
126
+ │ └── OpenAIAgent.py # OpenAI-based agent
127
+ ├── helper/ # Helper modules directory
128
+ │ ├── __init__.py # Package initialization
129
+ │ ├── agent.py # Core agent logic
130
+ │ ├── tools.py # Tool implementations
131
+ │ ├── tools_definition.py # Tool definitions
132
+ │ └── tool_call.py # Function for running the agent
133
+ ├── __test__/ # Test files
134
+ │ ├── ollama_agent_test.py # Tests for Ollama agent
135
+ │ └── openai_agent_test.py # Tests for OpenAI agent
136
+ ├── requirements.txt # Python dependencies
137
+ ├── .env # Environment variables
138
+ └── README.md # This file
139
+ ```
140
+
141
+ ## ⚙️ Configuration
142
+
143
+ ### Environment Variables
144
+
145
+ Create a `.env` file in your project root (copy from `.env.example`):
146
+
147
+ ```bash
148
+ # Agent Selection
149
+ CURRENT_AGENT=OLLAMA # or OPENAI
150
+
151
+ # Ollama Configuration
152
+ OLLAMA_MODEL=llama3.1
153
+ OLLAMA_BASE_URL=http://localhost:11434
154
+
155
+ # OpenAI Configuration
156
+ OPENAI_API_KEY=your_openai_api_key_here
157
+ OPENAI_MODEL=gpt-4
158
+
159
+ # Optional Settings
160
+ MAX_ITERATIONS=25
161
+ WORK_DIRECTORY=./
162
+ LOG_LEVEL=INFO
163
+ ```
164
+
165
+ ### Supported AI Providers
166
+
167
+ #### 🦙 Ollama Agent
168
+ - **Purpose**: Local AI models with privacy
169
+ - **Requirements**: Ollama server running locally
170
+ - **Models**: Any Ollama-compatible model (llama3.1, codellama, etc.)
171
+ - **Benefits**: Privacy, no API costs, offline usage
172
+
173
+ #### 🤖 OpenAI Agent
174
+ - **Purpose**: Cloud-based AI with latest models
175
+ - **Requirements**: OpenAI API key
176
+ - **Models**: GPT-4, GPT-3.5-turbo, etc.
177
+ - **Benefits**: High performance, latest capabilities
178
+
179
+ ## 🛠️ Available Tools
180
+
181
+ The agents have access to these built-in tools:
182
+
183
+ - **📁 File Operations**: `list_files`, `read_file`, `write_file`
184
+ - **🔍 Pattern Search**: `list_files_by_pattern`
185
+ - **🌐 Internet Search**: `ask_any_question_internet`
186
+ - **💻 Shell Commands**: `execute_shell_command`
187
+ - **📂 Directory Operations**: `mkdir`
188
+
189
+ ## 📚 Usage Examples
190
+
191
+ ### Command Line Interface
192
+
193
+ ```bash
194
+ # Basic usage
195
+ local-agent "What Python files are in this project?"
196
+
197
+ # Interactive mode with conversation history
198
+ local-agent
199
+
200
+ # Disable features
201
+ local-agent "Analyze code" --no-save --no-stream
202
+
203
+ # Custom message file
204
+ local-agent "Help me debug" --messages-file custom_conversation.json
205
+ ```
206
+
207
+ ### Python Library
208
+
209
+ ```python
210
+ # Basic usage
211
+ from local_agent_toolkit import run_agent_with_question
212
+
213
+ result, messages = run_agent_with_question(
214
+ "Create a Python script that lists all files"
215
+ )
216
+
217
+ # Advanced usage with options
218
+ result, messages = run_agent_with_question(
219
+ question="What's the project structure?",
220
+ save_messages=True,
221
+ messages_file="project_analysis.json",
222
+ stream=False
223
+ )
224
+
225
+ # Using specific agents
226
+ from local_agent_toolkit import OllamaAgent, OpenAIAgent
227
+ from local_agent_toolkit.helper.tools_definition import tools
228
+
229
+ # Create Ollama agent
230
+ ollama_agent = OllamaAgent(
231
+ tool_definitions=tools,
232
+ tool_callables=available_functions
233
+ )
234
+
235
+ # Create OpenAI agent
236
+ openai_agent = OpenAIAgent(
237
+ tool_definitions=tools,
238
+ tool_callables=available_functions
239
+ )
240
+ ```
241
+
242
+ ### Environment Management
243
+
244
+ #### Option 1: Global .env file
245
+ ```bash
246
+ # Create .env in your project root
247
+ cp .env.example .env
248
+ # Edit .env with your settings
249
+ ```
250
+
251
+ #### Option 2: Per-project configuration
252
+ ```python
253
+ import os
254
+ from dotenv import load_dotenv
255
+
256
+ # Load from specific path
257
+ load_dotenv('/path/to/your/project/.env')
258
+
259
+ # Or set programmatically
260
+ os.environ['CURRENT_AGENT'] = 'OPENAI'
261
+ os.environ['OPENAI_API_KEY'] = 'your_key_here'
262
+
263
+ from local_agent_toolkit import run_agent_with_question
264
+ ```
265
+
266
+ #### Option 3: Docker/Container deployment
267
+ ```dockerfile
268
+ # Dockerfile
269
+ FROM python:3.11-slim
270
+
271
+ WORKDIR /app
272
+ COPY requirements.txt .
273
+ RUN pip install local-agent-toolkit
274
+
275
+ # Set environment variables
276
+ ENV CURRENT_AGENT=OLLAMA
277
+ ENV OLLAMA_BASE_URL=http://ollama-server:11434
278
+ ENV OLLAMA_MODEL=llama3.1
279
+
280
+ COPY . .
281
+ CMD ["local-agent"]
282
+ ```
283
+
284
+ ## 🔧 Development
285
+
286
+ ### Running Tests
287
+ ```bash
288
+ # Install development dependencies
289
+ pip install -e ".[dev]"
290
+
291
+ # Run tests
292
+ pytest __test__/
293
+
294
+ # Run specific tests
295
+ pytest __test__/ollama_agent_test.py
296
+ pytest __test__/openai_agent_test.py
297
+ ```
298
+
299
+ ### Building for Distribution
300
+ ```bash
301
+ # Install build tools
302
+ pip install build twine
303
+
304
+ # Build the package
305
+ python -m build
306
+
307
+ # Upload to PyPI (maintainers only)
308
+ twine upload dist/*
309
+ ```
310
+
311
+ ## 📝 Project Structure
312
+
313
+ ```
314
+ local-agent-toolkit/
315
+ ├── app.py # Command-line interface
316
+ ├── agents/ # AI agent implementations
317
+ │ ├── OllamaAgent.py # Ollama-based agent
318
+ │ └── OpenAIAgent.py # OpenAI-based agent
319
+ ├── helper/ # Core library modules
320
+ │ ├── __init__.py # Library exports
321
+ │ ├── agent.py # Base agent logic
322
+ │ ├── tools.py # Tool implementations
323
+ │ ├── tools_definition.py # Tool schemas
324
+ │ └── tool_call.py # Main execution function
325
+ ├── __test__/ # Test suite
326
+ ├── requirements.txt # Dependencies
327
+ ├── setup.py # Package configuration
328
+ ├── pyproject.toml # Modern packaging config
329
+ └── .env.example # Environment template
330
+ ```
331
+
332
+ ## 🤝 Contributing
333
+
334
+ 1. Fork the repository
335
+ 2. Create a feature branch (`git checkout -b feature/amazing-feature`)
336
+ 3. Commit your changes (`git commit -m 'Add amazing feature'`)
337
+ 4. Push to the branch (`git push origin feature/amazing-feature`)
338
+ 5. Open a Pull Request
339
+
340
+ ## 📄 License
341
+
342
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
343
+
344
+ ## 🙏 Acknowledgments
345
+
346
+ - [Ollama](https://ollama.ai/) for local AI model serving
347
+ - [OpenAI](https://openai.com/) for API access to advanced models
348
+ - The Python community for excellent tooling and libraries
349
+
350
+ ## 📞 Support
351
+
352
+ - 📧 Issues: [GitHub Issues](https://github.com/technicalheist/local-agent-toolkit/issues)
353
+ - 📖 Documentation: [GitHub Repository](https://github.com/technicalheist/local-agent-toolkit)
354
+ - 💬 Discussions: [GitHub Discussions](https://github.com/technicalheist/local-agent-toolkit/discussions)
355
+
356
+ Use command-line flags for more control:
357
+
358
+ ```bash
359
+ # Ask a question using the --question flag
360
+ python app.py --question "What files are in the helper directory?"
361
+
362
+ # Don't save the conversation history
363
+ python app.py --question "Get current time" --no-save
364
+
365
+ # Save to a custom file
366
+ python app.py --question "Create a test directory" --messages-file "my_conversation.json"
367
+ ```
368
+
369
+ ## Command-Line Options
370
+
371
+ - `question` (positional): The question to ask the agent
372
+ - `-q, --question`: Alternative way to specify the question
373
+ - `--no-save`: Don't save the conversation history to a file
374
+ - `--messages-file`: Custom filename for saving conversation history (default: `messages.json`)
375
+ - `-h, --help`: Show help message
376
+
377
+ ## Examples
378
+
379
+ ```bash
380
+ # Interactive mode
381
+ python app.py
382
+
383
+ # Simple question
384
+ python app.py "What's the weather like?"
385
+
386
+ # With custom options
387
+ python app.py -q "List directory contents" --messages-file "dir_check.json"
388
+
389
+ # Without saving conversation
390
+ python app.py "Quick calculation: 2+2" --no-save
391
+ ```
392
+
393
+ ## Available Tools
394
+
395
+ The agent has access to various tools including:
396
+
397
+ - **File Operations**: `list_files`, `read_file`, `write_file`, `mkdir`
398
+ - **Pattern Matching**: `list_files_by_pattern`
399
+ - **Internet Search**: `ask_any_question_internet`
400
+ - **Shell Commands**: `execute_shell_command`
401
+
402
+ ## Conversation History
403
+
404
+ By default, all conversations are saved to `messages.json`. You can:
405
+ - Use `--no-save` to skip saving
406
+ - Use `--messages-file` to specify a custom filename
407
+ - View the conversation history in JSON format
408
+
409
+ ## Notes
410
+
411
+ - The agent can handle complex multi-step tasks
412
+ - **Multiple AI Providers**: Choose between Ollama (local) and OpenAI (cloud) agents based on your needs
413
+ - Conversations include tool calls and responses for full transparency
414
+ - The JSON conversation files can be used for debugging or analysis
415
+ - The agent will continue working until it completes the task or reaches the maximum iteration limit
416
+ - Both agent types support the same tool set and functionality