kubrick-cli 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kubrick_cli-0.1.3/LICENSE +21 -0
- kubrick_cli-0.1.3/PKG-INFO +124 -0
- kubrick_cli-0.1.3/README.md +100 -0
- kubrick_cli-0.1.3/kubrick_cli/__init__.py +47 -0
- kubrick_cli-0.1.3/kubrick_cli/agent_loop.py +274 -0
- kubrick_cli-0.1.3/kubrick_cli/classifier.py +194 -0
- kubrick_cli-0.1.3/kubrick_cli/config.py +247 -0
- kubrick_cli-0.1.3/kubrick_cli/display.py +154 -0
- kubrick_cli-0.1.3/kubrick_cli/execution_strategy.py +195 -0
- kubrick_cli-0.1.3/kubrick_cli/main.py +806 -0
- kubrick_cli-0.1.3/kubrick_cli/planning.py +319 -0
- kubrick_cli-0.1.3/kubrick_cli/progress.py +162 -0
- kubrick_cli-0.1.3/kubrick_cli/providers/__init__.py +6 -0
- kubrick_cli-0.1.3/kubrick_cli/providers/anthropic_provider.py +209 -0
- kubrick_cli-0.1.3/kubrick_cli/providers/base.py +136 -0
- kubrick_cli-0.1.3/kubrick_cli/providers/factory.py +161 -0
- kubrick_cli-0.1.3/kubrick_cli/providers/openai_provider.py +181 -0
- kubrick_cli-0.1.3/kubrick_cli/providers/triton_provider.py +96 -0
- kubrick_cli-0.1.3/kubrick_cli/safety.py +204 -0
- kubrick_cli-0.1.3/kubrick_cli/scheduler.py +183 -0
- kubrick_cli-0.1.3/kubrick_cli/setup_wizard.py +161 -0
- kubrick_cli-0.1.3/kubrick_cli/tools.py +400 -0
- kubrick_cli-0.1.3/kubrick_cli/triton_client.py +177 -0
- kubrick_cli-0.1.3/kubrick_cli.egg-info/PKG-INFO +124 -0
- kubrick_cli-0.1.3/kubrick_cli.egg-info/SOURCES.txt +35 -0
- kubrick_cli-0.1.3/kubrick_cli.egg-info/dependency_links.txt +1 -0
- kubrick_cli-0.1.3/kubrick_cli.egg-info/entry_points.txt +2 -0
- kubrick_cli-0.1.3/kubrick_cli.egg-info/requires.txt +11 -0
- kubrick_cli-0.1.3/kubrick_cli.egg-info/top_level.txt +1 -0
- kubrick_cli-0.1.3/pyproject.toml +42 -0
- kubrick_cli-0.1.3/setup.cfg +4 -0
- kubrick_cli-0.1.3/tests/test_completion_detector.py +345 -0
- kubrick_cli-0.1.3/tests/test_config.py +130 -0
- kubrick_cli-0.1.3/tests/test_safety.py +294 -0
- kubrick_cli-0.1.3/tests/test_tool_calling.py +126 -0
- kubrick_cli-0.1.3/tests/test_tool_executor.py +265 -0
- kubrick_cli-0.1.3/tests/test_triton_client_unit.py +464 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Russell Land
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: kubrick-cli
|
|
3
|
+
Version: 0.1.3
|
|
4
|
+
Summary: A CLI tool for AI-assisted coding using Triton LLM backend
|
|
5
|
+
Author-email: Russell Land <rcland12@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/rcland12/kubrick-cli
|
|
8
|
+
Project-URL: Repository, https://github.com/rcland12/kubrick-cli
|
|
9
|
+
Project-URL: Issues, https://github.com/rcland12/kubrick-cli/issues
|
|
10
|
+
Requires-Python: >=3.8
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
License-File: LICENSE
|
|
13
|
+
Requires-Dist: rich>=13.0.0
|
|
14
|
+
Requires-Dist: prompt_toolkit>=3.0.0
|
|
15
|
+
Provides-Extra: dev
|
|
16
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
17
|
+
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
|
18
|
+
Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
|
|
19
|
+
Requires-Dist: black>=22.0.0; extra == "dev"
|
|
20
|
+
Requires-Dist: flake8>=6.0.0; extra == "dev"
|
|
21
|
+
Requires-Dist: build>=1.0.0; extra == "dev"
|
|
22
|
+
Requires-Dist: twine>=5.0.0; extra == "dev"
|
|
23
|
+
Dynamic: license-file
|
|
24
|
+
|
|
25
|
+
# Kubrick CLI
|
|
26
|
+
|
|
27
|
+
An AI-assisted coding CLI tool powered by your own Triton LLM backend. Like Claude Code, but self-hosted.
|
|
28
|
+
|
|
29
|
+
## Installation
|
|
30
|
+
|
|
31
|
+
### Local Installation
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
pip install -e .
|
|
35
|
+
kubrick
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### Docker
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
cd /path/to/your/project
|
|
42
|
+
docker run --rm -it \
|
|
43
|
+
--network host \
|
|
44
|
+
-v ${HOME}:/home/kubrick \
|
|
45
|
+
-v ${PWD}:/workspace \
|
|
46
|
+
-v /etc/localtime:/etc/localtime:ro \
|
|
47
|
+
-v /etc/timezone:/etc/timezone:ro \
|
|
48
|
+
rcland12/kubrick-cli
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Quick Start
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
# Basic usage
|
|
55
|
+
kubrick
|
|
56
|
+
|
|
57
|
+
# Custom Triton server
|
|
58
|
+
kubrick --triton-url my-server:8000
|
|
59
|
+
|
|
60
|
+
# Load previous conversation
|
|
61
|
+
kubrick --load 20240118_143022
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## Requirements
|
|
65
|
+
|
|
66
|
+
- Python 3.8+
|
|
67
|
+
- LLM Provider (choose one):
|
|
68
|
+
- **Triton Inference Server** with streaming LLM model (default: `http://localhost:8000`)
|
|
69
|
+
- **OpenAI API** (GPT-4, GPT-3.5-turbo, etc.)
|
|
70
|
+
- **Anthropic API** (Claude models)
|
|
71
|
+
|
|
72
|
+
## Documentation
|
|
73
|
+
|
|
74
|
+
- **[WIKI.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/WIKI.md)** - Complete features, commands, and usage guide
|
|
75
|
+
- **[PROVIDERS.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/PROVIDERS.md)** - Multi-provider setup (Triton, OpenAI, Anthropic)
|
|
76
|
+
- **[TRITON.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/TRITON.md)** - Triton backend setup and requirements
|
|
77
|
+
- **[TESTING.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/TESTING.md)** - Testing guide and CI/CD setup
|
|
78
|
+
- **[DOCKER.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/DOCKER.md)** - Docker setup and troubleshooting
|
|
79
|
+
|
|
80
|
+
## Configuration
|
|
81
|
+
|
|
82
|
+
Config stored at `~/.kubrick/config.json`. Override with environment variables:
|
|
83
|
+
|
|
84
|
+
```bash
|
|
85
|
+
export TRITON_URL=localhost:8000
|
|
86
|
+
export TRITON_MODEL_NAME=llm_decoupled
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
## Development
|
|
90
|
+
|
|
91
|
+
### Install Development Dependencies
|
|
92
|
+
|
|
93
|
+
```bash
|
|
94
|
+
pip install -e ".[dev]"
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
### Run Tests
|
|
98
|
+
|
|
99
|
+
```bash
|
|
100
|
+
# Run all tests
|
|
101
|
+
pytest
|
|
102
|
+
|
|
103
|
+
# Run with coverage
|
|
104
|
+
pytest --cov=kubrick_cli --cov-report=term-missing
|
|
105
|
+
|
|
106
|
+
# Run specific test file
|
|
107
|
+
pytest tests/test_tool_executor.py -v
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
See [TESTING.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/TESTING.md) for detailed testing documentation.
|
|
111
|
+
|
|
112
|
+
### Code Quality
|
|
113
|
+
|
|
114
|
+
```bash
|
|
115
|
+
# Format code
|
|
116
|
+
black kubrick_cli tests
|
|
117
|
+
|
|
118
|
+
# Check linting
|
|
119
|
+
flake8 kubrick_cli
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
## License
|
|
123
|
+
|
|
124
|
+
MIT License - See [LICENSE](LICENSE)
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
# Kubrick CLI
|
|
2
|
+
|
|
3
|
+
An AI-assisted coding CLI tool powered by your own Triton LLM backend. Like Claude Code, but self-hosted.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
### Local Installation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install -e .
|
|
11
|
+
kubrick
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
### Docker
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
cd /path/to/your/project
|
|
18
|
+
docker run --rm -it \
|
|
19
|
+
--network host \
|
|
20
|
+
-v ${HOME}:/home/kubrick \
|
|
21
|
+
-v ${PWD}:/workspace \
|
|
22
|
+
-v /etc/localtime:/etc/localtime:ro \
|
|
23
|
+
-v /etc/timezone:/etc/timezone:ro \
|
|
24
|
+
rcland12/kubrick-cli
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Quick Start
|
|
28
|
+
|
|
29
|
+
```bash
|
|
30
|
+
# Basic usage
|
|
31
|
+
kubrick
|
|
32
|
+
|
|
33
|
+
# Custom Triton server
|
|
34
|
+
kubrick --triton-url my-server:8000
|
|
35
|
+
|
|
36
|
+
# Load previous conversation
|
|
37
|
+
kubrick --load 20240118_143022
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Requirements
|
|
41
|
+
|
|
42
|
+
- Python 3.8+
|
|
43
|
+
- LLM Provider (choose one):
|
|
44
|
+
- **Triton Inference Server** with streaming LLM model (default: `http://localhost:8000`)
|
|
45
|
+
- **OpenAI API** (GPT-4, GPT-3.5-turbo, etc.)
|
|
46
|
+
- **Anthropic API** (Claude models)
|
|
47
|
+
|
|
48
|
+
## Documentation
|
|
49
|
+
|
|
50
|
+
- **[WIKI.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/WIKI.md)** - Complete features, commands, and usage guide
|
|
51
|
+
- **[PROVIDERS.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/PROVIDERS.md)** - Multi-provider setup (Triton, OpenAI, Anthropic)
|
|
52
|
+
- **[TRITON.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/TRITON.md)** - Triton backend setup and requirements
|
|
53
|
+
- **[TESTING.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/TESTING.md)** - Testing guide and CI/CD setup
|
|
54
|
+
- **[DOCKER.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/DOCKER.md)** - Docker setup and troubleshooting
|
|
55
|
+
|
|
56
|
+
## Configuration
|
|
57
|
+
|
|
58
|
+
Config stored at `~/.kubrick/config.json`. Override with environment variables:
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
export TRITON_URL=localhost:8000
|
|
62
|
+
export TRITON_MODEL_NAME=llm_decoupled
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
## Development
|
|
66
|
+
|
|
67
|
+
### Install Development Dependencies
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
pip install -e ".[dev]"
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
### Run Tests
|
|
74
|
+
|
|
75
|
+
```bash
|
|
76
|
+
# Run all tests
|
|
77
|
+
pytest
|
|
78
|
+
|
|
79
|
+
# Run with coverage
|
|
80
|
+
pytest --cov=kubrick_cli --cov-report=term-missing
|
|
81
|
+
|
|
82
|
+
# Run specific test file
|
|
83
|
+
pytest tests/test_tool_executor.py -v
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
See [TESTING.md](https://github.com/rcland12/kubrick-cli/blob/master/docs/TESTING.md) for detailed testing documentation.
|
|
87
|
+
|
|
88
|
+
### Code Quality
|
|
89
|
+
|
|
90
|
+
```bash
|
|
91
|
+
# Format code
|
|
92
|
+
black kubrick_cli tests
|
|
93
|
+
|
|
94
|
+
# Check linting
|
|
95
|
+
flake8 kubrick_cli
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## License
|
|
99
|
+
|
|
100
|
+
MIT License - See [LICENSE](LICENSE)
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""Kubrick CLI - AI-assisted coding tool with agentic capabilities."""

# Fix: keep in sync with the packaged version — the sdist and PKG-INFO for this
# release both report 0.1.3, but this previously said "0.2.0".
__version__ = "0.1.3"

from .agent_loop import AgentLoop, CompletionDetector
from .classifier import TaskClassification, TaskClassifier
from .config import KubrickConfig
from .display import DisplayManager
from .execution_strategy import ExecutionConfig, ExecutionStrategy
from .main import KubrickCLI
from .planning import PlanningPhase
from .progress import ProgressTracker
from .providers.base import ProviderAdapter
from .providers.factory import ProviderFactory
from .providers.triton_provider import TritonProvider
from .providers.openai_provider import OpenAIProvider
from .providers.anthropic_provider import AnthropicProvider
from .safety import SafetyConfig, SafetyManager
from .scheduler import ToolScheduler
from .setup_wizard import SetupWizard
from .tools import ToolExecutor
from .triton_client import TritonLLMClient

# Public API of the package, kept alphabetically sorted.
__all__ = [
    "AgentLoop",
    "AnthropicProvider",
    "CompletionDetector",
    "DisplayManager",
    "ExecutionConfig",
    "ExecutionStrategy",
    "KubrickCLI",
    "KubrickConfig",
    "OpenAIProvider",
    "PlanningPhase",
    "ProgressTracker",
    "ProviderAdapter",
    "ProviderFactory",
    "SafetyConfig",
    "SafetyManager",
    "SetupWizard",
    "TaskClassification",
    "TaskClassifier",
    "ToolExecutor",
    "ToolScheduler",
    "TritonLLMClient",
    "TritonProvider",
]
|
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
"""Multi-step agentic execution loop with completion detection."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import Dict, List, Tuple
|
|
5
|
+
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
|
|
8
|
+
console = Console()
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
from .display import DisplayManager
|
|
12
|
+
except ImportError:
|
|
13
|
+
DisplayManager = None
|
|
14
|
+
|
|
15
|
+
try:
|
|
16
|
+
from .scheduler import ToolScheduler
|
|
17
|
+
except ImportError:
|
|
18
|
+
ToolScheduler = None
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class CompletionDetector:
    """Heuristics for deciding when an agent has finished its task."""

    # Explicit sentinel strings an agent may emit to signal completion.
    COMPLETION_MARKERS = [
        "TASK_COMPLETE",
        "PLAN_COMPLETE",
        "[COMPLETE]",
        "[DONE]",
    ]

    @staticmethod
    def is_complete(
        response_text: str,
        has_tool_calls: bool,
        iteration: int,
        max_iterations: int,
    ) -> Tuple[bool, str]:
        """
        Decide whether the agent has completed its task.

        Args:
            response_text: The agent's response text
            has_tool_calls: Whether the response contains tool calls
            iteration: Current iteration number
            max_iterations: Maximum allowed iterations

        Returns:
            Tuple of (is_complete, reason)
        """
        # Explicit markers take precedence over every heuristic.
        marker = next(
            (m for m in CompletionDetector.COMPLETION_MARKERS if m in response_text),
            None,
        )
        if marker is not None:
            return True, f"explicit_marker:{marker}"

        # Hard stop once the iteration budget is exhausted.
        if iteration >= max_iterations:
            return True, "max_iterations_reached"

        # A tool-free response that reads like a wrap-up counts as done.
        if not has_tool_calls and CompletionDetector._looks_conclusive(response_text):
            return True, "conclusive_response"

        return False, "continuing"

    @staticmethod
    def _looks_conclusive(text: str) -> bool:
        """
        Heuristically check whether *text* reads like a wrap-up message,
        i.e. the agent is done without explicitly saying so.
        """
        lowered = text.lower()

        # Phrases that typically close out a response.
        wrap_up_patterns = (
            r"\b(done|completed|finished|ready)\b",
            r"\b(successfully|all set|good to go)\b",
            r"\bhere(?:'s| is) (?:the |a )?(?:summary|result)",
            r"\b(?:task|work|changes) (?:is |are )?(?:complete|done|finished)",
            r"\blet me know if you need",
            r"\bthat(?:'s| should do it)",
            r"\beverything(?:'s| is) (?:set|ready|done)",
        )

        return any(re.search(pattern, lowered) for pattern in wrap_up_patterns)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class AgentLoop:
    """
    Multi-step agentic execution loop.

    Repeatedly queries the LLM, parses tool calls from each response,
    executes them, and feeds the results back — until the task completes,
    the iteration budget is exhausted, or the wall-clock timeout elapses.
    """

    def __init__(
        self,
        llm_client,
        tool_executor,
        max_iterations: int = 15,
        max_tools_per_turn: int = 5,
        timeout_seconds: int = 600,
        stream_options: Dict = None,
        display_manager=None,
        tool_scheduler=None,
    ):
        """
        Initialize the agent loop.

        Args:
            llm_client: LLM client instance (e.g., TritonLLMClient)
            tool_executor: Tool executor instance
            max_iterations: Maximum number of iterations
            max_tools_per_turn: Maximum tools per turn
            timeout_seconds: Total timeout in seconds
            stream_options: Optional streaming options
            display_manager: Optional DisplayManager instance for natural language display
            tool_scheduler: Optional ToolScheduler for parallel execution
        """
        self.llm_client = llm_client
        self.tool_executor = tool_executor
        self.max_iterations = max_iterations
        self.max_tools_per_turn = max_tools_per_turn
        self.timeout_seconds = timeout_seconds
        self.stream_options = stream_options or {}
        self.display_manager = display_manager
        self.tool_scheduler = tool_scheduler

    def run(
        self,
        messages: List[Dict],
        tool_parser,
        display_callback=None,
    ) -> Dict:
        """
        Run the agentic loop.

        Args:
            messages: Conversation messages (will be modified in-place)
            tool_parser: Function to parse tool calls from text
            display_callback: Optional callback for displaying streaming response

        Returns:
            Dict with execution results and metadata (keys: "success",
            "completion_reason" or "error", "iterations", "tool_calls")
        """
        # Local import so the module-level import block stays untouched.
        import time

        # Bug fix: timeout_seconds was accepted and documented as "Total
        # timeout in seconds" but never enforced. Use a monotonic deadline
        # so wall-clock adjustments cannot break the check.
        deadline = time.monotonic() + self.timeout_seconds

        iteration = 0
        total_tool_calls = 0

        while iteration < self.max_iterations:
            if time.monotonic() > deadline:
                console.print(
                    f"\n[yellow]⚠ Timeout ({self.timeout_seconds}s) reached[/yellow]"
                )
                return {
                    "success": False,
                    "completion_reason": "timeout",
                    "iterations": iteration,
                    "tool_calls": total_tool_calls,
                }

            iteration += 1

            console.print(
                f"\n[dim]→ Agent iteration {iteration}/{self.max_iterations}[/dim]"
            )

            console.print("[bold cyan]Assistant:[/bold cyan]")
            chunks = []

            try:
                # Stream the response, echoing each chunk as it arrives.
                for chunk in self.llm_client.generate_streaming(
                    messages, stream_options=self.stream_options
                ):
                    console.print(chunk, end="")
                    chunks.append(chunk)

                console.print("\n")

            except Exception as e:
                console.print(f"\n[red]Error during LLM generation: {e}[/red]")
                return {
                    "success": False,
                    "error": str(e),
                    "iterations": iteration,
                    "tool_calls": total_tool_calls,
                }

            response_text = "".join(chunks)

            # Record the assistant turn so follow-up iterations have context.
            messages.append({"role": "assistant", "content": response_text})

            if display_callback:
                display_callback(response_text)

            tool_calls = tool_parser(response_text)

            is_complete, reason = CompletionDetector.is_complete(
                response_text=response_text,
                has_tool_calls=len(tool_calls) > 0,
                iteration=iteration,
                max_iterations=self.max_iterations,
            )

            if is_complete:
                console.print(f"\n[green]✓ Task complete ({reason})[/green]")
                return {
                    "success": True,
                    "completion_reason": reason,
                    "iterations": iteration,
                    "tool_calls": total_tool_calls,
                }

            if tool_calls:
                total_tool_calls += self._execute_tool_turn(tool_calls, messages)
                continue

            # No tools requested and no completion signal: loop again and let
            # the model decide what to do next.
            console.print(
                "[yellow]⚠ No tool calls and task not marked complete. Continuing...[/yellow]"
            )

        console.print(
            f"\n[yellow]⚠ Max iterations ({self.max_iterations}) reached[/yellow]"
        )
        return {
            "success": True,
            "completion_reason": "max_iterations",
            "iterations": iteration,
            "tool_calls": total_tool_calls,
        }

    def _execute_tool_turn(self, tool_calls, messages) -> int:
        """
        Execute one turn's tool calls, report each result, and append a
        combined feedback message to *messages*.

        Args:
            tool_calls: List of (tool_name, parameters) tuples
            messages: Conversation messages (appended to in-place)

        Returns:
            Number of tool calls actually executed (after per-turn capping).
        """
        if len(tool_calls) > self.max_tools_per_turn:
            console.print(
                f"[yellow]⚠ Too many tool calls ({len(tool_calls)}), "
                f"limiting to {self.max_tools_per_turn}[/yellow]"
            )
            tool_calls = tool_calls[: self.max_tools_per_turn]

        console.print(
            f"\n[yellow]Executing {len(tool_calls)} tool(s)...[/yellow]\n"
        )

        # Parallel scheduling only pays off when there is more than one call.
        if self.tool_scheduler and len(tool_calls) > 1:
            execution_results = self.tool_scheduler.execute_tools(tool_calls)
        else:
            execution_results = [
                self.tool_executor.execute(tool_name, params)
                for tool_name, params in tool_calls
            ]

        tool_results = []
        for (tool_name, parameters), result in zip(tool_calls, execution_results):
            # Prefer the rich natural-language display when available.
            if self.display_manager:
                self.display_manager.display_tool_call(tool_name, parameters)
                self.display_manager.display_tool_result(
                    tool_name, result, result["success"]
                )
            else:
                console.print(f"[cyan]→ Called {tool_name}[/cyan]")
                if result["success"]:
                    console.print(f"[green]✓ {tool_name} succeeded[/green]")
                else:
                    console.print(
                        f"[red]✗ {tool_name} failed: {result['error']}[/red]"
                    )

            if result["success"]:
                tool_results.append(
                    f"Tool: {tool_name}\nResult: {result['result']}"
                )
            else:
                tool_results.append(
                    f"Tool: {tool_name}\nError: {result['error']}"
                )

        # Feed all results back as a single user message for the next turn.
        tool_results_text = "\n\n".join(tool_results)
        messages.append(
            {
                "role": "user",
                "content": f"Tool execution results:\n\n{tool_results_text}",
            }
        )
        return len(tool_calls)
|