blocks-control-sdk 0.1.0rc0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- blocks_control_sdk-0.1.0rc0/PKG-INFO +99 -0
- blocks_control_sdk-0.1.0rc0/README.md +70 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/__init__.py +5 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/constants/__init__.py +0 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/constants/anthropic.py +12 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/constants/gemini.py +10 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/constants/openai.py +9 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/control/__init__.py +0 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/control/agent_base.py +474 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/control/agent_claude.py +469 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/control/agent_codex.py +567 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk/control/agent_gemini.py +415 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk.egg-info/PKG-INFO +99 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk.egg-info/SOURCES.txt +17 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk.egg-info/dependency_links.txt +1 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk.egg-info/requires.txt +5 -0
- blocks_control_sdk-0.1.0rc0/blocks_control_sdk.egg-info/top_level.txt +1 -0
- blocks_control_sdk-0.1.0rc0/setup.cfg +4 -0
- blocks_control_sdk-0.1.0rc0/setup.py +41 -0
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: blocks_control_sdk
|
|
3
|
+
Version: 0.1.0rc0
|
|
4
|
+
Summary: A unified Python interface to interact with popular coding agents.
|
|
5
|
+
Home-page: https://github.com/BlocksOrg/blocks-control-sdk
|
|
6
|
+
Author: BlocksOrg
|
|
7
|
+
Author-email: dev@blocks.team
|
|
8
|
+
License: AGPL
|
|
9
|
+
Classifier: Development Status :: 3 - Alpha
|
|
10
|
+
Classifier: Intended Audience :: Developers
|
|
11
|
+
Classifier: License :: OSI Approved :: GNU Affero General Public License v3
|
|
12
|
+
Classifier: Programming Language :: Python
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Operating System :: OS Independent
|
|
19
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
20
|
+
Classifier: Topic :: Software Development :: Build Tools
|
|
21
|
+
Classifier: Topic :: Software Development :: Bug Tracking
|
|
22
|
+
Classifier: Topic :: Software Development :: Debuggers
|
|
23
|
+
Classifier: Topic :: Software Development :: Code Generators
|
|
24
|
+
Classifier: Topic :: Software Development :: Version Control :: Git
|
|
25
|
+
Classifier: Topic :: Communications :: Chat
|
|
26
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
27
|
+
Requires-Python: >=3.9
|
|
28
|
+
Description-Content-Type: text/markdown
|
|
29
|
+
|
|
30
|
+
# Blocks Control SDK
|
|
31
|
+
|
|
32
|
+
A unified Python interface to interact with popular coding agents.
|
|
33
|
+
|
|
34
|
+
> Think of it like litellm, but for coding agents
|
|
35
|
+
|
|
36
|
+
## Supported Agents
|
|
37
|
+
|
|
38
|
+
- **Claude Code** - Anthropic's Claude
|
|
39
|
+
- **Gemini CLI** - Google's Gemini
|
|
40
|
+
- **Codex CLI** - OpenAI's Codex
|
|
41
|
+
|
|
42
|
+
## Installation
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install blocks-control-sdk
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## Usage
|
|
49
|
+
|
|
50
|
+
### Async Streaming
|
|
51
|
+
|
|
52
|
+
```python
|
|
53
|
+
from blocks_control_sdk.control.agent_claude_exp import ClaudeCodeCLIExp
|
|
54
|
+
|
|
55
|
+
agent = ClaudeCodeCLIExp()
|
|
56
|
+
|
|
57
|
+
async for message in agent.stream("Write a python script to print 'Hello, World!'"):
|
|
58
|
+
print(message.content)
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### Sync with Callbacks
|
|
62
|
+
|
|
63
|
+
```python
|
|
64
|
+
from blocks_control_sdk.control.agent_claude_exp import ClaudeCodeCLIExp
|
|
65
|
+
|
|
66
|
+
agent = ClaudeCodeCLIExp()
|
|
67
|
+
|
|
68
|
+
def on_message(notification):
|
|
69
|
+
print(notification.message.content)
|
|
70
|
+
|
|
71
|
+
agent.register_notification(agent.notifications.NOTIFY_MESSAGE_V2, on_message)
|
|
72
|
+
|
|
73
|
+
agent.query("Write a python script to print 'Hello, World!'")
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
### All Agents
|
|
77
|
+
|
|
78
|
+
```python
|
|
79
|
+
from blocks_control_sdk.control.agent_claude_exp import ClaudeCodeCLIExp
|
|
80
|
+
from blocks_control_sdk.control.agent_gemini_exp import GeminiAgentCLIExp
|
|
81
|
+
from blocks_control_sdk.control.agent_codex import CodexAgentCLI
|
|
82
|
+
|
|
83
|
+
# Claude
|
|
84
|
+
claude = ClaudeCodeCLIExp()
|
|
85
|
+
|
|
86
|
+
# Gemini
|
|
87
|
+
gemini = GeminiAgentCLIExp()
|
|
88
|
+
|
|
89
|
+
# Codex
|
|
90
|
+
codex = CodexAgentCLI()
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
## Environment Variables
|
|
94
|
+
|
|
95
|
+
```bash
|
|
96
|
+
export ANTHROPIC_API_KEY="your-key" # For Claude
|
|
97
|
+
export GEMINI_API_KEY="your-key" # For Gemini
|
|
98
|
+
export OPENAI_API_KEY="your-key" # For Codex
|
|
99
|
+
```
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
# Blocks Control SDK
|
|
2
|
+
|
|
3
|
+
A unified Python interface to interact with popular coding agents.
|
|
4
|
+
|
|
5
|
+
> Think of it like litellm, but for coding agents
|
|
6
|
+
|
|
7
|
+
## Supported Agents
|
|
8
|
+
|
|
9
|
+
- **Claude Code** - Anthropic's Claude
|
|
10
|
+
- **Gemini CLI** - Google's Gemini
|
|
11
|
+
- **Codex CLI** - OpenAI's Codex
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
pip install blocks-control-sdk
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
### Async Streaming
|
|
22
|
+
|
|
23
|
+
```python
|
|
24
|
+
from blocks_control_sdk.control.agent_claude_exp import ClaudeCodeCLIExp
|
|
25
|
+
|
|
26
|
+
agent = ClaudeCodeCLIExp()
|
|
27
|
+
|
|
28
|
+
async for message in agent.stream("Write a python script to print 'Hello, World!'"):
|
|
29
|
+
print(message.content)
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### Sync with Callbacks
|
|
33
|
+
|
|
34
|
+
```python
|
|
35
|
+
from blocks_control_sdk.control.agent_claude_exp import ClaudeCodeCLIExp
|
|
36
|
+
|
|
37
|
+
agent = ClaudeCodeCLIExp()
|
|
38
|
+
|
|
39
|
+
def on_message(notification):
|
|
40
|
+
print(notification.message.content)
|
|
41
|
+
|
|
42
|
+
agent.register_notification(agent.notifications.NOTIFY_MESSAGE_V2, on_message)
|
|
43
|
+
|
|
44
|
+
agent.query("Write a python script to print 'Hello, World!'")
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
### All Agents
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
from blocks_control_sdk.control.agent_claude_exp import ClaudeCodeCLIExp
|
|
51
|
+
from blocks_control_sdk.control.agent_gemini_exp import GeminiAgentCLIExp
|
|
52
|
+
from blocks_control_sdk.control.agent_codex import CodexAgentCLI
|
|
53
|
+
|
|
54
|
+
# Claude
|
|
55
|
+
claude = ClaudeCodeCLIExp()
|
|
56
|
+
|
|
57
|
+
# Gemini
|
|
58
|
+
gemini = GeminiAgentCLIExp()
|
|
59
|
+
|
|
60
|
+
# Codex
|
|
61
|
+
codex = CodexAgentCLI()
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## Environment Variables
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
export ANTHROPIC_API_KEY="your-key" # For Claude
|
|
68
|
+
export GEMINI_API_KEY="your-key" # For Gemini
|
|
69
|
+
export OPENAI_API_KEY="your-key" # For Codex
|
|
70
|
+
```
|
|
File without changes
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
class AnthropicModels(str, Enum):
    """Anthropic model identifiers usable by the Claude coding agent.

    Members subclass ``str``, so they compare equal to their raw id strings
    and can be passed anywhere a model-id string is expected.
    """
    claude_3_5_haiku = 'claude-3-5-haiku-latest'
    claude_4_5_haiku = 'claude-haiku-4-5-20251001'
    claude_3_5_sonnet = 'claude-3-5-sonnet-20241022'
    claude_3_7_sonnet = 'claude-3-7-sonnet-latest'
    claude_4_sonnet_20250514 = 'claude-sonnet-4-20250514'
    claude_4_opus_20250514 = 'claude-opus-4-20250514'
    claude_4_1_opus_20250805 = 'claude-opus-4-1-20250805'
    sonnet = 'sonnet'  # default to latest in claude code
    opus = 'opus'  # default to latest in claude code

    def to_litellm_model(self):
        """Return the litellm routing id for this model.

        Added for consistency with GeminiModels.to_litellm_model; litellm
        addresses Anthropic models via the "anthropic/" provider prefix.
        """
        return f"anthropic/{self.value}"
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
class GeminiModels(str, Enum):
    """Google Gemini model identifiers usable by the Gemini coding agent.

    Members subclass ``str``, so they compare equal to their raw id strings.
    """
    gemini_flash_latest = "gemini-flash-latest"
    gemini_2_5_pro = "gemini-2.5-pro"
    gemini_2_5_flash_lite = "gemini-2.5-flash-lite"
    gemini_3_pro_preview = "gemini-3-pro-preview"

    def to_litellm_model(self):
        """Return the litellm routing id, using the "gemini/" provider prefix."""
        return f"gemini/{self.value}"
|
|
File without changes
|
|
@@ -0,0 +1,474 @@
|
|
|
1
|
+
import signal
|
|
2
|
+
import os
|
|
3
|
+
import time
|
|
4
|
+
import json
|
|
5
|
+
import asyncio
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from blocks_control_sdk.utils.logger import log
|
|
8
|
+
from enum import Enum
|
|
9
|
+
from types import FunctionType
|
|
10
|
+
from typing import Any, Tuple, Union, List, Optional, Dict, TYPE_CHECKING, AsyncIterator
|
|
11
|
+
from blocks.utils import BackgroundCommandOutput
|
|
12
|
+
import uuid
|
|
13
|
+
from pydantic import BaseModel, ConfigDict
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from litellm.types.utils import Message, ModelResponse
|
|
17
|
+
|
|
18
|
+
class NotificationsV2:
    """String constants naming the notification event types emitted by agents."""
    NOTIFY_COMPLETE_V2 = "notify_complete_v2"
    NOTIFY_MESSAGE_V2 = "notify_message_v2"
    NOTIFY_TOOL_CALL_V2 = "notify_tool_call_v2"
    NOTIFY_START_V2 = "notify_start_v2"
    NOTIFY_RESUME_V2 = "notify_resume_v2"
    NOTIFY_CHAT_THREAD_ID_UPDATE = "notify_chat_thread_id_update"
|
|
25
|
+
|
|
26
|
+
class NotifyBase(BaseModel):
    """Base payload for all notifications.

    ``status`` and ``chat_thread_id`` are stamped onto the payload by
    CodingAgentBaseCLI.notify() just before dispatch.
    """
    type: str
    status: Optional[str] = None
    chat_thread_id: Optional[str] = None
|
|
30
|
+
|
|
31
|
+
class NotifyMessageArgs(NotifyBase):
    """Payload for an assistant-message notification."""
    type: str = NotificationsV2.NOTIFY_MESSAGE_V2
    message: Any  # Accepts both local Message and litellm Message
|
|
34
|
+
|
|
35
|
+
class NotifyCompleteArgs(NotifyBase):
    """Payload signalling that the agent finished; carries the final message text."""
    type: str = NotificationsV2.NOTIFY_COMPLETE_V2
    last_message: str
|
|
38
|
+
|
|
39
|
+
class NotifyToolCallArgs(NotifyBase):
    """Payload for a tool-call notification."""
    type: str = NotificationsV2.NOTIFY_TOOL_CALL_V2
    tool_name: str
    # Tool arguments pre-serialized to a string by the emitting agent.
    serialized_args: str
|
|
43
|
+
|
|
44
|
+
class NotifyStartArgs(NotifyBase):
    """Payload signalling that a new agent run started."""
    type: str = NotificationsV2.NOTIFY_START_V2
|
|
46
|
+
|
|
47
|
+
class NotifyResumeArgs(NotifyBase):
    """Payload signalling that an existing agent session resumed."""
    type: str = NotificationsV2.NOTIFY_RESUME_V2
|
|
49
|
+
|
|
50
|
+
class NotifyChatThreadIdUpdateArgs(NotifyBase):
    """Payload emitted when the chat thread id changes (see set_chat_thread_id)."""
    type: str = NotificationsV2.NOTIFY_CHAT_THREAD_ID_UPDATE
    new_chat_thread_id: str
    # None on the very first assignment, when no previous thread existed.
    old_chat_thread_id: Optional[str] = None
|
|
54
|
+
|
|
55
|
+
class LLM(Enum):
    """Supported coding-agent providers."""
    CLAUDE = "claude"
    CODEX = "codex"
    GEMINI = "gemini"
|
|
59
|
+
|
|
60
|
+
# Provider -> API key, snapshotted from the environment.
# NOTE(review): these values are read once at import time; changes to
# os.environ made afterwards are not picked up — confirm this is intended.
LLM_API_KEYS = {
    LLM.CLAUDE: os.getenv("ANTHROPIC_API_KEY"),
    LLM.CODEX: os.getenv("OPENAI_API_KEY"),
    LLM.GEMINI: os.getenv("GEMINI_API_KEY"),
}
|
|
65
|
+
|
|
66
|
+
class CodingAgentBaseCLI():
|
|
67
|
+
|
|
68
|
+
    class AgentStatus:
        """Lifecycle states for the agent's conversation turns."""
        TURNS_COMPLETED = "turns_completed"
        TURNS_IN_PROGRESS = "turns_in_progress"
        TURNS_NOT_STARTED = "turns_not_started"
|
|
72
|
+
|
|
73
|
+
    class Notifications:
        """Notification type constants accepted by register_notification().

        Mirrors the module-level NotificationsV2 names, plus
        NOTIFY_TODO_LIST_UPDATE which only exists here.
        """
        NOTIFY_START_V2 = "notify_start_v2"
        NOTIFY_RESUME_V2 = "notify_resume_v2"
        NOTIFY_COMPLETE_V2 = "notify_complete_v2"
        NOTIFY_MESSAGE_V2 = "notify_message_v2"
        NOTIFY_TODO_LIST_UPDATE = "notify_todo_list_update"
        NOTIFY_TOOL_CALL_V2 = "notify_tool_call_v2"
        NOTIFY_CHAT_THREAD_ID_UPDATE = "notify_chat_thread_id_update"
|
|
81
|
+
|
|
82
|
+
    # Class-level defaults; instances overwrite these as the session evolves.
    pid: Optional[int] = None  # process-group id killed by interrupt()
    is_session_active: bool = False
    status: str = AgentStatus.TURNS_NOT_STARTED
    is_interrupt_triggered: bool = False
    chat_thread_id: Optional[str] = None  # assigned in __init__ via set_chat_thread_id()
    workspace_dir: Optional[Path] = None  # resolved to a concrete Path in __init__
|
|
88
|
+
|
|
89
|
+
def __init__(self, chat_thread_id: str = None, workspace_dir: str = None):
|
|
90
|
+
self.notifications = self.Notifications()
|
|
91
|
+
self._notification_callbacks = {}
|
|
92
|
+
self.assistant_messages = [] # Initialize as instance variable, not class variable
|
|
93
|
+
self.callback_errors = [] # Track any errors that occur in notification callbacks
|
|
94
|
+
|
|
95
|
+
# Raw message capture for debugging and regression testing
|
|
96
|
+
self.raw_messages: List[Dict[str, Any]] = []
|
|
97
|
+
self._raw_message_index: int = 0
|
|
98
|
+
|
|
99
|
+
self.set_chat_thread_id(chat_thread_id)
|
|
100
|
+
|
|
101
|
+
# if workspace_dir is None set to cwd
|
|
102
|
+
if workspace_dir is not None:
|
|
103
|
+
self.workspace_dir = Path(workspace_dir)
|
|
104
|
+
else:
|
|
105
|
+
self.workspace_dir = Path.cwd()
|
|
106
|
+
|
|
107
|
+
def tool_call_arg_kb_size(self, content: str) -> int:
|
|
108
|
+
content = content or ""
|
|
109
|
+
size_bytes = len(content.encode('utf-8'))
|
|
110
|
+
size_kb = size_bytes / 1024
|
|
111
|
+
return int(size_kb)
|
|
112
|
+
|
|
113
|
+
def tool_call_arg_kb_size_truncate_to_limit(self, content: str, kb_limit: int) -> str:
|
|
114
|
+
limit_bytes = kb_limit * 1024
|
|
115
|
+
encoded = content.encode("utf-8")
|
|
116
|
+
if len(encoded) <= limit_bytes:
|
|
117
|
+
return content
|
|
118
|
+
# cut the bytes safely
|
|
119
|
+
truncated_bytes = encoded[:limit_bytes]
|
|
120
|
+
# decode with ignore to drop partial utf-8 chars
|
|
121
|
+
truncated_str = truncated_bytes.decode("utf-8", errors="ignore")
|
|
122
|
+
return truncated_str + "..."
|
|
123
|
+
|
|
124
|
+
    @staticmethod
    def resolve_agent_class(provider: LLM):
        """Map an LLM provider to its concrete agent class.

        Imports are deferred to call time to avoid circular imports between
        this base module and the concrete agent modules.

        Raises:
            ValueError: for an unrecognized provider.
        """
        # NOTE(review): the sdist ships agent_claude.py / agent_gemini.py, but
        # these imports target agent_claude_exp / agent_gemini_exp — confirm
        # the module names exist at runtime.
        from .agent_codex import Codex
        from .agent_claude_exp import ClaudeCode
        from .agent_gemini_exp import GeminiCLI

        if provider == LLM.CODEX:
            return Codex
        elif provider == LLM.CLAUDE:
            return ClaudeCode
        elif provider == LLM.GEMINI:
            return GeminiCLI
        else:
            raise ValueError(f"Unknown provider: {provider}")
|
|
138
|
+
|
|
139
|
+
@staticmethod
|
|
140
|
+
def resolve_provider_from_keys(default_provider: Union[LLM, None] = None) -> LLM:
|
|
141
|
+
|
|
142
|
+
if default_provider is not None and default_provider in LLM_API_KEYS:
|
|
143
|
+
print(f"Attempting to resolve LLM provider from default_provider in automation: {default_provider}")
|
|
144
|
+
if LLM_API_KEYS.get(default_provider) is not None:
|
|
145
|
+
return default_provider
|
|
146
|
+
else:
|
|
147
|
+
raise ValueError(
|
|
148
|
+
f"LLM API key for {default_provider} is not set, you must set the environment variable {default_provider.value}"
|
|
149
|
+
)
|
|
150
|
+
|
|
151
|
+
print("Attempting to resolve LLM provider from api keys...")
|
|
152
|
+
|
|
153
|
+
if LLM_API_KEYS.get(LLM.CLAUDE) is not None:
|
|
154
|
+
return LLM.CLAUDE
|
|
155
|
+
elif LLM_API_KEYS.get(LLM.CODEX) is not None:
|
|
156
|
+
return LLM.CODEX
|
|
157
|
+
elif LLM_API_KEYS.get(LLM.GEMINI) is not None:
|
|
158
|
+
return LLM.GEMINI
|
|
159
|
+
else:
|
|
160
|
+
llm_name_strs = [llm.value for llm in LLM]
|
|
161
|
+
raise ValueError(
|
|
162
|
+
"No LLM API keys set, you must set at least one of the following environment variables: "
|
|
163
|
+
f"{', '.join(llm_name_strs)}"
|
|
164
|
+
)
|
|
165
|
+
|
|
166
|
+
    def _kill_process(self, pid: int):
        """Terminate the process group *pid*: SIGTERM, a 1s grace period, then SIGKILL.

        Args:
            pid: Process-group id of the agent's CLI process.
        """
        try:
            os.killpg(pid, signal.SIGTERM)
            # Give the group a moment to shut down cleanly before the hard kill.
            time.sleep(1)
            os.killpg(pid, signal.SIGKILL)
        except ProcessLookupError:
            # Already gone — nothing to do.
            pass
        except PermissionError:
            # Fallback to killing just the main process
            try:
                os.kill(pid, signal.SIGKILL)
            except ProcessLookupError:
                pass
|
|
179
|
+
|
|
180
|
+
def register_notification(self, notification_type: str, callback):
|
|
181
|
+
"""Register a callback for a specific notification type"""
|
|
182
|
+
if notification_type not in self._notification_callbacks:
|
|
183
|
+
self._notification_callbacks[notification_type] = []
|
|
184
|
+
self._notification_callbacks[notification_type].append(callback)
|
|
185
|
+
|
|
186
|
+
def notify(self, arg: NotifyBase):
|
|
187
|
+
"""Trigger all callbacks registered for a notification type with args and kwargs"""
|
|
188
|
+
|
|
189
|
+
notification_type = arg.type
|
|
190
|
+
|
|
191
|
+
arg.chat_thread_id = self.chat_thread_id
|
|
192
|
+
arg.status = self.status
|
|
193
|
+
|
|
194
|
+
size = notification_type in self._notification_callbacks and len(self._notification_callbacks[notification_type]) or 0
|
|
195
|
+
|
|
196
|
+
log.debug(f">Notifying callbacks for notification type: {notification_type}, #callbacks: {size}")
|
|
197
|
+
|
|
198
|
+
if notification_type in self._notification_callbacks:
|
|
199
|
+
for callback in self._notification_callbacks[notification_type]:
|
|
200
|
+
if callable(callback):
|
|
201
|
+
try:
|
|
202
|
+
callback(arg)
|
|
203
|
+
except Exception as e:
|
|
204
|
+
error_info = {
|
|
205
|
+
"notification_type": notification_type,
|
|
206
|
+
"callback": str(callback),
|
|
207
|
+
"error": str(e)
|
|
208
|
+
}
|
|
209
|
+
self.callback_errors.append(error_info)
|
|
210
|
+
log.error(f">Error calling callback {callback} for notification type {notification_type}: {e}")
|
|
211
|
+
else:
|
|
212
|
+
log.error(f">Callback {callback} for notification type {notification_type} is not callable")
|
|
213
|
+
|
|
214
|
+
    def clear_messages(self):
        """Reset collected assistant messages and the raw-message capture buffer."""
        self.assistant_messages = []
        self.clear_raw_messages()
|
|
217
|
+
|
|
218
|
+
    def clear_raw_messages(self):
        """Clear raw message capture buffer."""
        self.raw_messages = []
        # Also restart the monotonically increasing index used by capture_raw_line().
        self._raw_message_index = 0
|
|
222
|
+
|
|
223
|
+
def capture_raw_line(
|
|
224
|
+
self,
|
|
225
|
+
raw_json: Dict[str, Any],
|
|
226
|
+
parsed_type: str = "unknown",
|
|
227
|
+
parse_success: bool = True,
|
|
228
|
+
parse_error: Optional[str] = None
|
|
229
|
+
):
|
|
230
|
+
"""
|
|
231
|
+
Capture a raw JSON line for debugging and regression testing.
|
|
232
|
+
|
|
233
|
+
Args:
|
|
234
|
+
raw_json: The raw JSON object parsed from the agent's output
|
|
235
|
+
parsed_type: Type of parsed content ("message", "tool_call", "parse_error", "other")
|
|
236
|
+
parse_success: Whether parsing was successful
|
|
237
|
+
parse_error: Error message if parsing failed
|
|
238
|
+
"""
|
|
239
|
+
entry = {
|
|
240
|
+
"index": self._raw_message_index,
|
|
241
|
+
"timestamp": time.time(),
|
|
242
|
+
"raw_json": raw_json,
|
|
243
|
+
"parsed_type": parsed_type,
|
|
244
|
+
"parse_success": parse_success,
|
|
245
|
+
"parse_error": parse_error,
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
self.raw_messages.append(entry)
|
|
249
|
+
self._raw_message_index += 1
|
|
250
|
+
|
|
251
|
+
def set_chat_thread_id(self, chat_thread_id: str = None):
|
|
252
|
+
new_chat_thread_id = chat_thread_id or str(uuid.uuid4())
|
|
253
|
+
old_chat_thread_id = self.chat_thread_id
|
|
254
|
+
self.notify(NotifyChatThreadIdUpdateArgs(
|
|
255
|
+
new_chat_thread_id=new_chat_thread_id,
|
|
256
|
+
old_chat_thread_id=old_chat_thread_id
|
|
257
|
+
))
|
|
258
|
+
self.chat_thread_id = new_chat_thread_id
|
|
259
|
+
|
|
260
|
+
    def new_chat_thread(self, chat_thread_id: str = None):
        """Start a fresh thread: assign a new id and drop all collected messages."""
        self.set_chat_thread_id(chat_thread_id)
        self.clear_messages()
|
|
263
|
+
|
|
264
|
+
    def notify_complete_callback(self, text: str):
        """Default callback method that can be overridden"""
        # Intentionally a no-op in the base class.
        pass
|
|
267
|
+
|
|
268
|
+
    def _start(self):
        # Mark the session as live.
        self.is_session_active = True
|
|
270
|
+
|
|
271
|
+
def interrupt(self):
|
|
272
|
+
if self.pid is not None:
|
|
273
|
+
self._kill_process(self.pid)
|
|
274
|
+
self.pid = None
|
|
275
|
+
self.is_interrupt_triggered = True
|
|
276
|
+
|
|
277
|
+
    def query(self, query: str) -> BackgroundCommandOutput:
        """Send *query* to the agent. No-op placeholder; concrete agents are
        expected to override this (stream() relies on it being non-blocking)."""
        pass
|
|
279
|
+
|
|
280
|
+
async def stream(self, prompt: str) -> AsyncIterator["Message"]:
|
|
281
|
+
"""
|
|
282
|
+
Async generator that yields Message objects as they stream in.
|
|
283
|
+
|
|
284
|
+
Usage:
|
|
285
|
+
async for message in agent.stream("Tell me a joke"):
|
|
286
|
+
print(message.content)
|
|
287
|
+
|
|
288
|
+
Args:
|
|
289
|
+
prompt: The query/prompt to send to the agent
|
|
290
|
+
|
|
291
|
+
Yields:
|
|
292
|
+
Message objects containing assistant messages
|
|
293
|
+
"""
|
|
294
|
+
queue: asyncio.Queue = asyncio.Queue()
|
|
295
|
+
loop = asyncio.get_event_loop()
|
|
296
|
+
|
|
297
|
+
def on_message(notification: NotifyMessageArgs):
|
|
298
|
+
"""Bridge from sync callback to async queue"""
|
|
299
|
+
loop.call_soon_threadsafe(queue.put_nowait, ("message", notification.message))
|
|
300
|
+
|
|
301
|
+
# on tool call
|
|
302
|
+
def on_tool_call(notification: NotifyToolCallArgs):
|
|
303
|
+
"""Bridge from sync callback to async queue"""
|
|
304
|
+
loop.call_soon_threadsafe(queue.put_nowait, ("tool_call", (notification.tool_name, notification.serialized_args)))
|
|
305
|
+
|
|
306
|
+
def on_complete(notification: NotifyCompleteArgs):
|
|
307
|
+
"""Signal completion by pushing sentinel"""
|
|
308
|
+
loop.call_soon_threadsafe(queue.put_nowait, ("complete", notification.last_message))
|
|
309
|
+
|
|
310
|
+
# Register callbacks for streaming
|
|
311
|
+
self.register_notification(self.Notifications.NOTIFY_MESSAGE_V2, on_message)
|
|
312
|
+
self.register_notification(self.Notifications.NOTIFY_COMPLETE_V2, on_complete)
|
|
313
|
+
self.register_notification(self.Notifications.NOTIFY_TOOL_CALL_V2, on_tool_call)
|
|
314
|
+
|
|
315
|
+
try:
|
|
316
|
+
# Start the query (non-blocking, returns immediately)
|
|
317
|
+
self.query(prompt)
|
|
318
|
+
|
|
319
|
+
# Yield messages until completion
|
|
320
|
+
while True:
|
|
321
|
+
event_type, payload = await queue.get()
|
|
322
|
+
if event_type == "complete":
|
|
323
|
+
break
|
|
324
|
+
elif event_type == "tool_call":
|
|
325
|
+
yield payload
|
|
326
|
+
elif event_type == "message":
|
|
327
|
+
yield payload
|
|
328
|
+
finally:
|
|
329
|
+
# Clean up callbacks to prevent memory leaks on repeated calls
|
|
330
|
+
if self.Notifications.NOTIFY_MESSAGE_V2 in self._notification_callbacks:
|
|
331
|
+
try:
|
|
332
|
+
self._notification_callbacks[self.Notifications.NOTIFY_MESSAGE_V2].remove(on_message)
|
|
333
|
+
except ValueError:
|
|
334
|
+
pass
|
|
335
|
+
if self.Notifications.NOTIFY_COMPLETE_V2 in self._notification_callbacks:
|
|
336
|
+
try:
|
|
337
|
+
self._notification_callbacks[self.Notifications.NOTIFY_COMPLETE_V2].remove(on_complete)
|
|
338
|
+
except ValueError:
|
|
339
|
+
pass
|
|
340
|
+
if self.Notifications.NOTIFY_TOOL_CALL_V2 in self._notification_callbacks:
|
|
341
|
+
try:
|
|
342
|
+
self._notification_callbacks[self.Notifications.NOTIFY_TOOL_CALL_V2].remove(on_tool_call)
|
|
343
|
+
except ValueError:
|
|
344
|
+
pass
|
|
345
|
+
|
|
346
|
+
def warm_up_mcp(self, default_packages: List[str] = None) -> Dict[str, Any]:
|
|
347
|
+
"""
|
|
348
|
+
Pre-install MCP (Model Context Protocol) packages that use npx.
|
|
349
|
+
Reads configuration from ~/.config/blocks/mcp.json and installs
|
|
350
|
+
all npx-based packages globally using npm.
|
|
351
|
+
|
|
352
|
+
Args:
|
|
353
|
+
default_packages: List of default packages to always install (with optional versions)
|
|
354
|
+
e.g., ["mcp-remote"]
|
|
355
|
+
|
|
356
|
+
Returns:
|
|
357
|
+
Dict containing status, installed packages, and any errors
|
|
358
|
+
"""
|
|
359
|
+
from blocks import bash
|
|
360
|
+
|
|
361
|
+
# Default package(s) to always install
|
|
362
|
+
if default_packages is None:
|
|
363
|
+
default_packages = ["mcp-remote"]
|
|
364
|
+
|
|
365
|
+
result = {
|
|
366
|
+
"status": "success",
|
|
367
|
+
"packages": [],
|
|
368
|
+
"errors": [],
|
|
369
|
+
"message": ""
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
try:
|
|
373
|
+
# Construct path to MCP config
|
|
374
|
+
mcp_config_path = Path.home() / ".config" / "blocks" / "mcp.json"
|
|
375
|
+
|
|
376
|
+
# Check if config file exists
|
|
377
|
+
if not mcp_config_path.exists():
|
|
378
|
+
result["status"] = "skipped"
|
|
379
|
+
result["message"] = f"MCP config not found at {mcp_config_path}"
|
|
380
|
+
print(f"[warm_up_mcp] {result['message']}")
|
|
381
|
+
return result
|
|
382
|
+
|
|
383
|
+
# Read and parse MCP configuration
|
|
384
|
+
with open(mcp_config_path, "r") as f:
|
|
385
|
+
mcp_config = json.load(f)
|
|
386
|
+
|
|
387
|
+
if not mcp_config:
|
|
388
|
+
result["status"] = "skipped"
|
|
389
|
+
result["message"] = "MCP config is empty"
|
|
390
|
+
print(f"[warm_up_mcp] {result['message']}")
|
|
391
|
+
return result
|
|
392
|
+
|
|
393
|
+
# Extract npx-based packages
|
|
394
|
+
npx_packages = []
|
|
395
|
+
for server_name, server_config in mcp_config.items():
|
|
396
|
+
# Skip disabled servers
|
|
397
|
+
if server_config.get("disabled", False):
|
|
398
|
+
print(f"[warm_up_mcp] Skipping disabled server: {server_name}")
|
|
399
|
+
continue
|
|
400
|
+
|
|
401
|
+
# Check if this is an npx-based server
|
|
402
|
+
if server_config.get("command") == "npx":
|
|
403
|
+
args = server_config.get("args", [])
|
|
404
|
+
|
|
405
|
+
# Extract package name from args
|
|
406
|
+
# Typical format: ["-y", "@package/name@version"]
|
|
407
|
+
package_name = None
|
|
408
|
+
for arg in args:
|
|
409
|
+
# Skip flags
|
|
410
|
+
if arg.startswith("-"):
|
|
411
|
+
continue
|
|
412
|
+
# This should be the package name
|
|
413
|
+
package_name = arg
|
|
414
|
+
break
|
|
415
|
+
|
|
416
|
+
if package_name:
|
|
417
|
+
# Keep the package name as-is, including version if specified
|
|
418
|
+
# Examples:
|
|
419
|
+
# - "@modelcontextprotocol/server-slack" (no version)
|
|
420
|
+
# - "@modelcontextprotocol/server-slack@latest" (with version)
|
|
421
|
+
# - "firecrawl-mcp@1.2.3" (non-scoped with version)
|
|
422
|
+
|
|
423
|
+
npx_packages.append({
|
|
424
|
+
"name": package_name,
|
|
425
|
+
"server": server_name
|
|
426
|
+
})
|
|
427
|
+
print(f"[warm_up_mcp] Found npx package: {package_name} (server: {server_name})")
|
|
428
|
+
|
|
429
|
+
# Add default packages
|
|
430
|
+
for default_pkg in default_packages:
|
|
431
|
+
print(f"[warm_up_mcp] Adding default package: {default_pkg}")
|
|
432
|
+
|
|
433
|
+
# Prepare npm install command
|
|
434
|
+
package_names = [pkg["name"] for pkg in npx_packages] + default_packages
|
|
435
|
+
|
|
436
|
+
if not package_names:
|
|
437
|
+
result["status"] = "skipped"
|
|
438
|
+
result["message"] = "No packages to install"
|
|
439
|
+
print(f"[warm_up_mcp] {result['message']}")
|
|
440
|
+
return result
|
|
441
|
+
|
|
442
|
+
result["packages"] = package_names
|
|
443
|
+
|
|
444
|
+
# Run npm install globally
|
|
445
|
+
install_command = f"npm install -g {' '.join(package_names)}"
|
|
446
|
+
print(f"[warm_up_mcp] Running: {install_command}")
|
|
447
|
+
|
|
448
|
+
# Execute the installation
|
|
449
|
+
install_result = bash(install_command, suppress_exception=True)
|
|
450
|
+
|
|
451
|
+
if install_result.return_code == 0:
|
|
452
|
+
result["status"] = "success"
|
|
453
|
+
result["message"] = f"Successfully installed {len(package_names)} package(s)"
|
|
454
|
+
print(f"[warm_up_mcp] {result['message']}")
|
|
455
|
+
print(f"[warm_up_mcp] Installed packages: {', '.join(package_names)}")
|
|
456
|
+
else:
|
|
457
|
+
result["status"] = "partial"
|
|
458
|
+
result["message"] = f"Installation completed with warnings/errors"
|
|
459
|
+
result["errors"].append(install_result.stderr)
|
|
460
|
+
print(f"[warm_up_mcp] {result['message']}")
|
|
461
|
+
print(f"[warm_up_mcp] stderr: {install_result.stderr}")
|
|
462
|
+
|
|
463
|
+
except json.JSONDecodeError as e:
|
|
464
|
+
result["status"] = "error"
|
|
465
|
+
result["message"] = f"Failed to parse MCP config: {e}"
|
|
466
|
+
result["errors"].append(str(e))
|
|
467
|
+
print(f"[warm_up_mcp] Error: {result['message']}")
|
|
468
|
+
except Exception as e:
|
|
469
|
+
result["status"] = "error"
|
|
470
|
+
result["message"] = f"Unexpected error: {e}"
|
|
471
|
+
result["errors"].append(str(e))
|
|
472
|
+
print(f"[warm_up_mcp] Error: {result['message']}")
|
|
473
|
+
|
|
474
|
+
return result
|