tactus 0.31.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +49 -0
- tactus/adapters/__init__.py +9 -0
- tactus/adapters/broker_log.py +76 -0
- tactus/adapters/cli_hitl.py +189 -0
- tactus/adapters/cli_log.py +223 -0
- tactus/adapters/cost_collector_log.py +56 -0
- tactus/adapters/file_storage.py +367 -0
- tactus/adapters/http_callback_log.py +109 -0
- tactus/adapters/ide_log.py +71 -0
- tactus/adapters/lua_tools.py +336 -0
- tactus/adapters/mcp.py +289 -0
- tactus/adapters/mcp_manager.py +196 -0
- tactus/adapters/memory.py +53 -0
- tactus/adapters/plugins.py +419 -0
- tactus/backends/http_backend.py +58 -0
- tactus/backends/model_backend.py +35 -0
- tactus/backends/pytorch_backend.py +110 -0
- tactus/broker/__init__.py +12 -0
- tactus/broker/client.py +247 -0
- tactus/broker/protocol.py +183 -0
- tactus/broker/server.py +1123 -0
- tactus/broker/stdio.py +12 -0
- tactus/cli/__init__.py +7 -0
- tactus/cli/app.py +2245 -0
- tactus/cli/commands/__init__.py +0 -0
- tactus/core/__init__.py +32 -0
- tactus/core/config_manager.py +790 -0
- tactus/core/dependencies/__init__.py +14 -0
- tactus/core/dependencies/registry.py +180 -0
- tactus/core/dsl_stubs.py +2117 -0
- tactus/core/exceptions.py +66 -0
- tactus/core/execution_context.py +480 -0
- tactus/core/lua_sandbox.py +508 -0
- tactus/core/message_history_manager.py +236 -0
- tactus/core/mocking.py +286 -0
- tactus/core/output_validator.py +291 -0
- tactus/core/registry.py +499 -0
- tactus/core/runtime.py +2907 -0
- tactus/core/template_resolver.py +142 -0
- tactus/core/yaml_parser.py +301 -0
- tactus/docker/Dockerfile +61 -0
- tactus/docker/entrypoint.sh +69 -0
- tactus/dspy/__init__.py +39 -0
- tactus/dspy/agent.py +1144 -0
- tactus/dspy/broker_lm.py +181 -0
- tactus/dspy/config.py +212 -0
- tactus/dspy/history.py +196 -0
- tactus/dspy/module.py +405 -0
- tactus/dspy/prediction.py +318 -0
- tactus/dspy/signature.py +185 -0
- tactus/formatting/__init__.py +7 -0
- tactus/formatting/formatter.py +437 -0
- tactus/ide/__init__.py +9 -0
- tactus/ide/coding_assistant.py +343 -0
- tactus/ide/server.py +2223 -0
- tactus/primitives/__init__.py +49 -0
- tactus/primitives/control.py +168 -0
- tactus/primitives/file.py +229 -0
- tactus/primitives/handles.py +378 -0
- tactus/primitives/host.py +94 -0
- tactus/primitives/human.py +342 -0
- tactus/primitives/json.py +189 -0
- tactus/primitives/log.py +187 -0
- tactus/primitives/message_history.py +157 -0
- tactus/primitives/model.py +163 -0
- tactus/primitives/procedure.py +564 -0
- tactus/primitives/procedure_callable.py +318 -0
- tactus/primitives/retry.py +155 -0
- tactus/primitives/session.py +152 -0
- tactus/primitives/state.py +182 -0
- tactus/primitives/step.py +209 -0
- tactus/primitives/system.py +93 -0
- tactus/primitives/tool.py +375 -0
- tactus/primitives/tool_handle.py +279 -0
- tactus/primitives/toolset.py +229 -0
- tactus/protocols/__init__.py +38 -0
- tactus/protocols/chat_recorder.py +81 -0
- tactus/protocols/config.py +97 -0
- tactus/protocols/cost.py +31 -0
- tactus/protocols/hitl.py +71 -0
- tactus/protocols/log_handler.py +27 -0
- tactus/protocols/models.py +355 -0
- tactus/protocols/result.py +33 -0
- tactus/protocols/storage.py +90 -0
- tactus/providers/__init__.py +13 -0
- tactus/providers/base.py +92 -0
- tactus/providers/bedrock.py +117 -0
- tactus/providers/google.py +105 -0
- tactus/providers/openai.py +98 -0
- tactus/sandbox/__init__.py +63 -0
- tactus/sandbox/config.py +171 -0
- tactus/sandbox/container_runner.py +1099 -0
- tactus/sandbox/docker_manager.py +433 -0
- tactus/sandbox/entrypoint.py +227 -0
- tactus/sandbox/protocol.py +213 -0
- tactus/stdlib/__init__.py +10 -0
- tactus/stdlib/io/__init__.py +13 -0
- tactus/stdlib/io/csv.py +88 -0
- tactus/stdlib/io/excel.py +136 -0
- tactus/stdlib/io/file.py +90 -0
- tactus/stdlib/io/fs.py +154 -0
- tactus/stdlib/io/hdf5.py +121 -0
- tactus/stdlib/io/json.py +109 -0
- tactus/stdlib/io/parquet.py +83 -0
- tactus/stdlib/io/tsv.py +88 -0
- tactus/stdlib/loader.py +274 -0
- tactus/stdlib/tac/tactus/tools/done.tac +33 -0
- tactus/stdlib/tac/tactus/tools/log.tac +50 -0
- tactus/testing/README.md +273 -0
- tactus/testing/__init__.py +61 -0
- tactus/testing/behave_integration.py +380 -0
- tactus/testing/context.py +486 -0
- tactus/testing/eval_models.py +114 -0
- tactus/testing/evaluation_runner.py +222 -0
- tactus/testing/evaluators.py +634 -0
- tactus/testing/events.py +94 -0
- tactus/testing/gherkin_parser.py +134 -0
- tactus/testing/mock_agent.py +315 -0
- tactus/testing/mock_dependencies.py +234 -0
- tactus/testing/mock_hitl.py +171 -0
- tactus/testing/mock_registry.py +168 -0
- tactus/testing/mock_tools.py +133 -0
- tactus/testing/models.py +115 -0
- tactus/testing/pydantic_eval_runner.py +508 -0
- tactus/testing/steps/__init__.py +13 -0
- tactus/testing/steps/builtin.py +902 -0
- tactus/testing/steps/custom.py +69 -0
- tactus/testing/steps/registry.py +68 -0
- tactus/testing/test_runner.py +489 -0
- tactus/tracing/__init__.py +5 -0
- tactus/tracing/trace_manager.py +417 -0
- tactus/utils/__init__.py +1 -0
- tactus/utils/cost_calculator.py +72 -0
- tactus/utils/model_pricing.py +132 -0
- tactus/utils/safe_file_library.py +502 -0
- tactus/utils/safe_libraries.py +234 -0
- tactus/validation/LuaLexerBase.py +66 -0
- tactus/validation/LuaParserBase.py +23 -0
- tactus/validation/README.md +224 -0
- tactus/validation/__init__.py +7 -0
- tactus/validation/error_listener.py +21 -0
- tactus/validation/generated/LuaLexer.interp +231 -0
- tactus/validation/generated/LuaLexer.py +5548 -0
- tactus/validation/generated/LuaLexer.tokens +124 -0
- tactus/validation/generated/LuaLexerBase.py +66 -0
- tactus/validation/generated/LuaParser.interp +173 -0
- tactus/validation/generated/LuaParser.py +6439 -0
- tactus/validation/generated/LuaParser.tokens +124 -0
- tactus/validation/generated/LuaParserBase.py +23 -0
- tactus/validation/generated/LuaParserVisitor.py +118 -0
- tactus/validation/generated/__init__.py +7 -0
- tactus/validation/grammar/LuaLexer.g4 +123 -0
- tactus/validation/grammar/LuaParser.g4 +178 -0
- tactus/validation/semantic_visitor.py +817 -0
- tactus/validation/validator.py +157 -0
- tactus-0.31.2.dist-info/METADATA +1809 -0
- tactus-0.31.2.dist-info/RECORD +160 -0
- tactus-0.31.2.dist-info/WHEEL +4 -0
- tactus-0.31.2.dist-info/entry_points.txt +2 -0
- tactus-0.31.2.dist-info/licenses/LICENSE +21 -0
tactus/broker/server.py
ADDED
|
@@ -0,0 +1,1123 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Host-side broker server (local UDS transport).
|
|
3
|
+
|
|
4
|
+
This is intentionally narrow: it exposes only allowlisted operations required
|
|
5
|
+
by the runtime container.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import os
|
|
12
|
+
import ssl
|
|
13
|
+
from collections.abc import Awaitable, Callable
|
|
14
|
+
from dataclasses import dataclass
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import Any, Optional
|
|
17
|
+
|
|
18
|
+
import anyio
|
|
19
|
+
from anyio.streams.buffered import BufferedByteReceiveStream
|
|
20
|
+
from anyio.streams.tls import TLSStream
|
|
21
|
+
|
|
22
|
+
from tactus.broker.protocol import (
|
|
23
|
+
read_message,
|
|
24
|
+
read_message_anyio,
|
|
25
|
+
write_message,
|
|
26
|
+
write_message_anyio,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _json_dumps(obj: Any) -> str:
|
|
33
|
+
return json.dumps(obj, ensure_ascii=False, separators=(",", ":"))
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
async def _write_event_anyio(stream: anyio.abc.ByteStream, event: dict[str, Any]) -> None:
    """Write an event using length-prefixed protocol.

    Thin delegating wrapper over write_message_anyio, kept so call sites read
    symmetrically with _write_event_asyncio.
    """
    await write_message_anyio(stream, event)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
async def _write_event_asyncio(writer: asyncio.StreamWriter, event: dict[str, Any]) -> None:
    """Write an event using length-prefixed protocol.

    Thin delegating wrapper over write_message, kept so call sites read
    symmetrically with _write_event_anyio.
    """
    await write_message(writer, event)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _flatten_exceptions(exc: BaseException) -> list[BaseException]:
|
|
47
|
+
"""Flatten BaseExceptionGroup into a list of leaf exceptions."""
|
|
48
|
+
if isinstance(exc, BaseExceptionGroup):
|
|
49
|
+
leaves: list[BaseException] = []
|
|
50
|
+
for child in exc.exceptions:
|
|
51
|
+
leaves.extend(_flatten_exceptions(child))
|
|
52
|
+
return leaves
|
|
53
|
+
return [exc]
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclass(frozen=True)
class OpenAIChatConfig:
    """Immutable configuration for OpenAIChatBackend."""

    # Name of the environment variable the backend reads the API key from.
    api_key_env: str = "OPENAI_API_KEY"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class OpenAIChatBackend:
    """
    Minimal OpenAI chat-completions backend used by the broker.

    The OpenAI SDK is imported and the client constructed only on first use,
    so the broker can be created (and unit-tested) without OpenAI installed
    or configured. Credentials are read from the broker process environment
    via the variable named in OpenAIChatConfig.api_key_env.
    """

    def __init__(self, config: Optional[OpenAIChatConfig] = None):
        self._config = config if config is not None else OpenAIChatConfig()
        # Created lazily by _get_client() on the first chat() call.
        self._client = None

    def _get_client(self):
        """Return the cached AsyncOpenAI client, constructing it on first use.

        Raises RuntimeError when the configured environment variable is
        unset or empty.
        """
        if self._client is None:
            from openai import AsyncOpenAI

            env_var = self._config.api_key_env
            api_key = os.environ.get(env_var)
            if not api_key:
                raise RuntimeError(f"Missing OpenAI API key in environment: {env_var}")

            self._client = AsyncOpenAI(api_key=api_key)
        return self._client

    async def chat(
        self,
        *,
        model: str,
        messages: list[dict[str, Any]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        stream: bool,
    ):
        """Issue one chat-completions request.

        Returns the SDK response object, or an async chunk iterator when
        *stream* is true.
        """
        client = self._get_client()

        request: dict[str, Any] = {"model": model, "messages": messages}
        # Forward only the optional sampling parameters the caller actually set.
        for key, value in (("temperature", temperature), ("max_tokens", max_tokens)):
            if value is not None:
                request[key] = value
        if stream:
            request["stream"] = True

        return await client.chat.completions.create(**request)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class HostToolRegistry:
    """
    Deny-by-default registry of host tools the broker may execute.

    Only names explicitly registered can be invoked; call() raises KeyError
    for anything else. The default allowlist is deliberately tiny and is
    expanded one tool at a time.
    """

    def __init__(self, tools: Optional[dict[str, Callable[[dict[str, Any]], Any]]] = None):
        self._tools = tools or {}

    @classmethod
    def default(cls) -> "HostToolRegistry":
        """Build the baseline registry: two diagnostics tools that echo their args."""
        allowlist: dict[str, Callable[[dict[str, Any]], Any]] = {
            "host.ping": lambda args: {"ok": True, "echo": args},
            "host.echo": lambda args: {"echo": args},
        }
        return cls(allowlist)

    def call(self, name: str, args: dict[str, Any]) -> Any:
        """Invoke the allowlisted tool *name* with *args*; KeyError if not registered."""
        if name not in self._tools:
            raise KeyError(f"Tool not allowlisted: {name}")
        handler = self._tools[name]
        return handler(args)
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
class _BaseBrokerServer:
    """Shared request-dispatch logic for broker server transports.

    Owns the collaborators (OpenAI chat backend, host tool registry, optional
    event handler) and the per-request handlers for both AnyIO byte-stream
    and asyncio reader/writer transports. Subclasses implement start() to
    create their listener.
    """

    def __init__(
        self,
        *,
        openai_backend: Optional[OpenAIChatBackend] = None,
        tool_registry: Optional[HostToolRegistry] = None,
        event_handler: Optional[Callable[[dict[str, Any]], None]] = None,
    ):
        # Transport state; populated by the subclass start() implementation
        # (AnyIO-based subclasses set _listener; see serve()/__aenter__).
        self._listener = None
        self._serve_task: asyncio.Task[None] | None = None
        # Collaborators fall back to working defaults when not injected.
        self._openai = openai_backend or OpenAIChatBackend()
        self._tools = tool_registry or HostToolRegistry.default()
        self._event_handler = event_handler
|
|
149
|
+
|
|
150
|
+
    async def start(self) -> None:
        """Create the transport listener; must be provided by each subclass."""
        raise NotImplementedError
|
|
152
|
+
|
|
153
|
+
async def serve(self) -> None:
|
|
154
|
+
"""Serve connections (blocks until listener is closed)."""
|
|
155
|
+
if self._listener is None:
|
|
156
|
+
raise RuntimeError("Server not started - call start() first")
|
|
157
|
+
await self._listener.serve(self._handle_connection)
|
|
158
|
+
|
|
159
|
+
    async def aclose(self) -> None:
        """Close the listener and drain the background serve task.

        Swallows the exceptions that accompany an orderly AnyIO shutdown
        (ClosedResourceError groups, task cancellation); anything else is
        re-raised.
        """
        if self._listener is not None:
            await self._listener.aclose()
            self._listener = None

        # Detach the task reference before awaiting so a re-entrant aclose()
        # cannot await the same task twice.
        task = self._serve_task
        self._serve_task = None
        if task is not None:
            try:
                await task
            except BaseExceptionGroup as eg:
                # AnyIO raises ClosedResourceError during normal listener shutdown.
                leaves = _flatten_exceptions(eg)
                if leaves and all(isinstance(e, anyio.ClosedResourceError) for e in leaves):
                    return
                raise
            except asyncio.CancelledError:
                pass
|
|
177
|
+
|
|
178
|
+
async def __aenter__(self) -> "_BaseBrokerServer":
|
|
179
|
+
await self.start()
|
|
180
|
+
|
|
181
|
+
# AnyIO listeners (TCP/TLS) require an explicit serve loop. Run it in the background
|
|
182
|
+
# so `async with TcpBrokerServer(...)` is sufficient to accept connections.
|
|
183
|
+
if self._listener is not None:
|
|
184
|
+
self._serve_task = asyncio.create_task(self.serve(), name="tactus-broker-serve")
|
|
185
|
+
return self
|
|
186
|
+
|
|
187
|
+
    async def __aexit__(self, exc_type, exc, tb) -> None:
        """Delegate context-manager exit to aclose(); exceptions are not suppressed."""
        await self.aclose()
|
|
189
|
+
|
|
190
|
+
    async def _handle_connection(self, byte_stream: anyio.abc.ByteStream) -> None:
        """Handle one broker request arriving over an AnyIO byte stream.

        Reads a single length-prefixed JSON request, dispatches it to the
        allowlisted per-method handler, and always closes the stream on the
        way out — one connection carries exactly one request/response
        exchange.
        """
        # For TLS connections, wrap the stream with TLS
        # Note: TcpBrokerServer subclass can override self.ssl_context
        if hasattr(self, "ssl_context") and self.ssl_context is not None:
            byte_stream = await TLSStream.wrap(
                byte_stream, ssl_context=self.ssl_context, server_side=True
            )

        # Wrap the stream for buffered reading
        buffered_stream = BufferedByteReceiveStream(byte_stream)

        try:
            # Use length-prefixed protocol to handle arbitrarily large messages
            req = await read_message_anyio(buffered_stream)
            req_id = req.get("id")
            method = req.get("method")
            params = req.get("params") or {}

            # Both fields are mandatory; reject malformed requests up front.
            if not req_id or not method:
                await _write_event_anyio(
                    byte_stream,
                    {
                        "id": req_id or "",
                        "event": "error",
                        "error": {"type": "BadRequest", "message": "Missing id/method"},
                    },
                )
                return

            if method == "events.emit":
                await self._handle_events_emit(req_id, params, byte_stream)
                return

            if method == "llm.chat":
                await self._handle_llm_chat(req_id, params, byte_stream)
                return

            if method == "tool.call":
                await self._handle_tool_call(req_id, params, byte_stream)
                return

            # Anything not explicitly dispatched above is rejected.
            await _write_event_anyio(
                byte_stream,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": "MethodNotFound", "message": f"Unknown method: {method}"},
                },
            )

        except Exception as e:
            logger.debug("[BROKER] Connection handler error", exc_info=True)
            # Best-effort error report; the peer may already be gone.
            try:
                await _write_event_anyio(
                    byte_stream,
                    {
                        "id": "",
                        "event": "error",
                        "error": {"type": type(e).__name__, "message": str(e)},
                    },
                )
            except Exception:
                pass
        finally:
            try:
                await byte_stream.aclose()
            except Exception:
                pass
|
|
258
|
+
|
|
259
|
+
    async def _handle_connection_asyncio(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        """
        Handle a single broker request over asyncio streams.

        UDS uses asyncio's StreamReader/StreamWriter APIs, while TCP uses AnyIO streams.
        One connection carries exactly one request/response exchange; the
        writer is always closed in the finally block.
        """
        try:
            req = await read_message(reader)
            req_id = req.get("id")
            method = req.get("method")
            params = req.get("params") or {}

            # Both fields are mandatory; reject malformed requests up front.
            if not req_id or not method:
                await _write_event_asyncio(
                    writer,
                    {
                        "id": req_id or "",
                        "event": "error",
                        "error": {"type": "BadRequest", "message": "Missing id/method"},
                    },
                )
                return

            if method == "events.emit":
                await self._handle_events_emit_asyncio(req_id, params, writer)
                return

            if method == "llm.chat":
                await self._handle_llm_chat_asyncio(req_id, params, writer)
                return

            if method == "tool.call":
                await self._handle_tool_call_asyncio(req_id, params, writer)
                return

            # Anything not explicitly dispatched above is rejected.
            await _write_event_asyncio(
                writer,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": "MethodNotFound", "message": f"Unknown method: {method}"},
                },
            )
        except Exception as e:
            logger.debug("[BROKER] asyncio connection handler error", exc_info=True)
            # Best-effort error report; the peer may already be gone.
            try:
                await _write_event_asyncio(
                    writer,
                    {
                        "id": "",
                        "event": "error",
                        "error": {"type": type(e).__name__, "message": str(e)},
                    },
                )
            except Exception:
                pass
        finally:
            try:
                writer.close()
                await writer.wait_closed()
            except Exception:
                pass
|
|
323
|
+
|
|
324
|
+
async def _handle_events_emit_asyncio(
|
|
325
|
+
self, req_id: str, params: dict[str, Any], writer: asyncio.StreamWriter
|
|
326
|
+
) -> None:
|
|
327
|
+
event = params.get("event")
|
|
328
|
+
if not isinstance(event, dict):
|
|
329
|
+
await _write_event_asyncio(
|
|
330
|
+
writer,
|
|
331
|
+
{
|
|
332
|
+
"id": req_id,
|
|
333
|
+
"event": "error",
|
|
334
|
+
"error": {"type": "BadRequest", "message": "params.event must be an object"},
|
|
335
|
+
},
|
|
336
|
+
)
|
|
337
|
+
return
|
|
338
|
+
|
|
339
|
+
try:
|
|
340
|
+
if self._event_handler is not None:
|
|
341
|
+
self._event_handler(event)
|
|
342
|
+
except Exception:
|
|
343
|
+
logger.debug("[BROKER] event_handler raised", exc_info=True)
|
|
344
|
+
|
|
345
|
+
await _write_event_asyncio(writer, {"id": req_id, "event": "done", "data": {"ok": True}})
|
|
346
|
+
|
|
347
|
+
    async def _handle_llm_chat_asyncio(
        self, req_id: str, params: dict[str, Any], writer: asyncio.StreamWriter
    ) -> None:
        """Proxy an llm.chat request to the OpenAI backend (asyncio transport).

        Validates params, then either streams "delta" events followed by a
        final "done" event carrying the accumulated text, or sends a single
        "done" event with the full response text. Only the "openai" provider
        is supported. The usage counters in the "done" payload are
        placeholders (always zero).
        """
        provider = params.get("provider") or "openai"
        if provider != "openai":
            await _write_event_asyncio(
                writer,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {
                        "type": "UnsupportedProvider",
                        "message": f"Unsupported provider: {provider}",
                    },
                },
            )
            return

        model = params.get("model")
        messages = params.get("messages")
        stream = bool(params.get("stream", False))
        temperature = params.get("temperature")
        max_tokens = params.get("max_tokens")

        if not isinstance(model, str) or not model:
            await _write_event_asyncio(
                writer,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": "BadRequest", "message": "params.model must be a string"},
                },
            )
            return
        if not isinstance(messages, list):
            await _write_event_asyncio(
                writer,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": "BadRequest", "message": "params.messages must be a list"},
                },
            )
            return

        try:
            if stream:
                stream_iter = await self._openai.chat(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    stream=True,
                )

                # Forward each content delta as it arrives and accumulate the
                # full text for the final "done" event.
                full_text = ""
                async for chunk in stream_iter:
                    try:
                        delta = chunk.choices[0].delta
                        text = getattr(delta, "content", None)
                    except Exception:
                        text = None

                    # Skip empty/role-only chunks.
                    if not text:
                        continue

                    full_text += text
                    await _write_event_asyncio(
                        writer, {"id": req_id, "event": "delta", "data": {"text": text}}
                    )

                await _write_event_asyncio(
                    writer,
                    {
                        "id": req_id,
                        "event": "done",
                        "data": {
                            "text": full_text,
                            "usage": {
                                "prompt_tokens": 0,
                                "completion_tokens": 0,
                                "total_tokens": 0,
                            },
                        },
                    },
                )
                return

            resp = await self._openai.chat(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=False,
            )
            # Tolerate responses with no choices/content; treat as empty text.
            text = ""
            try:
                text = resp.choices[0].message.content or ""
            except Exception:
                text = ""

            await _write_event_asyncio(
                writer,
                {
                    "id": req_id,
                    "event": "done",
                    "data": {
                        "text": text,
                        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
                    },
                },
            )
        except Exception as e:
            logger.debug("[BROKER] llm.chat error", exc_info=True)
            await _write_event_asyncio(
                writer,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": type(e).__name__, "message": str(e)},
                },
            )
|
|
469
|
+
|
|
470
|
+
async def _handle_tool_call_asyncio(
|
|
471
|
+
self, req_id: str, params: dict[str, Any], writer: asyncio.StreamWriter
|
|
472
|
+
) -> None:
|
|
473
|
+
name = params.get("name")
|
|
474
|
+
args = params.get("args") or {}
|
|
475
|
+
|
|
476
|
+
if not isinstance(name, str) or not name:
|
|
477
|
+
await _write_event_asyncio(
|
|
478
|
+
writer,
|
|
479
|
+
{
|
|
480
|
+
"id": req_id,
|
|
481
|
+
"event": "error",
|
|
482
|
+
"error": {"type": "BadRequest", "message": "params.name must be a string"},
|
|
483
|
+
},
|
|
484
|
+
)
|
|
485
|
+
return
|
|
486
|
+
if not isinstance(args, dict):
|
|
487
|
+
await _write_event_asyncio(
|
|
488
|
+
writer,
|
|
489
|
+
{
|
|
490
|
+
"id": req_id,
|
|
491
|
+
"event": "error",
|
|
492
|
+
"error": {"type": "BadRequest", "message": "params.args must be an object"},
|
|
493
|
+
},
|
|
494
|
+
)
|
|
495
|
+
return
|
|
496
|
+
|
|
497
|
+
try:
|
|
498
|
+
result = self._tools.call(name, args)
|
|
499
|
+
except KeyError:
|
|
500
|
+
await _write_event_asyncio(
|
|
501
|
+
writer,
|
|
502
|
+
{
|
|
503
|
+
"id": req_id,
|
|
504
|
+
"event": "error",
|
|
505
|
+
"error": {
|
|
506
|
+
"type": "ToolNotAllowed",
|
|
507
|
+
"message": f"Tool not allowlisted: {name}",
|
|
508
|
+
},
|
|
509
|
+
},
|
|
510
|
+
)
|
|
511
|
+
return
|
|
512
|
+
except Exception as e:
|
|
513
|
+
logger.debug("[BROKER] tool.call error", exc_info=True)
|
|
514
|
+
await _write_event_asyncio(
|
|
515
|
+
writer,
|
|
516
|
+
{
|
|
517
|
+
"id": req_id,
|
|
518
|
+
"event": "error",
|
|
519
|
+
"error": {"type": type(e).__name__, "message": str(e)},
|
|
520
|
+
},
|
|
521
|
+
)
|
|
522
|
+
return
|
|
523
|
+
|
|
524
|
+
await _write_event_asyncio(
|
|
525
|
+
writer, {"id": req_id, "event": "done", "data": {"result": result}}
|
|
526
|
+
)
|
|
527
|
+
|
|
528
|
+
async def _handle_events_emit(
|
|
529
|
+
self, req_id: str, params: dict[str, Any], byte_stream: anyio.abc.ByteStream
|
|
530
|
+
) -> None:
|
|
531
|
+
event = params.get("event")
|
|
532
|
+
if not isinstance(event, dict):
|
|
533
|
+
await _write_event_anyio(
|
|
534
|
+
byte_stream,
|
|
535
|
+
{
|
|
536
|
+
"id": req_id,
|
|
537
|
+
"event": "error",
|
|
538
|
+
"error": {"type": "BadRequest", "message": "params.event must be an object"},
|
|
539
|
+
},
|
|
540
|
+
)
|
|
541
|
+
return
|
|
542
|
+
|
|
543
|
+
try:
|
|
544
|
+
if self._event_handler is not None:
|
|
545
|
+
self._event_handler(event)
|
|
546
|
+
except Exception:
|
|
547
|
+
logger.debug("[BROKER] event_handler raised", exc_info=True)
|
|
548
|
+
|
|
549
|
+
await _write_event_anyio(byte_stream, {"id": req_id, "event": "done", "data": {"ok": True}})
|
|
550
|
+
|
|
551
|
+
    async def _handle_llm_chat(
        self, req_id: str, params: dict[str, Any], byte_stream: anyio.abc.ByteStream
    ) -> None:
        """Proxy an llm.chat request to the OpenAI backend (AnyIO transport).

        Validates params, then either streams "delta" events followed by a
        final "done" event carrying the accumulated text, or sends a single
        "done" event with the full response text. Only the "openai" provider
        is supported. The usage counters in the "done" payload are
        placeholders (always zero).
        """
        provider = params.get("provider") or "openai"
        if provider != "openai":
            await _write_event_anyio(
                byte_stream,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {
                        "type": "UnsupportedProvider",
                        "message": f"Unsupported provider: {provider}",
                    },
                },
            )
            return

        model = params.get("model")
        messages = params.get("messages")
        stream = bool(params.get("stream", False))
        temperature = params.get("temperature")
        max_tokens = params.get("max_tokens")

        if not isinstance(model, str) or not model:
            await _write_event_anyio(
                byte_stream,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": "BadRequest", "message": "params.model must be a string"},
                },
            )
            return
        if not isinstance(messages, list):
            await _write_event_anyio(
                byte_stream,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": "BadRequest", "message": "params.messages must be a list"},
                },
            )
            return

        try:
            if stream:
                stream_iter = await self._openai.chat(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    stream=True,
                )

                # Forward each content delta as it arrives and accumulate the
                # full text for the final "done" event.
                full_text = ""
                async for chunk in stream_iter:
                    try:
                        delta = chunk.choices[0].delta
                        text = getattr(delta, "content", None)
                    except Exception:
                        text = None

                    # Skip empty/role-only chunks.
                    if not text:
                        continue

                    full_text += text
                    await _write_event_anyio(
                        byte_stream, {"id": req_id, "event": "delta", "data": {"text": text}}
                    )

                await _write_event_anyio(
                    byte_stream,
                    {
                        "id": req_id,
                        "event": "done",
                        "data": {
                            "text": full_text,
                            "usage": {
                                "prompt_tokens": 0,
                                "completion_tokens": 0,
                                "total_tokens": 0,
                            },
                        },
                    },
                )
                return

            resp = await self._openai.chat(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=False,
            )
            # Tolerate responses with no choices/content; treat as empty text.
            text = ""
            try:
                text = resp.choices[0].message.content or ""
            except Exception:
                text = ""

            await _write_event_anyio(
                byte_stream,
                {
                    "id": req_id,
                    "event": "done",
                    "data": {
                        "text": text,
                        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
                    },
                },
            )
        except Exception as e:
            logger.debug("[BROKER] llm.chat error", exc_info=True)
            await _write_event_anyio(
                byte_stream,
                {
                    "id": req_id,
                    "event": "error",
                    "error": {"type": type(e).__name__, "message": str(e)},
                },
            )
|
|
673
|
+
|
|
674
|
+
async def _handle_tool_call(
|
|
675
|
+
self, req_id: str, params: dict[str, Any], byte_stream: anyio.abc.ByteStream
|
|
676
|
+
) -> None:
|
|
677
|
+
name = params.get("name")
|
|
678
|
+
args = params.get("args") or {}
|
|
679
|
+
|
|
680
|
+
if not isinstance(name, str) or not name:
|
|
681
|
+
await _write_event_anyio(
|
|
682
|
+
byte_stream,
|
|
683
|
+
{
|
|
684
|
+
"id": req_id,
|
|
685
|
+
"event": "error",
|
|
686
|
+
"error": {"type": "BadRequest", "message": "params.name must be a string"},
|
|
687
|
+
},
|
|
688
|
+
)
|
|
689
|
+
return
|
|
690
|
+
if not isinstance(args, dict):
|
|
691
|
+
await _write_event_anyio(
|
|
692
|
+
byte_stream,
|
|
693
|
+
{
|
|
694
|
+
"id": req_id,
|
|
695
|
+
"event": "error",
|
|
696
|
+
"error": {"type": "BadRequest", "message": "params.args must be an object"},
|
|
697
|
+
},
|
|
698
|
+
)
|
|
699
|
+
return
|
|
700
|
+
|
|
701
|
+
try:
|
|
702
|
+
result = self._tools.call(name, args)
|
|
703
|
+
except KeyError:
|
|
704
|
+
await _write_event_anyio(
|
|
705
|
+
byte_stream,
|
|
706
|
+
{
|
|
707
|
+
"id": req_id,
|
|
708
|
+
"event": "error",
|
|
709
|
+
"error": {
|
|
710
|
+
"type": "ToolNotAllowed",
|
|
711
|
+
"message": f"Tool not allowlisted: {name}",
|
|
712
|
+
},
|
|
713
|
+
},
|
|
714
|
+
)
|
|
715
|
+
return
|
|
716
|
+
except Exception as e:
|
|
717
|
+
logger.debug("[BROKER] tool.call error", exc_info=True)
|
|
718
|
+
await _write_event_anyio(
|
|
719
|
+
byte_stream,
|
|
720
|
+
{
|
|
721
|
+
"id": req_id,
|
|
722
|
+
"event": "error",
|
|
723
|
+
"error": {"type": type(e).__name__, "message": str(e)},
|
|
724
|
+
},
|
|
725
|
+
)
|
|
726
|
+
return
|
|
727
|
+
|
|
728
|
+
await _write_event_anyio(
|
|
729
|
+
byte_stream, {"id": req_id, "event": "done", "data": {"result": result}}
|
|
730
|
+
)
|
|
731
|
+
|
|
732
|
+
|
|
733
|
+
class BrokerServer(_BaseBrokerServer):
    """
    Local broker server that listens on a Unix domain socket.

    Protocol (length-prefixed JSON messages; see tactus.broker.protocol):
        request: {"id":"...","method":"llm.chat","params":{...}}
        response stream:
            {"id":"...","event":"delta","data":{"text":"..."}}
            {"id":"...","event":"done","data":{...}}
        or:
            {"id":"...","event":"error","error":{"message":"...","type":"..."}}
    """

    def __init__(
        self,
        socket_path: Path,
        *,
        openai_backend: Optional[OpenAIChatBackend] = None,
        tool_registry: Optional[HostToolRegistry] = None,
        event_handler: Optional[Callable[[dict[str, Any]], None]] = None,
    ):
        super().__init__(
            openai_backend=openai_backend, tool_registry=tool_registry, event_handler=event_handler
        )
        # Normalize to Path so callers may pass a string.
        self.socket_path = Path(socket_path)
        self._server: asyncio.AbstractServer | None = None
|
|
759
|
+
|
|
760
|
+
async def start(self) -> None:
    """Bind the Unix-domain socket and begin accepting connections."""
    # AF_UNIX imposes a small, platform-specific limit on socket path
    # length; fail early with a clear message rather than an opaque
    # "AF_UNIX path too long" error at bind time.
    path_str = str(self.socket_path)
    if len(path_str) > 90:
        raise ValueError(
            f"Broker socket path too long for AF_UNIX: {self.socket_path} "
            f"(len={len(str(self.socket_path))})"
        )

    # Ensure the parent directory exists and remove any stale socket file
    # left over from a previous run.
    self.socket_path.parent.mkdir(parents=True, exist_ok=True)
    if self.socket_path.exists():
        self.socket_path.unlink()

    self._server = await asyncio.start_unix_server(
        self._handle_connection_asyncio, path=path_str
    )
    logger.info(f"[BROKER] Listening on UDS: {self.socket_path}")
async def aclose(self) -> None:
    """Shut down the listening server, release shared resources, and remove the socket file.

    Safe to call multiple times; each step is best-effort and failures are
    logged at debug level rather than raised.
    """
    # Close the asyncio server if one is active. The finally clause
    # guarantees _server is cleared even if close/wait_closed fails.
    server = getattr(self, "_server", None)
    if server is not None:
        try:
            server.close()
            await server.wait_closed()
        except Exception:
            logger.debug("[BROKER] Failed to close asyncio server", exc_info=True)
        finally:
            self._server = None

    # Let the base class tear down backends/registries it owns.
    await super().aclose()

    # NOTE: the previous implementation repeated the server close here a
    # second time; that branch was unreachable because _server is always
    # None by this point, so it has been removed.

    # Best-effort removal of the socket file so later runs can re-bind.
    try:
        if self.socket_path.exists():
            self.socket_path.unlink()
    except Exception:
        logger.debug("[BROKER] Failed to unlink socket path", exc_info=True)
async def _handle_connection_asyncio(
    self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
    """Serve one client connection: read a single request, dispatch it, reply, close."""
    try:
        request = await read_message(reader)
        req_id = request.get("id")
        method = request.get("method")
        params = request.get("params") or {}

        async def write_event(event: dict[str, Any]) -> None:
            await write_message(writer, event)

        # Both fields are mandatory; reject malformed requests up front.
        if not req_id or not method:
            await write_event(
                {
                    "id": req_id or "",
                    "event": "error",
                    "error": {"type": "BadRequest", "message": "Missing id/method"},
                }
            )
            return

        dispatch = {
            "events.emit": self._handle_events_emit_asyncio,
            "llm.chat": self._handle_llm_chat_asyncio,
            "tool.call": self._handle_tool_call_asyncio,
        }
        handler = dispatch.get(method)
        if handler is not None:
            await handler(req_id, params, write_event)
            return

        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {"type": "MethodNotFound", "message": f"Unknown method: {method}"},
            }
        )

    except Exception as e:
        logger.debug("[BROKER] Connection handler error", exc_info=True)
        # Best-effort error report; the request id may be unknown here.
        try:
            await write_message(
                writer,
                {
                    "id": "",
                    "event": "error",
                    "error": {"type": type(e).__name__, "message": str(e)},
                },
            )
        except Exception:
            pass
    finally:
        # Always close the connection, swallowing close-time errors.
        try:
            writer.close()
            await writer.wait_closed()
        except Exception:
            pass
async def _handle_events_emit_asyncio(
    self,
    req_id: str,
    params: dict[str, Any],
    write_event: Callable[[dict[str, Any]], Awaitable[None]],
) -> None:
    """Forward a client-emitted event to the registered handler, then acknowledge."""
    payload = params.get("event")
    if not isinstance(payload, dict):
        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {"type": "BadRequest", "message": "params.event must be an object"},
            }
        )
        return

    # A misbehaving handler must not prevent the acknowledgement below.
    try:
        handler = self._event_handler
        if handler is not None:
            handler(payload)
    except Exception:
        logger.debug("[BROKER] event_handler raised", exc_info=True)

    await write_event({"id": req_id, "event": "done", "data": {"ok": True}})
async def _handle_tool_call_asyncio(
    self,
    req_id: str,
    params: dict[str, Any],
    write_event: Callable[[dict[str, Any]], Awaitable[None]],
) -> None:
    """Validate a tool.call request, invoke the host tool registry, and reply."""
    name = params.get("name")
    args = params.get("args") or {}

    async def fail(err_type: str, message: str) -> None:
        # All error replies for this request share the same envelope.
        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {"type": err_type, "message": message},
            }
        )

    if not isinstance(name, str) or not name:
        await fail("BadRequest", "params.name must be a string")
        return
    if not isinstance(args, dict):
        await fail("BadRequest", "params.args must be an object")
        return

    try:
        result = self._tools.call(name, args)
    except KeyError:
        # The registry signals a non-allowlisted tool with KeyError.
        await fail("ToolNotAllowed", f"Tool not allowlisted: {name}")
        return
    except Exception as e:
        logger.debug("[BROKER] tool.call error", exc_info=True)
        await fail(type(e).__name__, str(e))
        return

    await write_event({"id": req_id, "event": "done", "data": {"result": result}})
async def _handle_llm_chat_asyncio(
    self,
    req_id: str,
    params: dict[str, Any],
    write_event: Callable[[dict[str, Any]], Awaitable[None]],
) -> None:
    """Run an llm.chat request against the OpenAI backend, streaming deltas if requested."""
    provider = params.get("provider") or "openai"
    if provider != "openai":
        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {
                    "type": "UnsupportedProvider",
                    "message": f"Unsupported provider: {provider}",
                },
            }
        )
        return

    model = params.get("model")
    messages = params.get("messages")
    stream = bool(params.get("stream", False))
    temperature = params.get("temperature")
    max_tokens = params.get("max_tokens")

    if not isinstance(model, str) or not model:
        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {"type": "BadRequest", "message": "params.model must be a string"},
            }
        )
        return
    if not isinstance(messages, list):
        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {"type": "BadRequest", "message": "params.messages must be a list"},
            }
        )
        return

    # Token accounting is not wired up here; every reply reports zeros.
    zero_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

    try:
        if stream:
            chunks = await self._openai.chat(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=True,
            )

            # Emit each non-empty delta as it arrives and collect the
            # pieces so the final "done" event carries the full text.
            pieces: list[str] = []
            async for chunk in chunks:
                try:
                    piece = getattr(chunk.choices[0].delta, "content", None)
                except Exception:
                    piece = None
                if not piece:
                    continue
                pieces.append(piece)
                await write_event({"id": req_id, "event": "delta", "data": {"text": piece}})

            await write_event(
                {
                    "id": req_id,
                    "event": "done",
                    "data": {"text": "".join(pieces), "usage": zero_usage},
                }
            )
            return

        resp = await self._openai.chat(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=False,
        )
        # Tolerate unexpected response shapes by falling back to "".
        try:
            text = resp.choices[0].message.content or ""
        except Exception:
            text = ""

        await write_event(
            {
                "id": req_id,
                "event": "done",
                "data": {"text": text, "usage": zero_usage},
            }
        )
    except Exception as e:
        logger.debug("[BROKER] llm.chat error", exc_info=True)
        await write_event(
            {
                "id": req_id,
                "event": "error",
                "error": {"type": type(e).__name__, "message": str(e)},
            }
        )
class TcpBrokerServer(_BaseBrokerServer):
    """
    Broker server bound to a TCP port, optionally wrapped in TLS.

    Speaks the same NDJSON request/response framing as the UDS broker.
    """

    def __init__(
        self,
        *,
        host: str = "127.0.0.1",
        port: int = 0,
        ssl_context: ssl.SSLContext | None = None,
        openai_backend: Optional[OpenAIChatBackend] = None,
        tool_registry: Optional[HostToolRegistry] = None,
        event_handler: Optional[Callable[[dict[str, Any]], None]] = None,
    ):
        super().__init__(
            openai_backend=openai_backend, tool_registry=tool_registry, event_handler=event_handler
        )
        self.host = host
        # port == 0 lets the OS choose a free port; see bound_port after start().
        self.port = port
        self.ssl_context = ssl_context
        # Actual port after binding; None until start() succeeds.
        self.bound_port: int | None = None
        # Background task running the serve() loop; None while stopped.
        self._serve_task: asyncio.Task[None] | None = None
async def start(self) -> None:
    """Bind the TCP listener and launch the serve() loop in the background."""
    # Binding happens here; accepting only begins once serve() runs below.
    self._listener = await anyio.create_tcp_listener(local_host=self.host, local_port=self.port)

    # Discover the port the OS actually assigned (relevant when port == 0).
    try:
        raw_sock = self._listener.extra(anyio.abc.SocketAttribute.raw_socket)
        self.bound_port = int(raw_sock.getsockname()[1])
    except Exception:
        self.bound_port = None

    scheme = "tls" if self.ssl_context is not None else "tcp"
    logger.info(
        f"[BROKER] Listening on {scheme}: {self.host}:{self.bound_port if self.bound_port is not None else self.port}"
    )

    # AnyIO listeners, unlike asyncio.start_server(), do not serve on their
    # own; run the explicit serve() loop as a background task until
    # aclose() cancels it.
    if self._serve_task is None or self._serve_task.done():
        self._serve_task = asyncio.create_task(self.serve(), name="tactus-broker-tcp-serve")
async def aclose(self) -> None:
    """Cancel the background serve() task, then release base-class resources."""
    task, self._serve_task = self._serve_task, None
    if task is not None and not task.done():
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
    # NOTE(review): the AnyIO listener created in start() is presumably
    # closed as serve() unwinds on cancellation — confirm it does not leak.

    await super().aclose()