soorma-core 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- soorma/__init__.py +138 -0
- soorma/agents/__init__.py +17 -0
- soorma/agents/base.py +523 -0
- soorma/agents/planner.py +391 -0
- soorma/agents/tool.py +373 -0
- soorma/agents/worker.py +385 -0
- soorma/ai/event_toolkit.py +281 -0
- soorma/ai/tools.py +280 -0
- soorma/cli/__init__.py +7 -0
- soorma/cli/commands/__init__.py +3 -0
- soorma/cli/commands/dev.py +780 -0
- soorma/cli/commands/init.py +717 -0
- soorma/cli/main.py +52 -0
- soorma/context.py +832 -0
- soorma/events.py +496 -0
- soorma/models.py +24 -0
- soorma/registry/client.py +186 -0
- soorma/utils/schema_utils.py +209 -0
- soorma_core-0.3.0.dist-info/METADATA +454 -0
- soorma_core-0.3.0.dist-info/RECORD +23 -0
- soorma_core-0.3.0.dist-info/WHEEL +4 -0
- soorma_core-0.3.0.dist-info/entry_points.txt +3 -0
- soorma_core-0.3.0.dist-info/licenses/LICENSE.txt +21 -0
soorma/agents/worker.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Worker Agent - Domain-specific cognitive node.
|
|
3
|
+
|
|
4
|
+
Workers are specialized agents that handle domain-specific cognitive tasks.
|
|
5
|
+
They:
|
|
6
|
+
- Subscribe to action-requests for their capabilities
|
|
7
|
+
- Execute tasks using domain knowledge (often with LLMs)
|
|
8
|
+
- Report progress and results
|
|
9
|
+
- Emit action-results when complete
|
|
10
|
+
|
|
11
|
+
Workers are discoverable via the Registry service based on their capabilities.
|
|
12
|
+
|
|
13
|
+
Usage:
|
|
14
|
+
from soorma.agents import Worker
|
|
15
|
+
|
|
16
|
+
worker = Worker(
|
|
17
|
+
name="research-worker",
|
|
18
|
+
description="Searches and analyzes research papers",
|
|
19
|
+
capabilities=["paper_search", "citation_analysis"],
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
@worker.on_task("search_papers")
|
|
23
|
+
async def search_papers(task, context):
|
|
24
|
+
# Access shared memory
|
|
25
|
+
preferences = await context.memory.retrieve(f"user:{task.session_id}:preferences")
|
|
26
|
+
|
|
27
|
+
# Use LLM for intelligent search
|
|
28
|
+
results = await search_with_llm(task.data["query"], preferences)
|
|
29
|
+
|
|
30
|
+
# Store results in memory for other workers
|
|
31
|
+
await context.memory.store(f"search_results:{task.task_id}", results)
|
|
32
|
+
|
|
33
|
+
return {"papers": results, "count": len(results)}
|
|
34
|
+
|
|
35
|
+
worker.run()
|
|
36
|
+
"""
|
|
37
|
+
import logging
|
|
38
|
+
from dataclasses import dataclass, field
|
|
39
|
+
from typing import Any, Awaitable, Callable, Dict, List, Optional
|
|
40
|
+
from uuid import uuid4
|
|
41
|
+
|
|
42
|
+
from .base import Agent
|
|
43
|
+
from ..context import PlatformContext
|
|
44
|
+
|
|
45
|
+
logger = logging.getLogger(__name__)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class TaskContext:
    """
    Context for a single task being executed by a Worker.

    Bundles everything a task handler needs to do its work, plus a
    convenience method for reporting progress back to the Tracker.

    Attributes:
        task_id: Unique task identifier
        task_name: Name of the task
        plan_id: Parent plan ID
        goal_id: Original goal ID
        data: Task input data
        correlation_id: Tracking ID
        session_id: Client session
        tenant_id: Multi-tenant isolation
        timeout: Task timeout in seconds
        priority: Task priority
    """
    task_id: str
    task_name: str
    plan_id: str
    goal_id: str
    data: Dict[str, Any]
    correlation_id: Optional[str] = None
    session_id: Optional[str] = None
    tenant_id: Optional[str] = None
    timeout: Optional[float] = None
    priority: int = 0

    # Platform context injected by the Worker; kept out of repr on purpose.
    _platform_context: Optional[PlatformContext] = field(default=None, repr=False)

    async def report_progress(
        self,
        progress: float,
        message: Optional[str] = None,
    ) -> None:
        """
        Report task progress to the Tracker.

        Silently does nothing when no platform context is attached
        (e.g. when the TaskContext is constructed directly in tests).

        Args:
            progress: Progress percentage (0.0 to 1.0)
            message: Optional status message
        """
        ctx = self._platform_context
        if ctx is None:
            return
        await ctx.tracker.emit_progress(
            plan_id=self.plan_id,
            task_id=self.task_id,
            status="running",
            progress=progress,
            message=message,
        )
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
# Type alias for task handlers
|
|
105
|
+
TaskHandler = Callable[[TaskContext, PlatformContext], Awaitable[Dict[str, Any]]]
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class Worker(Agent):
    """
    Domain-specific cognitive node that executes tasks.

    Workers are the "hands" of the DisCo architecture. They:
    1. Register capabilities with the Registry
    2. Subscribe to action-requests matching their capabilities
    3. Execute tasks with domain expertise (often using LLMs)
    4. Report progress to the Tracker
    5. Emit action-results when complete

    Workers are designed to be:
    - Specialized: Each worker handles specific domain tasks
    - Discoverable: Found via Registry by capability
    - Stateless: All state is stored in Memory service
    - Observable: Progress tracked automatically

    Attributes:
        All Agent attributes, plus:
        on_task: Decorator for registering task handlers

    Usage:
        worker = Worker(
            name="summarizer",
            description="Summarizes documents",
            capabilities=["text_summarization", "key_extraction"],
        )

        @worker.on_task("summarize_document")
        async def summarize(task: TaskContext, context: PlatformContext) -> Dict:
            await task.report_progress(0.1, "Loading document")
            doc = await context.memory.retrieve(f"doc:{task.data['doc_id']}")
            await task.report_progress(0.5, "Summarizing")
            summary = await llm_summarize(doc)
            return {"summary": summary, "length": len(summary)}

        worker.run()
    """

    def __init__(
        self,
        name: str,
        description: str = "",
        version: str = "0.1.0",
        capabilities: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        Initialize the Worker.

        Args:
            name: Worker name
            description: What this worker does
            version: Version string
            capabilities: Task capabilities offered
            **kwargs: Additional Agent arguments
        """
        # Workers consume action-requests and produce action-results.
        # Copy the caller-supplied lists so we never mutate their arguments.
        events_consumed = list(kwargs.pop("events_consumed", []))
        if "action.request" not in events_consumed:
            events_consumed.append("action.request")

        events_produced = list(kwargs.pop("events_produced", []))
        if "action.result" not in events_produced:
            events_produced.append("action.result")

        super().__init__(
            name=name,
            description=description,
            version=version,
            agent_type="worker",
            capabilities=capabilities or [],
            events_consumed=events_consumed,
            events_produced=events_produced,
            **kwargs,
        )

        # Task handlers: task_name -> handler
        self._task_handlers: Dict[str, TaskHandler] = {}

        # Also register the main action.request handler
        self._register_action_request_handler()

    def _register_action_request_handler(self) -> None:
        """Register the main action.request event handler."""
        @self.on_event("action.request")
        async def handle_action_request(event: Dict[str, Any], context: PlatformContext) -> None:
            await self._handle_action_request(event, context)

    def on_task(self, task_name: str) -> Callable[[TaskHandler], TaskHandler]:
        """
        Decorator to register a task handler.

        Task handlers receive a TaskContext and PlatformContext,
        and return a result dictionary.

        Usage:
            @worker.on_task("process_data")
            async def process(task: TaskContext, context: PlatformContext) -> Dict:
                result = await do_processing(task.data)
                return {"processed": True, "output": result}

        Args:
            task_name: The task name to handle

        Returns:
            Decorator function
        """
        def decorator(func: TaskHandler) -> TaskHandler:
            self._task_handlers[task_name] = func

            # Handled tasks are implicitly advertised as capabilities
            # so the Registry can route requests to this worker.
            if task_name not in self.config.capabilities:
                self.config.capabilities.append(task_name)

            logger.debug(f"Registered task handler: {task_name}")
            return func
        return decorator

    async def _publish_action_result(
        self,
        context: PlatformContext,
        task: TaskContext,
        status: str,
        *,
        result: Optional[Dict[str, Any]] = None,
        error: Optional[str] = None,
    ) -> None:
        """
        Emit an action.result event for a completed or failed task.

        Args:
            context: Platform context providing the event bus
            task: The task this result belongs to
            status: "completed" or "failed"
            result: Result payload (completed tasks)
            error: Error message (failed tasks)
        """
        data: Dict[str, Any] = {
            "task_id": task.task_id,
            "task_name": task.task_name,
            "plan_id": task.plan_id,
            "goal_id": task.goal_id,
            "status": status,
        }
        if status == "completed":
            data["result"] = result
        else:
            data["error"] = error

        await context.bus.publish(
            event_type="action.result",
            data=data,
            topic="action-results",
            correlation_id=task.correlation_id,
        )

    async def _handle_action_request(
        self,
        event: Dict[str, Any],
        context: PlatformContext,
    ) -> None:
        """
        Handle an incoming action.request event.

        Ignores requests not addressed to this worker or without a
        registered handler; otherwise executes the task, tracks
        progress, and publishes an action.result (success or failure).
        """
        data = event.get("data", {})

        task_name = data.get("task_name")
        assigned_to = data.get("assigned_to")

        # Check if this task is assigned to us
        if not self._should_handle_task(assigned_to):
            return

        handler = self._task_handlers.get(task_name)
        if not handler:
            logger.debug(f"No handler for task: {task_name}")
            return

        # Create TaskContext
        task = TaskContext(
            task_id=data.get("task_id", str(uuid4())),
            task_name=task_name,
            plan_id=data.get("plan_id", ""),
            goal_id=data.get("goal_id", ""),
            data=data.get("data", {}),
            correlation_id=event.get("correlation_id"),
            session_id=event.get("session_id"),
            tenant_id=event.get("tenant_id"),
            timeout=data.get("timeout"),
            priority=data.get("priority", 0),
            _platform_context=context,
        )

        # Report task started
        await context.tracker.emit_progress(
            plan_id=task.plan_id,
            task_id=task.task_id,
            status="running",
            progress=0.0,
        )

        try:
            # Execute task
            logger.info(f"Executing task: {task_name} ({task.task_id})")
            result = await handler(task, context)

            # Report completion
            await context.tracker.complete_task(
                plan_id=task.plan_id,
                task_id=task.task_id,
                result=result,
            )

            # Emit action.result
            await self._publish_action_result(
                context, task, "completed", result=result
            )

            logger.info(f"Completed task: {task_name} ({task.task_id})")

        except Exception as e:
            # logger.exception keeps the traceback, unlike a plain error log.
            logger.exception("Task failed: %s - %s", task_name, e)

            # Report failure
            await context.tracker.fail_task(
                plan_id=task.plan_id,
                task_id=task.task_id,
                error=str(e),
            )

            # Emit failure result
            await self._publish_action_result(
                context, task, "failed", error=str(e)
            )

    def _should_handle_task(self, assigned_to: Optional[str]) -> bool:
        """
        Check if this worker should handle a task based on assigned_to.

        A task is ours when assigned_to matches our name, our agent_id,
        or one of our capabilities. Unassigned tasks are never handled.
        """
        if not assigned_to:
            return False

        # Match by name
        if assigned_to == self.name:
            return True

        # Match by agent_id
        if assigned_to == self.agent_id:
            return True

        # Match by capability
        if assigned_to in self.config.capabilities:
            return True

        return False

    async def execute_task(
        self,
        task_name: str,
        data: Dict[str, Any],
        plan_id: Optional[str] = None,
        goal_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Programmatically execute a task.

        This method allows executing tasks without going through the event bus,
        useful for testing or direct integration.

        Args:
            task_name: Name of the task to execute
            data: Task input data
            plan_id: Optional plan ID
            goal_id: Optional goal ID

        Returns:
            Task result dictionary

        Raises:
            ValueError: If no handler for task_name
        """
        handler = self._task_handlers.get(task_name)
        if not handler:
            raise ValueError(f"No handler for task: {task_name}")

        task = TaskContext(
            task_id=str(uuid4()),
            task_name=task_name,
            plan_id=plan_id or "",
            goal_id=goal_id or "",
            data=data,
            _platform_context=self.context,
        )

        return await handler(task, self.context)
|
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI-friendly toolkit for dynamic event discovery and generation.
|
|
3
|
+
|
|
4
|
+
This module provides utilities specifically designed for AI agents to:
|
|
5
|
+
1. Discover available events from the registry
|
|
6
|
+
2. Generate example payloads from schemas
|
|
7
|
+
3. Validate and publish events dynamically
|
|
8
|
+
4. Handle responses without hardcoded DTOs
|
|
9
|
+
|
|
10
|
+
The toolkit abstracts away the complexity of working with JSON schemas
|
|
11
|
+
and provides a simple, intuitive interface that AI agents can understand.
|
|
12
|
+
"""
|
|
13
|
+
from typing import Any, Dict, List, Optional
|
|
14
|
+
from pydantic import ValidationError as PydanticValidationError
|
|
15
|
+
|
|
16
|
+
from soorma.registry.client import RegistryClient
|
|
17
|
+
from soorma_common import EventDefinition
|
|
18
|
+
from soorma.utils.schema_utils import (
|
|
19
|
+
create_event_models,
|
|
20
|
+
get_schema_field_names,
|
|
21
|
+
get_required_fields,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class EventToolkit:
    """
    AI-friendly toolkit for working with events dynamically.

    This class provides a simple interface for AI agents to discover
    and work with events without needing to know schemas in advance.

    Must be used as an async context manager; the registry client is
    opened on __aenter__ and closed (and discarded) on __aexit__.

    Example:
        >>> async with EventToolkit() as toolkit:
        ...     # Discover available events
        ...     events = await toolkit.discover_events(topic="action-requests")
        ...
        ...     # Create validated payload
        ...     payload = await toolkit.create_payload(
        ...         "web.search.request",
        ...         {"query": "AI trends"}
        ...     )
    """

    def __init__(self, registry_url: str = "http://localhost:8000"):
        """
        Initialize the toolkit.

        Args:
            registry_url: Registry service URL (default: http://localhost:8000)
        """
        self.registry_url = registry_url
        self._client: Optional[RegistryClient] = None

    async def __aenter__(self):
        """Async context manager entry."""
        self._client = RegistryClient(base_url=self.registry_url)
        await self._client.__aenter__()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit."""
        if self._client:
            await self._client.__aexit__(exc_type, exc_val, exc_tb)
            # Drop the closed client so accidental reuse raises RuntimeError
            # instead of operating on a closed connection.
            self._client = None

    def _require_client(self) -> RegistryClient:
        """Return the active client, or raise if not inside the context manager."""
        if not self._client:
            raise RuntimeError("Toolkit must be used as async context manager")
        return self._client

    def _format_event_descriptor(self, event: EventDefinition) -> Dict[str, Any]:
        """Format event definition for AI consumption."""
        required_fields = get_required_fields(event.payload_schema)
        payload_fields = {}

        for name, prop in event.payload_schema.get("properties", {}).items():
            field_info = prop.copy()
            field_info["required"] = name in required_fields
            # Rename "enum" to the friendlier "allowed_values" for AI readers.
            if "enum" in field_info:
                field_info["allowed_values"] = field_info.pop("enum")
            payload_fields[name] = field_info

        descriptor = {
            "name": event.event_name,
            "description": event.description,
            "topic": event.topic,
            "required_fields": required_fields,
            "payload_fields": payload_fields,
            "example_payload": self._generate_example(event.payload_schema),
            "has_response": event.response_schema is not None
        }

        if event.response_schema:
            descriptor["response_fields"] = event.response_schema.get("properties", {})

        return descriptor

    async def discover_events(
        self,
        topic: Optional[str] = None,
        event_name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover available events with their schemas.

        Returns a simplified list of events with metadata that AI agents
        can understand and reason about.

        Args:
            topic: Optional topic filter (e.g., "action-requests", "business-facts")
            event_name_pattern: Optional pattern to match in event names

        Returns:
            List of event descriptors with schemas in AI-friendly format

        Raises:
            RuntimeError: If used outside the async context manager
        """
        client = self._require_client()

        # Get events from registry
        if topic:
            all_events = await client.get_events_by_topic(topic)
        else:
            all_events = await client.get_all_events()

        # Filter by name pattern if provided (case-insensitive substring match)
        if event_name_pattern:
            all_events = [
                e for e in all_events
                if event_name_pattern.lower() in e.event_name.lower()
            ]

        # Convert to AI-friendly format
        return [self._format_event_descriptor(e) for e in all_events]

    async def get_event_info(self, event_name: str) -> Optional[Dict[str, Any]]:
        """
        Get detailed information about a specific event.

        Args:
            event_name: Name of the event

        Returns:
            Detailed event info including full schema, or None if not found

        Raises:
            RuntimeError: If used outside the async context manager
        """
        client = self._require_client()

        event = await client.get_event(event_name)
        if not event:
            return None

        return self._format_event_descriptor(event)

    async def create_payload(
        self,
        event_name: str,
        data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Create and validate an event payload.

        Args:
            event_name: Name of the event
            data: Raw data dictionary (can be snake_case)

        Returns:
            Validated payload dictionary (camelCase)

        Raises:
            ValueError: If event not found or validation fails
            RuntimeError: If used outside the async context manager
        """
        client = self._require_client()

        event = await client.get_event(event_name)
        if not event:
            raise ValueError(f"Event '{event_name}' not found in registry")

        # Create dynamic Pydantic model
        PayloadModel, _ = create_event_models(event)

        # Validate and convert
        try:
            model_instance = PayloadModel.model_validate(data)
            return model_instance.model_dump(by_alias=True)
        except PydanticValidationError as e:
            # Re-raise as ValueError with clear message for AI; chain the
            # original so the pydantic details stay in the traceback.
            raise ValueError(f"Payload validation failed: {str(e)}") from e

    async def validate_response(
        self,
        event_name: str,
        data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Validate an event response.

        Args:
            event_name: Name of the event
            data: Response data dictionary

        Returns:
            Validated response dictionary

        Raises:
            ValueError: If event not found, has no response schema, or validation fails
            RuntimeError: If used outside the async context manager
        """
        client = self._require_client()

        event = await client.get_event(event_name)
        if not event:
            raise ValueError(f"Event '{event_name}' not found in registry")

        if not event.response_schema:
            raise ValueError(f"Event '{event_name}' has no response schema")

        # Create dynamic Pydantic model
        _, ResponseModel = create_event_models(event)

        if not ResponseModel:
            raise ValueError(f"Event '{event_name}' has no response schema")

        # Validate and convert
        try:
            model_instance = ResponseModel.model_validate(data)
            return model_instance.model_dump(by_alias=True)
        except PydanticValidationError as e:
            raise ValueError(f"Response validation failed: {str(e)}") from e

    def _generate_example(self, schema: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate a simple example payload from a JSON schema.

        Unknown property types are omitted from the example.
        """
        example = {}
        properties = schema.get("properties", {})

        for name, prop in properties.items():
            prop_type = prop.get("type")
            if prop_type == "string":
                example[name] = prop.get("example", "string_value")
            elif prop_type == "integer":
                example[name] = prop.get("minimum", 0)
            elif prop_type == "number":
                # JSON-schema floats; previously unhandled and dropped.
                example[name] = prop.get("minimum", 0.0)
            elif prop_type == "boolean":
                example[name] = True
            elif prop_type == "object":
                example[name] = {}
            elif prop_type == "array":
                example[name] = []

        return example
|
|
244
|
+
|
|
245
|
+
# Helper functions for non-async usage (if needed)
|
|
246
|
+
async def discover_events_simple(
    topic: Optional[str] = None,
    registry_url: str = "http://localhost:8000"
) -> List[Dict[str, Any]]:
    """Discover events in a single call, managing the toolkit lifecycle internally."""
    toolkit = EventToolkit(registry_url)
    async with toolkit:
        return await toolkit.discover_events(topic)
|
|
253
|
+
|
|
254
|
+
async def create_event_payload_simple(
    event_name: str,
    data: Dict[str, Any],
    registry_url: str = "http://localhost:8000"
) -> Dict[str, Any]:
    """
    One-shot payload creation.

    Never raises; errors are reported in the returned dictionary so AI
    callers can branch on "success".

    Args:
        event_name: Name of the event
        data: Raw payload data
        registry_url: Registry service URL

    Returns:
        Dict with "success" (bool), "payload" (dict or None), and
        "errors" (list of strings). The shape is the same on success
        and failure; previously the failure branch omitted "payload".
    """
    try:
        async with EventToolkit(registry_url) as toolkit:
            payload = await toolkit.create_payload(event_name, data)
            return {
                "success": True,
                "payload": payload,
                "errors": []
            }
    except Exception as e:
        return {
            "success": False,
            "payload": None,
            "errors": [str(e)]
        }
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
async def get_event_info_simple(
    event_name: str,
    registry_url: str = "http://localhost:8000"
) -> Optional[Dict[str, Any]]:
    """Fetch a single event's descriptor in one call; None if the event is unknown."""
    toolkit = EventToolkit(registry_url)
    async with toolkit:
        return await toolkit.get_event_info(event_name)
|