codetether-1.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. a2a_server/__init__.py +29 -0
  2. a2a_server/a2a_agent_card.py +365 -0
  3. a2a_server/a2a_errors.py +1133 -0
  4. a2a_server/a2a_executor.py +926 -0
  5. a2a_server/a2a_router.py +1033 -0
  6. a2a_server/a2a_types.py +344 -0
  7. a2a_server/agent_card.py +408 -0
  8. a2a_server/agents_server.py +271 -0
  9. a2a_server/auth_api.py +349 -0
  10. a2a_server/billing_api.py +638 -0
  11. a2a_server/billing_service.py +712 -0
  12. a2a_server/billing_webhooks.py +501 -0
  13. a2a_server/config.py +96 -0
  14. a2a_server/database.py +2165 -0
  15. a2a_server/email_inbound.py +398 -0
  16. a2a_server/email_notifications.py +486 -0
  17. a2a_server/enhanced_agents.py +919 -0
  18. a2a_server/enhanced_server.py +160 -0
  19. a2a_server/hosted_worker.py +1049 -0
  20. a2a_server/integrated_agents_server.py +347 -0
  21. a2a_server/keycloak_auth.py +750 -0
  22. a2a_server/livekit_bridge.py +439 -0
  23. a2a_server/marketing_tools.py +1364 -0
  24. a2a_server/mcp_client.py +196 -0
  25. a2a_server/mcp_http_server.py +2256 -0
  26. a2a_server/mcp_server.py +191 -0
  27. a2a_server/message_broker.py +725 -0
  28. a2a_server/mock_mcp.py +273 -0
  29. a2a_server/models.py +494 -0
  30. a2a_server/monitor_api.py +5904 -0
  31. a2a_server/opencode_bridge.py +1594 -0
  32. a2a_server/redis_task_manager.py +518 -0
  33. a2a_server/server.py +726 -0
  34. a2a_server/task_manager.py +668 -0
  35. a2a_server/task_queue.py +742 -0
  36. a2a_server/tenant_api.py +333 -0
  37. a2a_server/tenant_middleware.py +219 -0
  38. a2a_server/tenant_service.py +760 -0
  39. a2a_server/user_auth.py +721 -0
  40. a2a_server/vault_client.py +576 -0
  41. a2a_server/worker_sse.py +873 -0
  42. agent_worker/__init__.py +8 -0
  43. agent_worker/worker.py +4877 -0
  44. codetether/__init__.py +10 -0
  45. codetether/__main__.py +4 -0
  46. codetether/cli.py +112 -0
  47. codetether/worker_cli.py +57 -0
  48. codetether-1.2.2.dist-info/METADATA +570 -0
  49. codetether-1.2.2.dist-info/RECORD +66 -0
  50. codetether-1.2.2.dist-info/WHEEL +5 -0
  51. codetether-1.2.2.dist-info/entry_points.txt +4 -0
  52. codetether-1.2.2.dist-info/licenses/LICENSE +202 -0
  53. codetether-1.2.2.dist-info/top_level.txt +5 -0
  54. codetether_voice_agent/__init__.py +6 -0
  55. codetether_voice_agent/agent.py +445 -0
  56. codetether_voice_agent/codetether_mcp.py +345 -0
  57. codetether_voice_agent/config.py +16 -0
  58. codetether_voice_agent/functiongemma_caller.py +380 -0
  59. codetether_voice_agent/session_playback.py +247 -0
  60. codetether_voice_agent/tools/__init__.py +21 -0
  61. codetether_voice_agent/tools/definitions.py +135 -0
  62. codetether_voice_agent/tools/handlers.py +380 -0
  63. run_server.py +314 -0
  64. ui/monitor-tailwind.html +1790 -0
  65. ui/monitor.html +1775 -0
  66. ui/monitor.js +2662 -0
codetether_voice_agent/functiongemma_caller.py
@@ -0,0 +1,380 @@
+ """FunctionGemma caller module for local function calling inference.
+
+ This module provides integration with Google's FunctionGemma model
+ (google/functiongemma-270m-it) for parsing user intent into structured
+ function calls for the CodeTether voice agent system.
+
+ The module implements lazy loading of the model to ensure efficient
+ resource usage, loading the model only when first needed.
+ """
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ import re
+ from typing import Any, Dict, List, Optional
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoProcessor
+
+ logger = logging.getLogger(__name__)
+
+ CODETETHER_TOOLS: List[Dict[str, Any]] = [
+     {
+         'name': 'create_task',
+         'description': 'Create a new task for an agent to execute',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'title': {
+                     'type': 'string',
+                     'description': 'The title of the task',
+                 },
+                 'description': {
+                     'type': 'string',
+                     'description': 'Detailed description of what the task should accomplish',
+                 },
+                 'codebase_id': {
+                     'type': 'string',
+                     'description': 'Optional identifier for the codebase context',
+                 },
+                 'agent_type': {
+                     'type': 'string',
+                     'enum': ['build', 'plan', 'general', 'explore'],
+                     'description': 'The type of agent to handle this task',
+                 },
+                 'priority': {
+                     'type': 'integer',
+                     'description': 'Task priority (higher values indicate higher priority)',
+                 },
+             },
+             'required': ['title'],
+         },
+     },
+     {
+         'name': 'list_tasks',
+         'description': 'List tasks with optional filtering',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'status': {
+                     'type': 'string',
+                     'description': 'Filter tasks by status',
+                 },
+                 'codebase_id': {
+                     'type': 'string',
+                     'description': 'Filter tasks by codebase identifier',
+                 },
+             },
+         },
+     },
+     {
+         'name': 'get_task',
+         'description': 'Retrieve details of a specific task',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'task_id': {
+                     'type': 'string',
+                     'description': 'The unique identifier of the task',
+                 },
+             },
+             'required': ['task_id'],
+         },
+     },
+     {
+         'name': 'cancel_task',
+         'description': 'Cancel an active task',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'task_id': {
+                     'type': 'string',
+                     'description': 'The unique identifier of the task to cancel',
+                 },
+             },
+             'required': ['task_id'],
+         },
+     },
+     {
+         'name': 'get_session_history',
+         'description': 'Retrieve the message history for a session',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'session_id': {
+                     'type': 'string',
+                     'description': 'The unique identifier of the session',
+                 },
+             },
+             'required': ['session_id'],
+         },
+     },
+     {
+         'name': 'playback_session',
+         'description': 'Play back a session with optional summarization',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'session_id': {
+                     'type': 'string',
+                     'description': 'The unique identifier of the session to playback',
+                 },
+                 'style': {
+                     'type': 'string',
+                     'enum': ['verbatim', 'summary'],
+                     'description': 'The playback style to use',
+                 },
+             },
+             'required': ['session_id'],
+         },
+     },
+     {
+         'name': 'discover_agents',
+         'description': 'Discover available agents in the system',
+         'parameters': {
+             'type': 'object',
+             'properties': {},
+         },
+     },
+     {
+         'name': 'send_message',
+         'description': 'Send a message to a specific agent',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'agent_name': {
+                     'type': 'string',
+                     'description': 'The name of the target agent',
+                 },
+                 'message': {
+                     'type': 'string',
+                     'description': 'The message content to send',
+                 },
+             },
+             'required': ['agent_name', 'message'],
+         },
+     },
+ ]
+
+
+ class FunctionGemmaCaller:
+     """Caller for FunctionGemma model for local function calling inference.
+
+     This class provides an interface to the FunctionGemma model for parsing
+     natural language user input into structured function calls. It uses lazy
+     loading to defer model initialization until first use.
+
+     Attributes:
+         model_path: Path to the FunctionGemma model (default: google/functiongemma-270m-it)
+         _model: The loaded model instance (None until first call)
+         _processor: The loaded processor instance (None until first call)
+     """
+
+     def __init__(
+         self, model_path: str = 'google/functiongemma-270m-it'
+     ) -> None:
+         """Initialize the FunctionGemma caller.
+
+         Args:
+             model_path: Path to the FunctionGemma model. Can be a HuggingFace
+                 model ID or a local path. Defaults to "google/functiongemma-270m-it".
+         """
+         self.model_path = model_path
+         self._model: Optional[AutoModelForCausalLM] = None
+         self._processor: Optional[AutoProcessor] = None
+
+     def _load_model(self) -> None:
+         """Load the FunctionGemma model and processor.
+
+         This method performs lazy loading of the model and processor on first
+         use. The model is loaded in BF16 precision with automatic device mapping.
+
+         Raises:
+             RuntimeError: If the model fails to load.
+         """
+         if self._model is not None and self._processor is not None:
+             return
+
+         try:
+             logger.info(f'Loading FunctionGemma model from {self.model_path}')
+             self._processor = AutoProcessor.from_pretrained(
+                 self.model_path,
+                 trust_remote_code=True,
+             )
+             self._model = AutoModelForCausalLM.from_pretrained(
+                 self.model_path,
+                 torch_dtype=torch.bfloat16,
+                 device_map='auto',
+                 trust_remote_code=True,
+             )
+             logger.info('FunctionGemma model loaded successfully')
+         except Exception as e:
+             logger.error(f'Failed to load FunctionGemma model: {e}')
+             raise RuntimeError(
+                 f'Failed to load FunctionGemma model: {e}'
+             ) from e
+
+     async def parse_intent(self, user_input: str) -> Optional[Dict[str, Any]]:
+         """Parse user input into a structured function call.
+
+         This method takes natural language input from the user and uses
+         FunctionGemma to generate a structured function call.
+
+         Args:
+             user_input: The natural language input from the user.
+
+         Returns:
+             A dictionary containing the function name and arguments if a
+             function call is successfully parsed, None otherwise.
+         """
+         self._load_model()
+
+         if self._model is None or self._processor is None:
+             logger.error('Model not loaded after _load_model() call')
+             return None
+
+         try:
+             messages = [
+                 {
+                     'role': 'user',
+                     'content': user_input,
+                 },
+             ]
+
+             input_text = self._processor.apply_chat_template(
+                 messages,
+                 tokenize=False,
+                 add_generation_prompt=True,
+             )
+
+             inputs = self._processor(
+                 input_text,
+                 return_tensors='pt',
+             ).to(self._model.device)
+
+             outputs = self._model.generate(
+                 **inputs,
+                 max_new_tokens=256,
+                 do_sample=False,
+                 temperature=None,
+                 top_p=None,
+             )
+
+             generated_text = self._processor.decode(
+                 outputs[0],
+                 skip_special_tokens=False,
+             )
+
+             function_call = self._parse_function_call(generated_text)
+
+             if function_call:
+                 logger.info(f'Parsed function call: {function_call}')
+             else:
+                 logger.debug(
+                     f'No function call found in output: {generated_text}'
+                 )
+
+             return function_call
+
+         except Exception as e:
+             logger.error(f'Error during intent parsing: {e}')
+             return None
+
+     def _parse_function_call(self, output: str) -> Optional[Dict[str, Any]]:
+         """Parse FunctionGemma output format into a structured dictionary.
+
+         The FunctionGemma output format uses special tokens to delimit
+         function calls:
+         <start_function_call>call:func_name{arg1:<escape>value1<escape>,...}<end_function_call>
+
+         Args:
+             output: The raw text output from FunctionGemma.
+
+         Returns:
+             A dictionary with 'name' and 'args' keys if a valid function call
+             is found, None otherwise.
+         """
+         start_tag = '<start_function_call>'
+         end_tag = '<end_function_call>'
+
+         start_idx = output.find(start_tag)
+         end_idx = output.find(end_tag)
+
+         if start_idx == -1 or end_idx == -1:
+             logger.debug(f'No function call markers found in output')
+             return None
+
+         start_idx += len(start_tag)
+         function_call_str = output[start_idx:end_idx]
+
+         func_call_pattern = r'^call:(\w+)\{(.+)\}$'
+         match = re.match(func_call_pattern, function_call_str)
+
+         if not match:
+             logger.debug(f'Invalid function call format: {function_call_str}')
+             return None
+
+         func_name = match.group(1)
+         args_str = match.group(2)
+
+         try:
+             args = self._extract_args(args_str)
+         except Exception as e:
+             logger.error(f'Failed to extract arguments: {e}')
+             return None
+
+         return {'name': func_name, 'args': args}
+
+     def _extract_args(self, args_str: str) -> Dict[str, Any]:
+         """Extract arguments from the function call string.
+
+         Parses argument strings in the format:
+         arg1:<escape>value1<escape>,arg2:<escape>value2<escape>
+
+         Args:
+             args_str: The argument string to parse.
+
+         Returns:
+             A dictionary mapping argument names to their values.
+
+         Raises:
+             ValueError: If the argument string is malformed.
+         """
+         if not args_str.strip():
+             return {}
+
+         args: Dict[str, Any] = {}
+         escape_token = '<escape>'
+
+         while args_str:
+             eq_idx = args_str.find(':')
+
+             if eq_idx == -1:
+                 raise ValueError(f'Invalid argument format: {args_str}')
+
+             arg_name = args_str[:eq_idx].strip()
+             args_str = args_str[eq_idx + 1 :]
+
+             if not args_str.startswith(escape_token):
+                 raise ValueError(
+                     f"Expected escape token for argument '{arg_name}': {args_str}"
+                 )
+
+             args_str = args_str[len(escape_token) :]
+             end_escape_idx = args_str.find(escape_token)
+
+             if end_escape_idx == -1:
+                 raise ValueError(
+                     f"Missing closing escape token for argument '{arg_name}'"
+                 )
+
+             arg_value = args_str[:end_escape_idx]
+             args[arg_name] = arg_value
+             args_str = args_str[end_escape_idx + len(escape_token) :]
+
+             if args_str.startswith(','):
+                 args_str = args_str[1:].strip()
+
+         return args
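Note: the <start_function_call>…<end_function_call> format described in the _parse_function_call docstring can be exercised without downloading the model, since the parsing helpers are plain string handling. The sketch below is illustrative only and is not part of the wheel; the sample output string and its values are assumptions chosen to match the documented format, and constructing FunctionGemmaCaller does not trigger a model download thanks to the lazy loading described above.

from codetether_voice_agent.functiongemma_caller import FunctionGemmaCaller

caller = FunctionGemmaCaller()  # model stays unloaded until parse_intent() is first called

# Hypothetical raw model output in the documented format (illustrative assumption).
sample_output = (
    '<start_function_call>'
    'call:create_task{title:<escape>Fix login bug<escape>,'
    'agent_type:<escape>build<escape>}'
    '<end_function_call>'
)

print(caller._parse_function_call(sample_output))
# -> {'name': 'create_task', 'args': {'title': 'Fix login bug', 'agent_type': 'build'}}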
codetether_voice_agent/session_playback.py
@@ -0,0 +1,247 @@
+ """Session playback module for voice-based replay of historical conversations.
+
+ This module provides functionality to playback historical sessions via voice,
+ either verbatim or as a summary. It handles message formatting, role-based
+ prefixes, and interruption handling for natural conversation flow.
+ """
+
+ import asyncio
+ import logging
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+ if TYPE_CHECKING:
+     from codetether_voice_agent.mcp_client import CodeTetherMCP
+
+ logger = logging.getLogger(__name__)
+
+
+ class SessionPlayback:
+     """Handles voice playback of historical conversation sessions.
+
+     Provides methods to replay sessions either verbatim (reading each message)
+     or as a summary, with support for natural interruption handling.
+     """
+
+     def __init__(self, mcp_client: 'CodeTetherMCP') -> None:
+         """Initialize the session playback handler.
+
+         Args:
+             mcp_client: The MCP client instance for session operations.
+         """
+         self.mcp_client = mcp_client
+         self._interrupted = False
+         self._playback_task: Optional[asyncio.Task] = None
+
+     async def start(
+         self, session: Any, session_id: str, style: str = 'verbatim'
+     ) -> None:
+         """Start playback of a historical session.
+
+         Main entry point for playing back sessions. Loads session messages
+         from the MCP client and routes to the appropriate playback method.
+
+         Args:
+             session: The session object to use for voice output.
+             session_id: The unique identifier of the session to playback.
+             style: Playback style - "verbatim" or "summary". Defaults to "verbatim".
+
+         Raises:
+             ValueError: If an invalid playback style is specified.
+         """
+         self._interrupted = False
+         logger.info(f'Starting {style} playback for session: {session_id}')
+
+         try:
+             messages = await self._load_session_messages(session_id)
+
+             if not messages:
+                 await session.generate_reply(
+                     instructions='This session has no messages to playback.'
+                 )
+                 return
+
+             if style == 'verbatim':
+                 await self.playback_verbatim(session, messages)
+             elif style == 'summary':
+                 await self.playback_summary(session, messages)
+             else:
+                 raise ValueError(
+                     f"Invalid playback style: {style}. Must be 'verbatim' or 'summary'"
+                 )
+
+         except Exception as e:
+             logger.error(f'Error during session playback: {e}')
+             await session.generate_reply(
+                 instructions='I encountered an error while trying to playback the session.'
+             )
+
+     async def playback_verbatim(
+         self, session: Any, messages: List[Dict]
+     ) -> None:
+         """Play back messages verbatim with role prefixes.
+
+         Reads each message aloud exactly as it was written, prefixed with
+         "You said:" for user messages or "The agent responded:" for agent messages.
+         Includes brief pauses between messages for natural pacing.
+
+         Args:
+             session: The session object for voice output.
+             messages: List of message dictionaries to playback.
+         """
+         logger.info(f'Starting verbatim playback of {len(messages)} messages')
+
+         for idx, message in enumerate(messages):
+             if self._interrupted:
+                 logger.debug('Playback interrupted during verbatim mode')
+                 break
+
+             role = message.get('role', 'unknown')
+             content = message.get('content', '')
+
+             if not content:
+                 continue
+
+             prefix = self._get_role_prefix(role)
+             text_to_read = f'{prefix} {content}'
+
+             try:
+                 await session.generate_reply(
+                     instructions=f'Read this aloud exactly: {text_to_read}'
+                 )
+                 await asyncio.sleep(0.5)
+
+             except Exception as e:
+                 logger.warning(f'Error reading message {idx}: {e}')
+                 continue
+
+         if not self._interrupted:
+             await session.generate_reply(
+                 instructions='That concludes the playback of this session.'
+             )
+
+     async def playback_summary(
+         self, session: Any, messages: List[Dict]
+     ) -> None:
+         """Play back messages as a natural summary.
+
+         Creates a coherent summary of all messages and reads it aloud
+         in a conversational manner, rather than reading each message verbatim.
+
+         Args:
+             session: The session object for voice output.
+             messages: List of message dictionaries to summarize.
+         """
+         logger.info(f'Starting summary playback of {len(messages)} messages')
+
+         formatted_text = self._format_messages_for_summary(messages)
+
+         summary_prompt = (
+             'Summarize the following conversation in a natural, conversational way. '
+             "Present it as if you're recounting what happened in the conversation. "
+             'Keep it concise but include the key points and exchanges. '
+             f'Here is the conversation:\n\n{formatted_text}'
+         )
+
+         try:
+             await session.generate_reply(instructions=summary_prompt)
+         except Exception as e:
+             logger.error(f'Error generating summary: {e}')
+             await session.generate_reply(
+                 instructions='I was unable to generate a summary of this session.'
+             )
+
+     def _format_messages_for_summary(self, messages: List[Dict]) -> str:
+         """Format messages into a readable string for summarization.
+
+         Creates a clean, linear representation of the conversation for
+         the LLM to summarize effectively.
+
+         Args:
+             messages: List of message dictionaries.
+
+         Returns:
+             A formatted string representation of the conversation.
+         """
+         formatted_parts = []
+
+         for message in messages:
+             role = message.get('role', 'unknown')
+             content = message.get('content', '')
+
+             if not content:
+                 continue
+
+             prefix = self._get_role_prefix(role)
+             formatted_parts.append(f'{prefix}\n{content}\n')
+
+         return '\n'.join(formatted_parts)
+
+     def _get_role_prefix(self, role: str) -> str:
+         """Get the appropriate role prefix for voice playback.
+
+         Args:
+             role: The role identifier from the message.
+
+         Returns:
+             A human-readable prefix for the role.
+         """
+         role_prefix_map = {
+             'user': 'You said:',
+             'assistant': 'The agent responded:',
+             'agent': 'The agent responded:',
+             'system': 'System note:',
+         }
+
+         return role_prefix_map.get(role, f'{role.capitalize()}:')
+
+     def interrupt(self) -> None:
+         """Signal that playback has been interrupted.
+
+         Sets the interrupted flag to pause or stop current playback.
+         Call this method when user interruption is detected.
+         """
+         self._interrupted = True
+         logger.info('Session playback interruption requested')
+
+     def resume(self) -> None:
+         """Resume playback after interruption.
+
+         Resets the interrupted flag to allow playback to continue.
+         Note: This only resets the flag; actual resuming must be
+         handled by the calling code.
+         """
+         self._interrupted = False
+         logger.info('Session playback interruption cleared')
+
+     def is_interrupted(self) -> bool:
+         """Check if playback is currently interrupted.
+
+         Returns:
+             True if playback is interrupted, False otherwise.
+         """
+         return self._interrupted
+
+     async def _load_session_messages(self, session_id: str) -> List[Dict]:
+         """Load messages for a session from the MCP client.
+
+         Args:
+             session_id: The unique identifier of the session.
+
+         Returns:
+             List of message dictionaries for the session.
+         """
+         try:
+             if hasattr(self.mcp_client, 'get_session_messages'):
+                 messages = await self.mcp_client.get_session_messages(
+                     session_id
+                 )
+                 return messages if messages else []
+             else:
+                 logger.warning(
+                     'MCP client does not have get_session_messages method'
+                 )
+                 return []
+
+         except Exception as e:
+             logger.error(f'Failed to load session messages: {e}')
+             return []
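Note: SessionPlayback only needs a voice session object exposing an async generate_reply(instructions=...) method and an MCP client exposing an async get_session_messages(session_id) method, so it can be exercised with stand-ins. The sketch below is illustrative only and is not part of the wheel; the stub classes and the session id are assumptions for demonstration.

import asyncio

from codetether_voice_agent.session_playback import SessionPlayback


class StubMCPClient:
    # Stand-in for the real MCP client; returns a canned two-message history.
    async def get_session_messages(self, session_id):
        return [
            {'role': 'user', 'content': 'Create a task to fix the login bug.'},
            {'role': 'assistant', 'content': 'Task created with high priority.'},
        ]


class StubVoiceSession:
    # Stand-in for the voice session; prints instead of speaking.
    async def generate_reply(self, instructions: str) -> None:
        print(instructions)


async def main() -> None:
    playback = SessionPlayback(StubMCPClient())
    # Verbatim replay; pass style='summary' for the summarized variant.
    await playback.start(StubVoiceSession(), 'session-123', style='verbatim')


asyncio.run(main())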
codetether_voice_agent/tools/__init__.py
@@ -0,0 +1,21 @@
+ from .handlers import (
+     create_task_handler,
+     list_tasks_handler,
+     get_task_handler,
+     cancel_task_handler,
+     get_session_history_handler,
+     discover_agents_handler,
+     send_message_handler,
+ )
+ from .handlers import register_all_tools
+
+ __all__ = [
+     'create_task_handler',
+     'list_tasks_handler',
+     'get_task_handler',
+     'cancel_task_handler',
+     'get_session_history_handler',
+     'discover_agents_handler',
+     'send_message_handler',
+     'register_all_tools',
+ ]