kyber-chat 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. kyber/__init__.py +6 -0
  2. kyber/__main__.py +8 -0
  3. kyber/agent/__init__.py +8 -0
  4. kyber/agent/context.py +224 -0
  5. kyber/agent/loop.py +687 -0
  6. kyber/agent/memory.py +109 -0
  7. kyber/agent/skills.py +244 -0
  8. kyber/agent/subagent.py +379 -0
  9. kyber/agent/tools/__init__.py +6 -0
  10. kyber/agent/tools/base.py +102 -0
  11. kyber/agent/tools/filesystem.py +191 -0
  12. kyber/agent/tools/message.py +86 -0
  13. kyber/agent/tools/registry.py +73 -0
  14. kyber/agent/tools/shell.py +141 -0
  15. kyber/agent/tools/spawn.py +65 -0
  16. kyber/agent/tools/task_status.py +53 -0
  17. kyber/agent/tools/web.py +163 -0
  18. kyber/bridge/package.json +26 -0
  19. kyber/bridge/src/index.ts +50 -0
  20. kyber/bridge/src/server.ts +104 -0
  21. kyber/bridge/src/types.d.ts +3 -0
  22. kyber/bridge/src/whatsapp.ts +185 -0
  23. kyber/bridge/tsconfig.json +16 -0
  24. kyber/bus/__init__.py +6 -0
  25. kyber/bus/events.py +37 -0
  26. kyber/bus/queue.py +81 -0
  27. kyber/channels/__init__.py +6 -0
  28. kyber/channels/base.py +121 -0
  29. kyber/channels/discord.py +304 -0
  30. kyber/channels/feishu.py +263 -0
  31. kyber/channels/manager.py +161 -0
  32. kyber/channels/telegram.py +302 -0
  33. kyber/channels/whatsapp.py +141 -0
  34. kyber/cli/__init__.py +1 -0
  35. kyber/cli/commands.py +736 -0
  36. kyber/config/__init__.py +6 -0
  37. kyber/config/loader.py +95 -0
  38. kyber/config/schema.py +205 -0
  39. kyber/cron/__init__.py +6 -0
  40. kyber/cron/service.py +346 -0
  41. kyber/cron/types.py +59 -0
  42. kyber/dashboard/__init__.py +5 -0
  43. kyber/dashboard/server.py +122 -0
  44. kyber/dashboard/static/app.js +458 -0
  45. kyber/dashboard/static/favicon.png +0 -0
  46. kyber/dashboard/static/index.html +107 -0
  47. kyber/dashboard/static/kyber_logo.png +0 -0
  48. kyber/dashboard/static/styles.css +608 -0
  49. kyber/heartbeat/__init__.py +5 -0
  50. kyber/heartbeat/service.py +130 -0
  51. kyber/providers/__init__.py +6 -0
  52. kyber/providers/base.py +69 -0
  53. kyber/providers/litellm_provider.py +227 -0
  54. kyber/providers/transcription.py +65 -0
  55. kyber/session/__init__.py +5 -0
  56. kyber/session/manager.py +202 -0
  57. kyber/skills/README.md +47 -0
  58. kyber/skills/github/SKILL.md +48 -0
  59. kyber/skills/skill-creator/SKILL.md +371 -0
  60. kyber/skills/summarize/SKILL.md +67 -0
  61. kyber/skills/tmux/SKILL.md +121 -0
  62. kyber/skills/tmux/scripts/find-sessions.sh +112 -0
  63. kyber/skills/tmux/scripts/wait-for-text.sh +83 -0
  64. kyber/skills/weather/SKILL.md +49 -0
  65. kyber/utils/__init__.py +5 -0
  66. kyber/utils/helpers.py +91 -0
  67. kyber_chat-1.0.0.dist-info/METADATA +35 -0
  68. kyber_chat-1.0.0.dist-info/RECORD +71 -0
  69. kyber_chat-1.0.0.dist-info/WHEEL +4 -0
  70. kyber_chat-1.0.0.dist-info/entry_points.txt +2 -0
  71. kyber_chat-1.0.0.dist-info/licenses/LICENSE +21 -0
kyber/agent/loop.py ADDED
@@ -0,0 +1,687 @@
1
+ """Agent loop: the core processing engine."""
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+ from typing import Any
7
+
8
+ from loguru import logger
9
+
10
+ from kyber.bus.events import InboundMessage, OutboundMessage
11
+ from kyber.bus.queue import MessageBus
12
+ from kyber.providers.base import LLMProvider
13
+ from kyber.agent.context import ContextBuilder
14
+ from kyber.agent.tools.registry import ToolRegistry
15
+ from kyber.agent.tools.filesystem import ReadFileTool, WriteFileTool, EditFileTool, ListDirTool
16
+ from kyber.agent.tools.shell import ExecTool
17
+ from kyber.agent.tools.web import WebSearchTool, WebFetchTool
18
+ from kyber.agent.tools.message import MessageTool
19
+ from kyber.agent.tools.spawn import SpawnTool
20
+ from kyber.agent.tools.task_status import TaskStatusTool
21
+ from kyber.agent.subagent import SubagentManager
22
+ from kyber.session.manager import SessionManager
23
+
24
+ # Wall-clock timeout before auto-offloading to a subagent (seconds)
25
+ AUTO_OFFLOAD_TIMEOUT = 30
26
+
27
+
28
class AgentLoop:
    """
    The agent loop is the core processing engine.

    It:
    1. Receives messages from the bus
    2. Builds context with history, memory, skills
    3. Calls the LLM
    4. Executes tool calls
    5. Sends responses back

    Each inbound message is handled in its own asyncio task (see run()),
    and user messages that take longer than AUTO_OFFLOAD_TIMEOUT seconds
    get an interim acknowledgment while the work keeps running in the
    background (see _process_with_timeout()).
    """
39
+
40
+ def __init__(
41
+ self,
42
+ bus: MessageBus,
43
+ provider: LLMProvider,
44
+ workspace: Path,
45
+ model: str | None = None,
46
+ max_iterations: int = 20,
47
+ brave_api_key: str | None = None,
48
+ search_max_results: int = 5,
49
+ exec_config: "ExecToolConfig | None" = None,
50
+ ):
51
+ from kyber.config.schema import ExecToolConfig
52
+ self.bus = bus
53
+ self.provider = provider
54
+ self.workspace = workspace
55
+ self.model = model or provider.get_default_model()
56
+ self.max_iterations = max_iterations
57
+ self.brave_api_key = brave_api_key
58
+ self.search_max_results = search_max_results
59
+ self.exec_config = exec_config or ExecToolConfig()
60
+
61
+ self.context = ContextBuilder(workspace)
62
+ self.sessions = SessionManager(workspace)
63
+ self.tools = ToolRegistry()
64
+ self.subagents = SubagentManager(
65
+ provider=provider,
66
+ workspace=workspace,
67
+ bus=bus,
68
+ model=self.model,
69
+ brave_api_key=brave_api_key,
70
+ exec_config=self.exec_config,
71
+ )
72
+
73
+ self._running = False
74
+ self._register_default_tools()
75
+
76
+ def _register_default_tools(self) -> None:
77
+ """Register the default set of tools."""
78
+ # File tools
79
+ self.tools.register(ReadFileTool())
80
+ self.tools.register(WriteFileTool())
81
+ self.tools.register(EditFileTool())
82
+ self.tools.register(ListDirTool())
83
+
84
+ # Shell tool
85
+ self.tools.register(ExecTool(
86
+ working_dir=str(self.workspace),
87
+ timeout=self.exec_config.timeout,
88
+ restrict_to_workspace=self.exec_config.restrict_to_workspace,
89
+ ))
90
+
91
+ # Web tools
92
+ self.tools.register(WebSearchTool(api_key=self.brave_api_key, max_results=self.search_max_results))
93
+ self.tools.register(WebFetchTool())
94
+
95
+ # Message tool
96
+ message_tool = MessageTool(send_callback=self.bus.publish_outbound)
97
+ self.tools.register(message_tool)
98
+
99
+ # Spawn tool (for subagents)
100
+ spawn_tool = SpawnTool(manager=self.subagents)
101
+ self.tools.register(spawn_tool)
102
+
103
+ # Task status tool (instant subagent progress lookup)
104
+ task_status_tool = TaskStatusTool(manager=self.subagents)
105
+ self.tools.register(task_status_tool)
106
+
107
+ async def _generate_tool_status(
108
+ self,
109
+ messages: list[dict[str, Any]],
110
+ tool_name: str
111
+ ) -> str | None:
112
+ """Generate a short status update using the same prompt route as normal replies."""
113
+ def _is_sentence(text: str) -> bool:
114
+ text = text.strip()
115
+ if not text:
116
+ return False
117
+ if text[-1] not in ".!?":
118
+ return False
119
+ words = text.split()
120
+ return len(words) >= 4
121
+
122
+ status_prompt = (
123
+ "Give a short, friendly status update as a single complete sentence "
124
+ "(min 4 words, max 120 characters) about what you're doing next. "
125
+ f"Action: {tool_name}. "
126
+ "Include a verb and end with punctuation. "
127
+ "Do not mention tools or tool calls. No markdown."
128
+ )
129
+
130
+ try:
131
+ response = await self.provider.chat(
132
+ messages=messages + [{"role": "user", "content": status_prompt}],
133
+ tools=None,
134
+ model=self.model,
135
+ max_tokens=60,
136
+ temperature=0.7,
137
+ )
138
+ except Exception as e:
139
+ logger.warning(f"Status update generation failed: {e}")
140
+ return None
141
+
142
+ content = (response.content or "").strip()
143
+ if not content or content.startswith("Error calling LLM:"):
144
+ if content:
145
+ logger.warning(f"Status update generation error: {content}")
146
+ return None
147
+
148
+ content = content.splitlines()[0].strip().replace("`", "")
149
+ if len(content) > 120:
150
+ content = content[:117].rstrip() + "..."
151
+ if _is_sentence(content):
152
+ return content
153
+
154
+ retry_prompt = (
155
+ "Rewrite this as a single complete sentence (min 4 words), "
156
+ "ending with punctuation, no markdown. "
157
+ f"Action: {tool_name}. "
158
+ "Do not mention tools."
159
+ )
160
+ try:
161
+ retry = await self.provider.chat(
162
+ messages=messages + [{"role": "user", "content": retry_prompt}],
163
+ tools=None,
164
+ model=self.model,
165
+ max_tokens=60,
166
+ temperature=0.6,
167
+ )
168
+ except Exception as e:
169
+ logger.warning(f"Status update retry failed: {e}")
170
+ return None
171
+
172
+ content = (retry.content or "").strip()
173
+ if not content or content.startswith("Error calling LLM:"):
174
+ if content:
175
+ logger.warning(f"Status update retry error: {content}")
176
+ return None
177
+
178
+ content = content.splitlines()[0].strip().replace("`", "")
179
+ if len(content) > 120:
180
+ content = content[:117].rstrip() + "..."
181
+ return content if _is_sentence(content) else None
182
+
183
+ async def _publish_tool_status(
184
+ self,
185
+ channel: str,
186
+ chat_id: str,
187
+ tool_name: str,
188
+ messages: list[dict[str, Any]],
189
+ ) -> None:
190
+ """Publish a short status message before executing a tool call."""
191
+ content = await self._generate_tool_status(messages, tool_name)
192
+ if not content:
193
+ return
194
+ await self.bus.publish_outbound(OutboundMessage(
195
+ channel=channel,
196
+ chat_id=chat_id,
197
+ content=content
198
+ ))
199
+
200
+ async def run(self) -> None:
201
+ """Run the agent loop, processing messages from the bus.
202
+
203
+ Each inbound message is handled in its own asyncio task so the loop
204
+ is never blocked — the user can always send new messages even while
205
+ a long task is in progress.
206
+ """
207
+ self._running = True
208
+ self._active_tasks: set[asyncio.Task[None]] = set()
209
+ logger.info("Agent loop started")
210
+
211
+ while self._running:
212
+ try:
213
+ msg = await asyncio.wait_for(
214
+ self.bus.consume_inbound(),
215
+ timeout=1.0,
216
+ )
217
+ task = asyncio.create_task(self._handle_message(msg))
218
+ self._active_tasks.add(task)
219
+ task.add_done_callback(self._active_tasks.discard)
220
+ except asyncio.TimeoutError:
221
+ continue
222
+
223
+ async def _handle_message(self, msg: InboundMessage) -> None:
224
+ """Handle a single message in its own task (fire-and-forget from run)."""
225
+ try:
226
+ # System messages (subagent results) are never offloaded
227
+ if msg.channel == "system":
228
+ response = await self._process_message(msg)
229
+ else:
230
+ response = await self._process_with_timeout(msg)
231
+ if response:
232
+ await self.bus.publish_outbound(response)
233
+ except Exception as e:
234
+ logger.error(f"Error processing message: {e}")
235
+ await self.bus.publish_outbound(OutboundMessage(
236
+ channel=msg.channel,
237
+ chat_id=msg.chat_id,
238
+ content=f"Sorry, I encountered an error: {str(e)}",
239
+ ))
240
+
241
+ async def _process_with_timeout(self, msg: InboundMessage) -> OutboundMessage | None:
242
+ """
243
+ Process a user message with a wall-clock timeout for acknowledgment.
244
+
245
+ The work always runs to completion. If it takes longer than
246
+ AUTO_OFFLOAD_TIMEOUT seconds, we register it as a tracked task,
247
+ send the user an in-character heads-up, and let it keep running.
248
+ The user can check progress via the task_status tool at any time.
249
+ """
250
+ process_task = asyncio.create_task(self._process_message(msg))
251
+ task_id: str | None = None
252
+
253
+ try:
254
+ return await asyncio.wait_for(
255
+ asyncio.shield(process_task),
256
+ timeout=AUTO_OFFLOAD_TIMEOUT,
257
+ )
258
+ except asyncio.TimeoutError:
259
+ # Register as a tracked task so task_status can report on it
260
+ import uuid
261
+ task_id = str(uuid.uuid4())[:8]
262
+ label = msg.content[:40] + ("…" if len(msg.content) > 40 else "")
263
+ self.subagents.register_task(task_id, label, msg.content)
264
+
265
+ logger.info(
266
+ f"Message from {msg.channel}:{msg.sender_id} still processing "
267
+ f"after {AUTO_OFFLOAD_TIMEOUT}s — registered as task {task_id}"
268
+ )
269
+ ack = await self._generate_offload_ack(msg.content)
270
+ await self.bus.publish_outbound(OutboundMessage(
271
+ channel=msg.channel,
272
+ chat_id=msg.chat_id,
273
+ content=ack,
274
+ ))
275
+
276
+ # Let the original task finish
277
+ try:
278
+ result = await process_task
279
+ finally:
280
+ self.subagents.complete_task(task_id)
281
+ return result
282
+
283
+ async def _generate_offload_ack(self, user_message: str) -> str:
284
+ """Generate an in-character acknowledgment for a long-running task.
285
+
286
+ Uses a minimal system prompt (no file I/O) and a short max_tokens to
287
+ keep this fast and reliable. Retries once on empty response, then
288
+ falls back to a contextual template.
289
+ """
290
+ # Truncate long messages so we don't blow the context window
291
+ short_msg = user_message[:200] + ("…" if len(user_message) > 200 else "")
292
+
293
+ prompt = (
294
+ "The user asked you to do something and it's taking a while. "
295
+ "Let them know you're still working on it in the background and "
296
+ "they're free to keep chatting — you'll send the result when it's "
297
+ "done. Reference what they asked for so it feels personal.\n\n"
298
+ f'User\'s request: "{short_msg}"\n\n'
299
+ "Write 1-2 short sentences. Stay in character. No markdown."
300
+ )
301
+
302
+ # Minimal system prompt — avoid file I/O that could fail
303
+ system = (
304
+ "You are kyber, a helpful AI assistant. "
305
+ "You're friendly, concise, and speak naturally."
306
+ )
307
+
308
+ messages = [
309
+ {"role": "system", "content": system},
310
+ {"role": "user", "content": prompt},
311
+ ]
312
+
313
+ # Try up to 2 times — empty responses from OpenRouter are common
314
+ for attempt in range(2):
315
+ try:
316
+ response = await self.provider.chat(
317
+ messages=messages,
318
+ tools=None,
319
+ model=self.model,
320
+ max_tokens=100,
321
+ temperature=0.8 if attempt > 0 else 0.7,
322
+ )
323
+ content = (response.content or "").strip()
324
+ if content and not content.startswith("Error calling LLM:"):
325
+ return content
326
+ logger.warning(
327
+ f"Offload ack attempt {attempt + 1} returned empty/error: "
328
+ f"{content!r} (finish_reason={response.finish_reason})"
329
+ )
330
+ except Exception as e:
331
+ logger.warning(f"Offload ack attempt {attempt + 1} failed: {e}")
332
+
333
+ # Contextual fallback — still references what the user asked
334
+ logger.info("Using contextual template for offload ack")
335
+ return (
336
+ f"Still working on that for you — taking a bit longer than expected. "
337
+ f"Feel free to keep chatting in the meantime, I'll have your answer shortly."
338
+ )
339
+
340
    def stop(self) -> None:
        """Stop the agent loop.

        Only flips the running flag; run() notices it within one poll
        interval (its 1-second consume timeout) and exits. In-flight
        message-handler tasks are not cancelled.
        """
        self._running = False
        logger.info("Agent loop stopping")
345
    async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
        """
        Process a single inbound message.

        Runs the tool-calling agent loop: repeatedly calls the LLM, executes
        any requested tool calls, and stops once the LLM produces plain text
        (or retries/iterations are exhausted, in which case a canned fallback
        reply is used). The exchange is persisted to the session afterwards.

        Args:
            msg: The inbound message to process.

        Returns:
            The response message, or None if no response needed.
        """
        # Handle system messages (subagent announces)
        # The chat_id contains the original "channel:chat_id" to route back to
        if msg.channel == "system":
            return await self._process_system_message(msg)

        logger.info(f"Processing message from {msg.channel}:{msg.sender_id}")

        # Get or create session
        session = self.sessions.get_or_create(msg.session_key)

        # Update tool contexts so replies/spawns route back to this chat
        message_tool = self.tools.get("message")
        if isinstance(message_tool, MessageTool):
            message_tool.set_context(msg.channel, msg.chat_id)

        spawn_tool = self.tools.get("spawn")
        if isinstance(spawn_tool, SpawnTool):
            spawn_tool.set_context(msg.channel, msg.chat_id)

        # Build initial messages (use get_history for LLM-formatted messages)
        messages = self.context.build_messages(
            history=session.get_history(),
            current_message=msg.content,
            media=msg.media if msg.media else None,
        )

        # Agent loop bookkeeping
        iteration = 0
        final_content: str | None = None
        empty_response_retries = 0       # consecutive empty/error-text replies
        max_empty_response_retries = 2
        llm_error_retries = 0            # provider-level "error" finish reasons
        max_llm_error_retries = 3
        tool_calls_executed = False      # True once any tool ran this turn
        last_tool_results: list[str] = []

        while iteration < self.max_iterations:
            iteration += 1

            # Call LLM
            response = await self.provider.chat(
                messages=messages,
                tools=self.tools.get_definitions(),
                model=self.model
            )

            # Check for LLM-level errors (provider returned an error string)
            if response.finish_reason == "error":
                llm_error_retries += 1
                logger.warning(
                    f"LLM error (attempt {llm_error_retries}/{max_llm_error_retries}): "
                    f"{response.content}"
                )
                if llm_error_retries <= max_llm_error_retries:
                    # Exponential backoff: 1s, 2s, then capped at 4s
                    await asyncio.sleep(min(2 ** (llm_error_retries - 1), 4))
                    continue
                # Exhausted retries — use a fallback instead of crashing
                logger.error("LLM errors exhausted, using fallback response")
                break

            # Handle tool calls
            if response.has_tool_calls:
                # Add assistant message with tool calls
                tool_call_dicts = [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.name,
                            "arguments": json.dumps(tc.arguments)  # Must be JSON string
                        }
                    }
                    for tc in response.tool_calls
                ]
                # Snapshot taken before appending the tool-call turn —
                # presumably so status generation sees a transcript that
                # doesn't end on a dangling tool call; TODO confirm.
                status_messages = messages.copy()
                messages = self.context.add_assistant_message(
                    messages, response.content, tool_call_dicts
                )

                # Execute tools
                last_tool_results.clear()
                for tool_call in response.tool_calls:
                    args_str = json.dumps(tool_call.arguments)
                    logger.debug(f"Executing tool: {tool_call.name} with arguments: {args_str}")
                    # Tell the user what we're about to do
                    await self._publish_tool_status(
                        msg.channel, msg.chat_id, tool_call.name, status_messages
                    )
                    result = await self.tools.execute(tool_call.name, tool_call.arguments)
                    last_tool_results.append(result)
                    messages = self.context.add_tool_result(
                        messages, tool_call.id, tool_call.name, result
                    )
                tool_calls_executed = True
                # Reset error counters after successful tool execution
                llm_error_retries = 0
                empty_response_retries = 0
            else:
                # No tool calls — check for content
                final_content = (response.content or "").strip()
                # Treat error-prefixed content as empty so we retry
                if final_content.startswith("Error calling LLM:"):
                    logger.warning(f"LLM returned error as content: {final_content}")
                    final_content = ""
                if not final_content:
                    empty_response_retries += 1
                    logger.warning(
                        f"Empty LLM response; retry {empty_response_retries}/"
                        f"{max_empty_response_retries} (finish_reason={response.finish_reason})."
                    )
                    if empty_response_retries <= max_empty_response_retries:
                        # If we already executed tools, nudge the LLM to summarize
                        if tool_calls_executed:
                            messages.append({
                                "role": "user",
                                "content": (
                                    "You executed tools and got results. Now please "
                                    "summarize the results and respond to the user."
                                )
                            })
                        else:
                            messages.append({
                                "role": "user",
                                "content": (
                                    "Please provide your response to the user's message."
                                )
                            })
                        continue
                # Either we have real content, or retries are exhausted
                break

        # Fallback: if we still have no content, generate something useful
        if not final_content or not final_content.strip():
            if tool_calls_executed and last_tool_results:
                # We ran tools but the LLM never summarized — build a minimal reply
                logger.warning("No final LLM content after tool calls; generating fallback")
                final_content = (
                    "I completed the requested actions. Let me know if you need "
                    "anything else!"
                )
            else:
                logger.error("Empty LLM response after all retries")
                final_content = (
                    "Sorry, I'm having trouble generating a response right now. "
                    "Please try again in a moment."
                )

        # Save to session
        session.add_message("user", msg.content)
        session.add_message("assistant", final_content)
        self.sessions.save(session)

        return OutboundMessage(
            channel=msg.channel,
            chat_id=msg.chat_id,
            content=final_content
        )
511
    async def _process_system_message(self, msg: InboundMessage) -> OutboundMessage | None:
        """
        Process a system message (e.g., subagent announce).

        The chat_id field contains "original_channel:original_chat_id" to route
        the response back to the correct destination. Runs the same LLM/tool
        loop as _process_message, but against the origin chat's session and
        with replies routed to the origin channel.
        """
        logger.info(f"Processing system message from {msg.sender_id}")

        # Parse origin from chat_id (format: "channel:chat_id")
        if ":" in msg.chat_id:
            parts = msg.chat_id.split(":", 1)
            origin_channel = parts[0]
            origin_chat_id = parts[1]
        else:
            # Fallback when no channel prefix is present
            origin_channel = "cli"
            origin_chat_id = msg.chat_id

        # Use the origin session for context
        session_key = f"{origin_channel}:{origin_chat_id}"
        session = self.sessions.get_or_create(session_key)

        # Update tool contexts so replies/spawns target the origin chat
        message_tool = self.tools.get("message")
        if isinstance(message_tool, MessageTool):
            message_tool.set_context(origin_channel, origin_chat_id)

        spawn_tool = self.tools.get("spawn")
        if isinstance(spawn_tool, SpawnTool):
            spawn_tool.set_context(origin_channel, origin_chat_id)

        # Build messages with the announce content
        messages = self.context.build_messages(
            history=session.get_history(),
            current_message=msg.content
        )

        # Agent loop (limited for announce handling)
        iteration = 0
        final_content: str | None = None
        empty_response_retries = 0
        max_empty_response_retries = 2
        llm_error_retries = 0
        max_llm_error_retries = 3
        tool_calls_executed = False
        last_tool_results: list[str] = []

        while iteration < self.max_iterations:
            iteration += 1

            response = await self.provider.chat(
                messages=messages,
                tools=self.tools.get_definitions(),
                model=self.model
            )

            # Check for LLM-level errors
            if response.finish_reason == "error":
                llm_error_retries += 1
                logger.warning(
                    f"LLM error in system handler (attempt {llm_error_retries}/"
                    f"{max_llm_error_retries}): {response.content}"
                )
                if llm_error_retries <= max_llm_error_retries:
                    # Exponential backoff: 1s, 2s, then capped at 4s
                    await asyncio.sleep(min(2 ** (llm_error_retries - 1), 4))
                    continue
                logger.error("LLM errors exhausted in system handler, using fallback")
                break

            if response.has_tool_calls:
                tool_call_dicts = [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.name,
                            "arguments": json.dumps(tc.arguments)  # Must be JSON string
                        }
                    }
                    for tc in response.tool_calls
                ]
                # Snapshot of the pre-tool-call transcript for status updates
                status_messages = messages.copy()
                messages = self.context.add_assistant_message(
                    messages, response.content, tool_call_dicts
                )

                last_tool_results.clear()
                for tool_call in response.tool_calls:
                    args_str = json.dumps(tool_call.arguments)
                    logger.debug(f"Executing tool: {tool_call.name} with arguments: {args_str}")
                    # Status goes to the origin chat, not the system channel
                    await self._publish_tool_status(
                        origin_channel, origin_chat_id, tool_call.name, status_messages
                    )
                    result = await self.tools.execute(tool_call.name, tool_call.arguments)
                    last_tool_results.append(result)
                    messages = self.context.add_tool_result(
                        messages, tool_call.id, tool_call.name, result
                    )
                tool_calls_executed = True
                llm_error_retries = 0
                empty_response_retries = 0
            else:
                final_content = (response.content or "").strip()
                # Treat error-prefixed content as empty so we retry
                if final_content.startswith("Error calling LLM:"):
                    logger.warning(f"LLM returned error as content (system): {final_content}")
                    final_content = ""
                if not final_content:
                    empty_response_retries += 1
                    logger.warning(
                        f"Empty LLM response (system); retry {empty_response_retries}/"
                        f"{max_empty_response_retries} (finish_reason={response.finish_reason})."
                    )
                    if empty_response_retries <= max_empty_response_retries:
                        if tool_calls_executed:
                            messages.append({
                                "role": "user",
                                "content": (
                                    "You executed tools and got results. Now please "
                                    "summarize the results and respond to the user."
                                )
                            })
                        else:
                            messages.append({
                                "role": "user",
                                "content": (
                                    "Please provide your response to the user's message."
                                )
                            })
                        continue
                break

        # Fallback replies when the loop produced no usable content
        if not final_content or not final_content.strip():
            if tool_calls_executed and last_tool_results:
                logger.warning("No final LLM content after tool calls (system); generating fallback")
                final_content = (
                    "I completed the requested actions. Let me know if you need "
                    "anything else!"
                )
            else:
                logger.error("Empty LLM response after all retries (system)")
                final_content = (
                    "Sorry, I'm having trouble generating a response right now. "
                    "Please try again in a moment."
                )

        # Save to session (mark as system message in history)
        session.add_message("user", f"[System: {msg.sender_id}] {msg.content}")
        session.add_message("assistant", final_content)
        self.sessions.save(session)

        return OutboundMessage(
            channel=origin_channel,
            chat_id=origin_chat_id,
            content=final_content
        )
668
+ async def process_direct(self, content: str, session_key: str = "cli:direct") -> str:
669
+ """
670
+ Process a message directly (for CLI usage).
671
+
672
+ Args:
673
+ content: The message content.
674
+ session_key: Session identifier.
675
+
676
+ Returns:
677
+ The agent's response.
678
+ """
679
+ msg = InboundMessage(
680
+ channel="cli",
681
+ sender_id="user",
682
+ chat_id="direct",
683
+ content=content
684
+ )
685
+
686
+ response = await self._process_message(msg)
687
+ return response.content if response else ""