genxai_framework-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. cli/__init__.py +3 -0
  2. cli/commands/__init__.py +6 -0
  3. cli/commands/approval.py +85 -0
  4. cli/commands/audit.py +127 -0
  5. cli/commands/metrics.py +25 -0
  6. cli/commands/tool.py +389 -0
  7. cli/main.py +32 -0
  8. genxai/__init__.py +81 -0
  9. genxai/api/__init__.py +5 -0
  10. genxai/api/app.py +21 -0
  11. genxai/config/__init__.py +5 -0
  12. genxai/config/settings.py +37 -0
  13. genxai/connectors/__init__.py +19 -0
  14. genxai/connectors/base.py +122 -0
  15. genxai/connectors/kafka.py +92 -0
  16. genxai/connectors/postgres_cdc.py +95 -0
  17. genxai/connectors/registry.py +44 -0
  18. genxai/connectors/sqs.py +94 -0
  19. genxai/connectors/webhook.py +73 -0
  20. genxai/core/__init__.py +37 -0
  21. genxai/core/agent/__init__.py +32 -0
  22. genxai/core/agent/base.py +206 -0
  23. genxai/core/agent/config_io.py +59 -0
  24. genxai/core/agent/registry.py +98 -0
  25. genxai/core/agent/runtime.py +970 -0
  26. genxai/core/communication/__init__.py +6 -0
  27. genxai/core/communication/collaboration.py +44 -0
  28. genxai/core/communication/message_bus.py +192 -0
  29. genxai/core/communication/protocols.py +35 -0
  30. genxai/core/execution/__init__.py +22 -0
  31. genxai/core/execution/metadata.py +181 -0
  32. genxai/core/execution/queue.py +201 -0
  33. genxai/core/graph/__init__.py +30 -0
  34. genxai/core/graph/checkpoints.py +77 -0
  35. genxai/core/graph/edges.py +131 -0
  36. genxai/core/graph/engine.py +813 -0
  37. genxai/core/graph/executor.py +516 -0
  38. genxai/core/graph/nodes.py +161 -0
  39. genxai/core/graph/trigger_runner.py +40 -0
  40. genxai/core/memory/__init__.py +19 -0
  41. genxai/core/memory/base.py +72 -0
  42. genxai/core/memory/embedding.py +327 -0
  43. genxai/core/memory/episodic.py +448 -0
  44. genxai/core/memory/long_term.py +467 -0
  45. genxai/core/memory/manager.py +543 -0
  46. genxai/core/memory/persistence.py +297 -0
  47. genxai/core/memory/procedural.py +461 -0
  48. genxai/core/memory/semantic.py +526 -0
  49. genxai/core/memory/shared.py +62 -0
  50. genxai/core/memory/short_term.py +303 -0
  51. genxai/core/memory/vector_store.py +508 -0
  52. genxai/core/memory/working.py +211 -0
  53. genxai/core/state/__init__.py +6 -0
  54. genxai/core/state/manager.py +293 -0
  55. genxai/core/state/schema.py +115 -0
  56. genxai/llm/__init__.py +14 -0
  57. genxai/llm/base.py +150 -0
  58. genxai/llm/factory.py +329 -0
  59. genxai/llm/providers/__init__.py +1 -0
  60. genxai/llm/providers/anthropic.py +249 -0
  61. genxai/llm/providers/cohere.py +274 -0
  62. genxai/llm/providers/google.py +334 -0
  63. genxai/llm/providers/ollama.py +147 -0
  64. genxai/llm/providers/openai.py +257 -0
  65. genxai/llm/routing.py +83 -0
  66. genxai/observability/__init__.py +6 -0
  67. genxai/observability/logging.py +327 -0
  68. genxai/observability/metrics.py +494 -0
  69. genxai/observability/tracing.py +372 -0
  70. genxai/performance/__init__.py +39 -0
  71. genxai/performance/cache.py +256 -0
  72. genxai/performance/pooling.py +289 -0
  73. genxai/security/audit.py +304 -0
  74. genxai/security/auth.py +315 -0
  75. genxai/security/cost_control.py +528 -0
  76. genxai/security/default_policies.py +44 -0
  77. genxai/security/jwt.py +142 -0
  78. genxai/security/oauth.py +226 -0
  79. genxai/security/pii.py +366 -0
  80. genxai/security/policy_engine.py +82 -0
  81. genxai/security/rate_limit.py +341 -0
  82. genxai/security/rbac.py +247 -0
  83. genxai/security/validation.py +218 -0
  84. genxai/tools/__init__.py +21 -0
  85. genxai/tools/base.py +383 -0
  86. genxai/tools/builtin/__init__.py +131 -0
  87. genxai/tools/builtin/communication/__init__.py +15 -0
  88. genxai/tools/builtin/communication/email_sender.py +159 -0
  89. genxai/tools/builtin/communication/notification_manager.py +167 -0
  90. genxai/tools/builtin/communication/slack_notifier.py +118 -0
  91. genxai/tools/builtin/communication/sms_sender.py +118 -0
  92. genxai/tools/builtin/communication/webhook_caller.py +136 -0
  93. genxai/tools/builtin/computation/__init__.py +15 -0
  94. genxai/tools/builtin/computation/calculator.py +101 -0
  95. genxai/tools/builtin/computation/code_executor.py +183 -0
  96. genxai/tools/builtin/computation/data_validator.py +259 -0
  97. genxai/tools/builtin/computation/hash_generator.py +129 -0
  98. genxai/tools/builtin/computation/regex_matcher.py +201 -0
  99. genxai/tools/builtin/data/__init__.py +15 -0
  100. genxai/tools/builtin/data/csv_processor.py +213 -0
  101. genxai/tools/builtin/data/data_transformer.py +299 -0
  102. genxai/tools/builtin/data/json_processor.py +233 -0
  103. genxai/tools/builtin/data/text_analyzer.py +288 -0
  104. genxai/tools/builtin/data/xml_processor.py +175 -0
  105. genxai/tools/builtin/database/__init__.py +15 -0
  106. genxai/tools/builtin/database/database_inspector.py +157 -0
  107. genxai/tools/builtin/database/mongodb_query.py +196 -0
  108. genxai/tools/builtin/database/redis_cache.py +167 -0
  109. genxai/tools/builtin/database/sql_query.py +145 -0
  110. genxai/tools/builtin/database/vector_search.py +163 -0
  111. genxai/tools/builtin/file/__init__.py +17 -0
  112. genxai/tools/builtin/file/directory_scanner.py +214 -0
  113. genxai/tools/builtin/file/file_compressor.py +237 -0
  114. genxai/tools/builtin/file/file_reader.py +102 -0
  115. genxai/tools/builtin/file/file_writer.py +122 -0
  116. genxai/tools/builtin/file/image_processor.py +186 -0
  117. genxai/tools/builtin/file/pdf_parser.py +144 -0
  118. genxai/tools/builtin/test/__init__.py +15 -0
  119. genxai/tools/builtin/test/async_simulator.py +62 -0
  120. genxai/tools/builtin/test/data_transformer.py +99 -0
  121. genxai/tools/builtin/test/error_generator.py +82 -0
  122. genxai/tools/builtin/test/simple_math.py +94 -0
  123. genxai/tools/builtin/test/string_processor.py +72 -0
  124. genxai/tools/builtin/web/__init__.py +15 -0
  125. genxai/tools/builtin/web/api_caller.py +161 -0
  126. genxai/tools/builtin/web/html_parser.py +330 -0
  127. genxai/tools/builtin/web/http_client.py +187 -0
  128. genxai/tools/builtin/web/url_validator.py +162 -0
  129. genxai/tools/builtin/web/web_scraper.py +170 -0
  130. genxai/tools/custom/my_test_tool_2.py +9 -0
  131. genxai/tools/dynamic.py +105 -0
  132. genxai/tools/mcp_server.py +167 -0
  133. genxai/tools/persistence/__init__.py +6 -0
  134. genxai/tools/persistence/models.py +55 -0
  135. genxai/tools/persistence/service.py +322 -0
  136. genxai/tools/registry.py +227 -0
  137. genxai/tools/security/__init__.py +11 -0
  138. genxai/tools/security/limits.py +214 -0
  139. genxai/tools/security/policy.py +20 -0
  140. genxai/tools/security/sandbox.py +248 -0
  141. genxai/tools/templates.py +435 -0
  142. genxai/triggers/__init__.py +19 -0
  143. genxai/triggers/base.py +104 -0
  144. genxai/triggers/file_watcher.py +75 -0
  145. genxai/triggers/queue.py +68 -0
  146. genxai/triggers/registry.py +82 -0
  147. genxai/triggers/schedule.py +66 -0
  148. genxai/triggers/webhook.py +68 -0
  149. genxai/utils/__init__.py +1 -0
  150. genxai/utils/tokens.py +295 -0
  151. genxai_framework-0.1.0.dist-info/METADATA +495 -0
  152. genxai_framework-0.1.0.dist-info/RECORD +156 -0
  153. genxai_framework-0.1.0.dist-info/WHEEL +5 -0
  154. genxai_framework-0.1.0.dist-info/entry_points.txt +2 -0
  155. genxai_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
  156. genxai_framework-0.1.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,970 @@
+ """Agent runtime for executing agents with LLM integration."""
+
+ from typing import Any, Dict, Optional, List
+ import asyncio
+ import time
+ import logging
+ import json
+
+ from genxai.core.agent.base import Agent
+ from genxai.llm.base import LLMProvider
+ from genxai.llm.factory import LLMProviderFactory
+ from genxai.utils.tokens import manage_context_window
+ from genxai.observability.logging import set_log_context, clear_log_context
+ from genxai.observability.metrics import record_agent_execution, record_llm_request
+ from genxai.observability.tracing import span, add_event, record_exception
+ from genxai.security.rbac import get_current_user, Permission
+ from genxai.security.policy_engine import get_policy_engine
+ from genxai.security.audit import get_audit_log, AuditEvent
+
+ logger = logging.getLogger(__name__)
+
+
+ class AgentExecutionError(Exception):
+     """Exception raised during agent execution."""
+
+     pass
+
+
+ class AgentRuntime:
+     """Runtime for executing agents."""
+
+     def __init__(
+         self,
+         agent: Agent,
+         llm_provider: Optional[LLMProvider] = None,
+         api_key: Optional[str] = None,
+         enable_memory: bool = True,
+     ) -> None:
+         """Initialize agent runtime.
+
+         Args:
+             agent: Agent to execute
+             llm_provider: LLM provider instance (optional, will be created if not provided)
+             api_key: API key for LLM provider (optional, will use env var if not provided)
+             enable_memory: Whether to initialize memory system
+         """
+         self.agent = agent
+         self._tools: Dict[str, Any] = {}
+         self._memory: Optional[Any] = None
+
+         # Initialize LLM provider
+         if llm_provider:
+             self._llm_provider = llm_provider
+         else:
+             # Create provider from agent config
+             try:
+                 self._llm_provider = LLMProviderFactory.create_provider(
+                     model=agent.config.llm_model,
+                     api_key=api_key,
+                     temperature=agent.config.llm_temperature,
+                     max_tokens=agent.config.llm_max_tokens,
+                 )
+                 logger.info(f"Created LLM provider for agent {agent.id}: {agent.config.llm_model}")
+             except Exception as e:
+                 logger.warning(f"Failed to create LLM provider for agent {agent.id}: {e}")
+                 self._llm_provider = None
+
+         # Initialize memory system if enabled
+         if enable_memory and agent.config.enable_memory:
+             try:
+                 from genxai.core.memory.manager import MemorySystem
+                 self._memory = MemorySystem(agent_id=agent.id)
+                 logger.info(f"Memory system initialized for agent {agent.id}")
+             except Exception as e:
+                 logger.warning(f"Failed to initialize memory system: {e}")
+
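For orientation, a minimal construction sketch (not part of the package): it assumes an Agent whose config carries the llm_model, llm_temperature, and llm_max_tokens attributes that this constructor reads.

    # Hypothetical usage sketch; the Agent constructor arguments are
    # assumptions based on the attributes the runtime reads, not a documented API.
    from genxai.core.agent.base import Agent
    from genxai.core.agent.runtime import AgentRuntime

    agent = Agent(...)  # configured with role, goal, llm_model, etc.
    runtime = AgentRuntime(agent, api_key="sk-...", enable_memory=True)
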
+     async def execute(
+         self,
+         task: str,
+         context: Optional[Dict[str, Any]] = None,
+         timeout: Optional[float] = None,
+     ) -> Dict[str, Any]:
+         """Execute agent with given task.
+
+         Args:
+             task: Task description
+             context: Execution context
+             timeout: Execution timeout in seconds
+
+         Returns:
+             Execution result
+
+         Raises:
+             AgentExecutionError: If execution fails
+             asyncio.TimeoutError: If execution times out
+         """
+         start_time = time.time()
+         set_log_context(agent_id=self.agent.id)
+
+         if context is None:
+             context = {}
+
+         # Apply timeout
+         execution_timeout = timeout or self.agent.config.max_execution_time
+
+         status = "success"
+         error_type: Optional[str] = None
+         try:
+             with span(
+                 "genxai.agent.execute",
+                 {"agent_id": self.agent.id, "agent_role": self.agent.config.role},
+             ):
+                 user = get_current_user()
+                 if user is not None:
+                     get_policy_engine().check(user, f"agent:{self.agent.id}", Permission.AGENT_EXECUTE)
+                     get_audit_log().record(
+                         AuditEvent(
+                             action="agent.execute",
+                             actor_id=user.user_id,
+                             resource_id=f"agent:{self.agent.id}",
+                             status="allowed",
+                         )
+                     )
+                 if execution_timeout:
+                     result = await asyncio.wait_for(
+                         self._execute_internal(task, context),
+                         timeout=execution_timeout
+                     )
+                 else:
+                     result = await self._execute_internal(task, context)
+
+                 execution_time = time.time() - start_time
+                 result["execution_time"] = execution_time
+                 return result
+
+         except asyncio.TimeoutError as exc:
+             status = "error"
+             error_type = type(exc).__name__
+             logger.error(f"Agent {self.agent.id} execution timed out after {execution_timeout}s")
+             record_exception(exc)
+             raise
+         except Exception as e:
+             status = "error"
+             error_type = type(e).__name__
+             logger.error(f"Agent {self.agent.id} execution failed: {e}")
+             record_exception(e)
+             raise AgentExecutionError(f"Agent execution failed: {e}") from e
+         finally:
+             execution_time = time.time() - start_time
+             record_agent_execution(
+                 agent_id=self.agent.id,
+                 duration=execution_time,
+                 status=status,
+                 error_type=error_type,
+             )
+             clear_log_context()
+
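A hedged calling sketch for execute, following the timeout and error contract documented above:

    # Hypothetical call site; task text and timeout value are illustrative.
    try:
        result = await runtime.execute("Summarize the report", timeout=30.0)
        print(result["output"], result["execution_time"])
    except asyncio.TimeoutError:
        ...  # exceeded timeout (or config.max_execution_time)
    except AgentExecutionError:
        ...  # any other failure, chained from the original exception
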
+     async def _execute_internal(
+         self,
+         task: str,
+         context: Dict[str, Any],
+     ) -> Dict[str, Any]:
+         """Internal execution logic with full LLM integration.
+
+         Args:
+             task: Task description
+             context: Execution context
+
+         Returns:
+             Execution result
+         """
+         logger.info(f"Executing agent {self.agent.id}: {task}")
+         start_time = time.time()
+
+         # Get memory context if available
+         memory_context = ""
+         if self.agent.config.enable_memory and self._memory:
+             memory_context = await self.get_memory_context(limit=5)
+
+         # Build prompt (without memory context, as it's handled in _get_llm_response)
+         prompt = self._build_prompt(task, context, "")
+
+         # Get LLM response with retry logic and memory context
+         if self.agent.config.tools and self._tools and self._provider_supports_tools():
+             response = await self._get_llm_response_with_tools(prompt, memory_context, context)
+         else:
+             response = await self._get_llm_response_with_retry(prompt, memory_context)
+             # Process tools if needed (legacy parsing)
+             if self.agent.config.tools and self._tools:
+                 response = await self._process_tools(response, context)
+
+         # Update memory if enabled
+         if self.agent.config.enable_memory and self._memory:
+             await self._update_memory(task, response)
+
+         # Build result
+         result = {
+             "agent_id": self.agent.id,
+             "task": task,
+             "status": "completed",
+             "output": response,
+             "context": context,
+             "tokens_used": self.agent._total_tokens,
+         }
+
+         # Store episode in episodic memory
+         if self._memory and hasattr(self._memory, 'episodic') and self._memory.episodic:
+             try:
+                 # Measure duration from the start of this internal run
+                 # (the original computed time.time() - time.time(), which is always 0.0)
+                 execution_time = time.time() - start_time
+                 await self._memory.store_episode(
+                     task=task,
+                     actions=[{"type": "llm_call", "response": response}],
+                     outcome=result,
+                     duration=execution_time,
+                     success=True,
+                     metadata={"agent_id": self.agent.id},
+                 )
+             except Exception as e:
+                 logger.warning(f"Failed to store episode: {e}")
+
+         # Reflection for learning agents
+         if self.agent.config.agent_type == "learning":
+             reflection = await self.agent.reflect(result)
+             result["reflection"] = reflection
+
+         return result
+
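The result dictionary assembled above (with execution_time added afterwards by execute) has roughly this shape; the values here are made up for illustration:

    # Illustrative shape only.
    {
        "agent_id": "agent-123",
        "task": "Summarize the report",
        "status": "completed",
        "output": "...model response...",
        "context": {},
        "tokens_used": 412,
        "execution_time": 1.87,  # seconds, set by execute()
    }
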
+     def _build_prompt(
+         self,
+         task: str,
+         context: Dict[str, Any],
+         memory_context: str = ""
+     ) -> str:
+         """Build comprehensive prompt for LLM with memory context.
+
+         Args:
+             task: Task description
+             context: Execution context
+             memory_context: Recent memory context
+
+         Returns:
+             Formatted prompt
+         """
+         prompt_parts = []
+
+         # Add memory context if available
+         if memory_context:
+             prompt_parts.append(memory_context)
+             prompt_parts.append("")  # Empty line for separation
+
+         # Add available tools with descriptions
+         if self.agent.config.tools and self._tools:
+             prompt_parts.append("Available tools:")
+             for tool_name in self.agent.config.tools:
+                 if tool_name in self._tools:
+                     tool = self._tools[tool_name]
+                     tool_desc = getattr(tool, 'metadata', None)
+                     if tool_desc:
+                         prompt_parts.append(f"- {tool_name}: {tool_desc.description}")
+                     else:
+                         prompt_parts.append(f"- {tool_name}")
+             prompt_parts.append("")
+
+         # Add context if provided
+         if context:
+             prompt_parts.append(f"Context: {context}")
+             prompt_parts.append("")
+
+         # Add task
+         prompt_parts.append(f"Task: {task}")
+
+         # Add agent type specific instructions
+         if self.agent.config.agent_type == "deliberative":
+             prompt_parts.append("\nThink step by step and plan your approach before responding.")
+         elif self.agent.config.agent_type == "learning":
+             prompt_parts.append("\nConsider past experiences and improve your approach.")
+
+         return "\n".join(prompt_parts)
+
+     def _build_system_prompt(self) -> str:
+         """Build system prompt from agent configuration.
+
+         Returns:
+             System prompt string
+         """
+         system_parts = []
+
+         # Add role
+         system_parts.append(f"You are a {self.agent.config.role}.")
+
+         # Add goal
+         system_parts.append(f"Your goal is: {self.agent.config.goal}")
+
+         # Add backstory if provided
+         if self.agent.config.backstory:
+             system_parts.append(f"\nBackground: {self.agent.config.backstory}")
+
+         # Add agent type specific instructions
+         if self.agent.config.agent_type == "deliberative":
+             system_parts.append("\nYou should think carefully and plan before acting.")
+         elif self.agent.config.agent_type == "learning":
+             system_parts.append("\nYou should learn from feedback and improve over time.")
+         elif self.agent.config.agent_type == "collaborative":
+             system_parts.append("\nYou should work well with other agents and coordinate effectively.")
+
+         return "\n".join(system_parts)
+
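For a hypothetical config (role "research assistant", goal "answer questions accurately", deliberative type, no backstory), _build_system_prompt would join the parts into roughly this string; the attribute values are assumptions for illustration:

    # Illustrative output of _build_system_prompt.
    expected = (
        "You are a research assistant.\n"
        "Your goal is: answer questions accurately\n"
        "\nYou should think carefully and plan before acting."
    )
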
+     async def _get_llm_response(self, prompt: str, memory_context: str = "") -> str:
+         """Get response from LLM with context window management.
+
+         Args:
+             prompt: Prompt to send to LLM
+             memory_context: Memory context to include
+
+         Returns:
+             LLM response
+
+         Raises:
+             RuntimeError: If LLM provider not initialized
+         """
+         if not self._llm_provider:
+             logger.error(f"No LLM provider available for agent {self.agent.id}")
+             raise RuntimeError(
+                 f"Agent {self.agent.id} has no LLM provider. "
+                 "Provide an API key or set OPENAI_API_KEY environment variable."
+             )
+
+         start_time = time.time()
+         try:
+             logger.debug(f"Calling LLM for agent {self.agent.id}")
+
+             # Build system prompt from agent config
+             system_prompt = self._build_system_prompt()
+
+             # Manage context window to fit within model limits
+             system_prompt, prompt, memory_context = manage_context_window(
+                 system_prompt=system_prompt,
+                 user_prompt=prompt,
+                 memory_context=memory_context,
+                 model=self.agent.config.llm_model,
+                 reserve_tokens=self.agent.config.llm_max_tokens or 1000,
+             )
+
+             # Prepend memory context to prompt if available
+             if memory_context:
+                 prompt = f"{memory_context}\n\n{prompt}"
+
+             # Call LLM provider
+             response = await self._llm_provider.generate(
+                 prompt=prompt,
+                 system_prompt=system_prompt,
+             )
+
+             # Update token usage and execution count
+             self.agent._total_tokens += response.usage.get("total_tokens", 0)
+             self.agent._execution_count += 1
+
+             logger.debug(
+                 f"LLM response received for agent {self.agent.id}: "
+                 f"{len(response.content)} chars, "
+                 f"{response.usage.get('total_tokens', 0)} tokens"
+             )
+
+             duration = time.time() - start_time
+             provider_name = self._llm_provider.__class__.__name__
+             record_llm_request(
+                 provider=provider_name,
+                 model=self.agent.config.llm_model,
+                 duration=duration,
+                 status="success",
+                 input_tokens=response.usage.get("prompt_tokens", 0),
+                 output_tokens=response.usage.get("completion_tokens", 0),
+                 total_cost=0.0,
+             )
+             add_event("llm.response", {"tokens": response.usage.get("total_tokens", 0)})
+             return response.content
+
+         except Exception as e:
+             duration = time.time() - start_time
+             provider_name = self._llm_provider.__class__.__name__ if self._llm_provider else "unknown"
+             record_llm_request(
+                 provider=provider_name,
+                 model=self.agent.config.llm_model,
+                 duration=duration,
+                 status="error",
+                 total_cost=0.0,
+             )
+             logger.error(f"LLM call failed for agent {self.agent.id}: {e}")
+             raise RuntimeError(f"LLM call failed: {e}") from e
+
+     async def _get_llm_response_with_retry(
+         self,
+         prompt: str,
+         memory_context: str = "",
+         max_retries: int = 3,
+         base_delay: float = 1.0,
+     ) -> str:
+         """Get response from LLM with exponential backoff retry logic.
+
+         Args:
+             prompt: Prompt to send to LLM
+             memory_context: Memory context to include
+             max_retries: Maximum number of retry attempts
+             base_delay: Base delay in seconds for exponential backoff
+
+         Returns:
+             LLM response
+
+         Raises:
+             RuntimeError: If all retries fail
+         """
+         last_error = None
+
+         for attempt in range(max_retries):
+             try:
+                 return await self._get_llm_response(prompt, memory_context)
+             except Exception as e:
+                 last_error = e
+                 if attempt < max_retries - 1:
+                     # Exponential backoff: 1s, 2s, 4s, etc.
+                     delay = base_delay * (2 ** attempt)
+                     logger.warning(
+                         f"LLM call failed for agent {self.agent.id} "
+                         f"(attempt {attempt + 1}/{max_retries}). "
+                         f"Retrying in {delay}s... Error: {e}"
+                     )
+                     await asyncio.sleep(delay)
+                 else:
+                     logger.error(
+                         f"LLM call failed for agent {self.agent.id} "
+                         f"after {max_retries} attempts"
+                     )
+
+         raise RuntimeError(
+             f"LLM call failed after {max_retries} attempts. Last error: {last_error}"
+         ) from last_error
+
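As a quick check of the backoff arithmetic above, the sleep before retry k is base_delay * 2**k, and no sleep follows the final failed attempt:

    # Standalone illustration of the delay schedule used above.
    base_delay, max_retries = 1.0, 3
    for attempt in range(max_retries):
        if attempt < max_retries - 1:
            print(f"attempt {attempt + 1} fails -> sleep {base_delay * (2 ** attempt)}s")
        else:
            print(f"attempt {attempt + 1} fails -> raise RuntimeError")
    # attempt 1 fails -> sleep 1.0s
    # attempt 2 fails -> sleep 2.0s
    # attempt 3 fails -> raise RuntimeError
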
+     def _provider_supports_tools(self) -> bool:
+         """Check if the configured provider supports schema-based tool calling."""
+         if not self._llm_provider:
+             return False
+         return self._llm_provider.__class__.__name__ == "OpenAIProvider"
+
+     def _build_tool_schemas(self) -> List[Dict[str, Any]]:
+         """Build OpenAI-compatible tool schemas from registered tools."""
+         schemas: List[Dict[str, Any]] = []
+         for tool in self._tools.values():
+             if hasattr(tool, "get_schema"):
+                 schema = tool.get_schema()
+                 parameters = schema.get("parameters") or {
+                     "type": "object",
+                     "properties": {},
+                     "required": [],
+                 }
+                 schemas.append(
+                     {
+                         "type": "function",
+                         "function": {
+                             "name": schema.get("name", tool.metadata.name),
+                             "description": schema.get("description", ""),
+                             "parameters": parameters,
+                         },
+                     }
+                 )
+             else:
+                 schemas.append(
+                     {
+                         "type": "function",
+                         "function": {
+                             "name": tool.metadata.name,
+                             "description": tool.metadata.description,
+                             "parameters": {"type": "object", "properties": {}},
+                         },
+                     }
+                 )
+         return schemas
+
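Each entry in the returned list follows the OpenAI function-calling format; for a hypothetical calculator tool the schema would look like this (tool name and parameters are assumptions):

    # Illustrative schema for a hypothetical "calculator" tool.
    {
        "type": "function",
        "function": {
            "name": "calculator",
            "description": "Evaluate arithmetic expressions",
            "parameters": {
                "type": "object",
                "properties": {"expression": {"type": "string"}},
                "required": ["expression"],
            },
        },
    }
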
+     async def _get_llm_response_with_tools(
+         self,
+         prompt: str,
+         memory_context: str,
+         context: Dict[str, Any],
+     ) -> str:
+         """Get response from LLM using schema-based tool calling."""
+         if not self._llm_provider:
+             raise RuntimeError(
+                 f"Agent {self.agent.id} has no LLM provider. "
+                 "Provide an API key or set OPENAI_API_KEY environment variable."
+             )
+
+         system_prompt = self._build_system_prompt()
+         system_prompt, prompt, memory_context = manage_context_window(
+             system_prompt=system_prompt,
+             user_prompt=prompt,
+             memory_context=memory_context,
+             model=self.agent.config.llm_model,
+             reserve_tokens=self.agent.config.llm_max_tokens or 1000,
+         )
+
+         if memory_context:
+             prompt = f"{memory_context}\n\n{prompt}"
+
+         tool_schemas = self._build_tool_schemas()
+         messages: List[Dict[str, Any]] = [
+             {"role": "system", "content": system_prompt},
+             {"role": "user", "content": prompt},
+         ]
+
+         response = await self._llm_provider.generate_chat(
+             messages=messages,
+             tools=tool_schemas,
+             tool_choice="auto",
+         )
+
+         tool_calls = self._extract_tool_calls(response.metadata.get("tool_calls"))
+         if not tool_calls:
+             return response.content
+
+         tool_messages: List[Dict[str, Any]] = []
+         for call in tool_calls:
+             result = await self._execute_tool(
+                 {"name": call["name"], "arguments": call["arguments"]},
+                 context,
+             )
+             serialized = self._serialize_tool_result(result)
+             tool_messages.append(
+                 {
+                     "role": "tool",
+                     "tool_call_id": call["id"],
+                     "content": json.dumps(serialized, default=str),
+                 }
+             )
+
+         assistant_message = {
+             "role": "assistant",
+             "content": response.content or "",
+             "tool_calls": [call["raw"] for call in tool_calls],
+         }
+         messages.append(assistant_message)
+         messages.extend(tool_messages)
+
+         final_response = await self._llm_provider.generate_chat(
+             messages=messages,
+             tools=tool_schemas,
+             tool_choice="none",
+         )
+         return final_response.content
+
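The message list therefore grows through one tool round trip in the usual OpenAI chat shape; schematically, with illustrative values:

    # Illustrative message sequence after one tool round trip.
    messages = [
        {"role": "system", "content": "You are a research assistant. ..."},
        {"role": "user", "content": "Task: convert 10 mi to km"},
        {"role": "assistant", "content": "", "tool_calls": [...]},  # first reply
        {"role": "tool", "tool_call_id": "call_1", "content": "{\"result\": 16.09}"},
    ]
    # The second generate_chat call passes tool_choice="none" to force a text answer.
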
+     def _extract_tool_calls(self, raw_calls: Any) -> List[Dict[str, Any]]:
+         """Normalize tool calls returned by the LLM provider."""
+         if not raw_calls:
+             return []
+
+         tool_calls: List[Dict[str, Any]] = []
+         for call in raw_calls:
+             normalized = call
+             if hasattr(call, "model_dump"):
+                 normalized = call.model_dump()
+             elif hasattr(call, "dict"):
+                 normalized = call.dict()
+             elif hasattr(call, "__dict__"):
+                 normalized = call.__dict__
+
+             function_payload = normalized.get("function") if isinstance(normalized, dict) else None
+             if not function_payload:
+                 continue
+
+             name = function_payload.get("name")
+             arguments_raw = function_payload.get("arguments", "{}")
+             try:
+                 arguments = json.loads(arguments_raw) if isinstance(arguments_raw, str) else arguments_raw
+             except json.JSONDecodeError:
+                 arguments = {}
+
+             tool_calls.append(
+                 {
+                     "id": normalized.get("id") or f"tool_call_{name}",
+                     "name": name,
+                     "arguments": arguments or {},
+                     "raw": normalized,
+                 }
+             )
+
+         return tool_calls
+
+     def _serialize_tool_result(self, result: Any) -> Any:
+         """Convert tool result into JSON-serializable data."""
+         if hasattr(result, "model_dump"):
+             return result.model_dump()
+         if hasattr(result, "dict"):
+             return result.dict()
+         return result
+
+     async def stream_execute(
+         self,
+         task: str,
+         context: Optional[Dict[str, Any]] = None,
+     ) -> Any:
+         """Execute agent with streaming response.
+
+         Args:
+             task: Task description
+             context: Execution context
+
+         Yields:
+             Response chunks as they arrive
+
+         Raises:
+             RuntimeError: If LLM provider not initialized or doesn't support streaming
+         """
+         if not self._llm_provider:
+             raise RuntimeError(
+                 f"Agent {self.agent.id} has no LLM provider. "
+                 "Provide an API key or set OPENAI_API_KEY environment variable."
+             )
+
+         logger.info(f"Streaming execution for agent {self.agent.id}: {task}")
+
+         # Get memory context if available
+         memory_context = ""
+         if self.agent.config.enable_memory and self._memory:
+             memory_context = await self.get_memory_context(limit=5)
+
+         # Build prompt
+         prompt = self._build_prompt(task, context or {}, memory_context)
+         system_prompt = self._build_system_prompt()
+
+         try:
+             # Stream from LLM provider
+             full_response = []
+             async for chunk in self._llm_provider.generate_stream(
+                 prompt=prompt,
+                 system_prompt=system_prompt,
+             ):
+                 full_response.append(chunk)
+                 yield chunk
+
+             # Update memory after streaming completes
+             complete_response = "".join(full_response)
+             if self.agent.config.enable_memory and self._memory:
+                 await self._update_memory(task, complete_response)
+
+         except Exception as e:
+             logger.error(f"Streaming execution failed for agent {self.agent.id}: {e}")
+             raise RuntimeError(f"Streaming execution failed: {e}") from e
+
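Because stream_execute is an async generator, a caller would consume it with async for; a hedged sketch:

    # Hypothetical consumption sketch; the task string is illustrative.
    async for chunk in runtime.stream_execute("Draft a haiku"):
        print(chunk, end="", flush=True)  # chunks arrive as the model generates
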
+     async def _process_tools(
+         self,
+         response: str,
+         context: Dict[str, Any],
+         max_iterations: int = 5,
+     ) -> str:
+         """Process tool calls in response with chaining support.
+
+         Args:
+             response: LLM response
+             context: Execution context
+             max_iterations: Maximum tool chaining iterations
+
+         Returns:
+             Processed response with tool results
+         """
+         logger.debug(f"Processing tools for agent {self.agent.id}")
+
+         current_response = response
+         all_tool_results = []
+         iteration = 0
+
+         # Tool chaining loop
+         while iteration < max_iterations:
+             # Parse tool calls from current response
+             tool_calls = self._parse_tool_calls(current_response)
+
+             if not tool_calls:
+                 # No more tool calls, we're done
+                 break
+
+             logger.info(f"Tool iteration {iteration + 1}: Found {len(tool_calls)} tool calls")
+
+             # Execute tools in this iteration
+             iteration_results = []
+             for tool_call in tool_calls:
+                 try:
+                     result = await self._execute_tool(tool_call, context)
+                     iteration_results.append({
+                         "tool": tool_call["name"],
+                         "success": True,
+                         "result": result,
+                         "iteration": iteration + 1,
+                     })
+                     # Update context with tool result for chaining
+                     context[f"tool_result_{tool_call['name']}"] = result
+                 except Exception as e:
+                     logger.error(f"Tool {tool_call['name']} failed: {e}")
+                     iteration_results.append({
+                         "tool": tool_call["name"],
+                         "success": False,
+                         "error": str(e),
+                         "iteration": iteration + 1,
+                     })
+
+             all_tool_results.extend(iteration_results)
+
+             # Get next response from LLM with tool results
+             current_response = await self._format_tool_results(current_response, iteration_results)
+             iteration += 1
+
+         if iteration >= max_iterations:
+             logger.warning(f"Reached max tool chaining iterations ({max_iterations})")
+
+         return current_response
+
+     def _parse_tool_calls(self, response: str) -> list[Dict[str, Any]]:
+         """Parse tool calls from LLM response.
+
+         Supports two formats:
+         1. Function calling: {"name": "tool_name", "arguments": {...}}
+         2. Text format: USE_TOOL: tool_name(arg1="value1", arg2="value2")
+
+         Args:
+             response: LLM response text
+
+         Returns:
+             List of tool call dictionaries
+         """
+         import re
+
+         tool_calls = []
+
+         # Try to parse JSON function calls - look for complete JSON objects
+         try:
+             # Pattern to match JSON objects with name and arguments fields.
+             # Note: it matches only one level of braces, so deeply nested
+             # objects inside arguments are not captured.
+             json_pattern = r'\{[^{}]*"name"\s*:\s*"[^"]+"\s*,\s*"arguments"\s*:\s*\{[^}]*\}\s*\}'
+             matches = re.findall(json_pattern, response, re.DOTALL)
+
+             for match in matches:
+                 try:
+                     call = json.loads(match)
+                     if "name" in call and "arguments" in call:
+                         tool_calls.append({
+                             "name": call["name"],
+                             "arguments": call["arguments"],
+                         })
+                 except json.JSONDecodeError:
+                     # Try to fix common JSON issues
+                     try:
+                         # Replace single quotes with double quotes
+                         fixed_match = match.replace("'", '"')
+                         call = json.loads(fixed_match)
+                         if "name" in call and "arguments" in call:
+                             tool_calls.append({
+                                 "name": call["name"],
+                                 "arguments": call["arguments"],
+                             })
+                     except json.JSONDecodeError:
+                         continue
+         except Exception as e:
+             logger.debug(f"Failed to parse JSON tool calls: {e}")
+
+         # Try to parse text-based tool calls
+         text_pattern = r'USE_TOOL:\s*(\w+)\((.*?)\)'
+         matches = re.findall(text_pattern, response, re.DOTALL)
+
+         for tool_name, args_str in matches:
+             try:
+                 # Parse arguments
+                 arguments = {}
+                 if args_str.strip():
+                     # Parse key="value" pairs
+                     arg_pattern = r'(\w+)=(["\'])(.*?)\2'
+                     arg_matches = re.findall(arg_pattern, args_str)
+                     for key, _, value in arg_matches:
+                         arguments[key] = value
+
+                 tool_calls.append({
+                     "name": tool_name,
+                     "arguments": arguments,
+                 })
+             except Exception as e:
+                 logger.error(f"Failed to parse tool call {tool_name}: {e}")
+
+         return tool_calls
+
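Both accepted formats in one illustrative response string, with the parse the method above would produce:

    # Illustrative input/output for _parse_tool_calls; tool names are made up.
    response = (
        '{"name": "calculator", "arguments": {"expression": "2+2"}}\n'
        'USE_TOOL: file_reader(path="notes.txt")'
    )
    # -> [{"name": "calculator", "arguments": {"expression": "2+2"}},
    #     {"name": "file_reader", "arguments": {"path": "notes.txt"}}]
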
+     async def _execute_tool(
+         self,
+         tool_call: Dict[str, Any],
+         context: Dict[str, Any],
+     ) -> Any:
+         """Execute a single tool.
+
+         Args:
+             tool_call: Tool call dictionary with name and arguments
+             context: Execution context
+
+         Returns:
+             Tool execution result
+
+         Raises:
+             ValueError: If tool not found
+             Exception: If tool execution fails
+         """
+         tool_name = tool_call["name"]
+         arguments = tool_call.get("arguments", {})
+
+         # Check if tool exists
+         if tool_name not in self._tools:
+             raise ValueError(f"Tool '{tool_name}' not found in available tools")
+
+         tool = self._tools[tool_name]
+
+         logger.info(f"Executing tool {tool_name} with arguments: {arguments}")
+
+         # Execute tool
+         try:
+             # Check if tool has async execute method
+             if hasattr(tool, 'execute') and asyncio.iscoroutinefunction(tool.execute):
+                 result = await tool.execute(**arguments)
+             elif hasattr(tool, 'execute'):
+                 result = tool.execute(**arguments)
+             elif callable(tool):
+                 # Tool is a function
+                 if asyncio.iscoroutinefunction(tool):
+                     result = await tool(**arguments)
+                 else:
+                     result = tool(**arguments)
+             else:
+                 raise ValueError(f"Tool {tool_name} is not callable")
+
+             logger.info(f"Tool {tool_name} executed successfully")
+             return result
+
+         except Exception as e:
+             logger.error(f"Tool {tool_name} execution failed: {e}")
+             raise
+
+     async def _format_tool_results(
+         self,
+         original_response: str,
+         tool_results: list[Dict[str, Any]],
+     ) -> str:
+         """Format tool results and get final response from LLM.
+
+         Args:
+             original_response: Original LLM response with tool calls
+             tool_results: List of tool execution results
+
+         Returns:
+             Final formatted response
+         """
+         # Build tool results summary
+         results_text = "\n\nTool Execution Results:\n"
+         for result in tool_results:
+             if result["success"]:
+                 results_text += f"- {result['tool']}: {result['result']}\n"
+             else:
+                 results_text += f"- {result['tool']}: ERROR - {result['error']}\n"
+
+         # Ask LLM to incorporate tool results into final response
+         follow_up_prompt = (
+             f"Based on the tool execution results below, provide a final response.\n"
+             f"{results_text}\n"
+             f"Provide a clear, concise response incorporating these results."
+         )
+
+         try:
+             final_response = await self._get_llm_response(follow_up_prompt)
+             return final_response
+         except Exception as e:
+             logger.error(f"Failed to get final response after tool execution: {e}")
+             # Return original response with tool results appended
+             return original_response + results_text
+
+     async def _update_memory(self, task: str, response: str) -> None:
+         """Update agent memory.
+
+         Args:
+             task: Task that was executed
+             response: Response generated
+         """
+         if not self._memory:
+             return
+
+         try:
+             # Store in short-term memory
+             await self._memory.add_to_short_term(
+                 content={"task": task, "response": response},
+                 metadata={"agent_id": self.agent.id, "timestamp": time.time()},
+             )
+
+             logger.debug(f"Stored interaction in short-term memory for agent {self.agent.id}")
+
+             # Consolidate important memories to long-term
+             if hasattr(self._memory, 'consolidate_memories'):
+                 await self._memory.consolidate_memories(importance_threshold=0.7)
+         except Exception as e:
+             logger.error(f"Failed to update memory: {e}")
+
+     def set_llm_provider(self, provider: Any) -> None:
+         """Set LLM provider.
+
+         Args:
+             provider: LLM provider instance
+         """
+         self._llm_provider = provider
+         logger.info(f"LLM provider set for agent {self.agent.id}")
+
+     def set_tools(self, tools: Dict[str, Any]) -> None:
+         """Set available tools.
+
+         Args:
+             tools: Dictionary of tool name to tool instance
+         """
+         self._tools = tools
+         logger.info(f"Tools set for agent {self.agent.id}: {list(tools.keys())}")
+
+     def set_memory(self, memory: Any) -> None:
+         """Set memory system.
+
+         Args:
+             memory: Memory system instance (MemoryManager or MemorySystem)
+         """
+         self._memory = memory
+         logger.info(f"Memory system set for agent {self.agent.id}")
+
+     async def get_memory_context(self, limit: int = 5) -> str:
+         """Get recent memory context for LLM prompts.
+
+         Args:
+             limit: Number of recent memories to include
+
+         Returns:
+             Formatted memory context string
+         """
+         if not self._memory:
+             return ""
+
+         try:
+             # Get context from short-term memory
+             context = await self._memory.get_short_term_context(max_tokens=2000)
+             return context
+         except Exception as e:
+             logger.error(f"Failed to get memory context: {e}")
+             return ""
+
+     async def batch_execute(
+         self,
+         tasks: list[str],
+         context: Optional[Dict[str, Any]] = None,
+     ) -> list[Dict[str, Any]]:
+         """Execute multiple tasks in parallel.
+
+         Args:
+             tasks: List of tasks to execute
+             context: Shared execution context
+
+         Returns:
+             List of execution results
+         """
+         logger.info(f"Batch executing {len(tasks)} tasks for agent {self.agent.id}")
+
+         results = await asyncio.gather(
+             *[self.execute(task, context) for task in tasks],
+             return_exceptions=True
+         )
+
+         return [
+             r if not isinstance(r, Exception) else {"error": str(r)}
+             for r in results
+         ]
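Finally, a hedged sketch of batch execution; because of return_exceptions=True, failed tasks come back as {"error": ...} dicts rather than raising:

    # Hypothetical batch call; task strings are illustrative.
    results = await runtime.batch_execute(["Summarize A", "Summarize B"])
    for r in results:
        if "error" in r:
            print("failed:", r["error"])
        else:
            print("ok:", r["output"])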