kailash 0.3.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. kailash/__init__.py +33 -1
  2. kailash/access_control/__init__.py +129 -0
  3. kailash/access_control/managers.py +461 -0
  4. kailash/access_control/rule_evaluators.py +467 -0
  5. kailash/access_control_abac.py +825 -0
  6. kailash/config/__init__.py +27 -0
  7. kailash/config/database_config.py +359 -0
  8. kailash/database/__init__.py +28 -0
  9. kailash/database/execution_pipeline.py +499 -0
  10. kailash/middleware/__init__.py +306 -0
  11. kailash/middleware/auth/__init__.py +33 -0
  12. kailash/middleware/auth/access_control.py +436 -0
  13. kailash/middleware/auth/auth_manager.py +422 -0
  14. kailash/middleware/auth/jwt_auth.py +477 -0
  15. kailash/middleware/auth/kailash_jwt_auth.py +616 -0
  16. kailash/middleware/communication/__init__.py +37 -0
  17. kailash/middleware/communication/ai_chat.py +989 -0
  18. kailash/middleware/communication/api_gateway.py +802 -0
  19. kailash/middleware/communication/events.py +470 -0
  20. kailash/middleware/communication/realtime.py +710 -0
  21. kailash/middleware/core/__init__.py +21 -0
  22. kailash/middleware/core/agent_ui.py +890 -0
  23. kailash/middleware/core/schema.py +643 -0
  24. kailash/middleware/core/workflows.py +396 -0
  25. kailash/middleware/database/__init__.py +63 -0
  26. kailash/middleware/database/base.py +113 -0
  27. kailash/middleware/database/base_models.py +525 -0
  28. kailash/middleware/database/enums.py +106 -0
  29. kailash/middleware/database/migrations.py +12 -0
  30. kailash/{api/database.py → middleware/database/models.py} +183 -291
  31. kailash/middleware/database/repositories.py +685 -0
  32. kailash/middleware/database/session_manager.py +19 -0
  33. kailash/middleware/mcp/__init__.py +38 -0
  34. kailash/middleware/mcp/client_integration.py +585 -0
  35. kailash/middleware/mcp/enhanced_server.py +576 -0
  36. kailash/nodes/__init__.py +25 -3
  37. kailash/nodes/admin/__init__.py +35 -0
  38. kailash/nodes/admin/audit_log.py +794 -0
  39. kailash/nodes/admin/permission_check.py +864 -0
  40. kailash/nodes/admin/role_management.py +823 -0
  41. kailash/nodes/admin/security_event.py +1519 -0
  42. kailash/nodes/admin/user_management.py +944 -0
  43. kailash/nodes/ai/a2a.py +24 -7
  44. kailash/nodes/ai/ai_providers.py +1 -0
  45. kailash/nodes/ai/embedding_generator.py +11 -11
  46. kailash/nodes/ai/intelligent_agent_orchestrator.py +99 -11
  47. kailash/nodes/ai/llm_agent.py +407 -2
  48. kailash/nodes/ai/self_organizing.py +85 -10
  49. kailash/nodes/api/auth.py +287 -6
  50. kailash/nodes/api/rest.py +151 -0
  51. kailash/nodes/auth/__init__.py +17 -0
  52. kailash/nodes/auth/directory_integration.py +1228 -0
  53. kailash/nodes/auth/enterprise_auth_provider.py +1328 -0
  54. kailash/nodes/auth/mfa.py +2338 -0
  55. kailash/nodes/auth/risk_assessment.py +872 -0
  56. kailash/nodes/auth/session_management.py +1093 -0
  57. kailash/nodes/auth/sso.py +1040 -0
  58. kailash/nodes/base.py +344 -13
  59. kailash/nodes/base_cycle_aware.py +4 -2
  60. kailash/nodes/base_with_acl.py +1 -1
  61. kailash/nodes/code/python.py +293 -12
  62. kailash/nodes/compliance/__init__.py +9 -0
  63. kailash/nodes/compliance/data_retention.py +1888 -0
  64. kailash/nodes/compliance/gdpr.py +2004 -0
  65. kailash/nodes/data/__init__.py +22 -2
  66. kailash/nodes/data/async_connection.py +469 -0
  67. kailash/nodes/data/async_sql.py +757 -0
  68. kailash/nodes/data/async_vector.py +598 -0
  69. kailash/nodes/data/readers.py +767 -0
  70. kailash/nodes/data/retrieval.py +360 -1
  71. kailash/nodes/data/sharepoint_graph.py +397 -21
  72. kailash/nodes/data/sql.py +94 -5
  73. kailash/nodes/data/streaming.py +68 -8
  74. kailash/nodes/data/vector_db.py +54 -4
  75. kailash/nodes/enterprise/__init__.py +13 -0
  76. kailash/nodes/enterprise/batch_processor.py +741 -0
  77. kailash/nodes/enterprise/data_lineage.py +497 -0
  78. kailash/nodes/logic/convergence.py +31 -9
  79. kailash/nodes/logic/operations.py +14 -3
  80. kailash/nodes/mixins/__init__.py +8 -0
  81. kailash/nodes/mixins/event_emitter.py +201 -0
  82. kailash/nodes/mixins/mcp.py +9 -4
  83. kailash/nodes/mixins/security.py +165 -0
  84. kailash/nodes/monitoring/__init__.py +7 -0
  85. kailash/nodes/monitoring/performance_benchmark.py +2497 -0
  86. kailash/nodes/rag/__init__.py +284 -0
  87. kailash/nodes/rag/advanced.py +1615 -0
  88. kailash/nodes/rag/agentic.py +773 -0
  89. kailash/nodes/rag/conversational.py +999 -0
  90. kailash/nodes/rag/evaluation.py +875 -0
  91. kailash/nodes/rag/federated.py +1188 -0
  92. kailash/nodes/rag/graph.py +721 -0
  93. kailash/nodes/rag/multimodal.py +671 -0
  94. kailash/nodes/rag/optimized.py +933 -0
  95. kailash/nodes/rag/privacy.py +1059 -0
  96. kailash/nodes/rag/query_processing.py +1335 -0
  97. kailash/nodes/rag/realtime.py +764 -0
  98. kailash/nodes/rag/registry.py +547 -0
  99. kailash/nodes/rag/router.py +837 -0
  100. kailash/nodes/rag/similarity.py +1854 -0
  101. kailash/nodes/rag/strategies.py +566 -0
  102. kailash/nodes/rag/workflows.py +575 -0
  103. kailash/nodes/security/__init__.py +19 -0
  104. kailash/nodes/security/abac_evaluator.py +1411 -0
  105. kailash/nodes/security/audit_log.py +91 -0
  106. kailash/nodes/security/behavior_analysis.py +1893 -0
  107. kailash/nodes/security/credential_manager.py +401 -0
  108. kailash/nodes/security/rotating_credentials.py +760 -0
  109. kailash/nodes/security/security_event.py +132 -0
  110. kailash/nodes/security/threat_detection.py +1103 -0
  111. kailash/nodes/testing/__init__.py +9 -0
  112. kailash/nodes/testing/credential_testing.py +499 -0
  113. kailash/nodes/transform/__init__.py +10 -2
  114. kailash/nodes/transform/chunkers.py +592 -1
  115. kailash/nodes/transform/processors.py +484 -14
  116. kailash/nodes/validation.py +321 -0
  117. kailash/runtime/access_controlled.py +1 -1
  118. kailash/runtime/async_local.py +41 -7
  119. kailash/runtime/docker.py +1 -1
  120. kailash/runtime/local.py +474 -55
  121. kailash/runtime/parallel.py +1 -1
  122. kailash/runtime/parallel_cyclic.py +1 -1
  123. kailash/runtime/testing.py +210 -2
  124. kailash/utils/migrations/__init__.py +25 -0
  125. kailash/utils/migrations/generator.py +433 -0
  126. kailash/utils/migrations/models.py +231 -0
  127. kailash/utils/migrations/runner.py +489 -0
  128. kailash/utils/secure_logging.py +342 -0
  129. kailash/workflow/__init__.py +16 -0
  130. kailash/workflow/cyclic_runner.py +3 -4
  131. kailash/workflow/graph.py +70 -2
  132. kailash/workflow/resilience.py +249 -0
  133. kailash/workflow/templates.py +726 -0
  134. {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/METADATA +253 -20
  135. kailash-0.4.0.dist-info/RECORD +223 -0
  136. kailash/api/__init__.py +0 -17
  137. kailash/api/__main__.py +0 -6
  138. kailash/api/studio_secure.py +0 -893
  139. kailash/mcp/__main__.py +0 -13
  140. kailash/mcp/server_new.py +0 -336
  141. kailash/mcp/servers/__init__.py +0 -12
  142. kailash-0.3.1.dist-info/RECORD +0 -136
  143. {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/WHEEL +0 -0
  144. {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/entry_points.txt +0 -0
  145. {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/licenses/LICENSE +0 -0
  146. {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/top_level.txt +0 -0

kailash/nodes/ai/llm_agent.py
@@ -1,16 +1,56 @@
 """Advanced LLM Agent node with LangChain integration and MCP support."""

 import json
-from typing import Any
+import time
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Any, Dict, List, Literal, Optional

 from kailash.nodes.base import Node, NodeParameter, register_node


+@dataclass
+class TokenUsage:
+    """Token usage statistics."""
+
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    total_tokens: int = 0
+
+    def add(self, other: "TokenUsage"):
+        """Add another usage record."""
+        self.prompt_tokens += other.prompt_tokens
+        self.completion_tokens += other.completion_tokens
+        self.total_tokens += other.total_tokens
+
+
+@dataclass
+class CostEstimate:
+    """Cost estimation for LLM usage."""
+
+    prompt_cost: float = 0.0
+    completion_cost: float = 0.0
+    total_cost: float = 0.0
+    currency: str = "USD"
+
+
+@dataclass
+class UsageMetrics:
+    """Comprehensive usage metrics."""
+
+    token_usage: TokenUsage = field(default_factory=TokenUsage)
+    cost_estimate: CostEstimate = field(default_factory=CostEstimate)
+    execution_time_ms: float = 0.0
+    model: str = ""
+    timestamp: str = ""
+    metadata: Dict[str, Any] = field(default_factory=dict)
+
+
 @register_node()
 class LLMAgentNode(Node):
     """
     Advanced Large Language Model agent with LangChain integration and MCP
-    support.
+    support, with optional cost tracking and usage monitoring.

     Design Purpose and Philosophy:
     The LLMAgent node provides enterprise-grade AI agent capabilities with
@@ -119,6 +159,57 @@ class LLMAgentNode(Node):
     ... )
     """

+    # Model pricing (USD per 1K tokens)
+    MODEL_PRICING = {
+        # OpenAI models
+        "gpt-4": {"prompt": 0.03, "completion": 0.06},
+        "gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
+        "gpt-3.5-turbo": {"prompt": 0.001, "completion": 0.002},
+        "gpt-3.5-turbo-16k": {"prompt": 0.003, "completion": 0.004},
+        # Anthropic models
+        "claude-3-opus": {"prompt": 0.015, "completion": 0.075},
+        "claude-3-sonnet": {"prompt": 0.003, "completion": 0.015},
+        "claude-3-haiku": {"prompt": 0.00025, "completion": 0.00125},
+        "claude-2.1": {"prompt": 0.008, "completion": 0.024},
+        # Google models
+        "gemini-pro": {"prompt": 0.00025, "completion": 0.0005},
+        "gemini-pro-vision": {"prompt": 0.00025, "completion": 0.0005},
+        # Cohere models
+        "command": {"prompt": 0.0015, "completion": 0.0015},
+        "command-light": {"prompt": 0.0006, "completion": 0.0006},
+    }
+
+    def __init__(self, **kwargs):
+        """Initialize LLMAgentNode with optional monitoring features.
+
+        Args:
+            enable_monitoring: Enable token usage and cost tracking
+            budget_limit: Maximum spend allowed in USD (None = unlimited)
+            alert_threshold: Alert when usage reaches this fraction of budget
+            track_history: Whether to keep usage history
+            history_limit: Maximum history entries to keep
+            custom_pricing: Override default pricing (per 1K tokens)
+            cost_multiplier: Multiply all costs by this factor
+            **kwargs: Additional Node parameters
+        """
+        super().__init__(**kwargs)
+
+        # Monitoring configuration
+        self.enable_monitoring = kwargs.get("enable_monitoring", False)
+        self.budget_limit = kwargs.get("budget_limit")
+        self.alert_threshold = kwargs.get("alert_threshold", 0.8)
+        self.track_history = kwargs.get("track_history", True)
+        self.history_limit = kwargs.get("history_limit", 1000)
+        self.custom_pricing = kwargs.get("custom_pricing")
+        self.cost_multiplier = kwargs.get("cost_multiplier", 1.0)
+
+        # Usage tracking (only if monitoring enabled)
+        if self.enable_monitoring:
+            self._total_usage = TokenUsage()
+            self._total_cost = 0.0
+            self._usage_history: List[UsageMetrics] = []
+            self._budget_alerts_sent = False
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "provider": NodeParameter(
@@ -224,6 +315,40 @@ class LLMAgentNode(Node):
                 default=3,
                 description="Maximum retry attempts for failed requests",
             ),
+            # Monitoring parameters
+            "enable_monitoring": NodeParameter(
+                name="enable_monitoring",
+                type=bool,
+                required=False,
+                default=False,
+                description="Enable token usage tracking and cost monitoring",
+            ),
+            "budget_limit": NodeParameter(
+                name="budget_limit",
+                type=float,
+                required=False,
+                description="Maximum spend allowed in USD (None = unlimited)",
+            ),
+            "alert_threshold": NodeParameter(
+                name="alert_threshold",
+                type=float,
+                required=False,
+                default=0.8,
+                description="Alert when usage reaches this fraction of budget",
+            ),
+            "track_history": NodeParameter(
+                name="track_history",
+                type=bool,
+                required=False,
+                default=True,
+                description="Whether to keep usage history for analytics",
+            ),
+            "custom_pricing": NodeParameter(
+                name="custom_pricing",
+                type=dict,
+                required=False,
+                description="Override default model pricing (per 1K tokens)",
+            ),
         }

     def run(self, **kwargs) -> dict[str, Any]:
@@ -465,6 +590,19 @@
         timeout = kwargs.get("timeout", 120)
         max_retries = kwargs.get("max_retries", 3)

+        # Check monitoring parameters
+        enable_monitoring = kwargs.get("enable_monitoring", self.enable_monitoring)
+
+        # Check budget if monitoring is enabled
+        if enable_monitoring and not self._check_budget():
+            raise ValueError(
+                f"Budget limit exceeded: ${self._total_cost:.2f}/${self.budget_limit:.2f} USD. "
+                "Reset budget or increase limit to continue."
+            )
+
+        # Track execution time
+        start_time = time.time()
+
         try:
             # Import LangChain and related libraries (graceful fallback)
             langchain_available = self._check_langchain_availability()
@@ -530,6 +668,47 @@ class LLMAgentNode(Node):
                 enriched_messages, response, model, provider
             )

+            # Add monitoring data if enabled
+            execution_time = time.time() - start_time
+            if enable_monitoring:
+                # Extract token usage for monitoring
+                usage = self._extract_token_usage(response)
+                cost = self._calculate_cost(usage, model)
+
+                # Update totals
+                if hasattr(self, "_total_usage"):
+                    self._total_usage.add(usage)
+                    self._total_cost += cost.total_cost
+
+                # Record metrics
+                self._record_usage(usage, cost, execution_time, model)
+
+                # Add monitoring section to response
+                usage_metrics["monitoring"] = {
+                    "tokens": {
+                        "prompt": usage.prompt_tokens,
+                        "completion": usage.completion_tokens,
+                        "total": usage.total_tokens,
+                    },
+                    "cost": {
+                        "prompt": round(cost.prompt_cost, 6),
+                        "completion": round(cost.completion_cost, 6),
+                        "total": round(cost.total_cost, 6),
+                        "currency": cost.currency,
+                    },
+                    "execution_time_ms": round(execution_time * 1000, 2),
+                    "model": model,
+                    "budget": {
+                        "used": round(self._total_cost, 4),
+                        "limit": self.budget_limit,
+                        "remaining": (
+                            round(self.budget_limit - self._total_cost, 4)
+                            if self.budget_limit
+                            else None
+                        ),
+                    },
+                }
+
             return {
                 "success": True,
                 "response": response,
@@ -1481,3 +1660,229 @@ class LLMAgentNode(Node):
         except Exception as e:
             self.logger.error(f"MCP tool execution failed: {e}")
             return {"error": str(e), "success": False, "tool_name": tool_name}
+
+    # Monitoring methods
+    def _get_pricing(self, model: str) -> Dict[str, float]:
+        """Get pricing for current model."""
+        if self.custom_pricing:
+            return {
+                "prompt": self.custom_pricing.get("prompt_token_cost", 0.001),
+                "completion": self.custom_pricing.get("completion_token_cost", 0.002),
+            }
+
+        # Check if model has pricing info
+        model_key = None
+        for key in self.MODEL_PRICING:
+            if key in model.lower():
+                model_key = key
+                break
+
+        if model_key:
+            return self.MODEL_PRICING[model_key]
+
+        # Default pricing if model not found
+        return {"prompt": 0.001, "completion": 0.002}
+
+    def _calculate_cost(self, usage: TokenUsage, model: str) -> CostEstimate:
+        """Calculate cost from token usage."""
+        pricing = self._get_pricing(model)
+
+        # Cost per 1K tokens
+        prompt_cost = (
+            (usage.prompt_tokens / 1000) * pricing["prompt"] * self.cost_multiplier
+        )
+        completion_cost = (
+            (usage.completion_tokens / 1000)
+            * pricing["completion"]
+            * self.cost_multiplier
+        )
+
+        return CostEstimate(
+            prompt_cost=prompt_cost,
+            completion_cost=completion_cost,
+            total_cost=prompt_cost + completion_cost,
+            currency="USD",
+        )
+
+    def _extract_token_usage(self, response: Dict[str, Any]) -> TokenUsage:
+        """Extract token usage from LLM response."""
+        usage = TokenUsage()
+
+        # Check if response has usage data
+        if "usage" in response:
+            usage_data = response["usage"]
+            usage.prompt_tokens = usage_data.get("prompt_tokens", 0)
+            usage.completion_tokens = usage_data.get("completion_tokens", 0)
+            usage.total_tokens = usage_data.get("total_tokens", 0)
+
+        # Anthropic format
+        elif "metadata" in response and "usage" in response["metadata"]:
+            usage_data = response["metadata"]["usage"]
+            usage.prompt_tokens = usage_data.get("input_tokens", 0)
+            usage.completion_tokens = usage_data.get("output_tokens", 0)
+            usage.total_tokens = usage.prompt_tokens + usage.completion_tokens
+
+        # Fallback: estimate from text length
+        elif "content" in response or "text" in response:
+            text = response.get("content") or response.get("text", "")
+            # Rough estimation: 1 token ≈ 4 characters
+            usage.completion_tokens = len(text) // 4
+            usage.prompt_tokens = 100  # Rough estimate
+            usage.total_tokens = usage.prompt_tokens + usage.completion_tokens
+
+        return usage
+
+    def _check_budget(self) -> bool:
+        """Check if within budget. Returns True if OK to proceed."""
+        if not self.budget_limit or not hasattr(self, "_total_cost"):
+            return True
+
+        if self._total_cost >= self.budget_limit:
+            return False
+
+        # Check alert threshold
+        if (
+            not self._budget_alerts_sent
+            and self._total_cost >= self.budget_limit * self.alert_threshold
+        ):
+            self._budget_alerts_sent = True
+            # In production, this would send actual alerts
+            self.logger.warning(
+                f"Budget Alert: ${self._total_cost:.2f}/${self.budget_limit:.2f} USD used "
+                f"({self._total_cost/self.budget_limit*100:.1f}%)"
+            )
+
+        return True
+
+    def _record_usage(
+        self, usage: TokenUsage, cost: CostEstimate, execution_time: float, model: str
+    ):
+        """Record usage metrics."""
+        if not self.track_history or not hasattr(self, "_usage_history"):
+            return
+
+        metrics = UsageMetrics(
+            token_usage=usage,
+            cost_estimate=cost,
+            execution_time_ms=execution_time * 1000,
+            model=model,
+            timestamp=datetime.now(timezone.utc).isoformat(),
+            metadata={
+                "node_id": self.id,
+                "budget_remaining": (
+                    self.budget_limit - self._total_cost if self.budget_limit else None
+                ),
+            },
+        )
+
+        self._usage_history.append(metrics)
+
+        # Maintain history limit
+        if len(self._usage_history) > self.history_limit:
+            self._usage_history.pop(0)
+
+    def get_usage_report(self) -> Dict[str, Any]:
+        """Get comprehensive usage report."""
+        if not self.enable_monitoring or not hasattr(self, "_total_usage"):
+            return {"error": "Monitoring not enabled"}
+
+        report = {
+            "summary": {
+                "total_tokens": self._total_usage.total_tokens,
+                "prompt_tokens": self._total_usage.prompt_tokens,
+                "completion_tokens": self._total_usage.completion_tokens,
+                "total_cost": round(self._total_cost, 4),
+                "currency": "USD",
+                "requests": (
+                    len(self._usage_history) if hasattr(self, "_usage_history") else 0
+                ),
+            },
+            "budget": {
+                "limit": self.budget_limit,
+                "used": round(self._total_cost, 4),
+                "remaining": (
+                    round(self.budget_limit - self._total_cost, 4)
+                    if self.budget_limit
+                    else None
+                ),
+                "percentage_used": (
+                    round(self._total_cost / self.budget_limit * 100, 1)
+                    if self.budget_limit
+                    else 0
+                ),
+            },
+        }
+
+        if hasattr(self, "_usage_history") and self._usage_history:
+            # Calculate analytics
+            total_time = sum(m.execution_time_ms for m in self._usage_history)
+            avg_time = total_time / len(self._usage_history)
+
+            report["analytics"] = {
+                "average_tokens_per_request": self._total_usage.total_tokens
+                // len(self._usage_history),
+                "average_cost_per_request": round(
+                    self._total_cost / len(self._usage_history), 4
+                ),
+                "average_execution_time_ms": round(avg_time, 2),
+                "cost_per_1k_tokens": (
+                    round(self._total_cost / (self._total_usage.total_tokens / 1000), 4)
+                    if self._total_usage.total_tokens > 0
+                    else 0
+                ),
+            }
+
+            # Recent history
+            report["recent_usage"] = [
+                {
+                    "timestamp": m.timestamp,
+                    "tokens": m.token_usage.total_tokens,
+                    "cost": round(m.cost_estimate.total_cost, 6),
+                    "execution_time_ms": round(m.execution_time_ms, 2),
+                }
+                for m in self._usage_history[-10:]  # Last 10 requests
+            ]
+
+        return report
+
+    def reset_budget(self):
+        """Reset budget tracking."""
+        if hasattr(self, "_total_cost"):
+            self._total_cost = 0.0
+            self._budget_alerts_sent = False
+
+    def reset_usage(self):
+        """Reset all usage tracking."""
+        if hasattr(self, "_total_usage"):
+            self._total_usage = TokenUsage()
+            self._total_cost = 0.0
+            self._usage_history = []
+            self._budget_alerts_sent = False
+
+    def export_usage_data(self, format: Literal["json", "csv"] = "json") -> str:
+        """Export usage data for analysis."""
+        if not self.enable_monitoring:
+            return json.dumps({"error": "Monitoring not enabled"})
+
+        if format == "json":
+            return json.dumps(self.get_usage_report(), indent=2)
+
+        elif format == "csv":
+            if not hasattr(self, "_usage_history"):
+                return "timestamp,model,prompt_tokens,completion_tokens,total_tokens,cost,execution_time_ms"
+
+            # Simple CSV export
+            lines = [
+                "timestamp,model,prompt_tokens,completion_tokens,total_tokens,cost,execution_time_ms"
+            ]
+            for m in self._usage_history:
+                lines.append(
+                    f"{m.timestamp},{m.model},{m.token_usage.prompt_tokens},"
+                    f"{m.token_usage.completion_tokens},{m.token_usage.total_tokens},"
+                    f"{m.cost_estimate.total_cost:.6f},{m.execution_time_ms:.2f}"
+                )
+            return "\n".join(lines)
+
+    async def async_run(self, **kwargs) -> Dict[str, Any]:
+        """Async execution method for enterprise integration."""
+        return self.run(**kwargs)
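
As a sanity check on the arithmetic in `_calculate_cost` and the `MODEL_PRICING` table, here is a standalone back-of-the-envelope sketch of the per-1K-token cost math; the helper name is made up, and the gpt-4 rates are copied from the table above.

# Standalone sketch of the cost arithmetic used by _calculate_cost above.
PRICING = {"gpt-4": {"prompt": 0.03, "completion": 0.06}}  # USD per 1K tokens


def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    rates = PRICING[model]
    return (prompt_tokens / 1000) * rates["prompt"] + (
        completion_tokens / 1000
    ) * rates["completion"]


# 1,200 prompt tokens and 300 completion tokens on gpt-4:
# 1.2 * 0.03 + 0.3 * 0.06 = 0.036 + 0.018 = 0.054 USD
assert abs(estimate_cost("gpt-4", 1200, 300) - 0.054) < 1e-9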

kailash/nodes/ai/self_organizing.py
@@ -109,8 +109,20 @@ class AgentPoolManagerNode(Node):
     >>> assert result["success"] == True
     """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.agent_registry = {}
         self.availability_tracker = {}
         self.performance_metrics = defaultdict(
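
The same name-resolution block recurs in every constructor changed in this file (see the hunks below). Condensed into a standalone sketch, the effective precedence is: explicit `name`, then `id`, then the class name; note that the `"name"`/`"id"` lookups in `**kwargs` cannot fire, because those keywords are already bound to the explicit parameters.

# Illustrative helper (not part of the SDK): the effective precedence of the
# __init__ changes in this file.
def resolve_node_name(cls_name: str, name: str = None, id: str = None) -> str:
    return name or id or cls_name


assert resolve_node_name("AgentPoolManagerNode") == "AgentPoolManagerNode"
assert resolve_node_name("AgentPoolManagerNode", id="pool_1") == "pool_1"
assert resolve_node_name("AgentPoolManagerNode", name="pool", id="x") == "pool"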
@@ -125,6 +137,9 @@
         self.capability_index = defaultdict(set)
         self.team_history = deque(maxlen=100)

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "action": NodeParameter(
@@ -484,8 +499,20 @@ class ProblemAnalyzerNode(Node):
     >>> assert "decomposition_strategy" in params
     """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.capability_patterns = {
             "data": ["data_collection", "data_cleaning", "data_validation"],
             "analysis": [
@@ -499,6 +526,9 @@
             "domain": ["domain_expertise", "validation", "interpretation"],
         }

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "problem_description": NodeParameter(
@@ -784,11 +814,26 @@ class TeamFormationNode(Node):
     ... )
     """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.formation_history = deque(maxlen=50)
         self.team_performance_cache = {}

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "problem_analysis": NodeParameter(
@@ -1225,12 +1270,27 @@ class SelfOrganizingAgentNode(A2AAgentNode):
     >>> assert "capabilities" in params
     """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.team_memberships = {}
         self.collaboration_history = deque(maxlen=50)
         self.skill_adaptations = defaultdict(float)

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         params = super().get_parameters()

@@ -1438,10 +1498,25 @@ class SolutionEvaluatorNode(Node):
     ... )
     """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.evaluation_history = deque(maxlen=100)

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "solution": NodeParameter(