kailash 0.3.2__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +33 -1
- kailash/access_control/__init__.py +129 -0
- kailash/access_control/managers.py +461 -0
- kailash/access_control/rule_evaluators.py +467 -0
- kailash/access_control_abac.py +825 -0
- kailash/config/__init__.py +27 -0
- kailash/config/database_config.py +359 -0
- kailash/database/__init__.py +28 -0
- kailash/database/execution_pipeline.py +499 -0
- kailash/middleware/__init__.py +306 -0
- kailash/middleware/auth/__init__.py +33 -0
- kailash/middleware/auth/access_control.py +436 -0
- kailash/middleware/auth/auth_manager.py +422 -0
- kailash/middleware/auth/jwt_auth.py +477 -0
- kailash/middleware/auth/kailash_jwt_auth.py +616 -0
- kailash/middleware/communication/__init__.py +37 -0
- kailash/middleware/communication/ai_chat.py +989 -0
- kailash/middleware/communication/api_gateway.py +802 -0
- kailash/middleware/communication/events.py +470 -0
- kailash/middleware/communication/realtime.py +710 -0
- kailash/middleware/core/__init__.py +21 -0
- kailash/middleware/core/agent_ui.py +890 -0
- kailash/middleware/core/schema.py +643 -0
- kailash/middleware/core/workflows.py +396 -0
- kailash/middleware/database/__init__.py +63 -0
- kailash/middleware/database/base.py +113 -0
- kailash/middleware/database/base_models.py +525 -0
- kailash/middleware/database/enums.py +106 -0
- kailash/middleware/database/migrations.py +12 -0
- kailash/{api/database.py → middleware/database/models.py} +183 -291
- kailash/middleware/database/repositories.py +685 -0
- kailash/middleware/database/session_manager.py +19 -0
- kailash/middleware/mcp/__init__.py +38 -0
- kailash/middleware/mcp/client_integration.py +585 -0
- kailash/middleware/mcp/enhanced_server.py +576 -0
- kailash/nodes/__init__.py +27 -3
- kailash/nodes/admin/__init__.py +42 -0
- kailash/nodes/admin/audit_log.py +794 -0
- kailash/nodes/admin/permission_check.py +864 -0
- kailash/nodes/admin/role_management.py +823 -0
- kailash/nodes/admin/security_event.py +1523 -0
- kailash/nodes/admin/user_management.py +944 -0
- kailash/nodes/ai/a2a.py +24 -7
- kailash/nodes/ai/ai_providers.py +248 -40
- kailash/nodes/ai/embedding_generator.py +11 -11
- kailash/nodes/ai/intelligent_agent_orchestrator.py +99 -11
- kailash/nodes/ai/llm_agent.py +436 -5
- kailash/nodes/ai/self_organizing.py +85 -10
- kailash/nodes/ai/vision_utils.py +148 -0
- kailash/nodes/alerts/__init__.py +26 -0
- kailash/nodes/alerts/base.py +234 -0
- kailash/nodes/alerts/discord.py +499 -0
- kailash/nodes/api/auth.py +287 -6
- kailash/nodes/api/rest.py +151 -0
- kailash/nodes/auth/__init__.py +17 -0
- kailash/nodes/auth/directory_integration.py +1228 -0
- kailash/nodes/auth/enterprise_auth_provider.py +1328 -0
- kailash/nodes/auth/mfa.py +2338 -0
- kailash/nodes/auth/risk_assessment.py +872 -0
- kailash/nodes/auth/session_management.py +1093 -0
- kailash/nodes/auth/sso.py +1040 -0
- kailash/nodes/base.py +344 -13
- kailash/nodes/base_cycle_aware.py +4 -2
- kailash/nodes/base_with_acl.py +1 -1
- kailash/nodes/code/python.py +283 -10
- kailash/nodes/compliance/__init__.py +9 -0
- kailash/nodes/compliance/data_retention.py +1888 -0
- kailash/nodes/compliance/gdpr.py +2004 -0
- kailash/nodes/data/__init__.py +22 -2
- kailash/nodes/data/async_connection.py +469 -0
- kailash/nodes/data/async_sql.py +757 -0
- kailash/nodes/data/async_vector.py +598 -0
- kailash/nodes/data/readers.py +767 -0
- kailash/nodes/data/retrieval.py +360 -1
- kailash/nodes/data/sharepoint_graph.py +397 -21
- kailash/nodes/data/sql.py +94 -5
- kailash/nodes/data/streaming.py +68 -8
- kailash/nodes/data/vector_db.py +54 -4
- kailash/nodes/enterprise/__init__.py +13 -0
- kailash/nodes/enterprise/batch_processor.py +741 -0
- kailash/nodes/enterprise/data_lineage.py +497 -0
- kailash/nodes/logic/convergence.py +31 -9
- kailash/nodes/logic/operations.py +14 -3
- kailash/nodes/mixins/__init__.py +8 -0
- kailash/nodes/mixins/event_emitter.py +201 -0
- kailash/nodes/mixins/mcp.py +9 -4
- kailash/nodes/mixins/security.py +165 -0
- kailash/nodes/monitoring/__init__.py +7 -0
- kailash/nodes/monitoring/performance_benchmark.py +2497 -0
- kailash/nodes/rag/__init__.py +284 -0
- kailash/nodes/rag/advanced.py +1615 -0
- kailash/nodes/rag/agentic.py +773 -0
- kailash/nodes/rag/conversational.py +999 -0
- kailash/nodes/rag/evaluation.py +875 -0
- kailash/nodes/rag/federated.py +1188 -0
- kailash/nodes/rag/graph.py +721 -0
- kailash/nodes/rag/multimodal.py +671 -0
- kailash/nodes/rag/optimized.py +933 -0
- kailash/nodes/rag/privacy.py +1059 -0
- kailash/nodes/rag/query_processing.py +1335 -0
- kailash/nodes/rag/realtime.py +764 -0
- kailash/nodes/rag/registry.py +547 -0
- kailash/nodes/rag/router.py +837 -0
- kailash/nodes/rag/similarity.py +1854 -0
- kailash/nodes/rag/strategies.py +566 -0
- kailash/nodes/rag/workflows.py +575 -0
- kailash/nodes/security/__init__.py +19 -0
- kailash/nodes/security/abac_evaluator.py +1411 -0
- kailash/nodes/security/audit_log.py +103 -0
- kailash/nodes/security/behavior_analysis.py +1893 -0
- kailash/nodes/security/credential_manager.py +401 -0
- kailash/nodes/security/rotating_credentials.py +760 -0
- kailash/nodes/security/security_event.py +133 -0
- kailash/nodes/security/threat_detection.py +1103 -0
- kailash/nodes/testing/__init__.py +9 -0
- kailash/nodes/testing/credential_testing.py +499 -0
- kailash/nodes/transform/__init__.py +10 -2
- kailash/nodes/transform/chunkers.py +592 -1
- kailash/nodes/transform/processors.py +484 -14
- kailash/nodes/validation.py +321 -0
- kailash/runtime/access_controlled.py +1 -1
- kailash/runtime/async_local.py +41 -7
- kailash/runtime/docker.py +1 -1
- kailash/runtime/local.py +474 -55
- kailash/runtime/parallel.py +1 -1
- kailash/runtime/parallel_cyclic.py +1 -1
- kailash/runtime/testing.py +210 -2
- kailash/security.py +1 -1
- kailash/utils/migrations/__init__.py +25 -0
- kailash/utils/migrations/generator.py +433 -0
- kailash/utils/migrations/models.py +231 -0
- kailash/utils/migrations/runner.py +489 -0
- kailash/utils/secure_logging.py +342 -0
- kailash/workflow/__init__.py +16 -0
- kailash/workflow/cyclic_runner.py +3 -4
- kailash/workflow/graph.py +70 -2
- kailash/workflow/resilience.py +249 -0
- kailash/workflow/templates.py +726 -0
- {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/METADATA +256 -20
- kailash-0.4.1.dist-info/RECORD +227 -0
- kailash/api/__init__.py +0 -17
- kailash/api/__main__.py +0 -6
- kailash/api/studio_secure.py +0 -893
- kailash/mcp/__main__.py +0 -13
- kailash/mcp/server_new.py +0 -336
- kailash/mcp/servers/__init__.py +0 -12
- kailash-0.3.2.dist-info/RECORD +0 -136
- {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/WHEEL +0 -0
- {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/entry_points.txt +0 -0
- {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/a2a.py
CHANGED
@@ -112,8 +112,11 @@ class SharedMemoryPoolNode(Node):
     ... )
     """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name=None, **kwargs):
+        # Accept name parameter and pass all kwargs to parent
+        if name:
+            kwargs["name"] = name
+        super().__init__(**kwargs)
         self.memory_segments = defaultdict(deque)
         self.agent_subscriptions = defaultdict(set)
         self.attention_indices = defaultdict(lambda: defaultdict(list))
@@ -183,6 +186,13 @@ class SharedMemoryPoolNode(Node):
                 required=False,
                 description="Search query for semantic memory search",
             ),
+            "segments": NodeParameter(
+                name="segments",
+                type=list,
+                required=False,
+                default=["general"],
+                description="Memory segments to subscribe to",
+            ),
         }

     def run(self, **kwargs) -> Dict[str, Any]:
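
The two SharedMemoryPoolNode hunks above let the constructor accept a name (now forwarded to the parent Node) and add a "segments" parameter defaulting to ["general"]. A minimal usage sketch; the "action" and "agent_id" values are illustrative assumptions, not taken from this diff:

    # Hypothetical usage of the 0.4.1 SharedMemoryPoolNode API
    pool = SharedMemoryPoolNode(name="shared_pool")  # name now accepted
    result = pool.run(
        action="subscribe",                # assumed action value
        agent_id="analyst_1",              # assumed parameter name
        segments=["general", "insights"],  # new parameter (default: ["general"])
    )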
@@ -545,8 +555,11 @@ class A2AAgentNode(LLMAgentNode):
         >>> assert all("content" in i for i in insights)
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name=None, **kwargs):
+        # Accept name parameter and pass all kwargs to parent
+        if name:
+            kwargs["name"] = name
+        super().__init__(**kwargs)
         self.local_memory = deque(maxlen=100)
         self.communication_log = deque(maxlen=50)

@@ -1220,8 +1233,11 @@ class A2ACoordinatorNode(CycleAwareNode):
         ... )
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name=None, **kwargs):
+        # Accept name parameter and pass all kwargs to parent
+        if name:
+            kwargs["name"] = name
+        super().__init__(**kwargs)
         self.registered_agents = {}
         self.task_queue = deque()
         self.consensus_sessions = {}
@@ -1275,7 +1291,7 @@ class A2ACoordinatorNode(CycleAwareNode):
             ),
         }

-    def run(self,
+    def run(self, **kwargs) -> Dict[str, Any]:
         """
         Execute coordination action with cycle awareness.

@@ -1315,6 +1331,7 @@ class A2ACoordinatorNode(CycleAwareNode):
         ... )
         >>> assert result["success"] == True
         """
+        context = kwargs.get("context", {})
         action = kwargs.get("action")

         # Get cycle information using CycleAwareNode helpers
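
Since run() now takes **kwargs, the coordinator reads "context" out of kwargs rather than a dedicated positional argument. A sketch under that assumption; the "register_agent" action value and "agent" parameter are illustrative, not confirmed by this diff:

    coordinator = A2ACoordinatorNode(name="coordinator")
    result = coordinator.run(
        action="register_agent",   # hypothetical action value
        agent={"id": "agent_1"},   # hypothetical parameter
        context={},                # now passed via kwargs
    )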
kailash/nodes/ai/ai_providers.py
CHANGED
@@ -8,7 +8,11 @@ separation between LLM and embedding capabilities.

 import hashlib
 from abc import ABC, abstractmethod
-from typing import Any
+from typing import Any, Dict, List, Union
+
+# Type definitions for flexible message content
+MessageContent = Union[str, List[Dict[str, Any]]]
+Message = Dict[str, Union[str, MessageContent]]


 class BaseAIProvider(ABC):
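
The new Message alias covers both content shapes that the updated chat() docstrings describe. For illustration:

    simple: Message = {"role": "user", "content": "Hello"}
    multimodal: Message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "What does this diagram show?"},
            {"type": "image", "path": "diagram.png"},
        ],
    }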
@@ -205,12 +209,14 @@ class LLMProvider(BaseAIProvider):
         self._capabilities["chat"] = True

     @abstractmethod
-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """
         Generate a chat completion using the provider's LLM.

         Args:
             messages: Conversation messages in OpenAI format
+                Can be simple: [{"role": "user", "content": "text"}]
+                Or complex: [{"role": "user", "content": [{"type": "text", "text": "..."}, {"type": "image", "path": "..."}]}]
             **kwargs: Provider-specific parameters

         Returns:
@@ -391,7 +397,7 @@ class OllamaProvider(UnifiedAIProvider):

         return self._available

-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """Generate a chat completion using Ollama.

         Args:
@@ -399,6 +405,7 @@ class OllamaProvider(UnifiedAIProvider):
             **kwargs: Additional arguments including:
                 model (str): Ollama model name (default: "llama3.1:8b-instruct-q8_0")
                 generation_config (dict): Generation parameters including:
+
                     * temperature, max_tokens, top_p, top_k, repeat_penalty
                     * seed, stop, num_ctx, num_batch, num_thread
                     * tfs_z, typical_p, mirostat, mirostat_tau, mirostat_eta
@@ -434,8 +441,50 @@ class OllamaProvider(UnifiedAIProvider):
             # Remove None values
             options = {k: v for k, v in options.items() if v is not None}

+            # Process messages for vision content
+            processed_messages = []
+
+            for msg in messages:
+                if isinstance(msg.get("content"), list):
+                    # Complex content with potential images
+                    text_parts = []
+                    images = []
+
+                    for item in msg["content"]:
+                        if item["type"] == "text":
+                            text_parts.append(item["text"])
+                        elif item["type"] == "image":
+                            # Lazy load vision utilities
+                            from .vision_utils import encode_image
+
+                            if "path" in item:
+                                # For file paths, read the file directly
+                                with open(item["path"], "rb") as f:
+                                    images.append(f.read())
+                            else:
+                                # For base64, decode it to bytes
+                                import base64
+
+                                base64_data = item.get("base64", "")
+                                images.append(base64.b64decode(base64_data))
+
+                    # Ollama expects images as part of the message
+                    message_dict = {
+                        "role": msg["role"],
+                        "content": " ".join(text_parts),
+                    }
+                    if images:
+                        message_dict["images"] = images
+
+                    processed_messages.append(message_dict)
+                else:
+                    # Simple string content (backward compatible)
+                    processed_messages.append(msg)
+
             # Call Ollama
-            response = ollama.chat(
+            response = ollama.chat(
+                model=model, messages=processed_messages, options=options
+            )

             # Format response to match standard structure
             return {
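
In the Ollama branch above, image items are loaded as raw bytes (from a file path or decoded base64) and attached under the message's "images" key. A usage sketch; the model name is an assumption, since this diff does not name a vision-capable Ollama model:

    provider = OllamaProvider()
    response = provider.chat(
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this chart"},
                {"type": "image", "path": "chart.png"},
            ],
        }],
        model="llava",  # assumed vision-capable model
    )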
@@ -544,11 +593,18 @@ class OpenAIProvider(UnifiedAIProvider):
     - Install openai package: `pip install openai`

     Supported LLM models:
-    [… original model list (lines 547-550) not recoverable from this view …]
-    - gpt-3.5-turbo
+    - o4-mini (latest, vision support, recommended)
+    - o3 (reasoning model)
+
+    Note: This provider uses max_completion_tokens parameter compatible with
+    latest OpenAI models. Older models (gpt-4, gpt-3.5-turbo) are not supported.
+
+    Generation Config Parameters:
+    - max_completion_tokens (int): Maximum tokens to generate (recommended)
+    - max_tokens (int): Deprecated, use max_completion_tokens instead
+    - temperature (float): Sampling temperature (0-2)
+    - top_p (float): Nucleus sampling probability
+    - Other standard OpenAI parameters

     Supported embedding models:
     - text-embedding-3-large (3072 dimensions, configurable)
@@ -571,19 +627,22 @@ class OpenAIProvider(UnifiedAIProvider):

         return self._available

-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """
         Generate a chat completion using OpenAI.

         Supported kwargs:
-        - model (str): OpenAI model name (default: "
-        - generation_config (dict): Generation parameters
+        - model (str): OpenAI model name (default: "o4-mini")
+        - generation_config (dict): Generation parameters including:
+            - max_completion_tokens (int): Max tokens to generate (recommended)
+            - max_tokens (int): Deprecated, use max_completion_tokens
+            - temperature, top_p, frequency_penalty, presence_penalty, etc.
        - tools (List[Dict]): Function/tool definitions for function calling
         """
         try:
             import openai

-            model = kwargs.get("model", "
+            model = kwargs.get("model", "o4-mini")
             generation_config = kwargs.get("generation_config", {})
             tools = kwargs.get("tools", [])

@@ -591,13 +650,86 @@ class OpenAIProvider(UnifiedAIProvider):
             if self._client is None:
                 self._client = openai.OpenAI()

+            # Process messages for vision content
+            processed_messages = []
+            for msg in messages:
+                if isinstance(msg.get("content"), list):
+                    # Complex content with potential images
+                    processed_content = []
+                    for item in msg["content"]:
+                        if item.get("type") == "text":
+                            processed_content.append(
+                                {"type": "text", "text": item.get("text", "")}
+                            )
+                        elif item.get("type") == "image":
+                            # Lazy load vision utilities
+                            from .vision_utils import (
+                                encode_image,
+                                get_media_type,
+                                validate_image_size,
+                            )
+
+                            if "path" in item:
+                                # Validate image size
+                                is_valid, error_msg = validate_image_size(item["path"])
+                                if not is_valid:
+                                    raise ValueError(
+                                        f"Image validation failed: {error_msg}"
+                                    )
+
+                                base64_image = encode_image(item["path"])
+                                media_type = get_media_type(item["path"])
+                            elif "base64" in item:
+                                base64_image = item["base64"]
+                                media_type = item.get("media_type", "image/jpeg")
+                            else:
+                                raise ValueError(
+                                    "Image item must have either 'path' or 'base64' field"
+                                )
+
+                            processed_content.append(
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": f"data:{media_type};base64,{base64_image}"
+                                    },
+                                }
+                            )
+
+                    processed_messages.append(
+                        {"role": msg.get("role", "user"), "content": processed_content}
+                    )
+                else:
+                    # Simple string content (backward compatible)
+                    processed_messages.append(msg)
+
+            # Handle max tokens parameter - support both old and new names
+            max_completion = generation_config.get(
+                "max_completion_tokens"
+            ) or generation_config.get("max_tokens", 500)
+
+            # Show deprecation warning if using old parameter
+            # TODO: remove the max_tokens in the future.
+            if (
+                "max_tokens" in generation_config
+                and "max_completion_tokens" not in generation_config
+            ):
+                import warnings
+
+                warnings.warn(
+                    "'max_tokens' is deprecated and will be removed in v0.5.0. "
+                    "Please use 'max_completion_tokens' instead.",
+                    DeprecationWarning,
+                    stacklevel=3,
+                )
+
             # Prepare request
             request_params = {
                 "model": model,
-                "messages":
-                "temperature": generation_config.get("temperature", 0
-                "
-                "top_p": generation_config.get("top_p", 0
+                "messages": processed_messages,
+                "temperature": generation_config.get("temperature", 1.0),
+                "max_completion_tokens": max_completion,  # Always use new parameter
+                "top_p": generation_config.get("top_p", 1.0),
                 "frequency_penalty": generation_config.get("frequency_penalty"),
                 "presence_penalty": generation_config.get("presence_penalty"),
                 "stop": generation_config.get("stop"),
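
With the shim above, both spellings of the token limit still work in 0.4.1, but only max_completion_tokens avoids the DeprecationWarning, and the request always sends the new parameter. A minimal migration sketch based on the documented defaults:

    provider = OpenAIProvider()
    response = provider.chat(
        messages=[{"role": "user", "content": "Summarize the release notes"}],
        model="o4-mini",
        generation_config={"max_completion_tokens": 300, "temperature": 1.0},
    )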
@@ -648,6 +780,15 @@ class OpenAIProvider(UnifiedAIProvider):
             raise RuntimeError(
                 "OpenAI library not installed. Install with: pip install openai"
             )
+        except openai.BadRequestError as e:
+            # Provide helpful error message for unsupported models or parameters
+            if "max_tokens" in str(e):
+                raise RuntimeError(
+                    "This OpenAI provider requires models that support max_completion_tokens. "
+                    "Please use o4-mini, o3 "
+                    "Older models like gpt-4o or gpt-3.5-turbo are not supported."
+                )
+            raise RuntimeError(f"OpenAI API error: {str(e)}")
         except Exception as e:
             raise RuntimeError(f"OpenAI error: {str(e)}")

@@ -771,7 +912,7 @@ class AnthropicProvider(LLMProvider):

         return self._available

-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """Generate a chat completion using Anthropic."""
         try:
             import anthropic
@@ -789,22 +930,75 @@ class AnthropicProvider(LLMProvider):

             for msg in messages:
                 if msg["role"] == "system":
-                    system_message = msg["content"]
+                    # System messages are always text
+                    system_message = (
+                        msg["content"]
+                        if isinstance(msg["content"], str)
+                        else str(msg["content"])
+                    )
                 else:
-                    [… original message-handling block (lines 794-807) not recoverable from this view …]
+                    # Process potentially complex content
+                    if isinstance(msg.get("content"), list):
+                        # Complex content with potential images
+                        content_parts = []
+
+                        for item in msg["content"]:
+                            if item["type"] == "text":
+                                content_parts.append(
+                                    {"type": "text", "text": item["text"]}
+                                )
+                            elif item["type"] == "image":
+                                # Lazy load vision utilities
+                                from .vision_utils import encode_image, get_media_type
+
+                                if "path" in item:
+                                    base64_image = encode_image(item["path"])
+                                    media_type = get_media_type(item["path"])
+                                else:
+                                    base64_image = item.get("base64", "")
+                                    media_type = item.get("media_type", "image/jpeg")
+
+                                content_parts.append(
+                                    {
+                                        "type": "image",
+                                        "source": {
+                                            "type": "base64",
+                                            "media_type": media_type,
+                                            "data": base64_image,
+                                        },
+                                    }
+                                )
+
+                        user_messages.append(
+                            {"role": msg["role"], "content": content_parts}
+                        )
+                    else:
+                        # Simple string content (backward compatible)
+                        user_messages.append(msg)
+
+            # Call Anthropic - build kwargs to avoid passing None values
+            create_kwargs = {
+                "model": model,
+                "messages": user_messages,
+                "max_tokens": generation_config.get("max_tokens", 500),
+                "temperature": generation_config.get("temperature", 0.7),
+            }
+
+            # Only add optional parameters if they have valid values
+            if system_message is not None:
+                create_kwargs["system"] = system_message
+            if generation_config.get("top_p") is not None:
+                create_kwargs["top_p"] = generation_config.get("top_p")
+            if generation_config.get("top_k") is not None:
+                create_kwargs["top_k"] = generation_config.get("top_k")
+            if generation_config.get("stop_sequences") is not None:
+                create_kwargs["stop_sequences"] = generation_config.get(
+                    "stop_sequences"
+                )
+            if generation_config.get("metadata") is not None:
+                create_kwargs["metadata"] = generation_config.get("metadata")
+
+            response = self._client.messages.create(**create_kwargs)

             # Format response
             return {
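
The Anthropic branch converts image items into that provider's base64 "source" blocks and forwards optional sampling parameters only when they are set, so None values never reach the API. A hedged usage sketch:

    provider = AnthropicProvider()
    response = provider.chat(
        messages=[
            {"role": "system", "content": "Answer briefly."},
            {"role": "user", "content": [
                {"type": "text", "text": "What is in this image?"},
                {"type": "image", "path": "photo.jpg"},
            ]},
        ],
        generation_config={"max_tokens": 400, "top_k": 40},
    )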
@@ -1231,16 +1425,33 @@ class MockProvider(UnifiedAIProvider):
         """Mock provider is always available."""
         return True

-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """Generate mock LLM response."""
         last_user_message = ""
+        has_images = False
+
         for msg in reversed(messages):
             if msg.get("role") == "user":
-                last_user_message = msg.get("content", "")
+                content = msg.get("content", "")
+                # Handle complex content with images
+                if isinstance(content, list):
+                    text_parts = []
+                    for item in content:
+                        if item.get("type") == "text":
+                            text_parts.append(item.get("text", ""))
+                        elif item.get("type") == "image":
+                            has_images = True
+                    last_user_message = " ".join(text_parts)
+                else:
+                    last_user_message = content
                 break

         # Generate contextual mock response
-        if "analyze" in last_user_message.lower():
+        if has_images:
+            response_content = (
+                "I can see the image(s) you've provided. [Mock vision response]"
+            )
+        elif "analyze" in last_user_message.lower():
             response_content = "Based on the provided data and context, I can see several key patterns..."
         elif "create" in last_user_message.lower():
             response_content = "I'll help you create that. Based on the requirements..."
@@ -1258,10 +1469,7 @@ class MockProvider(UnifiedAIProvider):
             "tool_calls": [],
             "finish_reason": "stop",
             "usage": {
-                "prompt_tokens": len(
-                    " ".join(msg.get("content", "") for msg in messages)
-                )
-                // 4,
+                "prompt_tokens": 100,  # Mock value
                 "completion_tokens": len(response_content) // 4,
                 "total_tokens": 0,  # Will be calculated
             },
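
MockProvider now detects image items without calling any backend, which makes multimodal flows testable offline. A sketch, assuming the returned dict exposes the generated text under a "content" key as the real providers do:

    mock = MockProvider()
    out = mock.chat([{
        "role": "user",
        "content": [
            {"type": "text", "text": "hi"},
            {"type": "image", "path": "x.png"},
        ],
    }])
    assert "[Mock vision response]" in out["content"]  # "content" key assumed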
kailash/nodes/ai/embedding_generator.py
CHANGED
@@ -94,17 +94,17 @@ class EmbeddingGeneratorNode(Node):
         ... similarity_metric="cosine"
         ... )

-        Cached embedding with MCP integration
-        [… original example (lines 98-107) not recoverable from this view …]
+        Cached embedding with MCP integration::
+
+            mcp_embedder = EmbeddingGeneratorNode()
+            result = mcp_embedder.run(
+                provider="azure",
+                model="text-embedding-3-small",
+                mcp_resource_uri="data://documents/knowledge_base.json",
+                operation="embed_mcp_resource",
+                cache_ttl=3600,
+                chunk_size=512
+            )
     """

     def get_parameters(self) -> dict[str, NodeParameter]:
kailash/nodes/ai/intelligent_agent_orchestrator.py
CHANGED
@@ -132,14 +132,29 @@ class IntelligentCacheNode(Node):
         >>> assert "hit_rate" in stats["stats"]
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.cache = {}
         self.semantic_index = defaultdict(list)
         self.access_patterns = defaultdict(int)
         self.cost_metrics = {}
         self.query_abstractions = {}

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "action": NodeParameter(
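
The same name-resolution block recurs in every orchestrator node below; precedence is the name argument, then id, then "name"/"id" in kwargs, then the class name. For illustration:

    IntelligentCacheNode(name="cache").name    # -> "cache"
    IntelligentCacheNode(id="cache_01").name   # -> "cache_01"
    IntelligentCacheNode().name                # -> "IntelligentCacheNode"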
@@ -482,12 +497,27 @@ class MCPAgentNode(SelfOrganizingAgentNode):
         >>> assert "task" in params
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.mcp_clients = {}
         self.tool_registry = {}
         self.call_history = deque(maxlen=100)

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         params = super().get_parameters()

@@ -789,8 +819,20 @@ class QueryAnalysisNode(Node):
         >>> assert simple["analysis"]["team_suggestion"]["suggested_size"] >= 1
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.query_patterns = {
             "data_retrieval": {
                 "keywords": ["what is", "get", "fetch", "retrieve", "show me"],
@@ -824,6 +866,9 @@ class QueryAnalysisNode(Node):
             },
         }

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "query": NodeParameter(
@@ -1148,11 +1193,26 @@ class OrchestrationManagerNode(Node):
         >>> assert "max_iterations" in params
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.session_id = str(uuid.uuid4())
         self.orchestration_history = deque(maxlen=50)

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "query": NodeParameter(
@@ -1304,9 +1364,22 @@ class OrchestrationManagerNode(Node):

         # Phase 5: Final Processing
         print("📊 Phase 5: Finalizing results...")
+
+        # Handle case where solution_history is empty (time limit reached early)
+        if final_solution:
+            solution_to_use = final_solution
+        elif solution_history:
+            solution_to_use = solution_history[-1]["solution"]
+        else:
+            solution_to_use = {
+                "content": "No solution generated due to time constraints.",
+                "confidence": 0.0,
+                "reasoning": "Time limit reached before any solution could be generated.",
+            }
+
         final_result = self._finalize_results(
             query,
-            [… original argument (line 1309) not recoverable from this view …]
+            solution_to_use,
             solution_history,
             time.time() - start_time,
             infrastructure,
@@ -1825,10 +1898,25 @@ class ConvergenceDetectorNode(Node):
         >>> assert "Diminishing returns" in result2["reason"]
         """

-    def __init__(self):
-        super().__init__()
+    def __init__(self, name: str = None, id: str = None, **kwargs):
+        # Set name from parameters
+        if name:
+            self.name = name
+        elif id:
+            self.name = id
+        elif "name" in kwargs:
+            self.name = kwargs.pop("name")
+        elif "id" in kwargs:
+            self.name = kwargs.pop("id")
+        else:
+            self.name = self.__class__.__name__
+
+        # Initialize node attributes
         self.convergence_history = deque(maxlen=100)

+        # Call parent constructor
+        super().__init__(name=self.name)
+
     def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "solution_history": NodeParameter(