kailash 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
- kailash/__init__.py +4 -4
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/admin/__init__.py +9 -2
- kailash/nodes/admin/audit_log.py +1 -1
- kailash/nodes/admin/security_event.py +7 -3
- kailash/nodes/ai/ai_providers.py +247 -40
- kailash/nodes/ai/llm_agent.py +29 -3
- kailash/nodes/ai/vision_utils.py +148 -0
- kailash/nodes/alerts/__init__.py +26 -0
- kailash/nodes/alerts/base.py +234 -0
- kailash/nodes/alerts/discord.py +499 -0
- kailash/nodes/data/streaming.py +8 -8
- kailash/nodes/security/audit_log.py +48 -36
- kailash/nodes/security/security_event.py +73 -72
- kailash/security.py +1 -1
- {kailash-0.4.0.dist-info → kailash-0.4.1.dist-info}/METADATA +4 -1
- {kailash-0.4.0.dist-info → kailash-0.4.1.dist-info}/RECORD +21 -17
- {kailash-0.4.0.dist-info → kailash-0.4.1.dist-info}/WHEEL +0 -0
- {kailash-0.4.0.dist-info → kailash-0.4.1.dist-info}/entry_points.txt +0 -0
- {kailash-0.4.0.dist-info → kailash-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.4.0.dist-info → kailash-0.4.1.dist-info}/top_level.txt +0 -0
kailash/__init__.py
CHANGED
@@ -3,9 +3,9 @@
 The Kailash SDK provides a comprehensive framework for creating nodes and workflows
 that align with container-node architecture while allowing rapid prototyping.
 
-New in v0.4.
-
-
+New in v0.4.1: Production-ready Alert Nodes with Discord integration and
+AI Provider Vision Support. Rich Discord alerts with embeds, rate limiting,
+and universal vision capabilities across OpenAI, Anthropic, and Ollama providers.
 """
 
 from kailash.nodes.base import Node, NodeMetadata, NodeParameter
@@ -34,7 +34,7 @@ except ImportError:
 # For backward compatibility
 WorkflowGraph = Workflow
 
-__version__ = "0.4.0"
+__version__ = "0.4.1"
 
 __all__ = [
     # Core workflow components
kailash/nodes/__init__.py
CHANGED
kailash/nodes/admin/__init__.py
CHANGED
@@ -19,17 +19,24 @@ Core Components:
 - SecurityEventNode: Security incident tracking
 """
 
-from .audit_log import
+from .audit_log import EnterpriseAuditLogNode
 from .permission_check import PermissionCheckNode
 from .role_management import RoleManagementNode
-from .security_event import
+from .security_event import EnterpriseSecurityEventNode
 from .user_management import UserManagementNode
 
+# For backward compatibility, expose both old and new names
+AuditLogNode = EnterpriseAuditLogNode
+SecurityEventNode = EnterpriseSecurityEventNode
+
 __all__ = [
     # Core admin nodes
     "UserManagementNode",
     "RoleManagementNode",
     "PermissionCheckNode",
+    "EnterpriseAuditLogNode",
+    "EnterpriseSecurityEventNode",
+    # Backward compatibility aliases
     "AuditLogNode",
     "SecurityEventNode",
 ]
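The aliasing above keeps the pre-0.4.1 import paths working. A minimal sketch of what callers can rely on after this change (assuming the package is installed):

```python
# Both names resolve to the same class after the 0.4.1 aliasing shown above.
from kailash.nodes.admin import AuditLogNode, EnterpriseAuditLogNode

assert AuditLogNode is EnterpriseAuditLogNode  # alias, not a separate class
```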
kailash/nodes/admin/audit_log.py
CHANGED
@@ -127,7 +127,7 @@ class AuditEvent:
 
 
 @register_node()
-class
+class EnterpriseAuditLogNode(Node):
     """Enterprise audit logging node with comprehensive compliance features.
 
     This node provides comprehensive audit logging capabilities including:
kailash/nodes/admin/security_event.py
CHANGED
@@ -24,7 +24,11 @@ from enum import Enum
 from typing import Any, Dict, List, Optional, Tuple
 
 from kailash.access_control import UserContext
-from kailash.nodes.admin.audit_log import
+from kailash.nodes.admin.audit_log import (
+    AuditEventType,
+    AuditSeverity,
+    EnterpriseAuditLogNode,
+)
 from kailash.nodes.base import Node, NodeParameter, register_node
 from kailash.nodes.data import AsyncSQLDatabaseNode
 from kailash.sdk_exceptions import NodeExecutionError, NodeValidationError
@@ -169,7 +173,7 @@ class SecurityIncident:
 
 
 @register_node()
-class
+class EnterpriseSecurityEventNode(Node):
     """Enterprise security event monitoring and incident response node.
 
     This node provides comprehensive security event processing including:
@@ -412,7 +416,7 @@ class SecurityEventNode(Node):
         self._db_node = AsyncSQLDatabaseNode(name="security_event_db", **db_config)
 
         # Initialize audit logging node
-        self._audit_node =
+        self._audit_node = EnterpriseAuditLogNode(database_config=db_config)
 
     def _create_event(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
         """Create a new security event with risk scoring."""
kailash/nodes/ai/ai_providers.py
CHANGED
@@ -8,7 +8,11 @@ separation between LLM and embedding capabilities.
 
 import hashlib
 from abc import ABC, abstractmethod
-from typing import Any
+from typing import Any, Dict, List, Union
+
+# Type definitions for flexible message content
+MessageContent = Union[str, List[Dict[str, Any]]]
+Message = Dict[str, Union[str, MessageContent]]
 
 
 class BaseAIProvider(ABC):
18
|
class BaseAIProvider(ABC):
|
@@ -205,12 +209,14 @@ class LLMProvider(BaseAIProvider):
|
|
205
209
|
self._capabilities["chat"] = True
|
206
210
|
|
207
211
|
@abstractmethod
|
208
|
-
def chat(self, messages:
|
212
|
+
def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
|
209
213
|
"""
|
210
214
|
Generate a chat completion using the provider's LLM.
|
211
215
|
|
212
216
|
Args:
|
213
217
|
messages: Conversation messages in OpenAI format
|
218
|
+
Can be simple: [{"role": "user", "content": "text"}]
|
219
|
+
Or complex: [{"role": "user", "content": [{"type": "text", "text": "..."}, {"type": "image", "path": "..."}]}]
|
214
220
|
**kwargs: Provider-specific parameters
|
215
221
|
|
216
222
|
Returns:
|
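For orientation, the two message shapes documented in the docstring above look like this; the text and image path are illustrative values, not part of the diff:

```python
# Simple string content (pre-0.4.1 behaviour, still supported).
simple_messages = [
    {"role": "user", "content": "Summarize this quarter's incidents."}
]

# Complex content mixing text and image parts (new vision path).
vision_messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is shown in this dashboard?"},
            {"type": "image", "path": "reports/dashboard.png"},  # or {"type": "image", "base64": "..."}
        ],
    }
]
```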
@@ -391,7 +397,7 @@ class OllamaProvider(UnifiedAIProvider):
 
         return self._available
 
-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """Generate a chat completion using Ollama.
 
         Args:
@@ -435,8 +441,50 @@ class OllamaProvider(UnifiedAIProvider):
         # Remove None values
         options = {k: v for k, v in options.items() if v is not None}
 
+        # Process messages for vision content
+        processed_messages = []
+
+        for msg in messages:
+            if isinstance(msg.get("content"), list):
+                # Complex content with potential images
+                text_parts = []
+                images = []
+
+                for item in msg["content"]:
+                    if item["type"] == "text":
+                        text_parts.append(item["text"])
+                    elif item["type"] == "image":
+                        # Lazy load vision utilities
+                        from .vision_utils import encode_image
+
+                        if "path" in item:
+                            # For file paths, read the file directly
+                            with open(item["path"], "rb") as f:
+                                images.append(f.read())
+                        else:
+                            # For base64, decode it to bytes
+                            import base64
+
+                            base64_data = item.get("base64", "")
+                            images.append(base64.b64decode(base64_data))
+
+                # Ollama expects images as part of the message
+                message_dict = {
+                    "role": msg["role"],
+                    "content": " ".join(text_parts),
+                }
+                if images:
+                    message_dict["images"] = images
+
+                processed_messages.append(message_dict)
+            else:
+                # Simple string content (backward compatible)
+                processed_messages.append(msg)
+
         # Call Ollama
-        response = ollama.chat(
+        response = ollama.chat(
+            model=model, messages=processed_messages, options=options
+        )
 
         # Format response to match standard structure
         return {
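A sketch of what the Ollama branch above produces for one vision message; the path is hypothetical and the placeholder bytes stand in for what `open(item["path"], "rb").read()` returns:

```python
incoming = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this chart."},
        {"type": "image", "path": "chart.png"},  # hypothetical path
    ],
}

image_bytes = b"<raw image bytes>"  # placeholder for the file contents

processed = {
    "role": "user",
    "content": "Describe this chart.",  # text parts joined with spaces
    "images": [image_bytes],            # raw bytes list, as ollama.chat expects
}
```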
@@ -545,11 +593,18 @@ class OpenAIProvider(UnifiedAIProvider):
     - Install openai package: `pip install openai`
 
     Supported LLM models:
-    -
-    -
-
-
-    - gpt-3.5-turbo
+    - o4-mini (latest, vision support, recommended)
+    - o3 (reasoning model)
+
+    Note: This provider uses max_completion_tokens parameter compatible with
+    latest OpenAI models. Older models (gpt-4, gpt-3.5-turbo) are not supported.
+
+    Generation Config Parameters:
+    - max_completion_tokens (int): Maximum tokens to generate (recommended)
+    - max_tokens (int): Deprecated, use max_completion_tokens instead
+    - temperature (float): Sampling temperature (0-2)
+    - top_p (float): Nucleus sampling probability
+    - Other standard OpenAI parameters
 
     Supported embedding models:
     - text-embedding-3-large (3072 dimensions, configurable)
@@ -572,19 +627,22 @@ class OpenAIProvider(UnifiedAIProvider):
 
         return self._available
 
-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """
         Generate a chat completion using OpenAI.
 
         Supported kwargs:
-        - model (str): OpenAI model name (default: "
-        - generation_config (dict): Generation parameters
+        - model (str): OpenAI model name (default: "o4-mini")
+        - generation_config (dict): Generation parameters including:
+            - max_completion_tokens (int): Max tokens to generate (recommended)
+            - max_tokens (int): Deprecated, use max_completion_tokens
+            - temperature, top_p, frequency_penalty, presence_penalty, etc.
         - tools (List[Dict]): Function/tool definitions for function calling
         """
         try:
             import openai
 
-            model = kwargs.get("model", "
+            model = kwargs.get("model", "o4-mini")
             generation_config = kwargs.get("generation_config", {})
             tools = kwargs.get("tools", [])
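A hedged usage sketch of the call shape documented in the docstring above; it assumes the openai package is installed and credentials are configured via the environment, and the exact constructor arguments for the provider are not taken from the diff:

```python
from kailash.nodes.ai.ai_providers import OpenAIProvider

provider = OpenAIProvider()  # assumes API key comes from the environment
result = provider.chat(
    messages=[{"role": "user", "content": "Give me a one-line status summary."}],
    model="o4-mini",  # default per the docstring above
    generation_config={
        "max_completion_tokens": 200,  # preferred over the deprecated max_tokens
        "temperature": 1.0,
    },
)
print(result)
```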
@@ -592,13 +650,86 @@ class OpenAIProvider(UnifiedAIProvider):
             if self._client is None:
                 self._client = openai.OpenAI()
 
+            # Process messages for vision content
+            processed_messages = []
+            for msg in messages:
+                if isinstance(msg.get("content"), list):
+                    # Complex content with potential images
+                    processed_content = []
+                    for item in msg["content"]:
+                        if item.get("type") == "text":
+                            processed_content.append(
+                                {"type": "text", "text": item.get("text", "")}
+                            )
+                        elif item.get("type") == "image":
+                            # Lazy load vision utilities
+                            from .vision_utils import (
+                                encode_image,
+                                get_media_type,
+                                validate_image_size,
+                            )
+
+                            if "path" in item:
+                                # Validate image size
+                                is_valid, error_msg = validate_image_size(item["path"])
+                                if not is_valid:
+                                    raise ValueError(
+                                        f"Image validation failed: {error_msg}"
+                                    )
+
+                                base64_image = encode_image(item["path"])
+                                media_type = get_media_type(item["path"])
+                            elif "base64" in item:
+                                base64_image = item["base64"]
+                                media_type = item.get("media_type", "image/jpeg")
+                            else:
+                                raise ValueError(
+                                    "Image item must have either 'path' or 'base64' field"
+                                )
+
+                            processed_content.append(
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": f"data:{media_type};base64,{base64_image}"
+                                    },
+                                }
+                            )
+
+                    processed_messages.append(
+                        {"role": msg.get("role", "user"), "content": processed_content}
+                    )
+                else:
+                    # Simple string content (backward compatible)
+                    processed_messages.append(msg)
+
+            # Handle max tokens parameter - support both old and new names
+            max_completion = generation_config.get(
+                "max_completion_tokens"
+            ) or generation_config.get("max_tokens", 500)
+
+            # Show deprecation warning if using old parameter
+            # TODO: remove the max_tokens in the future.
+            if (
+                "max_tokens" in generation_config
+                and "max_completion_tokens" not in generation_config
+            ):
+                import warnings
+
+                warnings.warn(
+                    "'max_tokens' is deprecated and will be removed in v0.5.0. "
+                    "Please use 'max_completion_tokens' instead.",
+                    DeprecationWarning,
+                    stacklevel=3,
+                )
+
             # Prepare request
             request_params = {
                 "model": model,
-                "messages":
-                "temperature": generation_config.get("temperature", 0
-                "
-                "top_p": generation_config.get("top_p", 0
+                "messages": processed_messages,
+                "temperature": generation_config.get("temperature", 1.0),
+                "max_completion_tokens": max_completion,  # Always use new parameter
+                "top_p": generation_config.get("top_p", 1.0),
                 "frequency_penalty": generation_config.get("frequency_penalty"),
                 "presence_penalty": generation_config.get("presence_penalty"),
                 "stop": generation_config.get("stop"),
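For reference, the image branch above turns each image item into an OpenAI-format data-URI content part. A minimal sketch, with placeholder bytes standing in for a real file read by encode_image():

```python
import base64

b64 = base64.b64encode(b"<image bytes>").decode("utf-8")  # placeholder payload

image_part = {
    "type": "image_url",
    "image_url": {"url": f"data:image/png;base64,{b64}"},
}
```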
@@ -649,6 +780,15 @@ class OpenAIProvider(UnifiedAIProvider):
             raise RuntimeError(
                 "OpenAI library not installed. Install with: pip install openai"
             )
+        except openai.BadRequestError as e:
+            # Provide helpful error message for unsupported models or parameters
+            if "max_tokens" in str(e):
+                raise RuntimeError(
+                    "This OpenAI provider requires models that support max_completion_tokens. "
+                    "Please use o4-mini, o3 "
+                    "Older models like gpt-4o or gpt-3.5-turbo are not supported."
+                )
+            raise RuntimeError(f"OpenAI API error: {str(e)}")
         except Exception as e:
             raise RuntimeError(f"OpenAI error: {str(e)}")
 
@@ -772,7 +912,7 @@ class AnthropicProvider(LLMProvider):
 
         return self._available
 
-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """Generate a chat completion using Anthropic."""
         try:
             import anthropic
@@ -790,22 +930,75 @@ class AnthropicProvider(LLMProvider):
 
             for msg in messages:
                 if msg["role"] == "system":
-
+                    # System messages are always text
+                    system_message = (
+                        msg["content"]
+                        if isinstance(msg["content"], str)
+                        else str(msg["content"])
+                    )
                 else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    # Process potentially complex content
+                    if isinstance(msg.get("content"), list):
+                        # Complex content with potential images
+                        content_parts = []
+
+                        for item in msg["content"]:
+                            if item["type"] == "text":
+                                content_parts.append(
+                                    {"type": "text", "text": item["text"]}
+                                )
+                            elif item["type"] == "image":
+                                # Lazy load vision utilities
+                                from .vision_utils import encode_image, get_media_type
+
+                                if "path" in item:
+                                    base64_image = encode_image(item["path"])
+                                    media_type = get_media_type(item["path"])
+                                else:
+                                    base64_image = item.get("base64", "")
+                                    media_type = item.get("media_type", "image/jpeg")
+
+                                content_parts.append(
+                                    {
+                                        "type": "image",
+                                        "source": {
+                                            "type": "base64",
+                                            "media_type": media_type,
+                                            "data": base64_image,
+                                        },
+                                    }
+                                )
+
+                        user_messages.append(
+                            {"role": msg["role"], "content": content_parts}
+                        )
+                    else:
+                        # Simple string content (backward compatible)
+                        user_messages.append(msg)
+
+            # Call Anthropic - build kwargs to avoid passing None values
+            create_kwargs = {
+                "model": model,
+                "messages": user_messages,
+                "max_tokens": generation_config.get("max_tokens", 500),
+                "temperature": generation_config.get("temperature", 0.7),
+            }
+
+            # Only add optional parameters if they have valid values
+            if system_message is not None:
+                create_kwargs["system"] = system_message
+            if generation_config.get("top_p") is not None:
+                create_kwargs["top_p"] = generation_config.get("top_p")
+            if generation_config.get("top_k") is not None:
+                create_kwargs["top_k"] = generation_config.get("top_k")
+            if generation_config.get("stop_sequences") is not None:
+                create_kwargs["stop_sequences"] = generation_config.get(
+                    "stop_sequences"
+                )
+            if generation_config.get("metadata") is not None:
+                create_kwargs["metadata"] = generation_config.get("metadata")
+
+            response = self._client.messages.create(**create_kwargs)
 
             # Format response
             return {
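The image branch above emits Anthropic's base64 source block. A minimal sketch of the resulting structure (the base64 string is a placeholder, not real image data):

```python
image_block = {
    "type": "image",
    "source": {
        "type": "base64",
        "media_type": "image/jpeg",
        "data": "<base64-encoded image bytes>",  # placeholder
    },
}
```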
@@ -1232,16 +1425,33 @@ class MockProvider(UnifiedAIProvider):
         """Mock provider is always available."""
         return True
 
-    def chat(self, messages:
+    def chat(self, messages: List[Message], **kwargs) -> dict[str, Any]:
         """Generate mock LLM response."""
         last_user_message = ""
+        has_images = False
+
         for msg in reversed(messages):
             if msg.get("role") == "user":
-
+                content = msg.get("content", "")
+                # Handle complex content with images
+                if isinstance(content, list):
+                    text_parts = []
+                    for item in content:
+                        if item.get("type") == "text":
+                            text_parts.append(item.get("text", ""))
+                        elif item.get("type") == "image":
+                            has_images = True
+                    last_user_message = " ".join(text_parts)
+                else:
+                    last_user_message = content
                 break
 
         # Generate contextual mock response
-        if
+        if has_images:
+            response_content = (
+                "I can see the image(s) you've provided. [Mock vision response]"
+            )
+        elif "analyze" in last_user_message.lower():
             response_content = "Based on the provided data and context, I can see several key patterns..."
         elif "create" in last_user_message.lower():
             response_content = "I'll help you create that. Based on the requirements..."
@@ -1259,10 +1469,7 @@ class MockProvider(UnifiedAIProvider):
             "tool_calls": [],
             "finish_reason": "stop",
             "usage": {
-                "prompt_tokens":
-                " ".join(msg.get("content", "") for msg in messages)
-                )
-                // 4,
+                "prompt_tokens": 100,  # Mock value
                 "completion_tokens": len(response_content) // 4,
                 "total_tokens": 0,  # Will be calculated
             },
kailash/nodes/ai/llm_agent.py
CHANGED
@@ -1412,13 +1412,28 @@ class LLMAgentNode(Node):
     ) -> dict[str, Any]:
         """Generate mock LLM response for testing."""
         last_user_message = ""
+        has_images = False
+
         for msg in reversed(messages):
             if msg.get("role") == "user":
-
+                content = msg.get("content", "")
+                # Handle complex content with images
+                if isinstance(content, list):
+                    text_parts = []
+                    for item in content:
+                        if item.get("type") == "text":
+                            text_parts.append(item.get("text", ""))
+                        elif item.get("type") == "image":
+                            has_images = True
+                    last_user_message = " ".join(text_parts)
+                else:
+                    last_user_message = content
                 break
 
         # Generate contextual mock response
-        if
+        if has_images:
+            response_content = "I can see the image(s) you've provided. Based on my analysis, [Mock vision response for testing]"
+        elif "analyze" in last_user_message.lower():
             response_content = "Based on the provided data and context, I can see several key patterns: 1) Customer engagement has increased by 15% this quarter, 2) Product A shows the highest conversion rate, and 3) There are opportunities for improvement in the onboarding process."
         elif (
             "create" in last_user_message.lower()
@@ -1458,7 +1473,18 @@ class LLMAgentNode(Node):
             "finish_reason": "stop" if not tool_calls else "tool_calls",
             "usage": {
                 "prompt_tokens": len(
-                    " ".join(
+                    " ".join(
+                        (
+                            msg.get("content", "")
+                            if isinstance(msg.get("content"), str)
+                            else " ".join(
+                                item.get("text", "")
+                                for item in msg.get("content", [])
+                                if item.get("type") == "text"
+                            )
+                        )
+                        for msg in messages
+                    )
                 )
                 // 4,
                 "completion_tokens": len(response_content) // 4,