kailash 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +40 -39
  3. kailash/api/auth.py +26 -32
  4. kailash/api/custom_nodes.py +29 -29
  5. kailash/api/custom_nodes_secure.py +35 -35
  6. kailash/api/database.py +17 -17
  7. kailash/api/gateway.py +19 -19
  8. kailash/api/mcp_integration.py +24 -23
  9. kailash/api/studio.py +45 -45
  10. kailash/api/workflow_api.py +8 -8
  11. kailash/cli/commands.py +5 -8
  12. kailash/manifest.py +42 -42
  13. kailash/mcp/__init__.py +1 -1
  14. kailash/mcp/ai_registry_server.py +20 -20
  15. kailash/mcp/client.py +9 -11
  16. kailash/mcp/client_new.py +10 -10
  17. kailash/mcp/server.py +1 -2
  18. kailash/mcp/server_enhanced.py +449 -0
  19. kailash/mcp/servers/ai_registry.py +6 -6
  20. kailash/mcp/utils/__init__.py +31 -0
  21. kailash/mcp/utils/cache.py +267 -0
  22. kailash/mcp/utils/config.py +263 -0
  23. kailash/mcp/utils/formatters.py +293 -0
  24. kailash/mcp/utils/metrics.py +418 -0
  25. kailash/nodes/ai/agents.py +9 -9
  26. kailash/nodes/ai/ai_providers.py +33 -34
  27. kailash/nodes/ai/embedding_generator.py +31 -32
  28. kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
  29. kailash/nodes/ai/iterative_llm_agent.py +48 -48
  30. kailash/nodes/ai/llm_agent.py +32 -33
  31. kailash/nodes/ai/models.py +13 -13
  32. kailash/nodes/ai/self_organizing.py +44 -44
  33. kailash/nodes/api/auth.py +11 -11
  34. kailash/nodes/api/graphql.py +13 -13
  35. kailash/nodes/api/http.py +19 -19
  36. kailash/nodes/api/monitoring.py +20 -20
  37. kailash/nodes/api/rate_limiting.py +9 -13
  38. kailash/nodes/api/rest.py +29 -29
  39. kailash/nodes/api/security.py +44 -47
  40. kailash/nodes/base.py +21 -23
  41. kailash/nodes/base_async.py +7 -7
  42. kailash/nodes/base_cycle_aware.py +12 -12
  43. kailash/nodes/base_with_acl.py +5 -5
  44. kailash/nodes/code/python.py +56 -55
  45. kailash/nodes/data/directory.py +6 -6
  46. kailash/nodes/data/event_generation.py +10 -10
  47. kailash/nodes/data/file_discovery.py +28 -31
  48. kailash/nodes/data/readers.py +8 -8
  49. kailash/nodes/data/retrieval.py +10 -10
  50. kailash/nodes/data/sharepoint_graph.py +17 -17
  51. kailash/nodes/data/sources.py +5 -5
  52. kailash/nodes/data/sql.py +13 -13
  53. kailash/nodes/data/streaming.py +25 -25
  54. kailash/nodes/data/vector_db.py +22 -22
  55. kailash/nodes/data/writers.py +7 -7
  56. kailash/nodes/logic/async_operations.py +17 -17
  57. kailash/nodes/logic/convergence.py +11 -11
  58. kailash/nodes/logic/loop.py +4 -4
  59. kailash/nodes/logic/operations.py +11 -11
  60. kailash/nodes/logic/workflow.py +8 -9
  61. kailash/nodes/mixins/mcp.py +17 -17
  62. kailash/nodes/mixins.py +8 -10
  63. kailash/nodes/transform/chunkers.py +3 -3
  64. kailash/nodes/transform/formatters.py +7 -7
  65. kailash/nodes/transform/processors.py +10 -10
  66. kailash/runtime/access_controlled.py +18 -18
  67. kailash/runtime/async_local.py +17 -19
  68. kailash/runtime/docker.py +20 -22
  69. kailash/runtime/local.py +16 -16
  70. kailash/runtime/parallel.py +23 -23
  71. kailash/runtime/parallel_cyclic.py +27 -27
  72. kailash/runtime/runner.py +6 -6
  73. kailash/runtime/testing.py +20 -20
  74. kailash/sdk_exceptions.py +0 -58
  75. kailash/security.py +14 -26
  76. kailash/tracking/manager.py +38 -38
  77. kailash/tracking/metrics_collector.py +15 -14
  78. kailash/tracking/models.py +53 -53
  79. kailash/tracking/storage/base.py +7 -17
  80. kailash/tracking/storage/database.py +22 -23
  81. kailash/tracking/storage/filesystem.py +38 -40
  82. kailash/utils/export.py +21 -21
  83. kailash/utils/templates.py +2 -3
  84. kailash/visualization/api.py +30 -34
  85. kailash/visualization/dashboard.py +17 -17
  86. kailash/visualization/performance.py +16 -16
  87. kailash/visualization/reports.py +25 -27
  88. kailash/workflow/builder.py +8 -8
  89. kailash/workflow/convergence.py +13 -12
  90. kailash/workflow/cycle_analyzer.py +30 -32
  91. kailash/workflow/cycle_builder.py +12 -12
  92. kailash/workflow/cycle_config.py +16 -15
  93. kailash/workflow/cycle_debugger.py +40 -40
  94. kailash/workflow/cycle_exceptions.py +29 -29
  95. kailash/workflow/cycle_profiler.py +21 -21
  96. kailash/workflow/cycle_state.py +20 -22
  97. kailash/workflow/cyclic_runner.py +44 -44
  98. kailash/workflow/graph.py +40 -40
  99. kailash/workflow/mermaid_visualizer.py +9 -11
  100. kailash/workflow/migration.py +22 -22
  101. kailash/workflow/mock_registry.py +6 -6
  102. kailash/workflow/runner.py +9 -9
  103. kailash/workflow/safety.py +12 -13
  104. kailash/workflow/state.py +8 -11
  105. kailash/workflow/templates.py +19 -19
  106. kailash/workflow/validation.py +14 -14
  107. kailash/workflow/visualization.py +22 -22
  108. {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/METADATA +53 -5
  109. kailash-0.3.1.dist-info/RECORD +136 -0
  110. kailash-0.3.0.dist-info/RECORD +0 -130
  111. {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/WHEEL +0 -0
  112. {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/entry_points.txt +0 -0
  113. {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/licenses/LICENSE +0 -0
  114. {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/agents.py

@@ -1,6 +1,6 @@
 """AI agent nodes for the Kailash SDK."""
 
-from typing import Any, Dict
+from typing import Any
 
 from kailash.nodes.base import Node, NodeParameter, register_node
 
@@ -66,7 +66,7 @@ class ChatAgent(Node):
     >>> assert "temperature" in params
     """
 
-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "messages": NodeParameter(
                 name="messages",
@@ -104,7 +104,7 @@ class ChatAgent(Node):
             ),
         }
 
-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         messages = kwargs["messages"]
         model = kwargs.get("model", "default")
         temperature = kwargs.get("temperature", 0.7)
@@ -213,7 +213,7 @@ class RetrievalAgent(Node):
     >>> assert "top_k" in params
     """
 
-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "query": NodeParameter(
                 name="query", type=str, required=True, description="Query for retrieval"
@@ -247,7 +247,7 @@ class RetrievalAgent(Node):
             ),
         }
 
-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         query = kwargs["query"]
         documents = kwargs["documents"]
         top_k = kwargs.get("top_k", 5)
@@ -301,7 +301,7 @@ class RetrievalAgent(Node):
 class FunctionCallingAgent(Node):
     """Agent that can call functions based on input."""
 
-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "query": NodeParameter(
                 name="query", type=str, required=True, description="User query"
@@ -328,7 +328,7 @@ class FunctionCallingAgent(Node):
             ),
         }
 
-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         query = kwargs["query"]
         available_functions = kwargs["available_functions"]
         context = kwargs.get("context", {})
@@ -403,7 +403,7 @@ class FunctionCallingAgent(Node):
 class PlanningAgent(Node):
     """Agent that creates execution plans."""
 
-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "goal": NodeParameter(
                 name="goal", type=str, required=True, description="Goal to achieve"
@@ -430,7 +430,7 @@ class PlanningAgent(Node):
             ),
         }
 
-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         goal = kwargs["goal"]
         available_tools = kwargs["available_tools"]
         constraints = kwargs.get("constraints", {})
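Every hunk in agents.py applies the same mechanical migration, and it recurs throughout this release: the typing aliases Dict, List, Optional, and Union are replaced with built-in generics (PEP 585, Python 3.9+) and union syntax (PEP 604, Python 3.10+). A minimal before/after sketch of the pattern; the function names below are illustrative, not taken from the package:

from typing import Any

# 0.3.0 style (typing aliases):
#     from typing import Dict, List, Optional
#     def get_parameters(self) -> Dict[str, NodeParameter]: ...
# 0.3.1 style (built-in generics and "|" unions):

def get_parameters() -> dict[str, Any]:  # Dict[str, X] becomes dict[str, X]
    return {}

def embed(texts: list[str]) -> list[list[float]]:  # List[X] becomes list[X]
    return [[0.0] for _ in texts]

def lookup(key: str) -> dict[str, Any] | None:  # Optional[X] becomes X | None
    return None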
kailash/nodes/ai/ai_providers.py

@@ -8,7 +8,7 @@ separation between LLM and embedding capabilities.
 
 import hashlib
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 
 
 class BaseAIProvider(ABC):
@@ -101,9 +101,8 @@ class BaseAIProvider(ABC):
         Returns:
             bool: True if the provider can be used, False otherwise
         """
-        pass
 
-    def get_capabilities(self) -> Dict[str, bool]:
+    def get_capabilities(self) -> dict[str, bool]:
         """
         Get the capabilities supported by this provider.
 
@@ -206,7 +205,7 @@ class LLMProvider(BaseAIProvider):
         self._capabilities["chat"] = True
 
     @abstractmethod
-    def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
+    def chat(self, messages: list[dict[str, str]], **kwargs) -> dict[str, Any]:
         """
         Generate a chat completion using the provider's LLM.
 
@@ -217,7 +216,6 @@ class LLMProvider(BaseAIProvider):
         Returns:
             Dict containing the standardized response
         """
-        pass
 
 
 class EmbeddingProvider(BaseAIProvider):
@@ -304,7 +302,7 @@ class EmbeddingProvider(BaseAIProvider):
         self._capabilities["embeddings"] = True
 
     @abstractmethod
-    def embed(self, texts: List[str], **kwargs) -> List[List[float]]:
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
         """
         Generate embeddings for a list of texts.
 
@@ -315,10 +313,9 @@ class EmbeddingProvider(BaseAIProvider):
         Returns:
             List of embedding vectors
         """
-        pass
 
     @abstractmethod
-    def get_model_info(self, model: str) -> Dict[str, Any]:
+    def get_model_info(self, model: str) -> dict[str, Any]:
         """
         Get information about a specific embedding model.
 
@@ -328,7 +325,6 @@ class EmbeddingProvider(BaseAIProvider):
         Returns:
             Dict containing model information
         """
-        pass
 
 
 class UnifiedAIProvider(LLMProvider, EmbeddingProvider):
@@ -395,7 +391,7 @@ class OllamaProvider(UnifiedAIProvider):
 
         return self._available
 
-    def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
+    def chat(self, messages: list[dict[str, str]], **kwargs) -> dict[str, Any]:
         """Generate a chat completion using Ollama.
 
         Args:
@@ -470,7 +466,7 @@ class OllamaProvider(UnifiedAIProvider):
         except Exception as e:
             raise RuntimeError(f"Ollama error: {str(e)}")
 
-    def embed(self, texts: List[str], **kwargs) -> List[List[float]]:
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
         """
         Generate embeddings using Ollama.
 
@@ -506,7 +502,7 @@ class OllamaProvider(UnifiedAIProvider):
         except Exception as e:
             raise RuntimeError(f"Ollama embedding error: {str(e)}")
 
-    def get_model_info(self, model: str) -> Dict[str, Any]:
+    def get_model_info(self, model: str) -> dict[str, Any]:
         """Get information about an Ollama embedding model."""
         if model in self._model_cache:
             return self._model_cache[model]
@@ -575,7 +571,7 @@ class OpenAIProvider(UnifiedAIProvider):
 
         return self._available
 
-    def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
+    def chat(self, messages: list[dict[str, str]], **kwargs) -> dict[str, Any]:
         """
         Generate a chat completion using OpenAI.
 
@@ -655,7 +651,7 @@ class OpenAIProvider(UnifiedAIProvider):
         except Exception as e:
             raise RuntimeError(f"OpenAI error: {str(e)}")
 
-    def embed(self, texts: List[str], **kwargs) -> List[List[float]]:
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
         """
         Generate embeddings using OpenAI.
 
@@ -699,7 +695,7 @@ class OpenAIProvider(UnifiedAIProvider):
         except Exception as e:
             raise RuntimeError(f"OpenAI embedding error: {str(e)}")
 
-    def get_model_info(self, model: str) -> Dict[str, Any]:
+    def get_model_info(self, model: str) -> dict[str, Any]:
         """Get information about an OpenAI embedding model."""
         models = {
             "text-embedding-3-large": {
@@ -775,7 +771,7 @@ class AnthropicProvider(LLMProvider):
 
         return self._available
 
-    def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
+    def chat(self, messages: list[dict[str, str]], **kwargs) -> dict[str, Any]:
         """Generate a chat completion using Anthropic."""
         try:
             import anthropic
@@ -869,7 +865,7 @@ class CohereProvider(EmbeddingProvider):
 
         return self._available
 
-    def embed(self, texts: List[str], **kwargs) -> List[List[float]]:
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
         """Generate embeddings using Cohere."""
         try:
             import cohere
@@ -899,7 +895,7 @@ class CohereProvider(EmbeddingProvider):
         except Exception as e:
             raise RuntimeError(f"Cohere embedding error: {str(e)}")
 
-    def get_model_info(self, model: str) -> Dict[str, Any]:
+    def get_model_info(self, model: str) -> dict[str, Any]:
         """Get information about a Cohere embedding model."""
         models = {
             "embed-english-v3.0": {
@@ -1050,7 +1046,7 @@ class HuggingFaceProvider(EmbeddingProvider):
 
         return self._available_api or self._available_local
 
-    def embed(self, texts: List[str], **kwargs) -> List[List[float]]:
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
         """Generate embeddings using HuggingFace."""
         model = kwargs.get("model", "sentence-transformers/all-MiniLM-L6-v2")
         use_api = kwargs.get("use_api", self._available_api)
@@ -1067,8 +1063,8 @@ class HuggingFaceProvider(EmbeddingProvider):
         )
 
     def _embed_api(
-        self, texts: List[str], model: str, normalize: bool
-    ) -> List[List[float]]:
+        self, texts: list[str], model: str, normalize: bool
+    ) -> list[list[float]]:
         """Generate embeddings using HuggingFace Inference API."""
         try:
             import os
@@ -1106,8 +1102,8 @@ class HuggingFaceProvider(EmbeddingProvider):
             raise RuntimeError(f"HuggingFace API error: {str(e)}")
 
     def _embed_local(
-        self, texts: List[str], model: str, device: str, normalize: bool
-    ) -> List[List[float]]:
+        self, texts: list[str], model: str, device: str, normalize: bool
+    ) -> list[list[float]]:
         """Generate embeddings using local HuggingFace model."""
         try:
             import torch
@@ -1165,7 +1161,7 @@ class HuggingFaceProvider(EmbeddingProvider):
         except Exception as e:
             raise RuntimeError(f"HuggingFace local error: {str(e)}")
 
-    def get_model_info(self, model: str) -> Dict[str, Any]:
+    def get_model_info(self, model: str) -> dict[str, Any]:
         """Get information about a HuggingFace embedding model."""
         models = {
             "sentence-transformers/all-MiniLM-L6-v2": {
@@ -1235,7 +1231,7 @@ class MockProvider(UnifiedAIProvider):
         """Mock provider is always available."""
         return True
 
-    def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
+    def chat(self, messages: list[dict[str, str]], **kwargs) -> dict[str, Any]:
         """Generate mock LLM response."""
         last_user_message = ""
         for msg in reversed(messages):
@@ -1272,7 +1268,7 @@ class MockProvider(UnifiedAIProvider):
             "metadata": {},
         }
 
-    def embed(self, texts: List[str], **kwargs) -> List[List[float]]:
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
         """Generate mock embeddings."""
        model = kwargs.get("model", "mock-embedding")
        dimensions = kwargs.get("dimensions", 1536)
@@ -1300,7 +1296,7 @@ class MockProvider(UnifiedAIProvider):
 
         return embeddings
 
-    def get_model_info(self, model: str) -> Dict[str, Any]:
+    def get_model_info(self, model: str) -> dict[str, Any]:
         """Get information about a mock embedding model."""
         models = {
             "mock-embedding-small": {"dimensions": 384, "max_tokens": 512},
@@ -1335,8 +1331,8 @@ PROVIDERS = {
 
 
 def get_provider(
-    provider_name: str, provider_type: Optional[str] = None
-) -> Union[BaseAIProvider, LLMProvider, EmbeddingProvider]:
+    provider_name: str, provider_type: str | None = None
+) -> BaseAIProvider | LLMProvider | EmbeddingProvider:
     """
     Get an AI provider instance by name.
 
@@ -1408,8 +1404,8 @@ def get_provider(
 
 
 def get_available_providers(
-    provider_type: Optional[str] = None,
-) -> Dict[str, Dict[str, Any]]:
+    provider_type: str | None = None,
+) -> dict[str, dict[str, Any]]:
     """
     Get information about all available providers.
 
@@ -1440,9 +1436,12 @@ def get_available_providers(
         capabilities = provider.get_capabilities()
 
         # Apply filter if specified
-        if provider_type == "chat" and not capabilities.get("chat"):
-            continue
-        elif provider_type == "embeddings" and not capabilities.get("embeddings"):
+        if (
+            provider_type == "chat"
+            and not capabilities.get("chat")
+            or provider_type == "embeddings"
+            and not capabilities.get("embeddings")
+        ):
             continue
 
         results[name] = {
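The final hunk above changes control flow rather than annotations: the if/elif pair in get_available_providers is folded into one condition. Because `and` binds tighter than `or` in Python, the combined test is logically identical to the two separate skips. A standalone sketch of the predicate, with the equivalence checked by assertions; the helper names should_skip and should_skip_old are hypothetical:

def should_skip(provider_type, capabilities):
    # Mirrors the merged 0.3.1 condition: (A and not a) or (B and not b),
    # relying on "and" binding tighter than "or".
    return (
        provider_type == "chat"
        and not capabilities.get("chat")
        or provider_type == "embeddings"
        and not capabilities.get("embeddings")
    )

def should_skip_old(provider_type, capabilities):
    # The 0.3.0 two-branch form.
    if provider_type == "chat" and not capabilities.get("chat"):
        return True
    elif provider_type == "embeddings" and not capabilities.get("embeddings"):
        return True
    return False

# Exhaustive check over the filter values and capability combinations.
for pt in (None, "chat", "embeddings"):
    for caps in ({}, {"chat": True}, {"embeddings": True}, {"chat": True, "embeddings": True}):
        assert bool(should_skip(pt, caps)) == should_skip_old(pt, caps)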
kailash/nodes/ai/embedding_generator.py

@@ -1,7 +1,7 @@
 """Embedding Generator node for vector embeddings with support for multiple providers."""
 
 import time
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from kailash.nodes.base import Node, NodeParameter, register_node
 
@@ -107,7 +107,7 @@ class EmbeddingGeneratorNode(Node):
     )
     """
 
-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "operation": NodeParameter(
                 name="operation",
@@ -222,7 +222,7 @@ class EmbeddingGeneratorNode(Node):
             ),
         }
 
-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         operation = kwargs["operation"]
         provider = kwargs.get("provider", "mock")
         model = kwargs.get("model", "default")
@@ -343,16 +343,16 @@ class EmbeddingGeneratorNode(Node):
 
     def _embed_single_text(
         self,
-        text: Optional[str],
+        text: str | None,
         provider: str,
         model: str,
         cache_enabled: bool,
         cache_ttl: int,
-        dimensions: Optional[int],
+        dimensions: int | None,
         normalize: bool,
         timeout: int,
         max_retries: int,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Generate embedding for a single text."""
         if not text:
             return {
@@ -418,18 +418,18 @@ class EmbeddingGeneratorNode(Node):
 
     def _embed_batch_texts(
         self,
-        texts: List[str],
+        texts: list[str],
         provider: str,
         model: str,
         batch_size: int,
         chunk_size: int,
         cache_enabled: bool,
         cache_ttl: int,
-        dimensions: Optional[int],
+        dimensions: int | None,
         normalize: bool,
         timeout: int,
         max_retries: int,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Generate embeddings for a batch of texts."""
         if not texts:
             return {
@@ -538,10 +538,10 @@ class EmbeddingGeneratorNode(Node):
 
     def _calculate_similarity(
         self,
-        embedding_1: Optional[List[float]],
-        embedding_2: Optional[List[float]],
+        embedding_1: list[float] | None,
+        embedding_2: list[float] | None,
         metric: str,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Calculate similarity between two embedding vectors."""
         if not embedding_1 or not embedding_2:
             return {
@@ -587,17 +587,17 @@ class EmbeddingGeneratorNode(Node):
 
     def _embed_mcp_resource(
         self,
-        resource_uri: Optional[str],
+        resource_uri: str | None,
         provider: str,
         model: str,
         chunk_size: int,
         cache_enabled: bool,
         cache_ttl: int,
-        dimensions: Optional[int],
+        dimensions: int | None,
         normalize: bool,
         timeout: int,
         max_retries: int,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Embed content from an MCP resource."""
         if not resource_uri:
             return {
@@ -632,7 +632,7 @@ class EmbeddingGeneratorNode(Node):
 
         return result
 
-    def _generate_mock_embedding(self, text: str, dimensions: int) -> List[float]:
+    def _generate_mock_embedding(self, text: str, dimensions: int) -> list[float]:
         """Generate a mock embedding vector based on text content."""
         import hashlib
         import random
@@ -653,10 +653,10 @@ class EmbeddingGeneratorNode(Node):
         text: str,
         provider: str,
         model: str,
-        dimensions: Optional[int],
+        dimensions: int | None,
         timeout: int,
         max_retries: int,
-    ) -> List[float]:
+    ) -> list[float]:
         """Generate embedding using external provider."""
         try:
             from .ai_providers import get_provider
@@ -702,10 +702,10 @@ class EmbeddingGeneratorNode(Node):
         text: str,
         provider: str,
         model: str,
-        dimensions: Optional[int],
+        dimensions: int | None,
         timeout: int,
         max_retries: int,
-    ) -> List[float]:
+    ) -> list[float]:
         """Fallback implementation for backward compatibility."""
         # Handle Ollama provider
         if provider == "ollama":
@@ -739,7 +739,7 @@ class EmbeddingGeneratorNode(Node):
             f"{provider}:{model}:{text}", actual_dimensions
         )
 
-    def _chunk_text(self, text: str, chunk_size: int) -> List[str]:
+    def _chunk_text(self, text: str, chunk_size: int) -> list[str]:
         """Split text into chunks based on token limit."""
         # Simple word-based chunking (real implementation would use proper tokenization)
         words = text.split()
@@ -751,7 +751,7 @@ class EmbeddingGeneratorNode(Node):
 
         return chunks or [""]
 
-    def _average_embeddings(self, embeddings: List[List[float]]) -> List[float]:
+    def _average_embeddings(self, embeddings: list[list[float]]) -> list[float]:
         """Average multiple embedding vectors."""
         if not embeddings:
             return []
@@ -765,16 +765,16 @@ class EmbeddingGeneratorNode(Node):
 
         return averaged
 
-    def _normalize_vector(self, vector: List[float]) -> List[float]:
+    def _normalize_vector(self, vector: list[float]) -> list[float]:
         """Normalize vector to unit length."""
         magnitude = sum(x * x for x in vector) ** 0.5
         if magnitude == 0:
             return vector
         return [x / magnitude for x in vector]
 
-    def _cosine_similarity(self, vec1: List[float], vec2: List[float]) -> float:
+    def _cosine_similarity(self, vec1: list[float], vec2: list[float]) -> float:
         """Calculate cosine similarity between two vectors."""
-        dot_product = sum(a * b for a, b in zip(vec1, vec2))
+        dot_product = sum(a * b for a, b in zip(vec1, vec2, strict=False))
         magnitude1 = sum(a * a for a in vec1) ** 0.5
         magnitude2 = sum(b * b for b in vec2) ** 0.5
 
@@ -783,13 +783,13 @@ class EmbeddingGeneratorNode(Node):
 
         return dot_product / (magnitude1 * magnitude2)
 
-    def _euclidean_distance(self, vec1: List[float], vec2: List[float]) -> float:
+    def _euclidean_distance(self, vec1: list[float], vec2: list[float]) -> float:
         """Calculate Euclidean distance between two vectors."""
-        return sum((a - b) ** 2 for a, b in zip(vec1, vec2)) ** 0.5
+        return sum((a - b) ** 2 for a, b in zip(vec1, vec2, strict=False)) ** 0.5
 
-    def _dot_product(self, vec1: List[float], vec2: List[float]) -> float:
+    def _dot_product(self, vec1: list[float], vec2: list[float]) -> float:
         """Calculate dot product of two vectors."""
-        return sum(a * b for a, b in zip(vec1, vec2))
+        return sum(a * b for a, b in zip(vec1, vec2, strict=False))
 
     def _interpret_similarity(self, score: float, metric: str) -> str:
         """Provide human-readable interpretation of similarity score."""
@@ -823,15 +823,14 @@ class EmbeddingGeneratorNode(Node):
         content = f"{provider}:{model}:{text}"
         return f"emb_{hashlib.md5(content.encode()).hexdigest()[:16]}"
 
-    def _get_cached_embedding(self, cache_key: str) -> Optional[Dict[str, Any]]:
+    def _get_cached_embedding(self, cache_key: str) -> dict[str, Any] | None:
         """Retrieve embedding from cache (mock implementation)."""
         # Mock cache lookup - in real implementation, use Redis or similar
         return None
 
-    def _cache_embedding(self, cache_key: str, vector: List[float], ttl: int) -> None:
+    def _cache_embedding(self, cache_key: str, vector: list[float], ttl: int) -> None:
         """Store embedding in cache (mock implementation)."""
         # Mock cache storage - in real implementation, use Redis or similar
-        pass
 
     def _estimate_embedding_cost(self, tokens: int, provider: str, model: str) -> float:
         """Estimate embedding cost based on tokens and provider pricing."""