alita-sdk 0.3.532__py3-none-any.whl → 0.3.602__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic; see the registry's advisory for this release for more details.

Files changed (137)
  1. alita_sdk/cli/agent_executor.py +2 -1
  2. alita_sdk/cli/agent_loader.py +34 -4
  3. alita_sdk/cli/agents.py +433 -203
  4. alita_sdk/community/__init__.py +8 -4
  5. alita_sdk/configurations/__init__.py +1 -0
  6. alita_sdk/configurations/openapi.py +323 -0
  7. alita_sdk/runtime/clients/client.py +165 -7
  8. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  9. alita_sdk/runtime/langchain/assistant.py +61 -11
  10. alita_sdk/runtime/langchain/constants.py +419 -171
  11. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -2
  12. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  13. alita_sdk/runtime/langchain/langraph_agent.py +108 -23
  14. alita_sdk/runtime/langchain/utils.py +76 -14
  15. alita_sdk/runtime/skills/__init__.py +91 -0
  16. alita_sdk/runtime/skills/callbacks.py +498 -0
  17. alita_sdk/runtime/skills/discovery.py +540 -0
  18. alita_sdk/runtime/skills/executor.py +610 -0
  19. alita_sdk/runtime/skills/input_builder.py +371 -0
  20. alita_sdk/runtime/skills/models.py +330 -0
  21. alita_sdk/runtime/skills/registry.py +355 -0
  22. alita_sdk/runtime/skills/skill_runner.py +330 -0
  23. alita_sdk/runtime/toolkits/__init__.py +5 -0
  24. alita_sdk/runtime/toolkits/artifact.py +2 -1
  25. alita_sdk/runtime/toolkits/mcp.py +6 -3
  26. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  27. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  28. alita_sdk/runtime/toolkits/tools.py +139 -10
  29. alita_sdk/runtime/toolkits/vectorstore.py +1 -1
  30. alita_sdk/runtime/tools/__init__.py +3 -1
  31. alita_sdk/runtime/tools/artifact.py +15 -0
  32. alita_sdk/runtime/tools/data_analysis.py +183 -0
  33. alita_sdk/runtime/tools/llm.py +260 -73
  34. alita_sdk/runtime/tools/loop.py +3 -1
  35. alita_sdk/runtime/tools/loop_output.py +3 -1
  36. alita_sdk/runtime/tools/mcp_server_tool.py +6 -3
  37. alita_sdk/runtime/tools/router.py +2 -4
  38. alita_sdk/runtime/tools/sandbox.py +9 -6
  39. alita_sdk/runtime/tools/skill_router.py +776 -0
  40. alita_sdk/runtime/tools/tool.py +3 -1
  41. alita_sdk/runtime/tools/vectorstore.py +7 -2
  42. alita_sdk/runtime/tools/vectorstore_base.py +7 -2
  43. alita_sdk/runtime/utils/constants.py +5 -1
  44. alita_sdk/runtime/utils/mcp_client.py +1 -1
  45. alita_sdk/runtime/utils/mcp_sse_client.py +1 -1
  46. alita_sdk/runtime/utils/toolkit_utils.py +2 -0
  47. alita_sdk/tools/__init__.py +44 -2
  48. alita_sdk/tools/ado/repos/__init__.py +26 -8
  49. alita_sdk/tools/ado/repos/repos_wrapper.py +78 -52
  50. alita_sdk/tools/ado/test_plan/__init__.py +3 -2
  51. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  52. alita_sdk/tools/ado/utils.py +1 -18
  53. alita_sdk/tools/ado/wiki/__init__.py +2 -1
  54. alita_sdk/tools/ado/wiki/ado_wrapper.py +23 -1
  55. alita_sdk/tools/ado/work_item/__init__.py +3 -2
  56. alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
  57. alita_sdk/tools/advanced_jira_mining/__init__.py +2 -1
  58. alita_sdk/tools/aws/delta_lake/__init__.py +2 -1
  59. alita_sdk/tools/azure_ai/search/__init__.py +2 -1
  60. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  61. alita_sdk/tools/base_indexer_toolkit.py +51 -30
  62. alita_sdk/tools/bitbucket/__init__.py +2 -1
  63. alita_sdk/tools/bitbucket/api_wrapper.py +1 -1
  64. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +3 -3
  65. alita_sdk/tools/browser/__init__.py +1 -1
  66. alita_sdk/tools/carrier/__init__.py +1 -1
  67. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  68. alita_sdk/tools/cloud/aws/__init__.py +2 -1
  69. alita_sdk/tools/cloud/azure/__init__.py +2 -1
  70. alita_sdk/tools/cloud/gcp/__init__.py +2 -1
  71. alita_sdk/tools/cloud/k8s/__init__.py +2 -1
  72. alita_sdk/tools/code/linter/__init__.py +2 -1
  73. alita_sdk/tools/code/sonar/__init__.py +2 -1
  74. alita_sdk/tools/code_indexer_toolkit.py +19 -2
  75. alita_sdk/tools/confluence/__init__.py +7 -6
  76. alita_sdk/tools/confluence/api_wrapper.py +7 -8
  77. alita_sdk/tools/confluence/loader.py +4 -2
  78. alita_sdk/tools/custom_open_api/__init__.py +2 -1
  79. alita_sdk/tools/elastic/__init__.py +2 -1
  80. alita_sdk/tools/elitea_base.py +28 -9
  81. alita_sdk/tools/figma/__init__.py +52 -6
  82. alita_sdk/tools/figma/api_wrapper.py +1158 -123
  83. alita_sdk/tools/figma/figma_client.py +73 -0
  84. alita_sdk/tools/figma/toon_tools.py +2748 -0
  85. alita_sdk/tools/github/__init__.py +2 -1
  86. alita_sdk/tools/github/github_client.py +56 -92
  87. alita_sdk/tools/github/schemas.py +4 -4
  88. alita_sdk/tools/gitlab/__init__.py +2 -1
  89. alita_sdk/tools/gitlab/api_wrapper.py +118 -38
  90. alita_sdk/tools/gitlab_org/__init__.py +2 -1
  91. alita_sdk/tools/gitlab_org/api_wrapper.py +60 -62
  92. alita_sdk/tools/google/bigquery/__init__.py +2 -1
  93. alita_sdk/tools/google_places/__init__.py +2 -1
  94. alita_sdk/tools/jira/__init__.py +2 -1
  95. alita_sdk/tools/keycloak/__init__.py +2 -1
  96. alita_sdk/tools/localgit/__init__.py +2 -1
  97. alita_sdk/tools/memory/__init__.py +1 -1
  98. alita_sdk/tools/ocr/__init__.py +2 -1
  99. alita_sdk/tools/openapi/__init__.py +490 -118
  100. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  101. alita_sdk/tools/openapi/tool.py +20 -0
  102. alita_sdk/tools/pandas/__init__.py +11 -5
  103. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  104. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  105. alita_sdk/tools/postman/__init__.py +2 -1
  106. alita_sdk/tools/pptx/__init__.py +2 -1
  107. alita_sdk/tools/qtest/__init__.py +21 -2
  108. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  109. alita_sdk/tools/rally/__init__.py +2 -1
  110. alita_sdk/tools/rally/api_wrapper.py +1 -1
  111. alita_sdk/tools/report_portal/__init__.py +2 -1
  112. alita_sdk/tools/salesforce/__init__.py +2 -1
  113. alita_sdk/tools/servicenow/__init__.py +11 -10
  114. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  115. alita_sdk/tools/sharepoint/__init__.py +2 -1
  116. alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
  117. alita_sdk/tools/slack/__init__.py +3 -2
  118. alita_sdk/tools/slack/api_wrapper.py +2 -2
  119. alita_sdk/tools/sql/__init__.py +3 -2
  120. alita_sdk/tools/testio/__init__.py +2 -1
  121. alita_sdk/tools/testrail/__init__.py +2 -1
  122. alita_sdk/tools/utils/content_parser.py +77 -3
  123. alita_sdk/tools/utils/text_operations.py +163 -71
  124. alita_sdk/tools/xray/__init__.py +3 -2
  125. alita_sdk/tools/yagmail/__init__.py +2 -1
  126. alita_sdk/tools/zephyr/__init__.py +2 -1
  127. alita_sdk/tools/zephyr_enterprise/__init__.py +2 -1
  128. alita_sdk/tools/zephyr_essential/__init__.py +2 -1
  129. alita_sdk/tools/zephyr_scale/__init__.py +3 -2
  130. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  131. alita_sdk/tools/zephyr_squad/__init__.py +2 -1
  132. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/METADATA +7 -6
  133. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/RECORD +137 -119
  134. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/WHEEL +0 -0
  135. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/entry_points.txt +0 -0
  136. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/licenses/LICENSE +0 -0
  137. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/top_level.txt +0 -0
@@ -23,6 +23,7 @@ from langgraph.store.base import BaseStore
23
23
  from .constants import PRINTER_NODE_RS, PRINTER, PRINTER_COMPLETED_STATE
24
24
  from .mixedAgentRenderes import convert_message_to_json
25
25
  from .utils import create_state, propagate_the_input_mapping, safe_format
26
+ from ..utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META
26
27
  from ..tools.function import FunctionTool
27
28
  from ..tools.indexer_tool import IndexerNode
28
29
  from ..tools.llm import LLMNode
@@ -188,7 +189,7 @@ Answer only with step name, no need to add descrip in case none of the steps are
188
189
  decision_input = state.get('messages', [])[:]
189
190
  else:
190
191
  if len(additional_info) == 0:
191
- additional_info = """### Additoinal info: """
192
+ additional_info = """### Additional info: """
192
193
  additional_info += "{field}: {value}\n".format(field=field, value=state.get(field, ""))
193
194
  decision_input.append(HumanMessage(
194
195
  self.prompt.format(steps=self.steps, description=safe_format(self.description, state), additional_info=additional_info)))
@@ -447,6 +448,50 @@ def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_befo
447
448
  return compiled
448
449
 
449
450
 
451
+ def find_tool_by_name_or_metadata(tools: list, tool_name: str, toolkit_name: Optional[str] = None) -> Optional[BaseTool]:
452
+ """
453
+ Find a tool by name or by matching metadata (toolkit_name + tool_name).
454
+
455
+ For toolkit nodes with toolkit_name specified, this function checks:
456
+ 1. Metadata match first (toolkit_name + tool_name) - PRIORITY when toolkit_name is provided
457
+ 2. Direct tool name match (backward compatibility fallback)
458
+
459
+ For toolkit nodes without toolkit_name, or other node types:
460
+ 1. Direct tool name match
461
+
462
+ Args:
463
+ tools: List of available tools
464
+ tool_name: The tool name to search for
465
+ toolkit_name: Optional toolkit name for metadata matching
466
+
467
+ Returns:
468
+ The matching tool or None if not found
469
+ """
470
+ # When toolkit_name is specified, prioritize metadata matching
471
+ if toolkit_name:
472
+ for tool in tools:
473
+ # Check metadata match first
474
+ if hasattr(tool, 'metadata') and tool.metadata:
475
+ metadata_toolkit_name = tool.metadata.get(TOOLKIT_NAME_META)
476
+ metadata_tool_name = tool.metadata.get(TOOL_NAME_META)
477
+
478
+ # Match if both toolkit_name and tool_name in metadata match
479
+ if metadata_toolkit_name == toolkit_name and metadata_tool_name == tool_name:
480
+ return tool
481
+
482
+ # Fallback to direct name match for backward compatibility
483
+ for tool in tools:
484
+ if tool.name == tool_name:
485
+ return tool
486
+ else:
487
+ # No toolkit_name specified, use direct name match only
488
+ for tool in tools:
489
+ if tool.name == tool_name:
490
+ return tool
491
+
492
+ return None
493
+
494
+
450
495
  def create_graph(
451
496
  client: Any,
452
497
  yaml_schema: str,
@@ -482,19 +527,37 @@ def create_graph(
482
527
  node_type = node.get('type', 'function')
483
528
  node_id = clean_string(node['id'])
484
529
  toolkit_name = node.get('toolkit_name')
485
- tool_name = clean_string(node.get('tool', node_id))
530
+ tool_name = clean_string(node.get('tool', ''))
486
531
  # Tool names are now clean (no prefix needed)
487
532
  logger.info(f"Node: {node_id} : {node_type} - {tool_name}")
488
533
  if node_type in ['function', 'toolkit', 'mcp', 'tool', 'loop', 'loop_from_tool', 'indexer', 'subgraph', 'pipeline', 'agent']:
489
- if node_type == 'mcp' and tool_name not in [tool.name for tool in tools]:
490
- # MCP is not connected and node cannot be added
491
- raise ToolException(f"MCP tool '{tool_name}' not found in the provided tools. "
492
- f"Make sure it is connected properly. Available tools: {[tool.name for tool in tools]}")
493
- for tool in tools:
494
- if tool.name == tool_name:
534
+ if node_type in ['mcp', 'toolkit', 'agent'] and not tool_name:
535
+ # tool is not specified
536
+ raise ToolException(f"Tool name is required for {node_type} node with id '{node_id}'")
537
+
538
+ # Unified validation and tool finding for toolkit, mcp, and agent node types
539
+ matching_tool = None
540
+ if node_type in ['toolkit', 'mcp', 'agent']:
541
+ # Use enhanced validation that checks both direct name and metadata
542
+ matching_tool = find_tool_by_name_or_metadata(tools, tool_name, toolkit_name)
543
+ if not matching_tool:
544
+ # tool is not found in the provided tools
545
+ error_msg = f"Node `{node_id}` with type `{node_type}` has tool '{tool_name}'"
546
+ if toolkit_name:
547
+ error_msg += f" (toolkit: '{toolkit_name}')"
548
+ error_msg += f" which is not found in the provided tools. Make sure it is connected properly. Available tools: {format_tools(tools)}"
549
+ raise ToolException(error_msg)
550
+ else:
551
+ # For other node types, find tool by direct name match
552
+ for tool in tools:
553
+ if tool.name == tool_name:
554
+ matching_tool = tool
555
+ break
556
+
557
+ if matching_tool:
495
558
  if node_type in ['function', 'toolkit', 'mcp']:
496
559
  lg_builder.add_node(node_id, FunctionTool(
497
- tool=tool, name=node_id, return_type='dict',
560
+ tool=matching_tool, name=node_id, return_type='dict',
498
561
  output_variables=node.get('output', []),
499
562
  input_mapping=node.get('input_mapping',
500
563
  {'messages': {'type': 'variable', 'value': 'messages'}}),
@@ -505,7 +568,7 @@ def create_graph(
505
568
  {'messages': {'type': 'variable', 'value': 'messages'}})
506
569
  output_vars = node.get('output', [])
507
570
  lg_builder.add_node(node_id, FunctionTool(
508
- client=client, tool=tool,
571
+ client=client, tool=matching_tool,
509
572
  name=node_id, return_type='str',
510
573
  output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
511
574
  input_variables=input_params,
@@ -513,15 +576,15 @@ def create_graph(
513
576
  ))
514
577
  elif node_type == 'subgraph' or node_type == 'pipeline':
515
578
  # assign parent memory/store
516
- # tool.checkpointer = memory
517
- # tool.store = store
579
+ # matching_tool.checkpointer = memory
580
+ # matching_tool.store = store
518
581
  # wrap with mappings
519
582
  pipeline_name = node.get('tool', None)
520
583
  if not pipeline_name:
521
584
  raise ValueError(
522
585
  "Subgraph must have a 'tool' node: add required tool to the subgraph node")
523
586
  node_fn = SubgraphRunnable(
524
- inner=tool.graph,
587
+ inner=matching_tool.graph,
525
588
  name=pipeline_name,
526
589
  input_mapping=node.get('input_mapping', {}),
527
590
  output_mapping=node.get('output_mapping', {}),
@@ -530,7 +593,7 @@ def create_graph(
530
593
  break # skip legacy handling
531
594
  elif node_type == 'tool':
532
595
  lg_builder.add_node(node_id, ToolNode(
533
- client=client, tool=tool,
596
+ client=client, tool=matching_tool,
534
597
  name=node_id, return_type='dict',
535
598
  output_variables=node.get('output', []),
536
599
  input_variables=node.get('input', ['messages']),
@@ -539,7 +602,7 @@ def create_graph(
539
602
  ))
540
603
  elif node_type == 'loop':
541
604
  lg_builder.add_node(node_id, LoopNode(
542
- client=client, tool=tool,
605
+ client=client, tool=matching_tool,
543
606
  name=node_id, return_type='dict',
544
607
  output_variables=node.get('output', []),
545
608
  input_variables=node.get('input', ['messages']),
@@ -557,7 +620,7 @@ def create_graph(
557
620
  lg_builder.add_node(node_id, LoopToolNode(
558
621
  client=client,
559
622
  name=node_id, return_type='dict',
560
- tool=tool, loop_tool=t,
623
+ tool=matching_tool, loop_tool=t,
561
624
  variables_mapping=node.get('variables_mapping', {}),
562
625
  output_variables=node.get('output', []),
563
626
  input_variables=node.get('input', ['messages']),
@@ -573,7 +636,7 @@ def create_graph(
573
636
  indexer_tool = t
574
637
  logger.info(f"Indexer tool: {indexer_tool}")
575
638
  lg_builder.add_node(node_id, IndexerNode(
576
- client=client, tool=tool,
639
+ client=client, tool=matching_tool,
577
640
  index_tool=indexer_tool,
578
641
  input_mapping=node.get('input_mapping', {}),
579
642
  name=node_id, return_type='dict',
@@ -582,7 +645,6 @@ def create_graph(
582
645
  output_variables=node.get('output', []),
583
646
  input_variables=node.get('input', ['messages']),
584
647
  structured_output=node.get('structured_output', False)))
585
- break
586
648
  elif node_type == 'code':
587
649
  from ..tools.sandbox import create_sandbox_tool
588
650
  sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
@@ -651,10 +713,13 @@ def create_graph(
651
713
  ))
652
714
  elif node_type == 'decision':
653
715
  logger.info(f'Adding decision: {node["nodes"]}')
716
+ # fallback to old-style decision node
717
+ decisional_inputs = node.get('decisional_inputs')
718
+ decisional_inputs = node.get('input', ['messages']) if not decisional_inputs else decisional_inputs
654
719
  lg_builder.add_node(node_id, DecisionEdge(
655
720
  client, node['nodes'],
656
721
  node.get('description', ""),
657
- decisional_inputs=node.get('decisional_inputs', ['messages']),
722
+ decisional_inputs=decisional_inputs,
658
723
  default_output=node.get('default_output', 'END'),
659
724
  is_node=True
660
725
  ))
@@ -749,8 +814,20 @@ def create_graph(
749
814
  debug=debug,
750
815
  )
751
816
  except ValueError as e:
752
- raise ValueError(
753
- f"Validation of the schema failed. {e}\n\nDEBUG INFO:**Schema Nodes:**\n\n{lg_builder.nodes}\n\n**Schema Enges:**\n\n{lg_builder.edges}\n\n**Tools Available:**\n\n{tools}")
817
+ # Build a clearer debug message without complex f-string expressions
818
+ debug_nodes = "\n*".join(lg_builder.nodes.keys()) if lg_builder and lg_builder.nodes else ""
819
+ debug_message = (
820
+ "Validation of the schema failed. {err}\n\n"
821
+ "DEBUG INFO:**Schema Nodes:**\n\n*{nodes}\n\n"
822
+ "**Schema Edges:**\n\n{edges}\n\n"
823
+ "**Tools Available:**\n\n{tools}"
824
+ ).format(
825
+ err=e,
826
+ nodes=debug_nodes,
827
+ edges=lg_builder.edges if lg_builder else {},
828
+ tools=format_tools(tools),
829
+ )
830
+ raise ValueError(debug_message)
754
831
  # If building a nested subgraph, return the raw CompiledStateGraph
755
832
  if for_subgraph:
756
833
  return graph
@@ -764,6 +841,14 @@ def create_graph(
764
841
  )
765
842
  return compiled.validate()
766
843
 
844
+ def format_tools(tools_list: list) -> str:
845
+ """Format a list of tool names into a comma-separated string."""
846
+ try:
847
+ return ', '.join([tool.name for tool in tools_list])
848
+ except Exception as e:
849
+ logger.warning(f"Failed to format tools list: {e}")
850
+ return str(tools_list)
851
+
767
852
  def set_defaults(d):
768
853
  """Set default values for dictionary entries based on their type."""
769
854
  type_defaults = {
@@ -967,7 +1052,7 @@ class LangGraphAgentRunnable(CompiledStateGraph):
967
1052
  (msg.content for msg in reversed(messages)
968
1053
  if not isinstance(msg, HumanMessage)),
969
1054
  messages[-1].content
970
- )
1055
+ ) if messages else result.get('output')
971
1056
  elif printer_output is not None:
972
1057
  # Printer node has output (interrupted state)
973
1058
  output = printer_output
@@ -981,7 +1066,7 @@ class LangGraphAgentRunnable(CompiledStateGraph):
981
1066
  )
982
1067
  except Exception:
983
1068
  # Fallback: try to get last value or last message
984
- output = list(result.values())[-1] if result else None
1069
+ output = str(list(result.values())[-1]) if result else 'Output is undefined'
985
1070
  config_state = self.get_state(config)
986
1071
  is_execution_finished = not config_state.next
987
1072
  if is_execution_finished:
@@ -2,7 +2,7 @@ import builtins
2
2
  import json
3
3
  import logging
4
4
  import re
5
- from pydantic import create_model, Field, Json
5
+ from pydantic import create_model, Field, JsonValue
6
6
  from typing import Tuple, TypedDict, Any, Optional, Annotated
7
7
  from langchain_core.messages import AnyMessage
8
8
  from langgraph.graph import add_messages
@@ -12,6 +12,52 @@ from ...runtime.langchain.constants import ELITEA_RS, PRINTER_NODE_RS
12
12
  logger = logging.getLogger(__name__)
13
13
 
14
14
 
15
+ def extract_text_from_completion(completion) -> str:
16
+ """Extract text content from LLM completion, handling both string and list formats.
17
+
18
+ For thinking-enabled models (like Claude with extended thinking), completion.content
19
+ can be a list of content blocks. This function extracts only the text blocks and
20
+ concatenates them into a single string.
21
+
22
+ Args:
23
+ completion: LLM completion object with content attribute
24
+
25
+ Returns:
26
+ str: Extracted text content (never a list)
27
+ """
28
+ if not hasattr(completion, 'content'):
29
+ return ""
30
+
31
+ content = completion.content
32
+
33
+ # Handle list of content blocks (Anthropic extended thinking format)
34
+ if isinstance(content, list):
35
+ text_blocks = []
36
+
37
+ for block in content:
38
+ if isinstance(block, dict):
39
+ block_type = block.get('type', '')
40
+ if block_type == 'text':
41
+ text_blocks.append(block.get('text', ''))
42
+ elif block_type == 'thinking':
43
+ # Skip thinking blocks - we only want the actual text response
44
+ continue
45
+ elif hasattr(block, 'type'):
46
+ # Handle object format
47
+ if block.type == 'text':
48
+ text_blocks.append(getattr(block, 'text', ''))
49
+ # Skip thinking blocks
50
+
51
+ return '\n\n'.join(text_blocks) if text_blocks else ""
52
+
53
+ # Handle simple string content
54
+ elif isinstance(content, str):
55
+ return content
56
+
57
+ # Fallback
58
+ return str(content) if content else ""
59
+
60
+
15
61
  def _find_json_bounds(json_string: str) -> Tuple[int, int] | Tuple[None, None]:
16
62
  stack = []
17
63
  json_start = None
@@ -217,17 +263,33 @@ def create_pydantic_model(model_name: str, variables: dict[str, dict]):
217
263
  return create_model(model_name, **fields)
218
264
 
219
265
  def parse_pydantic_type(type_name: str):
220
- """
221
- Helper function to parse type names into Python types.
222
- Extend this function to handle custom types like 'dict' -> Json[Any].
223
- """
224
- type_mapping = {
225
- 'str': str,
226
- 'int': int,
227
- 'float': float,
228
- 'bool': bool,
229
- 'dict': Json[Any], # Map 'dict' to Pydantic's Json type
230
- 'list': list,
231
- 'any': Any
266
+ t = (type_name or "any").strip().lower()
267
+
268
+ base = {
269
+ "str": str,
270
+ "int": int,
271
+ "float": float,
272
+ "bool": bool,
273
+ # "dict" means JSON object
274
+ "dict": dict[str, JsonValue],
275
+ # "list" means array of JSON values (or pick str if you want)
276
+ "list": list[JsonValue],
277
+ # IMPORTANT: don't return bare Any -> it produces {} schema
278
+ "any": JsonValue,
232
279
  }
233
- return type_mapping.get(type_name, Any)
280
+ if t in base:
281
+ return base[t]
282
+
283
+ m = re.fullmatch(r"list\[(.+)\]", t)
284
+ if m:
285
+ return list[parse_pydantic_type(m.group(1))]
286
+
287
+ m = re.fullmatch(r"dict\[(.+?),(.+)\]", t)
288
+ if m:
289
+ k = parse_pydantic_type(m.group(1))
290
+ v = parse_pydantic_type(m.group(2))
291
+ # restrict keys to str for JSON objects
292
+ return dict[str, v] if k is not str else dict[str, v]
293
+
294
+ # fallback: avoid Any
295
+ return JsonValue
@@ -0,0 +1,91 @@
1
+ """
2
+ Skills Registry system for alita_sdk.
3
+
4
+ This package provides a comprehensive skills registry system that supports
5
+ both graph-based and agent-based skills with isolated execution and
6
+ callback support.
7
+
8
+ Key Components:
9
+ - models: Core data models and types
10
+ - discovery: Skill discovery from filesystem
11
+ - registry: Thread-safe registry service
12
+ - executor: Skill execution with isolation
13
+ - callbacks: Event system for execution transparency
14
+
15
+ Usage:
16
+ from alita_sdk.runtime.skills import get_default_registry
17
+
18
+ registry = get_default_registry()
19
+ skills = registry.list()
20
+ skill = registry.get("my_skill")
21
+
22
+ # Execute skill through SkillRouterTool or direct execution
23
+ """
24
+
25
+ from .models import (
26
+ SkillMetadata,
27
+ SkillType,
28
+ SkillSource,
29
+ ExecutionMode,
30
+ SkillStatus,
31
+ SkillEventType,
32
+ ExecutionConfig,
33
+ ResultsConfig,
34
+ SkillInputSchema,
35
+ SkillOutputSchema,
36
+ SkillExecutionResult,
37
+ SkillOutputFile,
38
+ SkillEvent,
39
+ SkillValidationError,
40
+ SkillExecutionError
41
+ )
42
+
43
+ from .discovery import SkillDiscovery
44
+ from .registry import SkillsRegistry, get_default_registry, reset_default_registry
45
+ from .executor import SkillExecutor
46
+ from .input_builder import SkillInputBuilder
47
+ from .callbacks import (
48
+ SkillCallback, CallbackManager, LoggingCallback, ProgressCallback,
49
+ FileCallback, SkillLangChainCallback, CallbackEmitter,
50
+ create_default_callbacks, create_debug_callbacks
51
+ )
52
+
53
+ __all__ = [
54
+ # Core models
55
+ "SkillMetadata",
56
+ "SkillType",
57
+ "SkillSource",
58
+ "ExecutionMode",
59
+ "SkillStatus",
60
+ "SkillEventType",
61
+ "ExecutionConfig",
62
+ "ResultsConfig",
63
+ "SkillInputSchema",
64
+ "SkillOutputSchema",
65
+ "SkillExecutionResult",
66
+ "SkillOutputFile",
67
+ "SkillEvent",
68
+
69
+ # Exceptions
70
+ "SkillValidationError",
71
+ "SkillExecutionError",
72
+
73
+ # Services
74
+ "SkillDiscovery",
75
+ "SkillsRegistry",
76
+ "get_default_registry",
77
+ "reset_default_registry",
78
+ "SkillExecutor",
79
+ "SkillInputBuilder",
80
+
81
+ # Callbacks
82
+ "SkillCallback",
83
+ "CallbackManager",
84
+ "LoggingCallback",
85
+ "ProgressCallback",
86
+ "FileCallback",
87
+ "SkillLangChainCallback",
88
+ "CallbackEmitter",
89
+ "create_default_callbacks",
90
+ "create_debug_callbacks"
91
+ ]