alita-sdk 0.3.528__py3-none-any.whl → 0.3.554__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic.

Files changed (46)
  1. alita_sdk/community/__init__.py +8 -4
  2. alita_sdk/configurations/__init__.py +1 -0
  3. alita_sdk/configurations/openapi.py +111 -0
  4. alita_sdk/runtime/clients/client.py +185 -10
  5. alita_sdk/runtime/langchain/langraph_agent.py +2 -2
  6. alita_sdk/runtime/langchain/utils.py +46 -0
  7. alita_sdk/runtime/skills/__init__.py +91 -0
  8. alita_sdk/runtime/skills/callbacks.py +498 -0
  9. alita_sdk/runtime/skills/discovery.py +540 -0
  10. alita_sdk/runtime/skills/executor.py +610 -0
  11. alita_sdk/runtime/skills/input_builder.py +371 -0
  12. alita_sdk/runtime/skills/models.py +330 -0
  13. alita_sdk/runtime/skills/registry.py +355 -0
  14. alita_sdk/runtime/skills/skill_runner.py +330 -0
  15. alita_sdk/runtime/toolkits/__init__.py +2 -0
  16. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  17. alita_sdk/runtime/toolkits/tools.py +76 -9
  18. alita_sdk/runtime/tools/__init__.py +3 -1
  19. alita_sdk/runtime/tools/artifact.py +70 -21
  20. alita_sdk/runtime/tools/image_generation.py +50 -44
  21. alita_sdk/runtime/tools/llm.py +363 -44
  22. alita_sdk/runtime/tools/loop.py +3 -1
  23. alita_sdk/runtime/tools/loop_output.py +3 -1
  24. alita_sdk/runtime/tools/skill_router.py +776 -0
  25. alita_sdk/runtime/tools/tool.py +3 -1
  26. alita_sdk/runtime/tools/vectorstore.py +7 -2
  27. alita_sdk/runtime/tools/vectorstore_base.py +7 -2
  28. alita_sdk/runtime/utils/AlitaCallback.py +2 -1
  29. alita_sdk/runtime/utils/utils.py +34 -0
  30. alita_sdk/tools/__init__.py +41 -1
  31. alita_sdk/tools/ado/work_item/ado_wrapper.py +33 -2
  32. alita_sdk/tools/base_indexer_toolkit.py +36 -24
  33. alita_sdk/tools/confluence/api_wrapper.py +5 -6
  34. alita_sdk/tools/confluence/loader.py +4 -2
  35. alita_sdk/tools/openapi/__init__.py +280 -120
  36. alita_sdk/tools/openapi/api_wrapper.py +883 -0
  37. alita_sdk/tools/openapi/tool.py +20 -0
  38. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  39. alita_sdk/tools/servicenow/__init__.py +9 -9
  40. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  41. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/METADATA +2 -2
  42. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/RECORD +46 -33
  43. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/WHEEL +0 -0
  44. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/entry_points.txt +0 -0
  45. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/licenses/LICENSE +0 -0
  46. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/top_level.txt +0 -0
alita_sdk/community/__init__.py
@@ -96,18 +96,22 @@ def get_toolkits():
 def get_tools(tools_list: list, alita_client, llm) -> list:
     """Get community tools based on the tools list configuration."""
     tools = []
-
+
     # Tool type to class mapping
     _tool_mapping = {
         'analyse_jira': 'AnalyseJira',
         'analyse_ado': 'AnalyseAdo',
-        'analyse_gitlab': 'AnalyseGitLab',
+        'analyse_gitlab': 'AnalyseGitLab',
         'analyse_github': 'AnalyseGithub',
         'inventory': 'InventoryToolkit'
     }
-
+
     for tool in tools_list:
-        tool_type = tool.get('type')
+        if isinstance(tool, dict):
+            tool_type = tool.get('type')
+        else:
+            logger.error(f"Community tools received non-dict tool: {tool} (type: {type(tool)})")
+            continue
         class_name = _tool_mapping.get(tool_type)

         if class_name and class_name in globals():
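The change above makes get_tools tolerant of malformed configuration: non-dict entries are now logged and skipped instead of failing on .get(). A small standalone sketch of that guard; the tools_list contents here are made up:

import logging

logger = logging.getLogger("community.tools")
tools_list = [{'type': 'analyse_jira'}, 'analyse_github']  # second entry is malformed (a bare string)

for tool in tools_list:
    if isinstance(tool, dict):
        tool_type = tool.get('type')
        print(f"would resolve toolkit class for type: {tool_type}")
    else:
        # Mirrors the new error path in community/__init__.py: log and move on
        logger.error(f"Community tools received non-dict tool: {tool} (type: {type(tool)})")
        continue
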
alita_sdk/configurations/__init__.py
@@ -53,6 +53,7 @@ _safe_import_configuration('sharepoint', 'sharepoint', 'SharepointConfiguration'
 _safe_import_configuration('carrier', 'carrier', 'CarrierConfiguration')
 _safe_import_configuration('report_portal', 'report_portal', 'ReportPortalConfiguration')
 _safe_import_configuration('testio', 'testio', 'TestIOConfiguration')
+_safe_import_configuration('openapi', 'openapi', 'OpenApiConfiguration')

 # Log import summary
 available_count = len(AVAILABLE_CONFIGURATIONS)
alita_sdk/configurations/openapi.py (new file)
@@ -0,0 +1,111 @@
+from typing import Any, Literal, Optional
+from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
+
+
+class OpenApiConfiguration(BaseModel):
+    model_config = ConfigDict(
+        extra='allow',
+        json_schema_extra={
+            "metadata": {
+                "label": "OpenAPI",
+                "icon_url": "openapi.svg",
+                "categories": ["integrations"],
+                "type": "openapi",
+                "extra_categories": ["api", "openapi", "swagger"],
+                "sections": {
+                    "auth": {
+                        "required": False,
+                        "subsections": [
+                            {
+                                "name": "API Key",
+                                "fields": ["api_key", "auth_type", "custom_header_name"],
+                            },
+                            {
+                                "name": "OAuth",
+                                "fields": [
+                                    "client_id",
+                                    "client_secret",
+                                    "auth_url",
+                                    "token_url",
+                                    "scope",
+                                    "method",
+                                ],
+                            },
+                        ],
+                    },
+                },
+                "section": "credentials",
+            }
+        }
+    )
+
+    api_key: Optional[SecretStr] = Field(
+        default=None,
+        description=(
+            "API key value (stored as a secret). Used when selecting 'API Key' authentication subsection."
+        ),
+    )
+    auth_type: Optional[Literal['Basic', 'Bearer', 'Custom']] = Field(
+        default='Bearer',
+        description=(
+            "How to apply the API key. "
+            "- 'Bearer': sets 'Authorization: Bearer <api_key>' "
+            "- 'Basic': sets 'Authorization: Basic <api_key>' "
+            "- 'custom': sets '<custom_header_name>: <api_key>'"
+        ),
+    )
+    custom_header_name: Optional[str] = Field(
+        default=None,
+        description="Custom header name to use when auth_type='custom' (e.g. 'X-Api-Key').",
+        json_schema_extra={'visible_when': {'field': 'auth_type', 'value': 'custom'}},
+    )
+
+    client_id: Optional[str] = Field(default=None, description='OAuth client ID')
+    client_secret: Optional[SecretStr] = Field(default=None, description='OAuth client secret (stored as a secret)')
+    auth_url: Optional[str] = Field(default=None, description='OAuth authorization URL')
+    token_url: Optional[str] = Field(default=None, description='OAuth token URL')
+    scope: Optional[str] = Field(default=None, description='OAuth scope(s)')
+    method: Optional[Literal['default', 'Basic']] = Field(
+        default='default',
+        description=(
+            "Token exchange method. 'default' uses standard POST body; 'Basic' uses a Basic authorization header."
+        ),
+    )
+
+    @model_validator(mode='before')
+    @classmethod
+    def _validate_auth_consistency(cls, values):
+        if not isinstance(values, dict):
+            return values
+
+        # OAuth: if any OAuth field is provided, require the key ones.
+        has_any_oauth = any(
+            (values.get('client_id'), values.get('client_secret'), values.get('auth_url'), values.get('token_url'))
+        )
+        if has_any_oauth:
+            missing = []
+            if not values.get('client_id'):
+                missing.append('client_id')
+            if not values.get('client_secret'):
+                missing.append('client_secret')
+            if not values.get('token_url'):
+                missing.append('token_url')
+            if missing:
+                raise ValueError(f"OAuth is misconfigured; missing: {', '.join(missing)}")
+
+        # API key: if auth_type is custom, custom_header_name must be present.
+        auth_type = values.get('auth_type')
+        if isinstance(auth_type, str) and auth_type.strip().lower() == 'custom' and values.get('api_key'):
+            if not values.get('custom_header_name'):
+                raise ValueError("custom_header_name is required when auth_type='custom'")
+
+        return values
+
+    @staticmethod
+    def check_connection(settings: dict) -> str | None:
+        """Best-effort validation for OpenAPI credentials.
+
+        This model is intended to store reusable credentials only.
+        Spec/base_url validation happens at toolkit configuration level.
+        """
+        return None
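A minimal sketch of how the new configuration model behaves, assuming it is importable from alita_sdk.configurations.openapi as added above; the concrete values are placeholders:

from pydantic import ValidationError
from alita_sdk.configurations.openapi import OpenApiConfiguration

# API-key style: auth_type='Custom' plus an api_key requires custom_header_name.
cfg = OpenApiConfiguration(api_key='s3cr3t', auth_type='Custom', custom_header_name='X-Api-Key')
print(cfg.api_key)  # '**********' — the secret is masked when printed

# OAuth style: any OAuth field without client_secret/token_url fails the model validator.
try:
    OpenApiConfiguration(client_id='my-client')
except ValidationError as err:
    print(err)  # includes "OAuth is misconfigured; missing: client_secret, token_url"
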
alita_sdk/runtime/clients/client.py
@@ -44,6 +44,7 @@ class AlitaClient:
         self.base_url = base_url.rstrip('/')
         self.api_path = '/api/v1'
         self.llm_path = '/llm/v1'
+        self.allm_path = '/llm'
         self.project_id = project_id
         self.auth_token = auth_token
         self.headers = {
@@ -75,6 +76,10 @@ class AlitaClient:
         self.configurations: list = configurations or []
         self.model_timeout = kwargs.get('model_timeout', 120)
         self.model_image_generation = kwargs.get('model_image_generation')
+
+        # Cache for generated images to avoid token consumption
+        # This is used by image_generation and artifact toolkits to pass data via reference
+        self._generated_images_cache: Dict[str, Dict[str, Any]] = {}

     def get_mcp_toolkits(self):
         if user_id := self._get_real_user_id():
@@ -254,27 +259,43 @@ class AlitaClient:
                 use_responses_api = True
                 break

-        # handle case when max_tokens are auto-configurable == -1
+        # handle case when max_tokens are auto-configurable == -1 or None
         llm_max_tokens = model_config.get("max_tokens", None)
-        if llm_max_tokens and llm_max_tokens == -1:
-            logger.warning(f'User selected `MAX COMPLETION TOKENS` as `auto`')
-            # default nuber for a case when auto is selected for an agent
+        if llm_max_tokens is None or llm_max_tokens == -1:
+            logger.warning(f'User selected `MAX COMPLETION TOKENS` as `auto` or value is None/missing')
+            # default number for a case when auto is selected for an agent
             llm_max_tokens = 4000

         if is_anthropic:
             # ChatAnthropic configuration
+            # Anthropic requires max_tokens to be an integer, never None
             target_kwargs = {
-                "base_url": f"{self.base_url}{self.llm_path}",
+                "base_url": f"{self.base_url}{self.allm_path}",
                 "model": model_name,
                 "api_key": self.auth_token,
                 "streaming": model_config.get("streaming", True),
-                "max_tokens": llm_max_tokens,
-                "effort": model_config.get("reasoning_effort"),
+                "max_tokens": llm_max_tokens,  # Always an integer now
                 "temperature": model_config.get("temperature"),
                 "max_retries": model_config.get("max_retries", 3),
-                "default_headers": {"openai-organization": str(self.project_id)},
+                "default_headers": {"openai-organization": str(self.project_id),
+                                    "Authorization": f"Bearer {self.auth_token}"},
             }
-
+
+            # TODO": Check on ChatAnthropic client when they get "effort" support back
+            if model_config.get("reasoning_effort"):
+                if model_config["reasoning_effort"].lower() == "low":
+                    target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 2048}
+                    target_kwargs['temperature'] = 1
+                    target_kwargs["max_tokens"] = 2048 + target_kwargs["max_tokens"]
+                elif model_config["reasoning_effort"].lower() == "medium":
+                    target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 4096}
+                    target_kwargs['temperature'] = 1
+                    target_kwargs["max_tokens"] = 4096 + target_kwargs["max_tokens"]
+                elif model_config["reasoning_effort"].lower() == "high":
+                    target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 9092}
+                    target_kwargs['temperature'] = 1
+                    target_kwargs["max_tokens"] = 9092 + target_kwargs["max_tokens"]
+
             # Add http_client if provided
             if "http_client" in model_config:
                 target_kwargs["http_client"] = model_config["http_client"]
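Since the Anthropic path no longer passes an 'effort' kwarg, reasoning_effort is translated into Anthropic's extended-thinking parameters: a thinking budget per level, temperature pinned to 1 (required when thinking is enabled), and max_tokens grown by the budget so the visible completion is not squeezed. A rough sketch of that arithmetic with a hypothetical model_config:

# Hypothetical config; the low/medium/high → 2048/4096/9092 budgets mirror the diff above.
model_config = {"reasoning_effort": "medium", "max_tokens": 4000}
budgets = {"low": 2048, "medium": 4096, "high": 9092}

target_kwargs = {"max_tokens": model_config["max_tokens"]}
budget = budgets[model_config["reasoning_effort"].lower()]
target_kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget}
target_kwargs["temperature"] = 1  # extended thinking requires temperature=1
target_kwargs["max_tokens"] = budget + target_kwargs["max_tokens"]

print(target_kwargs["max_tokens"])  # 8096 = 4096 thinking budget + 4000 completion tokens
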
@@ -300,7 +321,6 @@ class AlitaClient:
             target_kwargs["use_responses_api"] = True

         llm = ChatOpenAI(**target_kwargs)
-
         return llm

     def generate_image(self,
@@ -1134,3 +1154,158 @@ class AlitaClient:
             "events_dispatched": [],
             "execution_time_seconds": 0.0
         }
+
+    def test_mcp_connection(self, toolkit_config: dict, mcp_tokens: dict = None) -> dict:
+        """
+        Test MCP server connection using protocol-level list_tools.
+
+        This method verifies MCP server connectivity and authentication by calling
+        the protocol-level tools/list JSON-RPC method (NOT executing a tool).
+        This is ideal for auth checks as it validates the connection without
+        requiring any tool execution.
+
+        Args:
+            toolkit_config: Configuration dictionary for the MCP toolkit containing:
+                - toolkit_name: Name of the toolkit
+                - settings: Dictionary with 'url', optional 'headers', 'session_id'
+            mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
+                Format: {canonical_url: {access_token: str, session_id: str}}
+
+        Returns:
+            Dictionary containing:
+            - success: Boolean indicating if the connection was successful
+            - tools: List of tool names available on the MCP server (if successful)
+            - tools_count: Number of tools discovered
+            - server_session_id: Session ID provided by the server (if any)
+            - error: Error message (if unsuccessful)
+            - toolkit_config: Original toolkit configuration
+
+        Raises:
+            McpAuthorizationRequired: If MCP server requires OAuth authorization
+
+        Example:
+            >>> config = {
+            ...     'toolkit_name': 'my-mcp-server',
+            ...     'type': 'mcp',
+            ...     'settings': {
+            ...         'url': 'https://mcp-server.example.com/mcp',
+            ...         'headers': {'X-Custom': 'value'}
+            ...     }
+            ... }
+            >>> result = client.test_mcp_connection(config)
+            >>> if result['success']:
+            ...     print(f"Connected! Found {result['tools_count']} tools")
+        """
+        import asyncio
+        import time
+        from ..utils.mcp_client import McpClient
+        from ..utils.mcp_oauth import canonical_resource
+
+        toolkit_name = toolkit_config.get('toolkit_name', 'unknown')
+        settings = toolkit_config.get('settings', {})
+
+        # Extract connection parameters
+        url = settings.get('url')
+        if not url:
+            return {
+                "success": False,
+                "error": "MCP toolkit configuration missing 'url' in settings",
+                "toolkit_config": toolkit_config,
+                "tools": [],
+                "tools_count": 0
+            }
+
+        headers = settings.get('headers') or {}
+        session_id = settings.get('session_id')
+
+        # Apply OAuth token if available
+        if mcp_tokens and url:
+            canonical_url = canonical_resource(url)
+            token_data = mcp_tokens.get(canonical_url)
+            if token_data:
+                if isinstance(token_data, dict):
+                    access_token = token_data.get('access_token')
+                    if not session_id:
+                        session_id = token_data.get('session_id')
+                else:
+                    # Backward compatibility: plain token string
+                    access_token = token_data
+
+                if access_token:
+                    headers = dict(headers)  # Copy to avoid mutating original
+                    headers.setdefault('Authorization', f'Bearer {access_token}')
+                    logger.info(f"[MCP Auth Check] Applied OAuth token for {canonical_url}")
+
+        logger.info(f"Testing MCP connection to '{toolkit_name}' at {url}")
+
+        start_time = time.time()
+
+        async def _test_connection():
+            client = McpClient(
+                url=url,
+                session_id=session_id,
+                headers=headers,
+                timeout=60  # Reasonable timeout for connection test
+            )
+
+            async with client:
+                # Initialize MCP protocol session
+                await client.initialize()
+                logger.info(f"[MCP Auth Check] Session initialized (transport={client.detected_transport})")
+
+                # Call protocol-level list_tools (tools/list JSON-RPC method)
+                tools = await client.list_tools()
+
+                return {
+                    "tools": tools,
+                    "server_session_id": client.server_session_id,
+                    "transport": client.detected_transport
+                }
+
+        try:
+            # Run async operation
+            try:
+                loop = asyncio.get_event_loop()
+                if loop.is_running():
+                    # If we're already in an async context, create a new task
+                    import concurrent.futures
+                    with concurrent.futures.ThreadPoolExecutor() as executor:
+                        future = executor.submit(asyncio.run, _test_connection())
+                        result = future.result(timeout=120)
+                else:
+                    result = loop.run_until_complete(_test_connection())
+            except RuntimeError:
+                # No event loop, create one
+                result = asyncio.run(_test_connection())

+            execution_time = time.time() - start_time
+
+            # Extract tool names for the response
+            tool_names = [tool.get('name', 'unknown') for tool in result.get('tools', [])]
+
+            logger.info(f"[MCP Auth Check] Connection successful to '{toolkit_name}': {len(tool_names)} tools in {execution_time:.3f}s")
+
+            return {
+                "success": True,
+                "tools": tool_names,
+                "tools_count": len(tool_names),
+                "server_session_id": result.get('server_session_id'),
+                "transport": result.get('transport'),
+                "toolkit_config": toolkit_config,
+                "execution_time_seconds": execution_time
+            }
+
+        except McpAuthorizationRequired:
+            # Re-raise to allow proper handling upstream
+            raise
+        except Exception as e:
+            execution_time = time.time() - start_time
+            logger.error(f"[MCP Auth Check] Connection failed to '{toolkit_name}': {str(e)}")
+            return {
+                "success": False,
+                "error": f"MCP connection failed: {str(e)}",
+                "toolkit_config": toolkit_config,
+                "tools": [],
+                "tools_count": 0,
+                "execution_time_seconds": execution_time
+            }
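A short hedged sketch of calling the new connection check. Per the docstring above, authorization problems surface as an McpAuthorizationRequired exception while other failures come back as a structured result; the AlitaClient constructor arguments and the exception's import path are assumptions here:

from alita_sdk.runtime.clients.client import AlitaClient

client = AlitaClient(base_url='https://elitea.example.com', project_id=1,
                     auth_token='token')  # constructor values are placeholders
config = {
    'toolkit_name': 'my-mcp-server',
    'type': 'mcp',
    'settings': {'url': 'https://mcp-server.example.com/mcp'},
}

try:
    result = client.test_mcp_connection(config)
except Exception as exc:  # e.g. McpAuthorizationRequired propagates; trigger the OAuth flow here
    print(f"authorization required: {exc}")
else:
    if result['success']:
        print(f"{result['tools_count']} tools via {result.get('transport')}")
    else:
        print(result['error'])  # network/timeout errors are returned, not raised
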
alita_sdk/runtime/langchain/langraph_agent.py
@@ -967,7 +967,7 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                     (msg.content for msg in reversed(messages)
                      if not isinstance(msg, HumanMessage)),
                     messages[-1].content
-                )
+                ) if messages else result.get('output')
             elif printer_output is not None:
                 # Printer node has output (interrupted state)
                 output = printer_output
@@ -981,7 +981,7 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                 )
             except Exception:
                 # Fallback: try to get last value or last message
-                output = list(result.values())[-1] if result else None
+                output = str(list(result.values())[-1]) if result else 'Output is undefined'
         config_state = self.get_state(config)
         is_execution_finished = not config_state.next
         if is_execution_finished:
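Both fixes above harden output extraction on the fallback paths: the first avoids calling next() over an empty messages list by returning result.get('output') instead, and the second guarantees the generic fallback always yields a string. A tiny illustration of the second change with a made-up result payload:

result = {'router': {'decision': 'end'}, 'messages': []}

# New behavior: always a string, never a raw object or None
output = str(list(result.values())[-1]) if result else 'Output is undefined'
print(output)  # "[]"
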
alita_sdk/runtime/langchain/utils.py
@@ -12,6 +12,52 @@ from ...runtime.langchain.constants import ELITEA_RS, PRINTER_NODE_RS
 logger = logging.getLogger(__name__)


+def extract_text_from_completion(completion) -> str:
+    """Extract text content from LLM completion, handling both string and list formats.
+
+    For thinking-enabled models (like Claude with extended thinking), completion.content
+    can be a list of content blocks. This function extracts only the text blocks and
+    concatenates them into a single string.
+
+    Args:
+        completion: LLM completion object with content attribute
+
+    Returns:
+        str: Extracted text content (never a list)
+    """
+    if not hasattr(completion, 'content'):
+        return ""
+
+    content = completion.content
+
+    # Handle list of content blocks (Anthropic extended thinking format)
+    if isinstance(content, list):
+        text_blocks = []
+
+        for block in content:
+            if isinstance(block, dict):
+                block_type = block.get('type', '')
+                if block_type == 'text':
+                    text_blocks.append(block.get('text', ''))
+                elif block_type == 'thinking':
+                    # Skip thinking blocks - we only want the actual text response
+                    continue
+            elif hasattr(block, 'type'):
+                # Handle object format
+                if block.type == 'text':
+                    text_blocks.append(getattr(block, 'text', ''))
+                # Skip thinking blocks
+
+        return '\n\n'.join(text_blocks) if text_blocks else ""
+
+    # Handle simple string content
+    elif isinstance(content, str):
+        return content
+
+    # Fallback
+    return str(content) if content else ""
+
+
 def _find_json_bounds(json_string: str) -> Tuple[int, int] | Tuple[None, None]:
     stack = []
     json_start = None
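A quick usage sketch of the new helper; the content-block shapes follow the Anthropic extended-thinking format described in its docstring, and the completion object here is just a stand-in with a .content attribute:

from types import SimpleNamespace
from alita_sdk.runtime.langchain.utils import extract_text_from_completion

completion = SimpleNamespace(content=[
    {'type': 'thinking', 'thinking': 'weigh the options...'},  # skipped
    {'type': 'text', 'text': 'Final answer.'},                 # kept
])
print(extract_text_from_completion(completion))  # "Final answer."

plain = SimpleNamespace(content="already a string")
print(extract_text_from_completion(plain))       # passed through unchanged
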
alita_sdk/runtime/skills/__init__.py (new file)
@@ -0,0 +1,91 @@
+"""
+Skills Registry system for alita_sdk.
+
+This package provides a comprehensive skills registry system that supports
+both graph-based and agent-based skills with isolated execution and
+callback support.
+
+Key Components:
+- models: Core data models and types
+- discovery: Skill discovery from filesystem
+- registry: Thread-safe registry service
+- executor: Skill execution with isolation
+- callbacks: Event system for execution transparency
+
+Usage:
+    from alita_sdk.runtime.skills import get_default_registry
+
+    registry = get_default_registry()
+    skills = registry.list()
+    skill = registry.get("my_skill")
+
+    # Execute skill through SkillRouterTool or direct execution
+"""
+
+from .models import (
+    SkillMetadata,
+    SkillType,
+    SkillSource,
+    ExecutionMode,
+    SkillStatus,
+    SkillEventType,
+    ExecutionConfig,
+    ResultsConfig,
+    SkillInputSchema,
+    SkillOutputSchema,
+    SkillExecutionResult,
+    SkillOutputFile,
+    SkillEvent,
+    SkillValidationError,
+    SkillExecutionError
+)
+
+from .discovery import SkillDiscovery
+from .registry import SkillsRegistry, get_default_registry, reset_default_registry
+from .executor import SkillExecutor
+from .input_builder import SkillInputBuilder
+from .callbacks import (
+    SkillCallback, CallbackManager, LoggingCallback, ProgressCallback,
+    FileCallback, SkillLangChainCallback, CallbackEmitter,
+    create_default_callbacks, create_debug_callbacks
+)
+
+__all__ = [
+    # Core models
+    "SkillMetadata",
+    "SkillType",
+    "SkillSource",
+    "ExecutionMode",
+    "SkillStatus",
+    "SkillEventType",
+    "ExecutionConfig",
+    "ResultsConfig",
+    "SkillInputSchema",
+    "SkillOutputSchema",
+    "SkillExecutionResult",
+    "SkillOutputFile",
+    "SkillEvent",
+
+    # Exceptions
+    "SkillValidationError",
+    "SkillExecutionError",
+
+    # Services
+    "SkillDiscovery",
+    "SkillsRegistry",
+    "get_default_registry",
+    "reset_default_registry",
+    "SkillExecutor",
+    "SkillInputBuilder",
+
+    # Callbacks
+    "SkillCallback",
+    "CallbackManager",
+    "LoggingCallback",
+    "ProgressCallback",
+    "FileCallback",
+    "SkillLangChainCallback",
+    "CallbackEmitter",
+    "create_default_callbacks",
+    "create_debug_callbacks"
+]
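The module docstring above is the authoritative usage example; the sketch below just restates it against the exported names, hedged because anything beyond list()/get() and the callback factories is not visible in this diff:

from alita_sdk.runtime.skills import get_default_registry, create_default_callbacks

registry = get_default_registry()        # thread-safe registry per the package docstring
for skill_meta in registry.list():       # discovered SkillMetadata entries
    print(skill_meta)

skill = registry.get("my_skill")         # "my_skill" is a hypothetical skill name
callbacks = create_default_callbacks()   # default logging/progress callbacks for execution transparency
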