kubiya-control-plane-api 0.1.0__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kubiya-control-plane-api might be problematic; see the registry's advisory for more details.

Files changed (185)
  1. control_plane_api/README.md +266 -0
  2. control_plane_api/__init__.py +0 -0
  3. control_plane_api/__version__.py +1 -0
  4. control_plane_api/alembic/README +1 -0
  5. control_plane_api/alembic/env.py +98 -0
  6. control_plane_api/alembic/script.py.mako +28 -0
  7. control_plane_api/alembic/versions/1382bec74309_initial_migration_with_all_models.py +251 -0
  8. control_plane_api/alembic/versions/1f54bc2a37e3_add_analytics_tables.py +162 -0
  9. control_plane_api/alembic/versions/2e4cb136dc10_rename_toolset_ids_to_skill_ids_in_teams.py +30 -0
  10. control_plane_api/alembic/versions/31cd69a644ce_add_skill_templates_table.py +28 -0
  11. control_plane_api/alembic/versions/89e127caa47d_add_jobs_and_job_executions_tables.py +161 -0
  12. control_plane_api/alembic/versions/add_llm_models_table.py +51 -0
  13. control_plane_api/alembic/versions/b0e10697f212_add_runtime_column_to_teams_simple.py +42 -0
  14. control_plane_api/alembic/versions/ce43b24b63bf_add_execution_trigger_source_and_fix_.py +155 -0
  15. control_plane_api/alembic/versions/d4eaf16e3f8d_rename_toolsets_to_skills.py +84 -0
  16. control_plane_api/alembic/versions/efa2dc427da1_rename_metadata_to_custom_metadata.py +32 -0
  17. control_plane_api/alembic/versions/f973b431d1ce_add_workflow_executor_to_skill_types.py +44 -0
  18. control_plane_api/alembic.ini +148 -0
  19. control_plane_api/api/index.py +12 -0
  20. control_plane_api/app/__init__.py +11 -0
  21. control_plane_api/app/activities/__init__.py +20 -0
  22. control_plane_api/app/activities/agent_activities.py +379 -0
  23. control_plane_api/app/activities/team_activities.py +410 -0
  24. control_plane_api/app/activities/temporal_cloud_activities.py +577 -0
  25. control_plane_api/app/config/__init__.py +35 -0
  26. control_plane_api/app/config/api_config.py +354 -0
  27. control_plane_api/app/config/model_pricing.py +318 -0
  28. control_plane_api/app/config.py +95 -0
  29. control_plane_api/app/database.py +135 -0
  30. control_plane_api/app/exceptions.py +408 -0
  31. control_plane_api/app/lib/__init__.py +11 -0
  32. control_plane_api/app/lib/job_executor.py +312 -0
  33. control_plane_api/app/lib/kubiya_client.py +235 -0
  34. control_plane_api/app/lib/litellm_pricing.py +166 -0
  35. control_plane_api/app/lib/planning_tools/__init__.py +22 -0
  36. control_plane_api/app/lib/planning_tools/agents.py +155 -0
  37. control_plane_api/app/lib/planning_tools/base.py +189 -0
  38. control_plane_api/app/lib/planning_tools/environments.py +214 -0
  39. control_plane_api/app/lib/planning_tools/resources.py +240 -0
  40. control_plane_api/app/lib/planning_tools/teams.py +198 -0
  41. control_plane_api/app/lib/policy_enforcer_client.py +939 -0
  42. control_plane_api/app/lib/redis_client.py +436 -0
  43. control_plane_api/app/lib/supabase.py +71 -0
  44. control_plane_api/app/lib/temporal_client.py +138 -0
  45. control_plane_api/app/lib/validation/__init__.py +20 -0
  46. control_plane_api/app/lib/validation/runtime_validation.py +287 -0
  47. control_plane_api/app/main.py +128 -0
  48. control_plane_api/app/middleware/__init__.py +8 -0
  49. control_plane_api/app/middleware/auth.py +513 -0
  50. control_plane_api/app/middleware/exception_handler.py +267 -0
  51. control_plane_api/app/middleware/rate_limiting.py +384 -0
  52. control_plane_api/app/middleware/request_id.py +202 -0
  53. control_plane_api/app/models/__init__.py +27 -0
  54. control_plane_api/app/models/agent.py +79 -0
  55. control_plane_api/app/models/analytics.py +206 -0
  56. control_plane_api/app/models/associations.py +81 -0
  57. control_plane_api/app/models/environment.py +63 -0
  58. control_plane_api/app/models/execution.py +93 -0
  59. control_plane_api/app/models/job.py +179 -0
  60. control_plane_api/app/models/llm_model.py +75 -0
  61. control_plane_api/app/models/presence.py +49 -0
  62. control_plane_api/app/models/project.py +47 -0
  63. control_plane_api/app/models/session.py +38 -0
  64. control_plane_api/app/models/team.py +66 -0
  65. control_plane_api/app/models/workflow.py +55 -0
  66. control_plane_api/app/policies/README.md +121 -0
  67. control_plane_api/app/policies/approved_users.rego +62 -0
  68. control_plane_api/app/policies/business_hours.rego +51 -0
  69. control_plane_api/app/policies/rate_limiting.rego +100 -0
  70. control_plane_api/app/policies/tool_restrictions.rego +86 -0
  71. control_plane_api/app/routers/__init__.py +4 -0
  72. control_plane_api/app/routers/agents.py +364 -0
  73. control_plane_api/app/routers/agents_v2.py +1260 -0
  74. control_plane_api/app/routers/analytics.py +1014 -0
  75. control_plane_api/app/routers/context_manager.py +562 -0
  76. control_plane_api/app/routers/environment_context.py +270 -0
  77. control_plane_api/app/routers/environments.py +715 -0
  78. control_plane_api/app/routers/execution_environment.py +517 -0
  79. control_plane_api/app/routers/executions.py +1911 -0
  80. control_plane_api/app/routers/health.py +92 -0
  81. control_plane_api/app/routers/health_v2.py +326 -0
  82. control_plane_api/app/routers/integrations.py +274 -0
  83. control_plane_api/app/routers/jobs.py +1344 -0
  84. control_plane_api/app/routers/models.py +82 -0
  85. control_plane_api/app/routers/models_v2.py +361 -0
  86. control_plane_api/app/routers/policies.py +639 -0
  87. control_plane_api/app/routers/presence.py +234 -0
  88. control_plane_api/app/routers/projects.py +902 -0
  89. control_plane_api/app/routers/runners.py +379 -0
  90. control_plane_api/app/routers/runtimes.py +172 -0
  91. control_plane_api/app/routers/secrets.py +155 -0
  92. control_plane_api/app/routers/skills.py +1001 -0
  93. control_plane_api/app/routers/skills_definitions.py +140 -0
  94. control_plane_api/app/routers/task_planning.py +1256 -0
  95. control_plane_api/app/routers/task_queues.py +654 -0
  96. control_plane_api/app/routers/team_context.py +270 -0
  97. control_plane_api/app/routers/teams.py +1400 -0
  98. control_plane_api/app/routers/worker_queues.py +1545 -0
  99. control_plane_api/app/routers/workers.py +935 -0
  100. control_plane_api/app/routers/workflows.py +204 -0
  101. control_plane_api/app/runtimes/__init__.py +6 -0
  102. control_plane_api/app/runtimes/validation.py +344 -0
  103. control_plane_api/app/schemas/job_schemas.py +295 -0
  104. control_plane_api/app/services/__init__.py +1 -0
  105. control_plane_api/app/services/agno_service.py +619 -0
  106. control_plane_api/app/services/litellm_service.py +190 -0
  107. control_plane_api/app/services/policy_service.py +525 -0
  108. control_plane_api/app/services/temporal_cloud_provisioning.py +150 -0
  109. control_plane_api/app/skills/__init__.py +44 -0
  110. control_plane_api/app/skills/base.py +229 -0
  111. control_plane_api/app/skills/business_intelligence.py +189 -0
  112. control_plane_api/app/skills/data_visualization.py +154 -0
  113. control_plane_api/app/skills/docker.py +104 -0
  114. control_plane_api/app/skills/file_generation.py +94 -0
  115. control_plane_api/app/skills/file_system.py +110 -0
  116. control_plane_api/app/skills/python.py +92 -0
  117. control_plane_api/app/skills/registry.py +65 -0
  118. control_plane_api/app/skills/shell.py +102 -0
  119. control_plane_api/app/skills/workflow_executor.py +469 -0
  120. control_plane_api/app/utils/workflow_executor.py +354 -0
  121. control_plane_api/app/workflows/__init__.py +11 -0
  122. control_plane_api/app/workflows/agent_execution.py +507 -0
  123. control_plane_api/app/workflows/agent_execution_with_skills.py +222 -0
  124. control_plane_api/app/workflows/namespace_provisioning.py +326 -0
  125. control_plane_api/app/workflows/team_execution.py +399 -0
  126. control_plane_api/scripts/seed_models.py +239 -0
  127. control_plane_api/worker/__init__.py +0 -0
  128. control_plane_api/worker/activities/__init__.py +0 -0
  129. control_plane_api/worker/activities/agent_activities.py +1241 -0
  130. control_plane_api/worker/activities/approval_activities.py +234 -0
  131. control_plane_api/worker/activities/runtime_activities.py +388 -0
  132. control_plane_api/worker/activities/skill_activities.py +267 -0
  133. control_plane_api/worker/activities/team_activities.py +1217 -0
  134. control_plane_api/worker/config/__init__.py +31 -0
  135. control_plane_api/worker/config/worker_config.py +275 -0
  136. control_plane_api/worker/control_plane_client.py +529 -0
  137. control_plane_api/worker/examples/analytics_integration_example.py +362 -0
  138. control_plane_api/worker/models/__init__.py +1 -0
  139. control_plane_api/worker/models/inputs.py +89 -0
  140. control_plane_api/worker/runtimes/__init__.py +31 -0
  141. control_plane_api/worker/runtimes/base.py +789 -0
  142. control_plane_api/worker/runtimes/claude_code_runtime.py +1443 -0
  143. control_plane_api/worker/runtimes/default_runtime.py +617 -0
  144. control_plane_api/worker/runtimes/factory.py +173 -0
  145. control_plane_api/worker/runtimes/validation.py +93 -0
  146. control_plane_api/worker/services/__init__.py +1 -0
  147. control_plane_api/worker/services/agent_executor.py +422 -0
  148. control_plane_api/worker/services/agent_executor_v2.py +383 -0
  149. control_plane_api/worker/services/analytics_collector.py +457 -0
  150. control_plane_api/worker/services/analytics_service.py +464 -0
  151. control_plane_api/worker/services/approval_tools.py +310 -0
  152. control_plane_api/worker/services/approval_tools_agno.py +207 -0
  153. control_plane_api/worker/services/cancellation_manager.py +177 -0
  154. control_plane_api/worker/services/data_visualization.py +827 -0
  155. control_plane_api/worker/services/jira_tools.py +257 -0
  156. control_plane_api/worker/services/runtime_analytics.py +328 -0
  157. control_plane_api/worker/services/session_service.py +194 -0
  158. control_plane_api/worker/services/skill_factory.py +175 -0
  159. control_plane_api/worker/services/team_executor.py +574 -0
  160. control_plane_api/worker/services/team_executor_v2.py +465 -0
  161. control_plane_api/worker/services/workflow_executor_tools.py +1418 -0
  162. control_plane_api/worker/tests/__init__.py +1 -0
  163. control_plane_api/worker/tests/e2e/__init__.py +0 -0
  164. control_plane_api/worker/tests/e2e/test_execution_flow.py +571 -0
  165. control_plane_api/worker/tests/integration/__init__.py +0 -0
  166. control_plane_api/worker/tests/integration/test_control_plane_integration.py +308 -0
  167. control_plane_api/worker/tests/unit/__init__.py +0 -0
  168. control_plane_api/worker/tests/unit/test_control_plane_client.py +401 -0
  169. control_plane_api/worker/utils/__init__.py +1 -0
  170. control_plane_api/worker/utils/chunk_batcher.py +305 -0
  171. control_plane_api/worker/utils/retry_utils.py +60 -0
  172. control_plane_api/worker/utils/streaming_utils.py +373 -0
  173. control_plane_api/worker/worker.py +753 -0
  174. control_plane_api/worker/workflows/__init__.py +0 -0
  175. control_plane_api/worker/workflows/agent_execution.py +589 -0
  176. control_plane_api/worker/workflows/team_execution.py +429 -0
  177. kubiya_control_plane_api-0.3.4.dist-info/METADATA +229 -0
  178. kubiya_control_plane_api-0.3.4.dist-info/RECORD +182 -0
  179. kubiya_control_plane_api-0.3.4.dist-info/entry_points.txt +2 -0
  180. kubiya_control_plane_api-0.3.4.dist-info/top_level.txt +1 -0
  181. kubiya_control_plane_api-0.1.0.dist-info/METADATA +0 -66
  182. kubiya_control_plane_api-0.1.0.dist-info/RECORD +0 -5
  183. kubiya_control_plane_api-0.1.0.dist-info/top_level.txt +0 -1
  184. {kubiya_control_plane_api-0.1.0.dist-info/licenses → control_plane_api}/LICENSE +0 -0
  185. {kubiya_control_plane_api-0.1.0.dist-info → kubiya_control_plane_api-0.3.4.dist-info}/WHEEL +0 -0
@@ -0,0 +1,1418 @@
1
+ """
2
+ Workflow Executor Tools for Agent Control Plane Worker
3
+
4
+ This module provides tools for agents to execute workflows defined via
5
+ JSON or Python DSL. Agents can call these tools to run multi-step workflows
6
+ with parameter injection and streaming execution.
7
+
8
+ Workflows execute remotely on specified runners using the Kubiya SDK.
9
+ """
10
+
11
+ import json
12
+ import logging
13
+ import asyncio
14
+ import os
15
+ from typing import Optional, Callable, Dict, Any, List
16
+ from agno.tools import Toolkit
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
+ class WorkflowExecutorTools(Toolkit):
22
+ """
23
+ Workflow Executor toolkit for running workflows from agents.
24
+
25
+ Agents can use these tools to:
26
+ - Execute JSON-defined workflows with parameters
27
+ - Run Python DSL workflows
28
+ - Stream workflow execution events
29
+ - Get workflow execution status
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ name: Optional[str] = None,
35
+ workflows: Optional[List[Dict[str, Any]]] = None,
36
+ validation_enabled: bool = True,
37
+ default_runner: Optional[str] = None,
38
+ timeout: int = 3600,
39
+ default_parameters: Optional[Dict[str, Any]] = None,
40
+ stream_callback: Optional[Callable[[str], None]] = None,
41
+ kubiya_api_key: Optional[str] = None,
42
+ kubiya_api_base: Optional[str] = None,
43
+ # Legacy parameters for backwards compatibility
44
+ workflow_type: Optional[str] = None,
45
+ workflow_definition: Optional[str] = None,
46
+ python_dsl_code: Optional[str] = None,
47
+ ):
48
+ """
49
+ Initialize WorkflowExecutorTools.
50
+
51
+ Args:
52
+ name: Skill instance name (defaults to "workflow_executor")
53
+ workflows: List of workflow definitions. Each workflow becomes a separate tool.
54
+ Format: [{"name": "analyze-logs", "type": "json", "definition": {...}}, ...]
55
+ validation_enabled: Enable pre-execution validation
56
+ default_runner: Default runner/environment name
57
+ timeout: Maximum execution timeout in seconds
58
+ default_parameters: Default parameter values to use for all workflows
59
+ stream_callback: Optional callback for streaming output
60
+ kubiya_api_key: Kubiya API key (defaults to KUBIYA_API_KEY env var)
61
+ kubiya_api_base: Kubiya API base URL (defaults to KUBIYA_API_BASE env var)
62
+ workflow_type: LEGACY - Type of workflow ("json" or "python_dsl")
63
+ workflow_definition: LEGACY - JSON workflow definition string
64
+ python_dsl_code: LEGACY - Python DSL code string
65
+ """
66
+ super().__init__(name=name or "workflow_executor")
67
+
68
+ self.validation_enabled = validation_enabled
69
+ self.default_runner = default_runner or "default"
70
+ self.timeout = timeout
71
+ self.default_parameters = default_parameters or {}
72
+ self.stream_callback = stream_callback
73
+
74
+ # Get Kubiya API credentials from parameters or environment
75
+ self.kubiya_api_key = kubiya_api_key or os.environ.get("KUBIYA_API_KEY")
76
+ self.kubiya_api_base = kubiya_api_base or os.environ.get("KUBIYA_API_BASE", "https://api.kubiya.ai")
77
+
78
+ if not self.kubiya_api_key:
79
+ logger.warning("No KUBIYA_API_KEY provided - workflow execution will fail")
80
+
81
+ # Get control plane client for publishing events
82
+ try:
83
+ from control_plane_api.worker.control_plane_client import get_control_plane_client
84
+ self.control_plane = get_control_plane_client()
85
+ except Exception as e:
86
+ logger.warning(f"Failed to get control plane client: {e}")
87
+ self.control_plane = None
88
+
89
+ # Initialize Kubiya SDK client for remote execution
90
+ self.kubiya_client = None
91
+ if self.kubiya_api_key:
92
+ try:
93
+ from kubiya import KubiyaClient
94
+
95
+ self.kubiya_client = KubiyaClient(
96
+ api_key=self.kubiya_api_key,
97
+ base_url=self.kubiya_api_base,
98
+ runner=self.default_runner,
99
+ timeout=self.timeout
100
+ )
101
+ logger.info(f"Initialized Kubiya SDK client for remote workflow execution (runner: {self.default_runner})")
102
+ except ImportError as e:
103
+ logger.error(f"Failed to import Kubiya SDK: {e}. Install with: pip install git+https://github.com/kubiyabot/sdk-py.git@main")
104
+ self.kubiya_client = None
105
+
106
+ # Handle legacy single workflow format
107
+ if workflow_definition or python_dsl_code:
108
+ logger.info("Using legacy single-workflow format")
109
+
110
+ legacy_workflow = {
111
+ "name": "default",
112
+ "type": workflow_type or "json",
113
+ }
114
+ if workflow_type == "json" and workflow_definition:
115
+ legacy_workflow["definition"] = workflow_definition
116
+ elif workflow_type == "python_dsl" and python_dsl_code:
117
+ legacy_workflow["code"] = python_dsl_code
118
+
119
+ workflows = [legacy_workflow]
120
+
121
+ # Store legacy attributes for backward compatibility
122
+ self.workflow_type = workflow_type
123
+ self.workflow_definition = workflow_definition
124
+ self.python_dsl_code = python_dsl_code
125
+
126
+ # Parse workflow data for legacy JSON workflows
127
+ if workflow_type == "json" and workflow_definition:
128
+ try:
129
+ self.workflow_data = json.loads(workflow_definition) if isinstance(workflow_definition, str) else workflow_definition
130
+ except Exception as e:
131
+ logger.error(f"Failed to parse legacy workflow definition: {e}")
132
+ self.workflow_data = None
133
+ else:
134
+ self.workflow_data = None
135
+ else:
136
+ # Not using legacy format - no legacy attributes
137
+ self.workflow_type = None
138
+ self.workflow_definition = None
139
+ self.python_dsl_code = None
140
+ self.workflow_data = None
141
+
142
+ # Store workflows collection
143
+ self.workflows = workflows or []
144
+
145
+ # Dynamically register a tool for each workflow
146
+ for workflow in self.workflows:
147
+ self._register_workflow_tool(workflow)
148
+
149
+ # If no workflows registered (empty or legacy format), register default execution tool
150
+ if not self.workflows or len(self.workflows) == 0:
151
+ logger.warning("No workflows configured in WorkflowExecutorTools")
152
+
153
+ # Register helper tools
154
+ self.register(self.list_all_workflows)
155
+ self.register(self.get_workflow_info)
156
+
157
+ def _register_workflow_tool(self, workflow: Dict[str, Any]):
158
+ """
159
+ Dynamically register a tool method for a specific workflow.
160
+
161
+ Creates a method named after the workflow that executes it on the configured runner.
162
+
163
+ Args:
164
+ workflow: Workflow definition dict with name, type, and definition/code
165
+ """
166
+ workflow_name = workflow.get("name", "unknown")
167
+ workflow_type = workflow.get("type", "json")
168
+
169
+ # Use clean workflow name as method name (replace hyphens/spaces with underscores)
170
+ # For "analyze-logs" workflow → method name "analyze_logs"
171
+ # For "default" workflow (legacy) → use the toolkit name
172
+ safe_name = workflow_name.replace("-", "_").replace(" ", "_").lower()
173
+
174
+ # If this is the default workflow, use the skill name
175
+ if workflow_name == "default" and self.name != "workflow_executor":
176
+ method_name = self.name
177
+ else:
178
+ method_name = safe_name
179
+
180
+ # Create a closure that captures the workflow definition
181
+ def workflow_executor(parameters: Optional[Dict[str, Any]] = None) -> str:
182
+ f"""
183
+ Execute the '{workflow_name}' workflow on the configured runner.
184
+
185
+ This workflow executes on the runner specified in the workflow definition
186
+ using the Kubiya SDK. All steps are executed in dependency order.
187
+
188
+ Args:
189
+ parameters: Dictionary of parameters to inject into the workflow.
190
+ Parameters can be referenced in workflow steps using {{{{param_name}}}} syntax.
191
+
192
+ Returns:
193
+ str: Formatted workflow execution results including step outputs and status.
194
+
195
+ Examples:
196
+ # Execute workflow with parameters
197
+ {method_name}(parameters={{"environment": "production", "version": "v1.2.3"}})
198
+ """
199
+ return self._execute_specific_workflow(workflow, parameters)
200
+
201
+ # Set proper docstring
202
+ workflow_executor.__doc__ = f"""
203
+ Execute the '{workflow_name}' workflow on the configured runner.
204
+
205
+ Type: {workflow_type}
206
+ Runner: Specified in workflow definition or default_runner config
207
+
208
+ Args:
209
+ parameters: Optional dictionary of parameters to inject into workflow steps.
210
+ Reference parameters in steps using {{{{param_name}}}} syntax.
211
+
212
+ Returns:
213
+ str: Workflow execution results including all step outputs.
214
+ """
215
+
216
+ # Set method name for proper tool registration
217
+ workflow_executor.__name__ = method_name
218
+
219
+ # Register as a tool
220
+ self.register(workflow_executor)
221
+
222
+ # Also set as attribute on self for direct access
223
+ setattr(self, method_name, workflow_executor)
224
+
225
+ logger.info(f"Registered workflow tool: {method_name} for workflow '{workflow_name}'")
226
+
227
+ def _execute_specific_workflow(
228
+ self,
229
+ workflow: Dict[str, Any],
230
+ parameters: Optional[Dict[str, Any]] = None
231
+ ) -> str:
232
+ """
233
+ Execute a specific workflow from the collection.
234
+
235
+ Args:
236
+ workflow: Workflow definition
237
+ parameters: Execution parameters
238
+
239
+ Returns:
240
+ str: Formatted execution result
241
+ """
242
+ try:
243
+ workflow_name = workflow.get("name", "unknown")
244
+ workflow_type = workflow.get("type", "json")
245
+
246
+ # Merge default parameters with runtime parameters
247
+ # Runtime parameters override defaults
248
+ params = {**self.default_parameters, **(parameters or {})}
249
+
250
+ # Determine runner
251
+ effective_runner = self.default_runner
252
+ if workflow_type == "json":
253
+ workflow_def = workflow.get("definition")
254
+ if isinstance(workflow_def, str):
255
+ workflow_data = json.loads(workflow_def)
256
+ else:
257
+ workflow_data = workflow_def
258
+
259
+ effective_runner = workflow_data.get("runner") or self.default_runner
260
+ else:
261
+ effective_runner = self.default_runner
262
+
263
+ # Stream start message
264
+ if self.stream_callback:
265
+ self.stream_callback(
266
+ f"šŸš€ Starting workflow: {workflow_name}\n"
267
+ f" Type: {workflow_type}\n"
268
+ f" Parameters: {json.dumps(params, indent=2)}\n"
269
+ f" Runner: {effective_runner}\n\n"
270
+ )
271
+
272
+ # Execute based on workflow type
273
+ if workflow_type == "json":
274
+ result = self._execute_json_workflow_specific(workflow, params, effective_runner)
275
+ elif workflow_type == "python_dsl":
276
+ result = self._execute_python_dsl_workflow_specific(workflow, params, effective_runner)
277
+ else:
278
+ raise ValueError(f"Unsupported workflow type: {workflow_type}")
279
+
280
+ # Stream completion message
281
+ if self.stream_callback:
282
+ self.stream_callback(f"\nāœ… Workflow '{workflow_name}' completed successfully\n")
283
+
284
+ return result
285
+
286
+ except Exception as e:
287
+ error_msg = f"āŒ Workflow '{workflow.get('name', 'unknown')}' execution failed: {str(e)}"
288
+ logger.error(error_msg, exc_info=True)
289
+
290
+ if self.stream_callback:
291
+ self.stream_callback(f"\n{error_msg}\n")
292
+
293
+ return error_msg
294
+
295
+ def _execute_json_workflow_specific(
296
+ self,
297
+ workflow: Dict[str, Any],
298
+ parameters: Dict[str, Any],
299
+ runner: str
300
+ ) -> str:
301
+ """Execute a JSON workflow."""
302
+ workflow_def = workflow.get("definition")
303
+ if isinstance(workflow_def, str):
304
+ workflow_data = json.loads(workflow_def)
305
+ else:
306
+ workflow_data = workflow_def
307
+
308
+ if not workflow_data:
309
+ raise ValueError("No workflow definition available")
310
+
311
+ if not self.kubiya_client:
312
+ raise RuntimeError("Kubiya SDK client not initialized")
313
+
314
+ # Ensure runner is set
315
+ workflow_data["runner"] = runner
316
+
317
+ # Remove 'triggers' key if it exists - not needed for direct execution
318
+ # The DAG builder rejects this key when executing workflows directly
319
+ if "triggers" in workflow_data:
320
+ logger.debug(f"Removing 'triggers' key from workflow definition (not needed for execution)")
321
+ workflow_data.pop("triggers")
322
+
323
+ # Execute remotely
324
+ from datetime import datetime
325
+ start_time = datetime.utcnow()
326
+
327
+ if self.stream_callback:
328
+ self.stream_callback(f"ā–¶ļø Submitting to runner '{runner}'...\n\n")
329
+
330
+ # āœ… Enable streaming to capture real-time workflow output
331
+ response = self.kubiya_client.execute_workflow(
332
+ workflow_definition=workflow_data,
333
+ parameters=parameters,
334
+ stream=True
335
+ )
336
+
337
+ # Accumulate streaming results
338
+ accumulated_output = []
339
+ event_count = 0
340
+ step_outputs = {}
341
+ current_step = None
342
+
343
+ # Iterate over streaming results (SDK yields JSON strings)
344
+ for event in response:
345
+ event_count += 1
346
+
347
+ # Skip None/empty events
348
+ if event is None:
349
+ logger.debug(f"ā­ļø Skipping None event #{event_count}")
350
+ continue
351
+
352
+ # Debug: Log raw event with actual content
353
+ event_repr = repr(event)[:500] # Use repr to see exact content
354
+ logger.info(f"šŸ“¦ Received event #{event_count} (type={type(event).__name__}, length={len(str(event)) if event else 0})")
355
+ logger.debug(f" Raw content: {event_repr}")
356
+
357
+ # Parse the event (SDK yields JSON strings or bytes)
358
+ try:
359
+ if isinstance(event, bytes):
360
+ # Decode bytes to string first
361
+ logger.debug(f" šŸ”„ Decoding bytes to string...")
362
+ event = event.decode('utf-8')
363
+ logger.debug(f" āœ… Decoded to string (length={len(event)})")
364
+
365
+ if isinstance(event, str):
366
+ # Skip empty strings
367
+ if not event.strip():
368
+ logger.debug(f" ā­ļø Skipping empty string event")
369
+ continue
370
+
371
+ # Try to parse as JSON
372
+ logger.debug(f" šŸ”„ Parsing JSON string...")
373
+ event_data = json.loads(event)
374
+ logger.debug(f" āœ… Parsed JSON: type={event_data.get('type', 'unknown')}")
375
+ elif isinstance(event, dict):
376
+ # Already a dict
377
+ logger.debug(f" āœ… Already a dict: type={event.get('type', 'unknown')}")
378
+ event_data = event
379
+ else:
380
+ # Unknown type, treat as plain text
381
+ logger.warning(f" āš ļø Unknown event type: {type(event).__name__}, treating as plain text")
382
+ event_str = str(event)
383
+ if event_str.strip(): # Only add non-empty text
384
+ accumulated_output.append(event_str)
385
+ if self.stream_callback:
386
+ self.stream_callback(f"{event_str}\n")
387
+ continue
388
+ except (json.JSONDecodeError, UnicodeDecodeError) as e:
389
+ # If not valid JSON or can't decode, treat as plain text
390
+ logger.warning(f" āš ļø Failed to parse event: {e}, treating as plain text")
391
+ event_str = str(event)
392
+ if event_str.strip(): # Only add non-empty text
393
+ accumulated_output.append(event_str)
394
+ if self.stream_callback:
395
+ self.stream_callback(f"{event_str}\n")
396
+ continue
397
+
398
+ # Extract meaningful content based on event type
399
+ event_type = event_data.get("type", "unknown")
400
+ logger.info(f" šŸŽÆ Event type: {event_type}")
401
+
402
+ # Handle actual Kubiya workflow event types
403
+ if event_type == "step_output":
404
+ # step_output contains the actual workflow output in step.output
405
+ step = event_data.get("step", {})
406
+ step_name = step.get("name", "unknown")
407
+ output = step.get("output", "")
408
+
409
+ if output.strip():
410
+ logger.info(f" šŸ“ Step output: {step_name} - {output[:100]}")
411
+
412
+ # Format for display
413
+ formatted_output = f"```\n{output}\n```\n"
414
+
415
+ # Stream to callback if provided
416
+ if self.stream_callback:
417
+ self.stream_callback(formatted_output)
418
+
419
+ # Publish to control plane for live UI updates
420
+ if self.control_plane:
421
+ try:
422
+ execution_id = os.environ.get("EXECUTION_ID")
423
+ if execution_id:
424
+ self.control_plane.publish_event(
425
+ execution_id=execution_id,
426
+ event_type="workflow_output",
427
+ data={
428
+ "content": formatted_output,
429
+ "step_name": step_name,
430
+ "timestamp": __import__('datetime').datetime.utcnow().isoformat(),
431
+ }
432
+ )
433
+ except Exception as e:
434
+ logger.debug(f"Failed to publish workflow output to control plane: {e}")
435
+
436
+ accumulated_output.append(output)
437
+
438
+ # Track by step
439
+ if step_name not in step_outputs:
440
+ step_outputs[step_name] = []
441
+ step_outputs[step_name].append(output)
442
+
443
+ elif event_type == "step_running":
444
+ # Step is starting
445
+ step = event_data.get("step", {})
446
+ step_name = step.get("name", "unknown")
447
+ current_step = step_name
448
+ formatted = f"\nā–¶ļø Step: {step_name}"
449
+ logger.info(f" ā–¶ļø Starting step: {step_name}")
450
+ accumulated_output.append(formatted)
451
+ if self.stream_callback:
452
+ self.stream_callback(f"{formatted}\n")
453
+
454
+ elif event_type == "step_complete":
455
+ # Step finished
456
+ step = event_data.get("step", {})
457
+ step_name = step.get("name", "unknown")
458
+ status = step.get("status", "unknown")
459
+ icon = "āœ…" if status == "finished" else "āŒ"
460
+ formatted = f"{icon} Step '{step_name}' {status}"
461
+ logger.info(f" {icon} Step completed: {step_name} ({status})")
462
+ accumulated_output.append(formatted)
463
+ current_step = None
464
+ if self.stream_callback:
465
+ self.stream_callback(f"{formatted}\n")
466
+
467
+ elif event_type == "workflow_complete":
468
+ # Workflow finished
469
+ dag_name = event_data.get("dagName", "unknown")
470
+ status = event_data.get("status", "unknown")
471
+ success = event_data.get("success", False)
472
+ icon = "āœ…" if success else "āŒ"
473
+ formatted = f"{icon} Workflow '{dag_name}' {status}"
474
+ logger.info(f" {icon} Workflow completed: {dag_name} ({status}, success={success})")
475
+ accumulated_output.append(formatted)
476
+ if self.stream_callback:
477
+ self.stream_callback(f"{formatted}\n")
478
+
479
+ elif event_type == "log":
480
+ # Legacy log format (keep for backward compatibility)
481
+ message = event_data.get("message", "")
482
+ level = event_data.get("level", "info")
483
+ formatted = f"[{level.upper()}] {message}"
484
+ logger.info(f" šŸ’¬ Log message: {message[:100]}")
485
+ accumulated_output.append(formatted)
486
+ if self.stream_callback:
487
+ self.stream_callback(f"{formatted}\n")
488
+
489
+ elif event_type == "error":
490
+ error_msg = event_data.get("message", str(event_data))
491
+ formatted = f"āŒ Error: {error_msg}"
492
+ logger.error(f" āŒ Workflow error: {error_msg}")
493
+ accumulated_output.append(formatted)
494
+ if self.stream_callback:
495
+ self.stream_callback(f"{formatted}\n")
496
+
497
+ elif event_type == "heartbeat":
498
+ # Skip heartbeat events in output
499
+ logger.debug(f" šŸ’“ Heartbeat (skipping)")
500
+ continue
501
+
502
+ else:
503
+ # For unknown event types, log but don't show to user
504
+ logger.info(f" ā“ Unknown event type: {event_type}")
505
+ logger.debug(f" Raw data: {json.dumps(event_data)[:200]}")
506
+ # Skip unknown events instead of showing raw JSON
507
+ continue
508
+
509
+ end_time = datetime.utcnow()
510
+ duration = (end_time - start_time).total_seconds()
511
+
512
+ # Format complete results for Claude to see
513
+ result_text = f"\n{'='*60}\n"
514
+ result_text += f"Workflow Execution: {workflow_data.get('name', 'unknown')}\n"
515
+ result_text += f"{'='*60}\n\n"
516
+ result_text += f"Status: āœ… Completed\n"
517
+ result_text += f"Duration: {duration:.2f}s\n"
518
+ result_text += f"Runner: {runner}\n"
519
+ result_text += f"Parameters: {json.dumps(parameters, indent=2)}\n"
520
+ result_text += f"\nTotal Events: {event_count}\n"
521
+
522
+ # Include all captured output in the result
523
+ if accumulated_output:
524
+ result_text += f"\n{'='*60}\n"
525
+ result_text += f"Workflow Output:\n"
526
+ result_text += f"{'='*60}\n\n"
527
+ result_text += "\n".join(accumulated_output)
528
+ logger.info(f"āœ… Workflow execution complete: {event_count} events processed, {len(accumulated_output)} output lines accumulated")
529
+ else:
530
+ logger.warning(f"āš ļø No workflow output accumulated (received {event_count} events but none produced output)")
531
+
532
+ logger.debug(f"Final result preview: {result_text[:500]}")
533
+ return result_text
534
+
535
    def _execute_python_dsl_workflow_specific(
        self,
        workflow: Dict[str, Any],
        parameters: Dict[str, Any],
        runner: str
    ) -> str:
        """Execute a single Python DSL workflow entry remotely and stream its output.

        Wraps the DSL source (``workflow["code"]``) in a one-step workflow
        definition, submits it through the Kubiya SDK client with ``stream=True``,
        then parses each streamed event (JSON string, bytes, or dict), forwards
        displayable lines to ``self.stream_callback`` when set, publishes step
        output to the control plane when available, and returns a formatted
        summary containing all accumulated output.

        Args:
            workflow: Workflow entry; must contain ``code``, may contain ``name``.
            parameters: Parameters forwarded to the remote execution.
            runner: Runner name embedded in the generated workflow definition.

        Returns:
            str: Formatted execution summary including all accumulated output.

        Raises:
            ValueError: If the entry has no ``code``.
            RuntimeError: If ``self.kubiya_client`` is not initialized.
        """
        python_code = workflow.get("code")
        if not python_code:
            raise ValueError("No Python DSL code available")

        if not self.kubiya_client:
            raise RuntimeError("Kubiya SDK client not initialized")

        workflow_name = workflow.get("name", "python-dsl-workflow")

        # Create workflow definition for remote execution: a single step whose
        # "python_dsl" executor carries the DSL source for the runner to run.
        workflow_definition = {
            "name": workflow_name,
            "description": f"Python DSL workflow: {workflow_name}",
            "runner": runner,
            "steps": [
                {
                    "name": "execute_python_dsl",
                    "description": "Execute Python DSL workflow code",
                    "executor": {
                        "type": "python_dsl",
                        "config": {"code": python_code}
                    }
                }
            ]
        }

        from datetime import datetime
        # NOTE(review): datetime.utcnow() is deprecated on newer Pythons;
        # only used for duration math here — confirm before migrating to
        # datetime.now(timezone.utc).
        start_time = datetime.utcnow()

        if self.stream_callback:
            self.stream_callback(f"ā–¶ļø Submitting to runner '{runner}'...\n\n")

        # āœ… Enable streaming to capture real-time workflow output
        response = self.kubiya_client.execute_workflow(
            workflow_definition=workflow_definition,
            parameters=parameters,
            stream=True
        )

        # Accumulate streaming results.
        # accumulated_output: every displayable line, in arrival order.
        # step_outputs: per-step output lines (collected but not included in
        # the summary below). current_step: set/cleared on step events but not
        # read elsewhere in this method.
        accumulated_output = []
        event_count = 0
        step_outputs = {}
        current_step = None

        # Iterate over streaming results (SDK yields JSON strings)
        for event in response:
            event_count += 1

            # Skip None/empty events
            if event is None:
                logger.debug(f"ā­ļø Skipping None event #{event_count}")
                continue

            # Debug: Log raw event with actual content
            event_repr = repr(event)[:500]  # Use repr to see exact content
            logger.info(f"šŸ“¦ Received event #{event_count} (type={type(event).__name__}, length={len(str(event)) if event else 0})")
            logger.debug(f" Raw content: {event_repr}")

            # Parse the event (SDK yields JSON strings or bytes)
            try:
                if isinstance(event, bytes):
                    # Decode bytes to string first
                    logger.debug(f" šŸ”„ Decoding bytes to string...")
                    event = event.decode('utf-8')
                    logger.debug(f" āœ… Decoded to string (length={len(event)})")

                if isinstance(event, str):
                    # Skip empty strings
                    if not event.strip():
                        logger.debug(f" ā­ļø Skipping empty string event")
                        continue

                    # Try to parse as JSON
                    logger.debug(f" šŸ”„ Parsing JSON string...")
                    event_data = json.loads(event)
                    logger.debug(f" āœ… Parsed JSON: type={event_data.get('type', 'unknown')}")
                elif isinstance(event, dict):
                    # Already a dict
                    logger.debug(f" āœ… Already a dict: type={event.get('type', 'unknown')}")
                    event_data = event
                else:
                    # Unknown type, treat as plain text
                    logger.warning(f" āš ļø Unknown event type: {type(event).__name__}, treating as plain text")
                    event_str = str(event)
                    if event_str.strip():  # Only add non-empty text
                        accumulated_output.append(event_str)
                        if self.stream_callback:
                            self.stream_callback(f"{event_str}\n")
                    continue
            except (json.JSONDecodeError, UnicodeDecodeError) as e:
                # If not valid JSON or can't decode, treat as plain text
                logger.warning(f" āš ļø Failed to parse event: {e}, treating as plain text")
                event_str = str(event)
                if event_str.strip():  # Only add non-empty text
                    accumulated_output.append(event_str)
                    if self.stream_callback:
                        self.stream_callback(f"{event_str}\n")
                continue

            # Extract meaningful content based on event type
            event_type = event_data.get("type", "unknown")
            logger.info(f" šŸŽÆ Event type: {event_type}")

            # Handle actual Kubiya workflow event types
            if event_type == "step_output":
                # step_output contains the actual workflow output in step.output
                step = event_data.get("step", {})
                step_name = step.get("name", "unknown")
                output = step.get("output", "")

                if output.strip():
                    logger.info(f" šŸ“ Step output: {step_name} - {output[:100]}")

                    # Format for display (fenced code block for the UI)
                    formatted_output = f"```\n{output}\n```\n"

                    # Stream to callback if provided
                    if self.stream_callback:
                        self.stream_callback(formatted_output)

                    # Publish to control plane for live UI updates; best-effort,
                    # failures are logged at debug and never abort the run.
                    if self.control_plane:
                        try:
                            execution_id = os.environ.get("EXECUTION_ID")
                            if execution_id:
                                self.control_plane.publish_event(
                                    execution_id=execution_id,
                                    event_type="workflow_output",
                                    data={
                                        "content": formatted_output,
                                        "step_name": step_name,
                                        "timestamp": __import__('datetime').datetime.utcnow().isoformat(),
                                    }
                                )
                        except Exception as e:
                            logger.debug(f"Failed to publish workflow output to control plane: {e}")

                    accumulated_output.append(output)

                    # Track by step
                    if step_name not in step_outputs:
                        step_outputs[step_name] = []
                    step_outputs[step_name].append(output)

            elif event_type == "step_running":
                # Step is starting
                step = event_data.get("step", {})
                step_name = step.get("name", "unknown")
                current_step = step_name
                formatted = f"\nā–¶ļø Step: {step_name}"
                logger.info(f" ā–¶ļø Starting step: {step_name}")
                accumulated_output.append(formatted)
                if self.stream_callback:
                    self.stream_callback(f"{formatted}\n")

            elif event_type == "step_complete":
                # Step finished
                step = event_data.get("step", {})
                step_name = step.get("name", "unknown")
                status = step.get("status", "unknown")
                icon = "āœ…" if status == "finished" else "āŒ"
                formatted = f"{icon} Step '{step_name}' {status}"
                logger.info(f" {icon} Step completed: {step_name} ({status})")
                accumulated_output.append(formatted)
                current_step = None
                if self.stream_callback:
                    self.stream_callback(f"{formatted}\n")

            elif event_type == "workflow_complete":
                # Workflow finished
                dag_name = event_data.get("dagName", "unknown")
                status = event_data.get("status", "unknown")
                success = event_data.get("success", False)
                icon = "āœ…" if success else "āŒ"
                formatted = f"{icon} Workflow '{dag_name}' {status}"
                logger.info(f" {icon} Workflow completed: {dag_name} ({status}, success={success})")
                accumulated_output.append(formatted)
                if self.stream_callback:
                    self.stream_callback(f"{formatted}\n")

            elif event_type == "log":
                # Legacy log format (keep for backward compatibility)
                message = event_data.get("message", "")
                level = event_data.get("level", "info")
                formatted = f"[{level.upper()}] {message}"
                logger.info(f" šŸ’¬ Log message: {message[:100]}")
                accumulated_output.append(formatted)
                if self.stream_callback:
                    self.stream_callback(f"{formatted}\n")

            elif event_type == "error":
                error_msg = event_data.get("message", str(event_data))
                formatted = f"āŒ Error: {error_msg}"
                logger.error(f" āŒ Workflow error: {error_msg}")
                accumulated_output.append(formatted)
                if self.stream_callback:
                    self.stream_callback(f"{formatted}\n")

            elif event_type == "heartbeat":
                # Skip heartbeat events in output
                logger.debug(f" šŸ’“ Heartbeat (skipping)")
                continue

            else:
                # For unknown event types, log but don't show to user
                logger.info(f" ā“ Unknown event type: {event_type}")
                logger.debug(f" Raw data: {json.dumps(event_data)[:200]}")
                # Skip unknown events instead of showing raw JSON
                continue

        end_time = datetime.utcnow()
        duration = (end_time - start_time).total_seconds()

        result_text = f"\n{'='*60}\n"
        result_text += f"Python DSL Workflow: {workflow_name}\n"
        result_text += f"{'='*60}\n\n"
        result_text += f"Status: āœ… Completed\n"
        result_text += f"Duration: {duration:.2f}s\n"
        result_text += f"Runner: {runner}\n"
        result_text += f"\nTotal Events: {event_count}\n"

        # Include all captured output in the result
        if accumulated_output:
            result_text += f"\n{'='*60}\n"
            result_text += f"Workflow Output:\n"
            result_text += f"{'='*60}\n\n"
            result_text += "\n".join(accumulated_output)
            logger.info(f"āœ… Workflow execution complete: {event_count} events processed, {len(accumulated_output)} output lines accumulated")
        else:
            logger.warning(f"āš ļø No workflow output accumulated (received {event_count} events but none produced output)")

        logger.debug(f"Final result preview: {result_text[:500]}")
        return result_text
776
+
777
+ def list_all_workflows(self) -> str:
778
+ """
779
+ List all available workflows in this skill instance.
780
+
781
+ Returns:
782
+ str: Formatted list of all workflows with their names, types, and descriptions.
783
+
784
+ Examples:
785
+ # List all workflows
786
+ list_all_workflows()
787
+ """
788
+ if not self.workflows:
789
+ return "No workflows defined in this skill instance."
790
+
791
+ result = f"\nšŸ“‹ Available Workflows ({len(self.workflows)}):\n"
792
+ result += "=" * 60 + "\n\n"
793
+
794
+ for idx, workflow in enumerate(self.workflows, 1):
795
+ name = workflow.get("name", "unknown")
796
+ wf_type = workflow.get("type", "unknown")
797
+ safe_name = name.replace("-", "_").replace(" ", "_").lower()
798
+
799
+ result += f"{idx}. {name} ({wf_type})\n"
800
+ result += f" Tool: execute_workflow_{safe_name}()\n"
801
+
802
+ # Get description from workflow definition
803
+ if wf_type == "json":
804
+ wf_def = workflow.get("definition")
805
+ if isinstance(wf_def, str):
806
+ try:
807
+ wf_data = json.loads(wf_def)
808
+ desc = wf_data.get("description", "No description")
809
+ steps = len(wf_data.get("steps", []))
810
+ result += f" Description: {desc}\n"
811
+ result += f" Steps: {steps}\n"
812
+ except:
813
+ pass
814
+ elif isinstance(wf_def, dict):
815
+ desc = wf_def.get("description", "No description")
816
+ steps = len(wf_def.get("steps", []))
817
+ result += f" Description: {desc}\n"
818
+ result += f" Steps: {steps}\n"
819
+
820
+ result += "\n"
821
+
822
+ return result
823
+
824
    def execute_workflow(
        self,
        parameters: Optional[Dict[str, Any]] = None,
        override_timeout: Optional[int] = None,
    ) -> str:
        """
        Execute the first configured workflow with the provided parameters.

        LEGACY METHOD: For backward compatibility with single-workflow format.
        For multi-workflow skills, use execute_workflow_<name>() methods instead.

        Dispatch order:
          1. If ``self.workflows`` is non-empty (multi-workflow format), the
             FIRST workflow is executed via ``_execute_specific_workflow``
             (a warning is logged when more than one is defined).
          2. Otherwise the legacy single-workflow attributes are used:
             ``workflow_type`` selects ``_execute_json_workflow`` or
             ``_execute_python_dsl_workflow``.

        The runner is resolved from the workflow definition first, then
        ``self.default_runner``. All exceptions are caught and returned as a
        formatted error string rather than raised, so agent callers always get
        a printable result.

        Args:
            parameters: Dictionary of parameters to inject into the workflow.
                These can be referenced in workflow steps using {{param_name}} syntax.
            override_timeout: Optional timeout override in seconds.
                If not provided, uses the timeout from configuration.

        Returns:
            str: A formatted string containing the workflow execution results,
                 including step outputs and any errors encountered.

        Examples:
            # Execute a deployment workflow with environment parameter
            execute_workflow(parameters={"environment": "production", "version": "v1.2.3"})

            # Execute with timeout override
            execute_workflow(
                parameters={"data_source": "s3://bucket/data"},
                override_timeout=7200
            )
        """
        try:
            # For multi-workflow format, execute the first workflow
            if self.workflows:
                if len(self.workflows) > 1:
                    logger.warning(
                        "Multiple workflows defined but execute_workflow() called. "
                        "Executing first workflow. Use execute_workflow_<name>() for specific workflows."
                    )
                return self._execute_specific_workflow(self.workflows[0], parameters)

            # Legacy single-workflow format
            # Use provided parameters or empty dict
            params = parameters or {}

            # Determine runner: workflow-level "runner" key wins, then the
            # skill-level default. hasattr guards keep legacy instances that
            # never set these attributes from raising AttributeError.
            effective_runner = None
            if hasattr(self, 'workflow_type') and self.workflow_type == "json" and hasattr(self, 'workflow_data') and self.workflow_data:
                # Get runner from workflow definition first, then step-level, then default
                effective_runner = self.workflow_data.get("runner") or self.default_runner
            else:
                effective_runner = self.default_runner

            # Determine timeout (explicit override beats configured value)
            effective_timeout = override_timeout or self.timeout

            # Stream start message
            if self.stream_callback:
                self.stream_callback(
                    f"šŸš€ Starting workflow execution...\n"
                    f" Workflow Type: {getattr(self, 'workflow_type', 'unknown')}\n"
                    f" Parameters: {json.dumps(params, indent=2)}\n"
                    f" Runner: {effective_runner or 'default'}\n"
                    f" Timeout: {effective_timeout}s\n\n"
                )

            # Execute based on workflow type
            if hasattr(self, 'workflow_type'):
                if self.workflow_type == "json":
                    result = self._execute_json_workflow(params, effective_runner, effective_timeout)
                elif self.workflow_type == "python_dsl":
                    result = self._execute_python_dsl_workflow(params, effective_runner, effective_timeout)
                else:
                    raise ValueError(f"Unsupported workflow type: {self.workflow_type}")
            else:
                raise ValueError("No workflow configured")

            # Stream completion message
            if self.stream_callback:
                self.stream_callback(f"\nāœ… Workflow execution completed successfully\n")

            return result

        except Exception as e:
            # Deliberately broad: this is the agent-facing boundary — errors are
            # logged with traceback and surfaced as text, never raised.
            error_msg = f"āŒ Workflow execution failed: {str(e)}"
            logger.error(error_msg, exc_info=True)

            if self.stream_callback:
                self.stream_callback(f"\n{error_msg}\n")

            return error_msg
923
+
924
    def _execute_json_workflow(
        self,
        parameters: Dict[str, Any],
        runner: Optional[str],
        timeout: int
    ) -> str:
        """Execute a JSON workflow using kubiya SDK (remote execution).

        Submits ``self.workflow_data`` (with the runner forced and the
        ``triggers`` key stripped) to the SDK client with ``stream=True``,
        forwards streamed events to ``self.stream_callback``, and returns a
        formatted execution summary.

        Args:
            parameters: Parameters forwarded to the remote execution.
            runner: Runner to execute on; falls back to ``self.default_runner``.
            timeout: Timeout in seconds. NOTE(review): not forwarded to the SDK
                call in this method — confirm whether that is intentional.

        Returns:
            str: Formatted execution summary including accumulated output.

        Raises:
            ValueError: If no workflow definition is available.
            RuntimeError: If the SDK client is missing, or (wrapping any other
                error) if execution fails.
        """
        if not self.workflow_data:
            raise ValueError("No workflow definition available")

        if not self.kubiya_client:
            raise RuntimeError("Kubiya SDK client not initialized - cannot execute workflow remotely")

        workflow_name = self.workflow_data.get("name", "unknown")
        steps = self.workflow_data.get("steps", [])

        if self.stream_callback:
            self.stream_callback(f"šŸ“‹ Workflow: {workflow_name}\n")
            self.stream_callback(f" Steps: {len(steps)}\n")
            self.stream_callback(f" Runner: {runner or self.default_runner}\n\n")

        try:
            # Execute workflow remotely using Kubiya SDK
            from datetime import datetime
            start_time = datetime.utcnow()

            if self.stream_callback:
                self.stream_callback(f"ā–¶ļø Submitting to runner '{runner or self.default_runner}'...\n\n")

            # Submit workflow definition to remote runner.
            # The workflow_data already contains the complete workflow
            # definition; copy it so the stored original is not mutated.
            workflow_def = dict(self.workflow_data)

            # Ensure runner is set correctly
            workflow_def["runner"] = runner or self.default_runner

            # Remove 'triggers' key if it exists - not needed for direct execution.
            # The DAG builder rejects this key when executing workflows directly.
            if "triggers" in workflow_def:
                logger.debug(f"Removing 'triggers' key from workflow definition (not needed for execution)")
                workflow_def.pop("triggers")

            # āœ… Enable streaming to capture real-time workflow output
            response = self.kubiya_client.execute_workflow(
                workflow_definition=workflow_def,
                parameters=parameters,
                stream=True
            )

            # Accumulate streaming results
            accumulated_output = []
            event_count = 0

            # Iterate over streaming results.
            # NOTE(review): accumulation happens only when a stream_callback is
            # set — without one, events are counted but their content is not
            # included in the summary. The *_specific execution paths
            # accumulate unconditionally; confirm which behavior is intended.
            for event in response:
                event_count += 1

                # Stream to user in real-time
                if self.stream_callback:
                    if isinstance(event, str):
                        self.stream_callback(f"{event}\n")
                        accumulated_output.append(event)
                    elif isinstance(event, dict):
                        event_type = event.get("type", "event")
                        event_data = event.get("data", event)
                        formatted_event = f"[{event_type}] {json.dumps(event_data, indent=2)}"
                        self.stream_callback(f"{formatted_event}\n")
                        accumulated_output.append(formatted_event)
                    else:
                        formatted_event = str(event)
                        self.stream_callback(f"{formatted_event}\n")
                        accumulated_output.append(formatted_event)

            end_time = datetime.utcnow()
            duration = (end_time - start_time).total_seconds()

            # Format results
            result_text = f"\n{'='*60}\n"
            result_text += f"Workflow Execution Summary\n"
            result_text += f"{'='*60}\n\n"
            result_text += f"Workflow: {workflow_name}\n"
            result_text += f"Runner: {runner or self.default_runner}\n"
            result_text += f"Status: āœ… Completed\n"
            result_text += f"Duration: {duration:.2f}s\n"
            result_text += f"Steps: {len(steps)}\n"
            result_text += f"Parameters: {json.dumps(parameters, indent=2)}\n"
            result_text += f"\nTotal Events: {event_count}\n"

            # Include all captured output in the result
            if accumulated_output:
                result_text += f"\n{'='*60}\n"
                result_text += f"Workflow Output:\n"
                result_text += f"{'='*60}\n\n"
                result_text += "\n".join(accumulated_output)

            if self.stream_callback:
                self.stream_callback(f"\nāœ… Workflow execution completed in {duration:.2f}s\n")

            return result_text

        except Exception as e:
            error_msg = f"JSON workflow execution failed: {str(e)}"
            logger.error(error_msg, exc_info=True)
            raise RuntimeError(error_msg)
1028
+
1029
+ def _execute_python_dsl_workflow(
1030
+ self,
1031
+ parameters: Dict[str, Any],
1032
+ runner: Optional[str],
1033
+ timeout: int
1034
+ ) -> str:
1035
+ """Execute a Python DSL workflow using kubiya SDK (remote execution)."""
1036
+ if not self.python_dsl_code:
1037
+ raise ValueError("No Python DSL code available")
1038
+
1039
+ if not self.kubiya_client:
1040
+ raise RuntimeError("Kubiya SDK client not initialized - cannot execute workflow remotely")
1041
+
1042
+ if self.stream_callback:
1043
+ self.stream_callback(f"šŸ Submitting Python DSL workflow for remote execution...\n\n")
1044
+
1045
+ try:
1046
+ # Parse the Python DSL code to extract workflow name
1047
+ # For now, we'll create a workflow definition that the runner can execute
1048
+ workflow_name = "python-dsl-workflow"
1049
+
1050
+ # Try to extract workflow name from code
1051
+ if "name=" in self.python_dsl_code:
1052
+ try:
1053
+ import re
1054
+ match = re.search(r'name\s*=\s*["\']([^"\']+)["\']', self.python_dsl_code)
1055
+ if match:
1056
+ workflow_name = match.group(1)
1057
+ except:
1058
+ pass
1059
+
1060
+ if self.stream_callback:
1061
+ self.stream_callback(f"šŸ“‹ Workflow: {workflow_name}\n")
1062
+ self.stream_callback(f" Runner: {runner or self.default_runner}\n")
1063
+ self.stream_callback(f" Parameters: {json.dumps(parameters)}\n\n")
1064
+
1065
+ # Create workflow definition for remote execution
1066
+ # The runner will execute the Python DSL code
1067
+ workflow_definition = {
1068
+ "name": workflow_name,
1069
+ "description": "Python DSL workflow",
1070
+ "runner": runner or self.default_runner,
1071
+ "steps": [
1072
+ {
1073
+ "name": "execute_python_dsl",
1074
+ "description": "Execute Python DSL workflow code",
1075
+ "executor": {
1076
+ "type": "python_dsl",
1077
+ "config": {
1078
+ "code": self.python_dsl_code
1079
+ }
1080
+ }
1081
+ }
1082
+ ]
1083
+ }
1084
+
1085
+ # Execute workflow remotely using Kubiya SDK
1086
+ from datetime import datetime
1087
+ start_time = datetime.utcnow()
1088
+
1089
+ if self.stream_callback:
1090
+ self.stream_callback(f"ā–¶ļø Submitting to runner '{runner or self.default_runner}'...\n\n")
1091
+
1092
+ # āœ… Enable streaming to capture real-time workflow output
1093
+ response = self.kubiya_client.execute_workflow(
1094
+ workflow_definition=workflow_definition,
1095
+ parameters=parameters,
1096
+ stream=True
1097
+ )
1098
+
1099
+ # Accumulate streaming results
1100
+ accumulated_output = []
1101
+ event_count = 0
1102
+
1103
+ # Iterate over streaming results
1104
+ for event in response:
1105
+ event_count += 1
1106
+
1107
+ # Stream to user in real-time
1108
+ if self.stream_callback:
1109
+ if isinstance(event, str):
1110
+ self.stream_callback(f"{event}\n")
1111
+ accumulated_output.append(event)
1112
+ elif isinstance(event, dict):
1113
+ event_type = event.get("type", "event")
1114
+ event_data = event.get("data", event)
1115
+ formatted_event = f"[{event_type}] {json.dumps(event_data, indent=2)}"
1116
+ self.stream_callback(f"{formatted_event}\n")
1117
+ accumulated_output.append(formatted_event)
1118
+ else:
1119
+ formatted_event = str(event)
1120
+ self.stream_callback(f"{formatted_event}\n")
1121
+ accumulated_output.append(formatted_event)
1122
+
1123
+ end_time = datetime.utcnow()
1124
+ duration = (end_time - start_time).total_seconds()
1125
+
1126
+ # Format results
1127
+ result_text = f"\n{'='*60}\n"
1128
+ result_text += f"Python DSL Workflow Execution Summary\n"
1129
+ result_text += f"{'='*60}\n\n"
1130
+ result_text += f"Workflow: {workflow_name}\n"
1131
+ result_text += f"Runner: {runner or self.default_runner}\n"
1132
+ result_text += f"Status: āœ… Completed\n"
1133
+ result_text += f"Duration: {duration:.2f}s\n"
1134
+ result_text += f"Parameters: {json.dumps(parameters, indent=2)}\n"
1135
+ result_text += f"\nTotal Events: {event_count}\n"
1136
+
1137
+ # Include all captured output in the result
1138
+ if accumulated_output:
1139
+ result_text += f"\n{'='*60}\n"
1140
+ result_text += f"Workflow Output:\n"
1141
+ result_text += f"{'='*60}\n\n"
1142
+ result_text += "\n".join(accumulated_output)
1143
+
1144
+ if self.stream_callback:
1145
+ self.stream_callback(f"\nāœ… Workflow execution completed in {duration:.2f}s\n")
1146
+
1147
+ return result_text
1148
+
1149
+ except Exception as e:
1150
+ error_msg = f"Python DSL workflow execution failed: {str(e)}"
1151
+ logger.error(error_msg, exc_info=True)
1152
+ raise RuntimeError(error_msg)
1153
+
1154
+ def _inject_parameters(self, config: Dict[str, Any], parameters: Dict[str, Any]) -> Dict[str, Any]:
1155
+ """Inject parameters into configuration values."""
1156
+ result = {}
1157
+
1158
+ for key, value in config.items():
1159
+ if isinstance(value, str):
1160
+ # Replace {{param_name}} with parameter value
1161
+ for param_name, param_value in parameters.items():
1162
+ value = value.replace(f"{{{{{param_name}}}}}", str(param_value))
1163
+ result[key] = value
1164
+ elif isinstance(value, dict):
1165
+ result[key] = self._inject_parameters(value, parameters)
1166
+ elif isinstance(value, list):
1167
+ result[key] = [
1168
+ self._inject_parameters(item, parameters) if isinstance(item, dict)
1169
+ else str(item).replace(f"{{{{{pn}}}}}", str(pv)) if isinstance(item, str) else item
1170
+ for item in value
1171
+ for pn, pv in [(pn, pv)]
1172
+ for pn, pv in [(list(parameters.keys())[0] if parameters else "", list(parameters.values())[0] if parameters else "")]
1173
+ ][:len(value)]
1174
+ # Simplified version
1175
+ result[key] = value
1176
+ else:
1177
+ result[key] = value
1178
+
1179
+ return result
1180
+
1181
+ def list_workflow_steps(self, workflow_name: Optional[str] = None) -> str:
1182
+ """
1183
+ List all steps in the configured workflow(s).
1184
+
1185
+ LEGACY METHOD: For multi-workflow skills, this lists all workflows.
1186
+ For legacy single-workflow format, lists steps of that workflow.
1187
+
1188
+ Args:
1189
+ workflow_name: Optional workflow name to filter by (multi-workflow only)
1190
+
1191
+ Returns:
1192
+ str: A formatted string listing all workflow steps with their
1193
+ descriptions, executor types, and dependencies.
1194
+
1195
+ Examples:
1196
+ # List all steps in the workflow
1197
+ list_workflow_steps()
1198
+ """
1199
+ try:
1200
+ # Multi-workflow format
1201
+ if self.workflows:
1202
+ if workflow_name:
1203
+ # Find specific workflow
1204
+ workflow = next((w for w in self.workflows if w.get("name") == workflow_name), None)
1205
+ if not workflow:
1206
+ return f"āŒ Workflow '{workflow_name}' not found"
1207
+ workflows_to_show = [workflow]
1208
+ else:
1209
+ workflows_to_show = self.workflows
1210
+
1211
+ result = f"\nšŸ“‹ Workflows: {len(workflows_to_show)}\n"
1212
+ result += "=" * 60 + "\n\n"
1213
+
1214
+ for wf in workflows_to_show:
1215
+ wf_name = wf.get("name", "unknown")
1216
+ wf_type = wf.get("type", "unknown")
1217
+
1218
+ result += f"Workflow: {wf_name} ({wf_type})\n"
1219
+
1220
+ if wf_type == "json":
1221
+ wf_def = wf.get("definition")
1222
+ if isinstance(wf_def, str):
1223
+ try:
1224
+ wf_data = json.loads(wf_def)
1225
+ except:
1226
+ result += " āŒ Invalid JSON definition\n\n"
1227
+ continue
1228
+ else:
1229
+ wf_data = wf_def
1230
+
1231
+ if wf_data:
1232
+ workflow_desc = wf_data.get("description", "No description")
1233
+ steps = wf_data.get("steps", [])
1234
+
1235
+ result += f" Description: {workflow_desc}\n"
1236
+ result += f" Total Steps: {len(steps)}\n\n"
1237
+
1238
+ if steps:
1239
+ result += " Steps:\n"
1240
+ for idx, step in enumerate(steps, 1):
1241
+ step_name = step.get("name", "unknown")
1242
+ step_desc = step.get("description", "")
1243
+ executor = step.get("executor", {})
1244
+ executor_type = executor.get("type", "unknown")
1245
+ depends_on = step.get("depends_on", [])
1246
+
1247
+ result += f" {idx}. {step_name}\n"
1248
+ if step_desc:
1249
+ result += f" Description: {step_desc}\n"
1250
+ result += f" Executor: {executor_type}\n"
1251
+ if depends_on:
1252
+ result += f" Depends on: {', '.join(depends_on)}\n"
1253
+ else:
1254
+ result += " (No steps defined)\n"
1255
+
1256
+ elif wf_type == "python_dsl":
1257
+ result += " Type: Python DSL\n"
1258
+ result += " (To view steps, execute the workflow)\n"
1259
+
1260
+ result += "\n"
1261
+
1262
+ return result
1263
+
1264
+ # Legacy single-workflow format
1265
+ if self.workflow_type == "json":
1266
+ if not self.workflow_data:
1267
+ return "āŒ No workflow definition available"
1268
+
1269
+ workflow_name_legacy = self.workflow_data.get("name", "unknown")
1270
+ workflow_desc = self.workflow_data.get("description", "No description")
1271
+ steps = self.workflow_data.get("steps", [])
1272
+
1273
+ result = f"\nšŸ“‹ Workflow: {workflow_name_legacy}\n"
1274
+ result += f" Description: {workflow_desc}\n"
1275
+ result += f" Total Steps: {len(steps)}\n\n"
1276
+
1277
+ if not steps:
1278
+ result += " (No steps defined)\n"
1279
+ return result
1280
+
1281
+ result += "Steps:\n"
1282
+ for idx, step in enumerate(steps, 1):
1283
+ step_name = step.get("name", "unknown")
1284
+ step_desc = step.get("description", "")
1285
+ executor = step.get("executor", {})
1286
+ executor_type = executor.get("type", "unknown")
1287
+ depends_on = step.get("depends_on", [])
1288
+
1289
+ result += f"\n{idx}. {step_name}\n"
1290
+ if step_desc:
1291
+ result += f" Description: {step_desc}\n"
1292
+ result += f" Executor: {executor_type}\n"
1293
+ if depends_on:
1294
+ result += f" Depends on: {', '.join(depends_on)}\n"
1295
+
1296
+ return result
1297
+
1298
+ elif self.workflow_type == "python_dsl":
1299
+ return f"\nšŸ Python DSL Workflow\n\nTo view steps, execute the workflow.\n"
1300
+
1301
+ else:
1302
+ return "āŒ No workflow configured"
1303
+
1304
+ except Exception as e:
1305
+ logger.error(f"Failed to list workflow steps: {e}", exc_info=True)
1306
+ return f"āŒ Error listing workflow steps: {str(e)}"
1307
+
1308
+ def get_workflow_info(self) -> str:
1309
+ """
1310
+ Get detailed information about the configured workflow(s).
1311
+
1312
+ This tool provides comprehensive information about the workflow
1313
+ including its name, description, type, number of steps, triggers,
1314
+ and configuration.
1315
+
1316
+ For multi-workflow skills, lists all workflows with their configurations.
1317
+ For legacy single-workflow format, shows that workflow's information.
1318
+
1319
+ Returns:
1320
+ str: A formatted string with complete workflow information.
1321
+
1322
+ Examples:
1323
+ # Get workflow information
1324
+ get_workflow_info()
1325
+ """
1326
+ try:
1327
+ result = f"\n{'='*60}\n"
1328
+ result += f"Workflow Executor Information\n"
1329
+ result += f"{'='*60}\n\n"
1330
+
1331
+ result += f"Validation Enabled: {self.validation_enabled}\n"
1332
+ result += f"Timeout: {self.timeout}s\n"
1333
+ result += f"Default Runner: {self.default_runner or 'None'}\n"
1334
+ result += f"Total Workflows: {len(self.workflows)}\n\n"
1335
+
1336
+ # Multi-workflow format
1337
+ if self.workflows:
1338
+ result += "Configured Workflows:\n"
1339
+ result += "-" * 60 + "\n\n"
1340
+
1341
+ for idx, workflow in enumerate(self.workflows, 1):
1342
+ wf_name = workflow.get("name", "unknown")
1343
+ wf_type = workflow.get("type", "unknown")
1344
+ safe_name = wf_name.replace("-", "_").replace(" ", "_").lower()
1345
+
1346
+ result += f"{idx}. {wf_name}\n"
1347
+ result += f" Type: {wf_type}\n"
1348
+ result += f" Tool: execute_workflow_{safe_name}()\n"
1349
+
1350
+ if wf_type == "json":
1351
+ wf_def = workflow.get("definition")
1352
+ if isinstance(wf_def, str):
1353
+ try:
1354
+ wf_data = json.loads(wf_def)
1355
+ except:
1356
+ result += " āŒ Invalid JSON definition\n\n"
1357
+ continue
1358
+ else:
1359
+ wf_data = wf_def
1360
+
1361
+ if wf_data:
1362
+ workflow_desc = wf_data.get("description", "No description")
1363
+ steps = wf_data.get("steps", [])
1364
+ triggers = wf_data.get("triggers", [])
1365
+ workflow_runner = wf_data.get("runner")
1366
+
1367
+ result += f" Description: {workflow_desc}\n"
1368
+ result += f" Steps: {len(steps)}\n"
1369
+ result += f" Triggers: {len(triggers)}\n"
1370
+
1371
+ # Show runner hierarchy
1372
+ if workflow_runner:
1373
+ result += f" Runner: {workflow_runner} (specified in workflow)\n"
1374
+ elif self.default_runner:
1375
+ result += f" Runner: {self.default_runner} (from skill config)\n"
1376
+ else:
1377
+ result += f" Runner: default (no runner specified)\n"
1378
+
1379
+ elif wf_type == "python_dsl":
1380
+ python_code = workflow.get("code", "")
1381
+ result += f" Code Length: {len(python_code)} characters\n"
1382
+
1383
+ result += "\n"
1384
+
1385
+ return result
1386
+
1387
+ # Legacy single-workflow format
1388
+ result += f"Type: {self.workflow_type or 'none'}\n\n"
1389
+
1390
+ if self.workflow_type == "json" and self.workflow_data:
1391
+ workflow_name = self.workflow_data.get("name", "unknown")
1392
+ workflow_desc = self.workflow_data.get("description", "No description")
1393
+ steps = self.workflow_data.get("steps", [])
1394
+ triggers = self.workflow_data.get("triggers", [])
1395
+ workflow_runner = self.workflow_data.get("runner")
1396
+
1397
+ result += f"Name: {workflow_name}\n"
1398
+ result += f"Description: {workflow_desc}\n"
1399
+ result += f"Steps: {len(steps)}\n"
1400
+ result += f"Triggers: {len(triggers)}\n"
1401
+
1402
+ # Show runner hierarchy
1403
+ if workflow_runner:
1404
+ result += f"Workflow Runner: {workflow_runner} (will be used for execution)\n"
1405
+ elif self.default_runner:
1406
+ result += f"Workflow Runner: {self.default_runner} (from skill config)\n"
1407
+ else:
1408
+ result += f"Workflow Runner: default (no runner specified)\n"
1409
+
1410
+ elif self.workflow_type == "python_dsl":
1411
+ result += f"Python DSL Workflow\n"
1412
+ result += f"Code Length: {len(self.python_dsl_code or '')} characters\n"
1413
+
1414
+ return result
1415
+
1416
+ except Exception as e:
1417
+ logger.error(f"Failed to get workflow info: {e}", exc_info=True)
1418
+ return f"āŒ Error getting workflow info: {str(e)}"