alita-sdk 0.3.528__py3-none-any.whl → 0.3.554__py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.


This version of alita-sdk might be problematic.

Files changed (46)
  1. alita_sdk/community/__init__.py +8 -4
  2. alita_sdk/configurations/__init__.py +1 -0
  3. alita_sdk/configurations/openapi.py +111 -0
  4. alita_sdk/runtime/clients/client.py +185 -10
  5. alita_sdk/runtime/langchain/langraph_agent.py +2 -2
  6. alita_sdk/runtime/langchain/utils.py +46 -0
  7. alita_sdk/runtime/skills/__init__.py +91 -0
  8. alita_sdk/runtime/skills/callbacks.py +498 -0
  9. alita_sdk/runtime/skills/discovery.py +540 -0
  10. alita_sdk/runtime/skills/executor.py +610 -0
  11. alita_sdk/runtime/skills/input_builder.py +371 -0
  12. alita_sdk/runtime/skills/models.py +330 -0
  13. alita_sdk/runtime/skills/registry.py +355 -0
  14. alita_sdk/runtime/skills/skill_runner.py +330 -0
  15. alita_sdk/runtime/toolkits/__init__.py +2 -0
  16. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  17. alita_sdk/runtime/toolkits/tools.py +76 -9
  18. alita_sdk/runtime/tools/__init__.py +3 -1
  19. alita_sdk/runtime/tools/artifact.py +70 -21
  20. alita_sdk/runtime/tools/image_generation.py +50 -44
  21. alita_sdk/runtime/tools/llm.py +363 -44
  22. alita_sdk/runtime/tools/loop.py +3 -1
  23. alita_sdk/runtime/tools/loop_output.py +3 -1
  24. alita_sdk/runtime/tools/skill_router.py +776 -0
  25. alita_sdk/runtime/tools/tool.py +3 -1
  26. alita_sdk/runtime/tools/vectorstore.py +7 -2
  27. alita_sdk/runtime/tools/vectorstore_base.py +7 -2
  28. alita_sdk/runtime/utils/AlitaCallback.py +2 -1
  29. alita_sdk/runtime/utils/utils.py +34 -0
  30. alita_sdk/tools/__init__.py +41 -1
  31. alita_sdk/tools/ado/work_item/ado_wrapper.py +33 -2
  32. alita_sdk/tools/base_indexer_toolkit.py +36 -24
  33. alita_sdk/tools/confluence/api_wrapper.py +5 -6
  34. alita_sdk/tools/confluence/loader.py +4 -2
  35. alita_sdk/tools/openapi/__init__.py +280 -120
  36. alita_sdk/tools/openapi/api_wrapper.py +883 -0
  37. alita_sdk/tools/openapi/tool.py +20 -0
  38. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  39. alita_sdk/tools/servicenow/__init__.py +9 -9
  40. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  41. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/METADATA +2 -2
  42. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/RECORD +46 -33
  43. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/WHEEL +0 -0
  44. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/entry_points.txt +0 -0
  45. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/licenses/LICENSE +0 -0
  46. {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/skills/executor.py
@@ -0,0 +1,610 @@
+"""
+Skill execution service with subprocess isolation.
+
+This module provides the core execution engine for skills, supporting
+both subprocess and remote execution modes with proper isolation
+and result handling.
+"""
+
+import json
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import uuid
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from .input_builder import SkillInputBuilder
+from .models import (
+    ExecutionMode, SkillExecutionError, SkillExecutionResult,
+    SkillMetadata, SkillOutputFile, SkillStatus, SkillType, SkillSource
+)
+
+logger = logging.getLogger(__name__)
+
+
+class SkillExecutor:
+    """
+    Base class for skill execution with different isolation modes.
+    """
+
+    def __init__(self, alita_client=None):
+        """
+        Initialize skill executor.
+
+        Args:
+            alita_client: AlitaClient instance for remote execution and LLM access.
+        """
+        self.alita_client = alita_client
+        self.input_builder = SkillInputBuilder()
+
+    def execute_skill(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None,
+        execution_id: Optional[str] = None
+    ) -> SkillExecutionResult:
+        """
+        Execute a skill with the specified parameters.
+
+        Args:
+            metadata: Skill metadata containing execution configuration.
+            task: Main task or user input for the skill.
+            context: Additional context (variables or state).
+            chat_history: Chat history for agent skills.
+            execution_id: Optional execution ID for tracking.
+
+        Returns:
+            SkillExecutionResult with output and metadata.
+
+        Raises:
+            SkillExecutionError: If execution fails.
+        """
+        execution_id = execution_id or str(uuid.uuid4())
+
+        logger.info(f"Executing skill '{metadata.name}' (mode: {metadata.execution.mode})")
+
+        if metadata.execution.mode == ExecutionMode.SUBPROCESS:
+            executor = SubprocessSkillExecutor(self.alita_client)
+        elif metadata.execution.mode == ExecutionMode.REMOTE:
+            executor = RemoteSkillExecutor(self.alita_client)
+        else:
+            raise SkillExecutionError(f"Unsupported execution mode: {metadata.execution.mode}")
+
+        return executor._execute_skill_internal(
+            metadata, task, context, chat_history, execution_id
+        )
+
+
+class SubprocessSkillExecutor(SkillExecutor):
+    """
+    Subprocess-based skill executor for local isolated execution.
+    """
+
+    def _execute_skill_internal(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None,
+        execution_id: str = None
+    ) -> SkillExecutionResult:
+        """
+        Execute skill in subprocess with isolation.
+        """
+        start_time = time.time()
+
+        # Create isolated working directory
+        work_dir = self._create_working_directory(metadata.name, execution_id)
+
+        try:
+            # Prepare skill input
+            skill_input = self.input_builder.prepare_input(
+                metadata, task, context, chat_history
+            )
+
+            # Prepare execution environment
+            env = self._prepare_environment(metadata, work_dir)
+
+            # Create input file for subprocess
+            input_file = work_dir / "skill_input.json"
+            with open(input_file, 'w', encoding='utf-8') as f:
+                json.dump({
+                    'skill_metadata': metadata.dict(),
+                    'skill_input': skill_input,
+                    'execution_id': execution_id
+                }, f, indent=2, default=str)
+
+            # Execute skill in subprocess
+            result = self._run_subprocess(metadata, work_dir, input_file, env)
+
+            # Calculate duration
+            duration = time.time() - start_time
+
+            # Parse and return result
+            return self._parse_execution_result(
+                metadata, result, work_dir, execution_id, duration
+            )
+
+        except Exception as e:
+            duration = time.time() - start_time
+            logger.error(f"Skill execution failed for '{metadata.name}': {e}")
+
+            return SkillExecutionResult(
+                skill_name=metadata.name,
+                skill_type=metadata.skill_type,
+                status=SkillStatus.ERROR,
+                execution_mode=ExecutionMode.SUBPROCESS,
+                execution_id=execution_id,
+                output_text=f"Skill execution failed: {str(e)}",
+                output_files=[],
+                duration=duration,
+                working_directory=work_dir,
+                error_details=str(e)
+            )
+
+        finally:
+            # Cleanup based on policy
+            if metadata.results.cleanup_policy == "cleanup":
+                self._cleanup_working_directory(work_dir)
+
+    def _create_working_directory(self, skill_name: str, execution_id: str) -> Path:
+        """
+        Create isolated working directory for skill execution.
+
+        Args:
+            skill_name: Name of the skill being executed.
+            execution_id: Unique execution identifier.
+
+        Returns:
+            Path to created working directory.
+        """
+        # Create unique directory name
+        dir_name = f"skill_{skill_name}_{execution_id}_{int(time.time())}"
+
+        # Use system temp directory
+        base_temp = Path(tempfile.gettempdir())
+        work_dir = base_temp / "alita_skills" / dir_name
+
+        # Create directory with proper permissions
+        work_dir.mkdir(parents=True, exist_ok=True)
+
+        logger.debug(f"Created working directory: {work_dir}")
+        return work_dir
+
+    def _prepare_environment(self, metadata: SkillMetadata, work_dir: Path) -> Dict[str, str]:
+        """
+        Prepare environment variables for subprocess execution.
+
+        Args:
+            metadata: Skill metadata with environment configuration.
+            work_dir: Working directory path.
+
+        Returns:
+            Dictionary of environment variables.
+        """
+        # Start with current environment
+        env = os.environ.copy()
+
+        # Add skill-specific environment variables
+        env.update(metadata.execution.environment)
+
+        # Add standard variables
+        env.update({
+            'SKILL_NAME': metadata.name,
+            'SKILL_TYPE': metadata.skill_type.value,
+            'SKILL_WORK_DIR': str(work_dir),
+            'PYTHONPATH': env.get('PYTHONPATH', '') + f":{work_dir}",
+        })
+
+        # Add alita-sdk to path if not present
+        alita_sdk_path = str(Path(__file__).parent.parent.parent)
+        if alita_sdk_path not in env.get('PYTHONPATH', ''):
+            env['PYTHONPATH'] = f"{env['PYTHONPATH']}:{alita_sdk_path}"
+
+        return env
+
+    def _run_subprocess(
+        self,
+        metadata: SkillMetadata,
+        work_dir: Path,
+        input_file: Path,
+        env: Dict[str, str]
+    ) -> subprocess.CompletedProcess:
+        """
+        Run the skill in a subprocess.
+
+        Args:
+            metadata: Skill metadata.
+            work_dir: Working directory.
+            input_file: Path to input JSON file.
+            env: Environment variables.
+
+        Returns:
+            CompletedProcess result.
+
+        Raises:
+            SkillExecutionError: If subprocess execution fails.
+        """
+        # Build command to run skill runner
+        cmd = [
+            sys.executable,
+            "-m", "alita_sdk.runtime.skills.skill_runner",
+            "--input-file", str(input_file),
+            "--work-dir", str(work_dir)
+        ]
+
+        logger.debug(f"Running subprocess command: {' '.join(cmd)}")
+
+        try:
+            result = subprocess.run(
+                cmd,
+                cwd=work_dir,
+                env=env,
+                capture_output=True,
+                text=True,
+                timeout=metadata.execution.timeout
+            )
+
+            logger.debug(f"Subprocess completed with return code: {result.returncode}")
+
+            if result.returncode != 0:
+                error_msg = f"Skill subprocess failed with code {result.returncode}: {result.stderr}"
+                logger.error(error_msg)
+                raise SkillExecutionError(error_msg)
+
+            return result
+
+        except subprocess.TimeoutExpired as e:
+            error_msg = f"Skill execution timed out after {metadata.execution.timeout} seconds"
+            logger.error(error_msg)
+            raise SkillExecutionError(error_msg) from e
+
+        except Exception as e:
+            error_msg = f"Subprocess execution failed: {str(e)}"
+            logger.error(error_msg)
+            raise SkillExecutionError(error_msg) from e
+
+    def _parse_execution_result(
+        self,
+        metadata: SkillMetadata,
+        subprocess_result: subprocess.CompletedProcess,
+        work_dir: Path,
+        execution_id: str,
+        duration: float
+    ) -> SkillExecutionResult:
+        """
+        Parse subprocess result into SkillExecutionResult.
+
+        Args:
+            metadata: Skill metadata.
+            subprocess_result: Result from subprocess execution.
+            work_dir: Working directory.
+            execution_id: Execution identifier.
+            duration: Execution duration.
+
+        Returns:
+            Parsed SkillExecutionResult.
+        """
+        # Try to read result file written by skill runner
+        result_file = work_dir / "skill_result.json"
+
+        if result_file.exists():
+            try:
+                with open(result_file, 'r', encoding='utf-8') as f:
+                    result_data = json.load(f)
+
+                output_text = result_data.get('output_text', subprocess_result.stdout)
+                status = SkillStatus(result_data.get('status', 'success'))
+                error_details = result_data.get('error_details')
+
+            except Exception as e:
+                logger.warning(f"Failed to parse result file: {e}, using subprocess output")
+                output_text = subprocess_result.stdout
+                status = SkillStatus.SUCCESS
+                error_details = None
+        else:
+            # Fallback to subprocess stdout
+            output_text = subprocess_result.stdout
+            status = SkillStatus.SUCCESS
+            error_details = None
+
+        # Find output files
+        output_files = self._discover_output_files(work_dir, metadata.results.output_files)
+
+        return SkillExecutionResult(
+            skill_name=metadata.name,
+            skill_type=metadata.skill_type,
+            status=status,
+            execution_mode=ExecutionMode.SUBPROCESS,
+            execution_id=execution_id,
+            output_text=output_text,
+            output_files=output_files,
+            duration=duration,
+            working_directory=work_dir,
+            error_details=error_details
+        )
+
+    def _discover_output_files(
+        self,
+        work_dir: Path,
+        expected_patterns: List[str]
+    ) -> List[SkillOutputFile]:
+        """
+        Discover output files in the working directory.
+
+        Args:
+            work_dir: Working directory to search.
+            expected_patterns: List of expected file patterns.
+
+        Returns:
+            List of discovered output files.
+        """
+        output_files = []
+
+        # Look for expected files first
+        for pattern in expected_patterns:
+            pattern_path = work_dir / pattern
+            if pattern_path.exists() and pattern_path.is_file():
+                output_files.append(self._create_output_file_reference(pattern_path))
+
+        # Also discover any additional files that might have been created
+        for file_path in work_dir.rglob("*"):
+            if (file_path.is_file() and
+                    file_path.name not in ['skill_input.json', 'skill_result.json'] and
+                    not any(str(file_path).endswith(pattern) for pattern in expected_patterns)):
+
+                # Only include files that seem to be outputs (not system files)
+                if not file_path.name.startswith('.') and file_path.suffix in [
+                    '.json', '.md', '.txt', '.csv', '.html', '.yaml', '.yml'
+                ]:
+                    output_files.append(self._create_output_file_reference(file_path))
+
+        return output_files
+
+    def _create_output_file_reference(self, file_path: Path) -> SkillOutputFile:
+        """
+        Create a SkillOutputFile reference for a discovered file.
+
+        Args:
+            file_path: Path to the output file.
+
+        Returns:
+            SkillOutputFile reference.
+        """
+        # Determine file type from extension
+        file_type = file_path.suffix.lstrip('.').lower()
+        if not file_type:
+            file_type = 'unknown'
+
+        # Get file size
+        try:
+            size_bytes = file_path.stat().st_size
+        except OSError:
+            size_bytes = 0
+
+        # Generate description based on file name
+        description = file_path.stem.replace('_', ' ').replace('-', ' ').title()
+
+        return SkillOutputFile(
+            path=file_path,
+            description=description,
+            file_type=file_type,
+            size_bytes=size_bytes
+        )
+
+    def _cleanup_working_directory(self, work_dir: Path) -> None:
+        """
+        Clean up working directory after execution.
+
+        Args:
+            work_dir: Directory to clean up.
+        """
+        try:
+            if work_dir.exists():
+                shutil.rmtree(work_dir)
+                logger.debug(f"Cleaned up working directory: {work_dir}")
+        except Exception as e:
+            logger.warning(f"Failed to cleanup working directory {work_dir}: {e}")
+
+
+class RemoteSkillExecutor(SkillExecutor):
+    """
+    Remote skill executor using AlitaClient for distributed execution.
+    """
+
+    def _execute_skill_internal(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None,
+        execution_id: str = None
+    ) -> SkillExecutionResult:
+        """
+        Execute skill remotely via AlitaClient.
+
+        For platform-based skills (agents/pipelines), uses the AlitaClient
+        to execute them directly on the platform.
+        """
+        if not self.alita_client:
+            raise SkillExecutionError(
+                "AlitaClient is required for remote skill execution"
+            )
+
+        start_time = time.time()
+
+        try:
+            # Handle platform-based skills (agents/pipelines)
+            if metadata.source == SkillSource.PLATFORM:
+                return self._execute_platform_skill(
+                    metadata, task, context, chat_history, execution_id, start_time
+                )
+
+            # Handle filesystem-based skills (remote execution)
+            else:
+                return self._execute_filesystem_skill_remotely(
+                    metadata, task, context, chat_history, execution_id, start_time
+                )
+
+        except Exception as e:
+            duration = time.time() - start_time
+            logger.error(f"Remote skill execution failed for '{metadata.name}': {e}")
+
+            return SkillExecutionResult(
+                skill_name=metadata.name,
+                skill_type=metadata.skill_type,
+                status=SkillStatus.ERROR,
+                execution_mode=ExecutionMode.REMOTE,
+                execution_id=execution_id,
+                output_text=f"Remote skill execution failed: {str(e)}",
+                output_files=[],
+                duration=duration,
+                error_details=str(e)
+            )
+
+    def _execute_platform_skill(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None,
+        execution_id: str = None,
+        start_time: float = None
+    ) -> SkillExecutionResult:
+        """
+        Execute a platform-based skill (agent or pipeline).
+        """
+        try:
+            if metadata.skill_type == SkillType.AGENT:
+                # Execute agent via AlitaClient
+                result = self._execute_platform_agent(metadata, task, context, chat_history)
+            elif metadata.skill_type == SkillType.PIPELINE:
+                # Execute pipeline via AlitaClient
+                result = self._execute_platform_pipeline(metadata, task, context, chat_history)
+            else:
+                raise SkillExecutionError(f"Unsupported platform skill type: {metadata.skill_type}")
+
+            duration = time.time() - start_time
+
+            return SkillExecutionResult(
+                skill_name=metadata.name,
+                skill_type=metadata.skill_type,
+                status=SkillStatus.SUCCESS,
+                execution_mode=ExecutionMode.REMOTE,
+                execution_id=execution_id,
+                output_text=result,
+                output_files=[],
+                duration=duration,
+                error_details=None
+            )
+
+        except Exception as e:
+            duration = time.time() - start_time
+            raise SkillExecutionError(f"Platform skill execution failed: {e}")
+
+    def _execute_platform_agent(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None
+    ) -> str:
+        """Execute a platform-hosted agent."""
+        try:
+            # Get agent application from platform
+            app = self.alita_client.application(metadata.id, metadata.version_id)
+
+            # Prepare input message for agent
+            if chat_history:
+                # If we have chat history, append the task as the latest message
+                messages = chat_history + [{"role": "user", "content": task}]
+            else:
+                # Create fresh conversation with the task
+                messages = [{"role": "user", "content": task}]
+
+            # Add context as variables if provided
+            variables = context or {}
+
+            # Execute the agent
+            logger.info(f"Executing platform agent {metadata.id}/{metadata.version_id} with task: {task}")
+            response = app.invoke({
+                "input": task,
+                "messages": messages,
+                **variables  # Spread context variables
+            })
+
+            # Extract output text from response
+            if isinstance(response, dict):
+                return response.get("output", str(response))
+            else:
+                return str(response)
+
+        except Exception as e:
+            logger.error(f"Failed to execute platform agent {metadata.id}/{metadata.version_id}: {e}")
+            raise SkillExecutionError(f"Agent execution failed: {e}")
+
+    def _execute_platform_pipeline(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None
+    ) -> str:
+        """Execute a platform-hosted pipeline."""
+        try:
+            # Get pipeline application from platform
+            app = self.alita_client.application(metadata.id, metadata.version_id)
+
+            # Prepare input for pipeline
+            # Pipelines typically work with input and context/state variables
+            pipeline_input = {
+                "input": task
+            }
+
+            # Add context variables
+            if context:
+                pipeline_input.update(context)
+
+            # Execute the pipeline
+            logger.info(f"Executing platform pipeline {metadata.id}/{metadata.version_id} with task: {task}")
+            response = app.invoke(pipeline_input)
+
+            # Extract output text from response
+            if isinstance(response, dict):
+                return response.get("output", str(response))
+            else:
+                return str(response)
+
+        except Exception as e:
+            logger.error(f"Failed to execute platform pipeline {metadata.id}/{metadata.version_id}: {e}")
+            raise SkillExecutionError(f"Pipeline execution failed: {e}")
+
+    def _execute_filesystem_skill_remotely(
+        self,
+        metadata: SkillMetadata,
+        task: str,
+        context: Optional[Dict[str, Any]] = None,
+        chat_history: Optional[List[Dict[str, str]]] = None,
+        execution_id: str = None,
+        start_time: float = None
+    ) -> SkillExecutionResult:
+        """
+        Execute a filesystem-based skill remotely.
+
+        This is a placeholder for future remote execution of filesystem skills.
+        For now, it falls back to subprocess execution.
+        """
+        logger.warning(f"Remote execution of filesystem skill '{metadata.name}' not implemented, using subprocess")
+
+        # Fall back to subprocess execution
+        subprocess_executor = SubprocessSkillExecutor(self.alita_client)
+        return subprocess_executor._execute_skill_internal(
+            metadata, task, context, chat_history, execution_id
+        )
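
For orientation, a minimal usage sketch of the new executor, based only on the signatures visible in this hunk. Import paths follow the file list above; how a SkillMetadata instance is constructed lives in models.py and discovery.py (not shown here), so the metadata argument below is assumed to come from those modules rather than built by hand.

# Hypothetical usage sketch; not part of the diff.
from alita_sdk.runtime.skills.executor import SkillExecutor

def run_locally(metadata, task: str):
    # metadata is assumed to be a SkillMetadata whose execution.mode is
    # ExecutionMode.SUBPROCESS, e.g. as produced by the discovery/registry
    # modules added in this release.
    executor = SkillExecutor(alita_client=None)  # an AlitaClient is required only for REMOTE mode
    result = executor.execute_skill(
        metadata=metadata,
        task=task,
        context={"locale": "en"},  # optional variables passed through to the skill
        chat_history=None,         # only meaningful for agent skills
    )
    print(result.status, result.duration, result.working_directory)
    print(result.output_text)
    for output in result.output_files:
        # SkillOutputFile fields as created by _create_output_file_reference
        print(output.path, output.file_type, output.size_bytes)
    return result

Note that metadata.results.cleanup_policy controls whether the working directory is removed after the run ("cleanup" deletes it; otherwise it is left in place under the system temp directory).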