kweaver-dolphin 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dolphin/cli/runner/runner.py +20 -0
- dolphin/cli/ui/console.py +29 -11
- dolphin/cli/utils/helpers.py +4 -4
- dolphin/core/agent/base_agent.py +2 -2
- dolphin/core/code_block/basic_code_block.py +140 -30
- dolphin/core/code_block/explore_block.py +353 -29
- dolphin/core/code_block/explore_block_v2.py +21 -17
- dolphin/core/code_block/explore_strategy.py +1 -0
- dolphin/core/code_block/judge_block.py +10 -1
- dolphin/core/code_block/skill_call_deduplicator.py +32 -10
- dolphin/core/code_block/tool_block.py +12 -3
- dolphin/core/common/constants.py +25 -1
- dolphin/core/config/global_config.py +35 -0
- dolphin/core/context/context.py +168 -5
- dolphin/core/context/cow_context.py +392 -0
- dolphin/core/flags/definitions.py +2 -2
- dolphin/core/runtime/runtime_instance.py +31 -0
- dolphin/core/skill/context_retention.py +3 -3
- dolphin/core/task_registry.py +404 -0
- dolphin/lib/__init__.py +0 -2
- dolphin/lib/skillkits/__init__.py +2 -2
- dolphin/lib/skillkits/plan_skillkit.py +756 -0
- dolphin/lib/skillkits/system_skillkit.py +103 -30
- dolphin/sdk/skill/global_skills.py +43 -3
- {kweaver_dolphin-0.2.0.dist-info → kweaver_dolphin-0.2.2.dist-info}/METADATA +1 -1
- {kweaver_dolphin-0.2.0.dist-info → kweaver_dolphin-0.2.2.dist-info}/RECORD +30 -28
- {kweaver_dolphin-0.2.0.dist-info → kweaver_dolphin-0.2.2.dist-info}/WHEEL +1 -1
- kweaver_dolphin-0.2.2.dist-info/entry_points.txt +15 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +0 -452
- kweaver_dolphin-0.2.0.dist-info/entry_points.txt +0 -27
- {kweaver_dolphin-0.2.0.dist-info → kweaver_dolphin-0.2.2.dist-info}/licenses/LICENSE.txt +0 -0
- {kweaver_dolphin-0.2.0.dist-info → kweaver_dolphin-0.2.2.dist-info}/top_level.txt +0 -0
dolphin/core/task_registry.py
ADDED
@@ -0,0 +1,404 @@
+"""Task Registry for Plan Mode.
+
+This module provides task state management for unified plan architecture.
+
+Logging conventions:
+- DEBUG: Task registration, status updates, cancellations
+- INFO: Registry lifecycle events (reset, mode changes)
+- WARNING: Invalid operations (unknown task, missing registry)
+- ERROR: Critical failures in task management
+"""
+
+import asyncio
+import time
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+from dolphin.core.logging.logger import get_logger
+
+logger = get_logger("task_registry")
+
+
+class OutputEventType(str, Enum):
+    """Output event types for UI/SDK consumers.
+
+    These events are emitted via Context.write_output() and can be consumed
+    by UI components or SDK integrations for real-time updates.
+    """
+    TASK_STARTED = "task_started"
+    TASK_COMPLETED = "task_completed"
+    TASK_FAILED = "task_failed"
+    TASK_CANCELLED = "task_cancelled"
+    TASK_PROGRESS = "task_progress"
+    PLAN_CREATED = "plan_created"
+    PLAN_UPDATED = "plan_updated"
+    PLAN_FINISHED = "plan_finished"
+
+
+class TaskStatus(str, Enum):
+    """Task status values."""
+    PENDING = "pending"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+    CANCELLED = "cancelled"
+    SKIPPED = "skipped"
+
+
+class PlanExecMode(str, Enum):
+    """Execution mode for plan orchestration."""
+    PARALLEL = "parallel"
+    SEQUENTIAL = "sequential"
+
+    @staticmethod
+    def from_str(mode: str) -> "PlanExecMode":
+        """Convert a string (seq/para/sequential/parallel) to PlanExecMode."""
+        if not mode:
+            return PlanExecMode.PARALLEL
+
+        mode = mode.lower().strip()
+        if mode in ("seq", "sequential"):
+            return PlanExecMode.SEQUENTIAL
+        if mode in ("para", "parallel"):
+            return PlanExecMode.PARALLEL
+
+        raise ValueError(f"Invalid execution mode: {mode}. Must be 'seq' or 'para'.")
+
+
+@dataclass
+class Task:
+    """Task metadata and state."""
+    id: str
+    name: str
+    prompt: str
+
+    # Runtime fields
+    status: TaskStatus = TaskStatus.PENDING
+    answer: Optional[str] = None
+    think: Optional[str] = None
+    block_answer: Optional[str] = None
+    error: Optional[str] = None
+    started_at: Optional[float] = None
+    duration: Optional[float] = None
+    attempt: int = 0
+
+    # Reserved for Phase 2
+    depends_on: List[str] = field(default_factory=list)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert Task to dictionary."""
+        return {
+            "id": self.id,
+            "name": self.name,
+            "prompt": self.prompt,
+            "status": self.status.value,
+            "answer": self.answer,
+            "think": self.think,
+            "block_answer": self.block_answer,
+            "error": self.error,
+            "started_at": self.started_at,
+            "duration": self.duration,
+            "attempt": self.attempt,
+            "depends_on": self.depends_on,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "Task":
+        """Create Task from dictionary."""
+        return cls(
+            id=data["id"],
+            name=data["name"],
+            prompt=data["prompt"],
+            status=TaskStatus(data["status"]),
+            answer=data.get("answer"),
+            think=data.get("think"),
+            block_answer=data.get("block_answer"),
+            error=data.get("error"),
+            started_at=data.get("started_at"),
+            duration=data.get("duration"),
+            attempt=data.get("attempt", 0),
+            depends_on=data.get("depends_on", []),
+        )
+
+
+class TaskRegistry:
+    """Persistent task state registry.
+
+    Notes:
+    - Stores only serializable task state.
+    - Runtime handles (asyncio.Task) are kept outside for correctness and recoverability.
+    - Thread-safe via asyncio.Lock.
+    """
+
+    def __init__(self):
+        self.tasks: Dict[str, Task] = {}
+        self._lock = asyncio.Lock()
+
+        # Config fields (set by PlanSkillkit._plan_tasks)
+        self.exec_mode: PlanExecMode = PlanExecMode.PARALLEL
+        self.max_concurrency: int = 5
+
+        # Runtime handles (not persisted)
+        self.running_asyncio_tasks: Dict[str, asyncio.Task] = {}
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert TaskRegistry to dictionary."""
+        return {
+            "tasks": {task_id: task.to_dict() for task_id, task in self.tasks.items()},
+            "exec_mode": self.exec_mode.value,
+            "max_concurrency": self.max_concurrency,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "TaskRegistry":
+        """Create TaskRegistry from dictionary."""
+        registry = cls()
+        registry.tasks = {
+            task_id: Task.from_dict(task_data)
+            for task_id, task_data in data.get("tasks", {}).items()
+        }
+        registry.exec_mode = PlanExecMode(data.get("exec_mode", PlanExecMode.PARALLEL.value))
+        registry.max_concurrency = data.get("max_concurrency", 5)
+        return registry
+
+    async def add_task(self, task: Task):
+        """Register a new task."""
+        async with self._lock:
+            self.tasks[task.id] = task
+            logger.debug(f"Task registered: {task.id} ({task.name})")
+
+    async def get_task(self, task_id: str) -> Optional[Task]:
+        """Retrieve a task by ID (thread-safe)."""
+        async with self._lock:
+            return self.tasks.get(task_id)
+
+    async def get_all_tasks(self) -> List[Task]:
+        """Return all tasks (thread-safe)."""
+        async with self._lock:
+            return list(self.tasks.values())
+
+    async def get_pending_tasks(self) -> List[Task]:
+        """Return tasks that are pending (thread-safe)."""
+        async with self._lock:
+            return [t for t in self.tasks.values() if t.status == TaskStatus.PENDING]
+
+    async def get_ready_tasks(self) -> List[Task]:
+        """Return tasks that are ready to be started (thread-safe).
+
+        Phase 1 (no dependency scheduling):
+        - All PENDING tasks are considered ready.
+
+        Phase 2 (reserved):
+        - Check depends_on and only return tasks whose dependencies are completed.
+        """
+        async with self._lock:
+            return [task for task in self.tasks.values() if task.status == TaskStatus.PENDING]
+
+    async def get_running_tasks(self) -> List[Task]:
+        """Return tasks that are running (thread-safe)."""
+        async with self._lock:
+            return [t for t in self.tasks.values() if t.status == TaskStatus.RUNNING]
+
+    async def get_completed_tasks(self) -> List[Task]:
+        """Return tasks that are completed (thread-safe)."""
+        async with self._lock:
+            return [t for t in self.tasks.values() if t.status == TaskStatus.COMPLETED]
+
+    async def get_failed_tasks(self) -> List[Task]:
+        """Return tasks that have failed (thread-safe)."""
+        async with self._lock:
+            return [t for t in self.tasks.values() if t.status == TaskStatus.FAILED]
+
+    async def has_tasks(self) -> bool:
+        """Return whether any tasks are registered (thread-safe)."""
+        async with self._lock:
+            return bool(self.tasks)
+
+    async def reset(self):
+        """Reset task state (used for replan).
+
+        Clears:
+        - All tasks
+        - Running asyncio task handles (cancels them first)
+
+        Preserves:
+        - exec_mode (will be overwritten by next _plan_tasks call)
+        - max_concurrency (will be overwritten by next _plan_tasks call)
+
+        Note:
+            This is an async method to ensure proper locking for concurrent safety.
+        """
+        async with self._lock:
+            # Cancel all running asyncio tasks to prevent background leaks during replan
+            for task_id, asyncio_task in self.running_asyncio_tasks.items():
+                if not asyncio_task.done():
+                    asyncio_task.cancel()
+                    logger.debug(f"Cancelled orphaned task {task_id} during registry reset")
+
+            self.tasks.clear()
+            self.running_asyncio_tasks.clear()
+            logger.info("TaskRegistry reset (replan)")
+
+    async def is_all_done(self) -> bool:
+        """Return whether all tasks have reached a terminal state (thread-safe)."""
+        async with self._lock:
+            terminal = {TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.CANCELLED, TaskStatus.SKIPPED}
+            return all(task.status in terminal for task in self.tasks.values())
+
+    async def update_status(
+        self,
+        task_id: str,
+        status: TaskStatus,
+        **kwargs
+    ):
+        """Update task status and related fields.
+
+        Args:
+            task_id: Task identifier
+            status: New status
+            **kwargs: Additional fields to update (answer, think, block_answer, error, started_at, etc.)
+
+        Note:
+            Terminal states (COMPLETED, FAILED, CANCELLED, SKIPPED) cannot be transitioned.
+            This prevents race conditions during task cancellation or completion.
+        """
+        async with self._lock:
+            task = self.tasks.get(task_id)
+            if not task:
+                logger.warning(f"Cannot update status for unknown task: {task_id}")
+                return
+
+            # Validate state transitions: terminal states cannot be changed
+            # Note: FAILED is excluded from terminal states to allow retries
+            terminal_states = {TaskStatus.COMPLETED, TaskStatus.CANCELLED, TaskStatus.SKIPPED}
+            if task.status in terminal_states:
+                logger.warning(
+                    f"Cannot transition task {task_id} from terminal state {task.status.value} to {status.value}"
+                )
+                return
+
+            task.status = status
+
+            # Update additional fields
+            for key, value in kwargs.items():
+                if hasattr(task, key):
+                    setattr(task, key, value)
+
+            # Compute duration for terminal states
+            if status in (TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.CANCELLED):
+                if task.started_at and not task.duration:
+                    task.duration = time.time() - task.started_at
+
+            logger.debug(f"Task {task_id} status updated: {status.value}")
+
+    async def get_status_counts(self) -> Dict[str, int]:
+        """Return count per status (thread-safe)."""
+        async with self._lock:
+            counts = {status.value: 0 for status in TaskStatus}
+            for task in self.tasks.values():
+                counts[task.status.value] += 1
+            return counts
+
+    async def get_progress_signature(self) -> tuple:
+        """Compute a signature representing the current task progress state (thread-safe).
+
+        Returns:
+            A tuple of (task_id, status) pairs sorted by task_id.
+            This signature changes whenever any task changes status.
+
+        Usage:
+            Used by ExploreBlock to detect whether tasks have made progress.
+            If the signature is the same across multiple rounds, it indicates
+            the plan is stalled (no status changes).
+
+        Example:
+            >>> registry.get_progress_signature()
+            (('task_1', 'running'), ('task_2', 'pending'))
+        """
+        async with self._lock:
+            tasks = list(self.tasks.values())
+            return tuple(
+                (t.id, getattr(t.status, "value", str(t.status)))
+                for t in sorted(tasks, key=lambda x: x.id)
+            )

+    async def get_all_status(self) -> str:
+        """Return a formatted status summary (for _check_progress, thread-safe).
+
+        Returns:
+            A multi-line string with task status, including error details for failed tasks.
+        """
+        async with self._lock:
+            lines = []
+            now = time.time()
+            for task in self.tasks.values():
+                if task.status == TaskStatus.RUNNING and task.started_at:
+                    duration_str = f"{now - task.started_at:.1f}s+"
+                else:
+                    duration_str = f"{task.duration:.1f}s" if task.duration else "N/A"
+
+                icon_map = {
+                    TaskStatus.PENDING: "⏳",
+                    TaskStatus.RUNNING: "🔄",
+                    TaskStatus.COMPLETED: "✅",
+                    TaskStatus.FAILED: "❌",
+                    TaskStatus.CANCELLED: "🚫",
+                    TaskStatus.SKIPPED: "⏭️",
+                }
+                icon = icon_map.get(task.status, "?")
+                status_label = task.status.value
+
+                # Item 3 Optimization: Check if task is in the process of being cancelled
+                if task.status == TaskStatus.RUNNING and task.id in self.running_asyncio_tasks:
+                    asyncio_task = self.running_asyncio_tasks[task.id]
+                    # Check if cancelling (Python 3.11+) or if it's already done but status not updated
+                    is_cancelling = False
+                    if hasattr(asyncio_task, "cancelling"):
+                        is_cancelling = asyncio_task.cancelling() > 0
+
+                    if is_cancelling:
+                        icon = "⏳🚫"
+                        status_label = "cancelling..."
+
+                base_line = f"{icon} {task.id}: {task.name} [{status_label}] ({duration_str})"
+
+                # For failed tasks, include error details to enable self-correction
+                if task.status == TaskStatus.FAILED and task.error:
+                    error_preview = task.error[:150]  # Limit error length
+                    if len(task.error) > 150:
+                        error_preview += "..."
+                    base_line += f"\n Error: {error_preview}"
+
+                lines.append(base_line)
+
+            return "\n".join(lines)
+
+    async def cancel_all_running(self) -> int:
+        """Cancel all running asyncio tasks and update their status.
+
+        Returns:
+            Number of tasks cancelled
+
+        Note:
+            This method both cancels the asyncio.Task objects and updates
+            the Task status to CANCELLED to keep state synchronized.
+        """
+        async with self._lock:
+            cancelled = 0
+            for task_id, asyncio_task in list(self.running_asyncio_tasks.items()):
+                if not asyncio_task.done():
+                    asyncio_task.cancel()
+                    cancelled += 1
+                    # Update task status to CANCELLED
+                    # This ensures Registry state matches actual execution state
+                    task = self.tasks.get(task_id)
+                    if task:
+                        task.status = TaskStatus.CANCELLED
+                        # Compute duration if task was started
+                        if task.started_at and not task.duration:
+                            task.duration = time.time() - task.started_at
+                    logger.debug(f"Cancelled running task: {task_id}")
+
+            self.running_asyncio_tasks.clear()
+            return cancelled
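Read as a whole, the new module gives PlanSkillkit and ExploreBlock a single async surface for registering, updating, and inspecting plan tasks. Below is a minimal usage sketch of that surface, assuming the 0.2.2 wheel is installed; the task ids, names, and prompts are illustrative, not taken from the package.

import asyncio
import time

from dolphin.core.task_registry import PlanExecMode, Task, TaskRegistry, TaskStatus


async def main() -> None:
    registry = TaskRegistry()
    registry.exec_mode = PlanExecMode.from_str("seq")  # accepts "seq"/"sequential"/"para"/"parallel"

    # Register two illustrative tasks (ids and prompts are made up for this sketch).
    await registry.add_task(Task(id="task_1", name="collect", prompt="Collect the inputs"))
    await registry.add_task(Task(id="task_2", name="summarize", prompt="Summarize the inputs"))

    # Drive task_1 through a normal lifecycle; duration is derived from started_at.
    await registry.update_status("task_1", TaskStatus.RUNNING, started_at=time.time())
    await registry.update_status("task_1", TaskStatus.COMPLETED, answer="done")

    # Terminal states are sticky: this transition is rejected with a warning.
    await registry.update_status("task_1", TaskStatus.RUNNING)

    print(await registry.get_status_counts())       # e.g. {'pending': 1, 'completed': 1, ...}
    print(await registry.get_progress_signature())  # (('task_1', 'completed'), ('task_2', 'pending'))
    print(await registry.is_all_done())              # False: task_2 is still pending

    # State round-trips through plain dicts for persistence.
    restored = TaskRegistry.from_dict(registry.to_dict())
    print(await restored.get_all_status())


asyncio.run(main())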
dolphin/lib/__init__.py
CHANGED
@@ -24,7 +24,6 @@ if TYPE_CHECKING:
     MemorySkillkit,
     MCPSkillkit,
     OntologySkillkit,
-    PlanActSkillkit,
     CognitiveSkillkit,
     VMSkillkit,
     NoopSkillkit,
@@ -61,7 +60,6 @@ _module_lookup = {
     "MemorySkillkit": "dolphin.lib.skillkits",
     "MCPSkillkit": "dolphin.lib.skillkits",
     "OntologySkillkit": "dolphin.lib.skillkits",
-    "PlanActSkillkit": "dolphin.lib.skillkits",
     "CognitiveSkillkit": "dolphin.lib.skillkits",
     "VMSkillkit": "dolphin.lib.skillkits",
     "NoopSkillkit": "dolphin.lib.skillkits",
dolphin/lib/skillkits/__init__.py
CHANGED
@@ -9,7 +9,7 @@ if TYPE_CHECKING:
     from dolphin.lib.skillkits.memory_skillkit import MemorySkillkit
     from dolphin.lib.skillkits.mcp_skillkit import MCPSkillkit
     from dolphin.lib.skillkits.ontology_skillkit import OntologySkillkit
-    from dolphin.lib.skillkits.
+    from dolphin.lib.skillkits.plan_skillkit import PlanSkillkit
     from dolphin.lib.skillkits.cognitive_skillkit import CognitiveSkillkit
     from dolphin.lib.skillkits.vm_skillkit import VMSkillkit
     from dolphin.lib.skillkits.noop_skillkit import NoopSkillkit
@@ -24,7 +24,7 @@ _module_lookup = {
     "MemorySkillkit": "dolphin.lib.skillkits.memory_skillkit",
     "MCPSkillkit": "dolphin.lib.skillkits.mcp_skillkit",
     "OntologySkillkit": "dolphin.lib.skillkits.ontology_skillkit",
-    "
+    "PlanSkillkit": "dolphin.lib.skillkits.plan_skillkit",
    "CognitiveSkillkit": "dolphin.lib.skillkits.cognitive_skillkit",
     "VMSkillkit": "dolphin.lib.skillkits.vm_skillkit",
     "NoopSkillkit": "dolphin.lib.skillkits.noop_skillkit",