ai-coding-assistant 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_coding_assistant-0.5.0.dist-info/METADATA +226 -0
- ai_coding_assistant-0.5.0.dist-info/RECORD +89 -0
- ai_coding_assistant-0.5.0.dist-info/WHEEL +4 -0
- ai_coding_assistant-0.5.0.dist-info/entry_points.txt +3 -0
- ai_coding_assistant-0.5.0.dist-info/licenses/LICENSE +21 -0
- coding_assistant/__init__.py +3 -0
- coding_assistant/__main__.py +19 -0
- coding_assistant/cli/__init__.py +1 -0
- coding_assistant/cli/app.py +158 -0
- coding_assistant/cli/commands/__init__.py +19 -0
- coding_assistant/cli/commands/ask.py +178 -0
- coding_assistant/cli/commands/config.py +438 -0
- coding_assistant/cli/commands/diagram.py +267 -0
- coding_assistant/cli/commands/document.py +410 -0
- coding_assistant/cli/commands/explain.py +192 -0
- coding_assistant/cli/commands/fix.py +249 -0
- coding_assistant/cli/commands/index.py +162 -0
- coding_assistant/cli/commands/refactor.py +245 -0
- coding_assistant/cli/commands/search.py +182 -0
- coding_assistant/cli/commands/serve_docs.py +128 -0
- coding_assistant/cli/repl.py +381 -0
- coding_assistant/cli/theme.py +90 -0
- coding_assistant/codebase/__init__.py +1 -0
- coding_assistant/codebase/crawler.py +93 -0
- coding_assistant/codebase/parser.py +266 -0
- coding_assistant/config/__init__.py +25 -0
- coding_assistant/config/config_manager.py +615 -0
- coding_assistant/config/settings.py +82 -0
- coding_assistant/context/__init__.py +19 -0
- coding_assistant/context/chunker.py +443 -0
- coding_assistant/context/enhanced_retriever.py +322 -0
- coding_assistant/context/hybrid_search.py +311 -0
- coding_assistant/context/ranker.py +355 -0
- coding_assistant/context/retriever.py +119 -0
- coding_assistant/context/window.py +362 -0
- coding_assistant/documentation/__init__.py +23 -0
- coding_assistant/documentation/agents/__init__.py +27 -0
- coding_assistant/documentation/agents/coordinator.py +510 -0
- coding_assistant/documentation/agents/module_documenter.py +111 -0
- coding_assistant/documentation/agents/synthesizer.py +139 -0
- coding_assistant/documentation/agents/task_delegator.py +100 -0
- coding_assistant/documentation/decomposition/__init__.py +21 -0
- coding_assistant/documentation/decomposition/context_preserver.py +477 -0
- coding_assistant/documentation/decomposition/module_detector.py +302 -0
- coding_assistant/documentation/decomposition/partitioner.py +621 -0
- coding_assistant/documentation/generators/__init__.py +14 -0
- coding_assistant/documentation/generators/dataflow_generator.py +440 -0
- coding_assistant/documentation/generators/diagram_generator.py +511 -0
- coding_assistant/documentation/graph/__init__.py +13 -0
- coding_assistant/documentation/graph/dependency_builder.py +468 -0
- coding_assistant/documentation/graph/module_analyzer.py +475 -0
- coding_assistant/documentation/writers/__init__.py +11 -0
- coding_assistant/documentation/writers/markdown_writer.py +322 -0
- coding_assistant/embeddings/__init__.py +0 -0
- coding_assistant/embeddings/generator.py +89 -0
- coding_assistant/embeddings/store.py +187 -0
- coding_assistant/exceptions/__init__.py +50 -0
- coding_assistant/exceptions/base.py +110 -0
- coding_assistant/exceptions/llm.py +249 -0
- coding_assistant/exceptions/recovery.py +263 -0
- coding_assistant/exceptions/storage.py +213 -0
- coding_assistant/exceptions/validation.py +230 -0
- coding_assistant/llm/__init__.py +1 -0
- coding_assistant/llm/client.py +277 -0
- coding_assistant/llm/gemini_client.py +181 -0
- coding_assistant/llm/groq_client.py +160 -0
- coding_assistant/llm/prompts.py +98 -0
- coding_assistant/llm/together_client.py +160 -0
- coding_assistant/operations/__init__.py +13 -0
- coding_assistant/operations/differ.py +369 -0
- coding_assistant/operations/generator.py +347 -0
- coding_assistant/operations/linter.py +430 -0
- coding_assistant/operations/validator.py +406 -0
- coding_assistant/storage/__init__.py +9 -0
- coding_assistant/storage/database.py +363 -0
- coding_assistant/storage/session.py +231 -0
- coding_assistant/utils/__init__.py +31 -0
- coding_assistant/utils/cache.py +477 -0
- coding_assistant/utils/hardware.py +132 -0
- coding_assistant/utils/keystore.py +206 -0
- coding_assistant/utils/logger.py +32 -0
- coding_assistant/utils/progress.py +311 -0
- coding_assistant/validation/__init__.py +13 -0
- coding_assistant/validation/files.py +305 -0
- coding_assistant/validation/inputs.py +335 -0
- coding_assistant/validation/params.py +280 -0
- coding_assistant/validation/sanitizers.py +243 -0
- coding_assistant/vcs/__init__.py +5 -0
- coding_assistant/vcs/git.py +269 -0
coding_assistant/context/window.py
@@ -0,0 +1,362 @@
+"""Token window management for LLM context limits."""
+
+from typing import List, Dict, Optional, Any
+import tiktoken
+from dataclasses import dataclass
+
+
+@dataclass
+class TokenBudget:
+    """Token budget allocation."""
+    total: int
+    system_prompt: int
+    query: int
+    history: int
+    chunks: int
+    response_reserved: int
+
+    @property
+    def used(self) -> int:
+        """Total tokens used."""
+        return self.system_prompt + self.query + self.history + self.chunks
+
+    @property
+    def available(self) -> int:
+        """Available tokens for response."""
+        return self.total - self.used - self.response_reserved
+
+    @property
+    def utilization(self) -> float:
+        """Context window utilization (0-1)."""
+        return self.used / (self.total - self.response_reserved)
+
+
+class TokenWindowManager:
+    """
+    Manages context within LLM token limits.
+
+    This class handles:
+    - Token counting for text
+    - Context prioritization
+    - Truncation when needed
+    - Budget allocation
+
+    Default allocation:
+    - System prompt: reserved up front (reserved_system, 1,000 tokens by default)
+    - Response: reserved up front (reserved_response, 4,000 tokens by default)
+    - Query: its actual size
+    - History: up to max_history_ratio (30% by default) of the remaining space
+    - Code chunks: whatever remains
+    """
+
+    def __init__(
+        self,
+        max_tokens: int = 150000,  # Conservative default (Claude Sonnet itself allows 200k)
+        reserved_response: int = 4000,
+        reserved_system: int = 1000,
+        max_history_ratio: float = 0.3,  # Max 30% for history
+        encoding: str = "cl100k_base"  # OpenAI-compatible encoding
+    ):
+        """
+        Initialize token window manager.
+
+        Args:
+            max_tokens: Maximum context window size
+            reserved_response: Tokens reserved for LLM response
+            reserved_system: Tokens reserved for system prompt
+            max_history_ratio: Maximum ratio of tokens for history
+            encoding: Tiktoken encoding to use
+        """
+        self.max_tokens = max_tokens
+        self.reserved_response = reserved_response
+        self.reserved_system = reserved_system
+        self.max_history_ratio = max_history_ratio
+
+        try:
+            self.encoder = tiktoken.get_encoding(encoding)
+        except Exception:
+            # Fall back to character-based estimation
+            self.encoder = None
+
+    def count_tokens(self, text: str) -> int:
+        """
+        Count tokens in text.
+
+        Args:
+            text: Text to count tokens for
+
+        Returns:
+            Number of tokens
+        """
+        if not text:
+            return 0
+
+        if self.encoder:
+            try:
+                return len(self.encoder.encode(text))
+            except Exception:
+                pass
+
+        # Fallback: rough estimation (1 token ≈ 4 characters)
+        return len(text) // 4
+
+    def build_context(
+        self,
+        query: str,
+        chunks: List[Dict[str, Any]],
+        history: Optional[List[Dict[str, str]]] = None,
+        system_prompt: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Build context within the token limit.
+
+        Args:
+            query: User query
+            chunks: Code chunks (must have a 'content' key)
+            history: Conversation history (list of {role, content})
+            system_prompt: System prompt text
+
+        Returns:
+            Dict with the selected context and token budget
+        """
+        history = history or []
+
+        # Count tokens for fixed components
+        query_tokens = self.count_tokens(query)
+        system_tokens = self.count_tokens(system_prompt) if system_prompt else self.reserved_system
+
+        # Tokens available for history and chunks
+        available = self.max_tokens - self.reserved_response - system_tokens - query_tokens
+
+        if available <= 0:
+            # The query alone exhausts the window
+            return {
+                'query': query,
+                'history': [],
+                'chunks': [],
+                'budget': self._create_budget(
+                    system_tokens, query_tokens, 0, 0
+                ),
+                'truncated': True,
+                'error': 'Query exceeds token limit'
+            }
+
+        # Allocate history (up to max_history_ratio)
+        max_history_tokens = int(available * self.max_history_ratio)
+        included_history, history_tokens = self._select_history(
+            history, max_history_tokens
+        )
+
+        # Remaining tokens go to chunks
+        available_for_chunks = available - history_tokens
+
+        # Select chunks that fit
+        included_chunks, chunk_tokens = self._select_chunks(
+            chunks, available_for_chunks
+        )
+
+        budget = self._create_budget(
+            system_tokens, query_tokens, history_tokens, chunk_tokens
+        )
+
+        return {
+            'query': query,
+            'history': included_history,
+            'chunks': included_chunks,
+            'budget': budget,
+            'truncated': len(included_chunks) < len(chunks) or len(included_history) < len(history)
+        }
+
+    def _select_history(
+        self,
+        history: List[Dict[str, str]],
+        max_tokens: int
+    ) -> tuple[List[Dict[str, str]], int]:
+        """
+        Select history messages that fit within the token limit.
+
+        Prioritizes recent messages.
+
+        Args:
+            history: Full conversation history
+            max_tokens: Maximum tokens for history
+
+        Returns:
+            Tuple of (selected messages, total tokens)
+        """
+        if not history:
+            return [], 0
+
+        included = []
+        total_tokens = 0
+
+        # Start from the most recent message and work backwards
+        for msg in reversed(history[-20:]):  # Consider the last 20 messages
+            content = msg.get('content', '')
+            msg_tokens = self.count_tokens(content)
+
+            if total_tokens + msg_tokens <= max_tokens:
+                included.insert(0, msg)  # Maintain chronological order
+                total_tokens += msg_tokens
+            else:
+                # Can't fit more messages
+                break
+
+        return included, total_tokens
+
+    def _select_chunks(
+        self,
+        chunks: List[Dict[str, Any]],
+        max_tokens: int
+    ) -> tuple[List[Dict[str, Any]], int]:
+        """
+        Select code chunks that fit within the token limit.
+
+        Assumes chunks are already ranked by relevance.
+
+        Args:
+            chunks: Code chunks (ranked)
+            max_tokens: Maximum tokens for chunks
+
+        Returns:
+            Tuple of (selected chunks, total tokens)
+        """
+        if not chunks:
+            return [], 0
+
+        included = []
+        total_tokens = 0
+
+        for chunk in chunks:
+            content = chunk.get('content', '')
+            chunk_tokens = self.count_tokens(content)
+
+            if total_tokens + chunk_tokens <= max_tokens:
+                included.append(chunk)
+                total_tokens += chunk_tokens
+            else:
+                # Try to fit at least one chunk
+                if not included and chunk_tokens <= max_tokens * 1.2:
+                    # Allow slight overflow for the first chunk
+                    included.append(chunk)
+                    total_tokens += chunk_tokens
+                break
+
+        return included, total_tokens
+
+    def _create_budget(
+        self,
+        system_tokens: int,
+        query_tokens: int,
+        history_tokens: int,
+        chunk_tokens: int
+    ) -> TokenBudget:
+        """Create token budget summary."""
+        return TokenBudget(
+            total=self.max_tokens,
+            system_prompt=system_tokens,
+            query=query_tokens,
+            history=history_tokens,
+            chunks=chunk_tokens,
+            response_reserved=self.reserved_response
+        )
+
+    def estimate_response_tokens(self, response: str) -> int:
+        """
+        Estimate tokens in a response.
+
+        Args:
+            response: Response text
+
+        Returns:
+            Estimated token count
+        """
+        return self.count_tokens(response)
+
+    def can_fit(self, text: str, available_tokens: int) -> bool:
+        """
+        Check if text fits within the token limit.
+
+        Args:
+            text: Text to check
+            available_tokens: Available token budget
+
+        Returns:
+            True if the text fits
+        """
+        return self.count_tokens(text) <= available_tokens
+
+    def truncate_text(self, text: str, max_tokens: int) -> str:
+        """
+        Truncate text to fit within the token limit.
+
+        Args:
+            text: Text to truncate
+            max_tokens: Maximum tokens allowed
+
+        Returns:
+            Truncated text
+        """
+        tokens = self.count_tokens(text)
+
+        if tokens <= max_tokens:
+            return text
+
+        # Exact truncation at the token boundary
+        if self.encoder:
+            encoded = self.encoder.encode(text)
+            truncated = encoded[:max_tokens]
+            return self.encoder.decode(truncated) + "..."
+
+        # Fallback: proportional character-based truncation
+        ratio = max_tokens / tokens
+        char_limit = int(len(text) * ratio)
+        return text[:char_limit] + "..."
+
+    def get_model_limits(self, model_name: str) -> tuple[int, int]:
+        """
+        Get token limits for known models.
+
+        Args:
+            model_name: Model identifier
+
+        Returns:
+            Tuple of (max_tokens, recommended_response_reserve)
+        """
+        limits = {
+            'claude-sonnet': (200000, 4000),
+            'claude-opus': (200000, 4000),
+            'gpt-4': (128000, 4000),
+            'gpt-3.5': (16385, 2000),
+            'ollama-default': (4096, 512),
+        }
+
+        # Match by substring
+        for key, (max_tok, reserve) in limits.items():
+            if key in model_name.lower():
+                return max_tok, reserve
+
+        # Conservative default
+        return 8000, 1000
+
+    def format_budget_summary(self, budget: TokenBudget) -> str:
+        """
+        Format a budget for display.
+
+        Args:
+            budget: Token budget
+
+        Returns:
+            Formatted string
+        """
+        return f"""Token Budget:
+  Total Window:    {budget.total:,} tokens
+  System Prompt:   {budget.system_prompt:,} tokens ({budget.system_prompt/budget.total*100:.1f}%)
+  Query:           {budget.query:,} tokens ({budget.query/budget.total*100:.1f}%)
+  History:         {budget.history:,} tokens ({budget.history/budget.total*100:.1f}%)
+  Code Chunks:     {budget.chunks:,} tokens ({budget.chunks/budget.total*100:.1f}%)
+  Reserved (Resp): {budget.response_reserved:,} tokens ({budget.response_reserved/budget.total*100:.1f}%)
+  ─────────────────────────────────────
+  Used:            {budget.used:,} tokens ({budget.utilization*100:.1f}%)
+  Available:       {budget.available:,} tokens
+"""
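The TokenBudget arithmetic above is simple enough to verify by hand. A short worked example with illustrative numbers (not taken from the package):

from coding_assistant.context.window import TokenBudget

# used = 1000 + 200 + 3000 + 80000 = 84,200 tokens
# available = 150000 - 84200 - 4000 = 61,800 tokens
# utilization = 84200 / (150000 - 4000) ≈ 0.577
budget = TokenBudget(
    total=150000,
    system_prompt=1000,
    query=200,
    history=3000,
    chunks=80000,
    response_reserved=4000,
)
assert budget.used == 84200
assert budget.available == 61800
print(f"{budget.utilization:.3f}")  # ~0.577

Note that utilization is measured against the window minus the response reserve, so a fully packed context reads as 100% even though response_reserved tokens remain untouched.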
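Pulling the pieces together, here is a minimal end-to-end sketch based only on the API shown in this hunk; the model name, query, chunks, and history are illustrative placeholders, not package data:

from coding_assistant.context.window import TokenWindowManager

# Size the manager from the built-in model table (substring match,
# with a conservative 8k fallback for unknown models).
probe = TokenWindowManager()
max_tok, reserve = probe.get_model_limits("claude-sonnet")  # (200000, 4000)
manager = TokenWindowManager(max_tokens=max_tok, reserved_response=reserve)

# Chunks are assumed to be pre-ranked by relevance; only 'content' is read.
context = manager.build_context(
    query="How does the ranker score chunks?",
    chunks=[
        {"content": "def score(chunk, query): ..."},
        {"content": "class Ranker: ..."},
    ],
    history=[{"role": "user", "content": "Summarize the context module."}],
    system_prompt="You are a coding assistant.",
)

print(context["truncated"])  # True if any history or chunks were dropped
print(manager.format_budget_summary(context["budget"]))

Because selection is greedy in rank order, a dropped chunk never displaces an earlier, higher-ranked one; the loop simply stops at the first chunk that does not fit.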
coding_assistant/documentation/__init__.py
@@ -0,0 +1,23 @@
+"""Documentation generation module for AI Coding Assistant.
+
+This module provides CodeWiki-style automated documentation generation:
+- Repository-level architecture understanding
+- Hierarchical decomposition for scalability
+- Multi-agent documentation generation
+- Visual diagram creation (Mermaid)
+- Incremental updates and continuous synchronization
+"""
+
+from coding_assistant.documentation.graph.dependency_builder import (
+    DependencyGraphBuilder,
+    CodeEntity,
+)
+from coding_assistant.documentation.graph.module_analyzer import ModuleAnalyzer
+
+__all__ = [
+    'DependencyGraphBuilder',
+    'CodeEntity',
+    'ModuleAnalyzer',
+]
+
+__version__ = '0.5.0'
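Since this hunk shows only re-exports, the one thing it pins down is the package-root import surface; a sketch (the trailing comments are inferences from the file names in the listing above, not confirmed signatures):

from coding_assistant.documentation import (
    DependencyGraphBuilder,  # from graph/dependency_builder.py
    CodeEntity,              # entity type used by the graph builder
    ModuleAnalyzer,          # from graph/module_analyzer.py
)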
coding_assistant/documentation/agents/__init__.py
@@ -0,0 +1,27 @@
+"""Multi-agent documentation generation system.
+
+This module implements parallel documentation generation using multiple LLM agents,
+inspired by CodeWiki's recursive multi-agent approach.
+"""
+
+from coding_assistant.documentation.agents.coordinator import (
+    MultiAgentCoordinator,
+    DocumentationTask,
+)
+from coding_assistant.documentation.agents.module_documenter import (
+    ModuleDocumenter,
+)
+from coding_assistant.documentation.agents.synthesizer import (
+    DocumentationSynthesizer,
+)
+from coding_assistant.documentation.agents.task_delegator import (
+    TaskDelegator,
+)
+
+__all__ = [
+    'MultiAgentCoordinator',
+    'DocumentationTask',
+    'ModuleDocumenter',
+    'DocumentationSynthesizer',
+    'TaskDelegator',
+]