zwarm-2.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zwarm/orchestrator.py ADDED
@@ -0,0 +1,683 @@
+ """
+ Orchestrator: The agent that coordinates multiple executor agents.
+
+ The orchestrator:
+ - Plans and breaks down complex tasks
+ - Delegates work to executor agents (codex, claude-code, etc.)
+ - Supervises progress and provides clarification
+ - Verifies work before marking complete
+
+ It does NOT write code directly - that's the executor's job.
+ """
+
+ from __future__ import annotations
+
+ import json
+ from pathlib import Path
+ from typing import Any, Callable
+
+ import weave
+ from pydantic import Field, PrivateAttr
+ from wbal.agents.yaml_agent import YamlAgent
+ from wbal.helper import TOOL_CALL_TYPE, format_openai_tool_response
+ from wbal.lm import LM as wbalLMGeneric
+ from wbal.lm import GPT5LargeVerbose
+
+ from zwarm.adapters import ExecutorAdapter, get_adapter
+ from zwarm.core.compact import compact_messages, should_compact
+ from zwarm.core.config import ZwarmConfig, load_config
+ from zwarm.core.environment import OrchestratorEnv
+ from zwarm.core.models import ConversationSession
+ from zwarm.core.state import StateManager
+ from zwarm.prompts import get_orchestrator_prompt
+ from zwarm.watchers import (
+     WatcherAction,
+     WatcherContext,
+     WatcherManager,
+     build_watcher_manager,
+ )
+
+
+ class Orchestrator(YamlAgent):
+     """
+     Multi-agent orchestrator built on WBAL's YamlAgent.
+
+     Extends YamlAgent with:
+     - Delegation tools (delegate, converse, check_session, end_session)
+     - Session tracking
+     - State persistence
+     - Watcher integration
+     - Weave integration
+     """
+
+     # LM definition override:
+     lm: wbalLMGeneric = Field(default_factory=GPT5LargeVerbose)
+
+     # Configuration
+     config: ZwarmConfig = Field(default_factory=ZwarmConfig)
+     working_dir: Path = Field(default_factory=Path.cwd)
+
+     # Instance identification (for multi-orchestrator isolation)
+     instance_id: str | None = Field(default=None)
+     instance_name: str | None = Field(default=None)
+
+     # Load tools from modules (delegation + bash for verification)
+     agent_tool_modules: list[str] = Field(
+         default=[
+             "zwarm.tools.delegation",
+             "wbal.tools.bash",
+         ]
+     )
+
+     # State management
+     _state: StateManager = PrivateAttr()
+     _sessions: dict[str, ConversationSession] = PrivateAttr(default_factory=dict)
+     _adapters: dict[str, ExecutorAdapter] = PrivateAttr(default_factory=dict)
+     _watcher_manager: WatcherManager | None = PrivateAttr(default=None)
+     _resumed: bool = PrivateAttr(default=False)
+     _total_tokens: int = PrivateAttr(default=0)  # Cumulative orchestrator tokens
+     _executor_usage: dict[str, int] = PrivateAttr(
+         default_factory=lambda: {
+             "input_tokens": 0,
+             "output_tokens": 0,
+             "total_tokens": 0,
+         }
+     )
+
+     def model_post_init(self, __context: Any) -> None:
+         """Initialize state and adapters after model creation."""
+         super().model_post_init(__context)
+
+         # Initialize state manager with instance isolation
+         base_state_dir = self.working_dir / self.config.state_dir
+         self._state = StateManager(
+             state_dir=base_state_dir,
+             instance_id=self.instance_id,
+         )
+         self._state.init()
+         self._state.load()
+
+         # Register instance if using instance isolation
+         if self.instance_id:
+             from zwarm.core.state import register_instance
+
+             register_instance(
+                 instance_id=self.instance_id,
+                 name=self.instance_name,
+                 task=None,  # Will be updated when task is set
+                 base_dir=base_state_dir,
+             )
+
+         # Load existing sessions
+         for session in self._state.list_sessions():
+             self._sessions[session.id] = session
+
+         # Initialize Weave if configured
+         if self.config.weave.enabled and self.config.weave.project:
+             weave.init(self.config.weave.project)
+
+         # Initialize watchers if configured
+         if self.config.watchers.enabled:
+             self._watcher_manager = build_watcher_manager(
+                 {
+                     "watchers": [
+                         {"name": w.name, "enabled": w.enabled, "config": w.config}
+                         for w in self.config.watchers.watchers
+                     ]
+                 }
+             )
+
+         # Initialize CodexSessionManager and link to environment
+         # This is the SAME manager used by delegation tools
+         from zwarm.sessions import CodexSessionManager
+         self._session_manager = CodexSessionManager(self.working_dir / ".zwarm")
+
+         # Link session manager to environment for live session visibility in observe()
+         if hasattr(self.env, "set_session_manager"):
+             self.env.set_session_manager(self._session_manager)
+
+         # Set budget limits in environment
+         if hasattr(self.env, "set_budget"):
+             # Extract budget from watcher config if available
+             max_sessions = None
+             for w in self.config.watchers.watchers:
+                 if w.name == "budget" and w.config:
+                     max_sessions = w.config.get("max_sessions")
+                     break
+             self.env.set_budget(max_sessions=max_sessions)
+
+     @property
+     def state(self) -> StateManager:
+         """Access state manager."""
+         return self._state
+
+     def _get_adapter(self, name: str) -> ExecutorAdapter:
+         """Get or create an adapter by name using the adapter registry."""
+         if name not in self._adapters:
+             # Get model from config (adapters have their own defaults if None)
+             model = self.config.executor.model
+
+             # Use isolated codex config if available
+             config_path = self.working_dir / self.config.state_dir / "codex.toml"
+             if not config_path.exists():
+                 config_path = None  # Fallback to adapter defaults
+
+             self._adapters[name] = get_adapter(
+                 name, model=model, config_path=config_path
+             )
+         return self._adapters[name]
+
+     def get_executor_usage(self) -> dict[str, int]:
+         """Get aggregated token usage across all executors."""
+         total = {
+             "input_tokens": 0,
+             "output_tokens": 0,
+             "total_tokens": 0,
+         }
+         for adapter in self._adapters.values():
+             if hasattr(adapter, "total_usage"):
+                 usage = adapter.total_usage
+                 for key in total:
+                     total[key] += usage.get(key, 0)
+         return total
+
+     @property
+     def executor_usage(self) -> dict[str, int]:
+         """Aggregated executor token usage (for Weave tracking)."""
+         return self.get_executor_usage()
+
+     def save_state(self) -> None:
+         """Save orchestrator state for resume."""
+         self._state.save_orchestrator_messages(self.messages)
+
+     def load_state(self) -> None:
+         """Load orchestrator state for resume.
+
+         Only marks as resumed if we actually loaded non-empty messages.
+         This prevents the resume message from being injected before the
+         system prompt when there's no saved state to resume from.
+         """
+         loaded_messages = self._state.load_orchestrator_messages()
+         if loaded_messages:
+             self.messages = self._sanitize_messages_for_resume(loaded_messages)
+             self._resumed = True
+         # If no messages were saved, don't set _resumed - start fresh
+
+     def _sanitize_messages_for_resume(self, messages: list[dict]) -> list[dict]:
+         """
+         Sanitize messages loaded from disk for sending back to the API.
+
+         OpenAI's reasoning models include response-only fields (status, encrypted_content)
+         in reasoning blocks that can't be sent back as input. We keep the reasoning
+         items but strip the response-only fields.
+
+         Response-only fields that must be removed:
+         - status: reasoning item status (null, "in_progress", "completed")
+         - encrypted_content: encrypted reasoning content
+         """
+         # Fields that are response-only and must be stripped for input
+         RESPONSE_ONLY_FIELDS = {
+             "status",
+             "encrypted_content",
+         }
+
+         def clean_item(item: Any) -> Any:
+             """Recursively clean an item, removing response-only fields."""
+             if isinstance(item, dict):
+                 return {
+                     k: clean_item(v)
+                     for k, v in item.items()
+                     if k not in RESPONSE_ONLY_FIELDS
+                 }
+             elif isinstance(item, list):
+                 return [clean_item(x) for x in item]
+             else:
+                 return item
+
+         return [clean_item(msg) for msg in messages]
+
+     def _maybe_compact(self) -> bool:
+         """
+         Check if compaction is needed and compact if so.
+
+         Returns True if compaction was performed.
+         """
+         compact_config = self.config.orchestrator.compaction
+         if not compact_config.enabled:
+             return False
+
+         # Check if we should compact
+         if not should_compact(
+             self.messages,
+             max_tokens=compact_config.max_tokens,
+             threshold_pct=compact_config.threshold_pct,
+         ):
+             return False
+
+         # Perform compaction
+         result = compact_messages(
+             self.messages,
+             keep_first_n=compact_config.keep_first_n,
+             keep_last_n=compact_config.keep_last_n,
+             max_tokens=compact_config.max_tokens,
+             target_token_pct=compact_config.target_pct,
+         )
+
+         if result.was_compacted:
+             self.messages = result.messages
+
+             # Log compaction event
+             from zwarm.core.models import Event
+
+             self._state.log_event(
+                 Event(
+                     kind="context_compacted",
+                     payload={
+                         "step": self._step_count,
+                         "original_count": result.original_count,
+                         "new_count": len(result.messages),
+                         "removed_count": result.removed_count,
+                     },
+                 )
+             )
+
+             return True
+
+         return False
+
+     def _inject_resume_message(self) -> None:
+         """Inject a system message about resumed state."""
+         if not self._resumed:
+             return
+
+         # Build list of old sessions and INVALIDATE their conversation IDs
+         # The MCP server was restarted, so all conversation IDs are now stale
+         old_sessions = []
+         invalidated_count = 0
+         for sid, session in self._sessions.items():
+             old_sessions.append(
+                 f" - {sid[:8]}... ({session.adapter}, {session.status.value})"
+             )
+             # Clear stale conversation_id to prevent converse() from trying to use it
+             if session.conversation_id:
+                 session.conversation_id = None
+                 invalidated_count += 1
+
+         session_info = "\n".join(old_sessions) if old_sessions else " (none)"
+
+         resume_msg = {
+             "role": "user",
+             "content": f"""[SYSTEM NOTICE] You have been resumed from a previous session.
+
+ CRITICAL: Your previous executor sessions are NO LONGER USABLE. The MCP server was restarted, so all conversation state was lost. {invalidated_count} conversation ID(s) have been invalidated.
+
+ Previous sessions (conversation IDs cleared):
+ {session_info}
+
+ You MUST start NEW sessions with delegate() to continue any work. The converse() tool will fail on these old sessions because they have no active conversation.
+
+ Review what was accomplished in the previous session and delegate new tasks as needed.""",
+         }
+
+         self.messages.append(resume_msg)
+         self._resumed = False  # Only inject once
+
+     def perceive(self) -> None:
+         """
+         Override perceive to refresh environment observation each step.
+
+         The base YamlAgent only adds env.observe() on step 0. We need to
+         update it each step to show current progress, sessions, etc.
+         """
+         # Let base class do initial setup
+         super().perceive()
+
+         # Update environment observation
+         env_obs = (self.env.observe() or "").strip()
+         if not env_obs:
+             return
+
+         # Find and update existing env observation, or append new one
+         # Look for a system message containing our markers
+         env_marker = "## Progress"  # Our env observation has this
+
+         for i, msg in enumerate(self.messages):
+             if msg.get("role") == "system" and env_marker in msg.get("content", ""):
+                 # Update in place
+                 self.messages[i]["content"] = env_obs
+                 return
+
+         # Not found - append as new system message (shouldn't happen after step 0)
+         self.messages.append({"role": "system", "content": env_obs})
+
+     @weave.op()
+     def _run_watchers(self) -> WatcherAction:
+         """Run watchers and return the action to take."""
+         if not self._watcher_manager:
+             return WatcherAction.CONTINUE
+
+         # Build watcher context
+         task = getattr(self.env, "task", "") if self.env else ""
+         events = [e.to_dict() for e in self.state.get_events(limit=200)]
+         ctx = WatcherContext(
+             task=task,
+             step=self._step_count,
+             max_steps=self.maxSteps,
+             messages=self.messages,
+             sessions=[s.to_dict() for s in self._sessions.values()],
+             events=events,
+             working_dir=str(self.working_dir.absolute()) if self.working_dir else None,
+             metadata={
+                 "config": self.config.to_dict()
+                 if hasattr(self.config, "to_dict")
+                 else {},
+             },
+         )
+
+         # Run watchers synchronously (they're async internally)
+         import asyncio
+
+         try:
+             loop = asyncio.get_running_loop()
+         except RuntimeError:
+             loop = None
+
+         if loop and loop.is_running():
+             # We're in an async context - run the watchers in a separate thread
+             import concurrent.futures
+
+             with concurrent.futures.ThreadPoolExecutor() as pool:
+                 result = pool.submit(
+                     asyncio.run, self._watcher_manager.observe(ctx)
+                 ).result()
+         else:
+             result = asyncio.run(self._watcher_manager.observe(ctx))
+
+         # Log watcher execution to events
+         from zwarm.core.models import Event
+
+         watcher_names = [w.name for w in self.config.watchers.watchers if w.enabled]
+         self.state.log_event(
+             Event(
+                 kind="watchers_run",
+                 payload={
+                     "step": self._step_count,
+                     "watchers": watcher_names,
+                     "action": result.action.value,
+                     "triggered_by": result.metadata.get("triggered_by"),
+                     "reason": result.metadata.get("reason"),
+                 },
+             )
+         )
+
+         # Handle watcher result
+         if result.action == WatcherAction.NUDGE and result.guidance:
+             # Inject guidance as a message with configurable role
+             message_role = self.config.watchers.message_role
+             # Validate role (default to user if invalid)
+             if message_role not in ("user", "assistant", "system"):
+                 message_role = "user"
+
+             self.messages.append(
+                 {
+                     "role": message_role,
+                     "content": f"[WATCHER: {result.metadata.get('triggered_by', 'unknown')}] {result.guidance}",
+                 }
+             )
+
+         return result.action
+
+     def do(self) -> list[tuple[dict[str, Any], Any]]:
+         """
+         Execute tool calls from the LLM response.
+
+         Overrides base do() to capture and return tool calls with results
+         for Weave tracing visibility.
+
+         Returns:
+             List of (tool_call_info, result) tuples
+         """
+         if self._last_response is None:
+             return []
+
+         output = getattr(self._last_response, "output", None)
+         if output is None:
+             return []
+
+         # Extract tool calls
+         tool_calls = [
+             item for item in output if getattr(item, "type", None) == TOOL_CALL_TYPE
+         ]
+
+         # If no tool calls, handle text output
+         if not tool_calls:
+             output_text = getattr(self._last_response, "output_text", "")
+             if output_text and hasattr(self.env, "output_handler"):
+                 self.env.output_handler(output_text)
+             return []
+
+         # Execute each tool call and collect results
+         tool_results: list[tuple[dict[str, Any], Any]] = []
+
+         for tc in tool_calls:
+             tc_name = getattr(tc, "name", "")
+             tc_args_raw = getattr(tc, "arguments", "{}")
+             tc_id = getattr(tc, "call_id", "")
+
+             # Parse arguments
+             if isinstance(tc_args_raw, str):
+                 try:
+                     tc_args = json.loads(tc_args_raw)
+                 except json.JSONDecodeError:
+                     tc_args = {}
+             else:
+                 tc_args = tc_args_raw or {}
+
+             # Execute tool
+             if tc_name in self._tool_callables:
+                 try:
+                     tc_output = self._tool_callables[tc_name](**tc_args)
+                 except Exception as e:
+                     tc_output = f"Error executing {tc_name}: {e}"
+             else:
+                 tc_output = f"Unknown tool: {tc_name}"
+
+             # Collect tool call info and result
+             tool_call_info = {
+                 "name": tc_name,
+                 "args": tc_args,
+                 "call_id": tc_id,
+             }
+             tool_results.append((tool_call_info, tc_output))
+
+             # Format and append result to messages
+             result = format_openai_tool_response(tc_output, tc_id)
+             self.messages.append(result)
+
+         return tool_results
+
498
+
499
+ @weave.op()
500
+ def step(self) -> list[tuple[dict[str, Any], Any]]:
501
+ """
502
+ Execute one perceive-invoke-do cycle.
503
+
504
+ Overrides base step() to return tool calls with results
505
+ for Weave tracing visibility.
506
+
507
+ Returns:
508
+ List of (tool_call_info, result) tuples from this step.
509
+ Each tuple contains:
510
+ - tool_call_info: {"name": str, "args": dict, "call_id": str}
511
+ - result: The tool output (any type)
512
+ """
513
+ # Check for context compaction before perceive
514
+ # This prevents context overflow on long-running tasks
515
+ self._maybe_compact()
516
+
517
+ # Update environment with current progress before perceive
518
+ if hasattr(self.env, "update_progress"):
519
+ executor_usage = self.get_executor_usage()
520
+ self.env.update_progress(
521
+ step_count=self._step_count,
522
+ max_steps=self.maxSteps,
523
+ total_tokens=self._total_tokens,
524
+ executor_tokens=executor_usage.get("total_tokens", 0),
525
+ )
526
+
527
+ self.perceive()
528
+ self.invoke()
529
+
530
+ # Track cumulative token usage from the API response
531
+ if self._last_response and hasattr(self._last_response, "usage"):
532
+ usage = self._last_response.usage
533
+ if usage:
534
+ self._total_tokens += getattr(usage, "total_tokens", 0)
535
+
536
+ tool_results = self.do()
537
+ self._step_count += 1
538
+ return tool_results
539
+
540
+ @weave.op()
541
+ def run(
542
+ self, task: str | None = None, max_steps: int | None = None
543
+ ) -> dict[str, Any]:
544
+ """
545
+ Run the orchestrator until stop condition is met.
546
+
547
+ Overrides base run() to integrate watchers.
548
+
549
+ Args:
550
+ task: The task string. If not provided, uses env.task
551
+ max_steps: Override maxSteps for this run.
552
+
553
+ Returns:
554
+ Dict with run results
555
+ """
556
+ # Set task from argument or environment
557
+ if task is not None:
558
+ self.env.task = task
559
+
560
+ # Override max_steps if provided
561
+ if max_steps is not None:
562
+ self.maxSteps = max_steps
563
+
564
+ # Reset counters
565
+ self._step_count = 0
566
+ self._total_tokens = 0
567
+
568
+ # Inject resume message if we were resumed
569
+ self._inject_resume_message()
570
+
571
+ for _ in range(self.maxSteps):
572
+ # Run watchers before each step
573
+ watcher_action = self._run_watchers()
574
+
575
+ if watcher_action == WatcherAction.ABORT:
576
+ return {
577
+ "steps": self._step_count,
578
+ "task": self.env.task,
579
+ "stopped_by": "watcher_abort",
580
+ }
581
+ elif watcher_action == WatcherAction.PAUSE:
582
+ # For now, treat pause as stop (could add human-in-loop later)
583
+ return {
584
+ "steps": self._step_count,
585
+ "task": self.env.task,
586
+ "stopped_by": "watcher_pause",
587
+ }
588
+ # NUDGE and CONTINUE just continue
589
+
590
+ self.step()
591
+
592
+ if self.stopCondition:
593
+ break
594
+
595
+ return {
596
+ "steps": self._step_count,
597
+ "task": self.env.task,
598
+ }
599
+
600
+ async def cleanup(self) -> None:
601
+ """Clean up resources."""
602
+ for adapter in self._adapters.values():
603
+ await adapter.cleanup()
604
+
+
+ def build_orchestrator(
+     config_path: Path | None = None,
+     task: str | None = None,
+     working_dir: Path | None = None,
+     overrides: list[str] | None = None,
+     resume: bool = False,
+     output_handler: Callable[[str], None] | None = None,
+     instance_id: str | None = None,
+     instance_name: str | None = None,
+ ) -> Orchestrator:
+     """
+     Build an orchestrator from configuration.
+
+     Args:
+         config_path: Path to YAML config file
+         task: The task to accomplish
+         working_dir: Working directory (default: cwd)
+         overrides: CLI overrides (--set key=value)
+         resume: Whether to resume from previous state
+         output_handler: Function to handle orchestrator output
+         instance_id: Unique ID for this instance (enables multi-orchestrator isolation)
+         instance_name: Human-readable name for this instance
+
+     Returns:
+         Configured Orchestrator instance
+     """
+     from uuid import uuid4
+
+     # Load configuration
+     config = load_config(
+         config_path=config_path,
+         overrides=overrides,
+     )
+
+     # Resolve working directory
+     working_dir = working_dir or Path.cwd()
+
+     # Generate instance ID if not provided (enables isolation by default for new runs)
+     # For resume, instance_id should be provided explicitly
+     if instance_id is None and not resume:
+         instance_id = str(uuid4())
+
+     # Build system prompt
+     system_prompt = _build_system_prompt(config, working_dir)
+
+     # Create lean orchestrator environment
+     env = OrchestratorEnv(
+         task=task or "",
+         working_dir=working_dir,
+     )
+
+     # Set up output handler
+     if output_handler:
+         env.output_handler = output_handler
+
+     # Create orchestrator
+     orchestrator = Orchestrator(
+         config=config,
+         working_dir=working_dir,
+         system_prompt=system_prompt,
+         maxSteps=config.orchestrator.max_steps,
+         env=env,
+         instance_id=instance_id,
+         instance_name=instance_name,
+     )
+
+     # Resume if requested
+     if resume:
+         orchestrator.load_state()
+
+     return orchestrator
+
+
+ def _build_system_prompt(config: ZwarmConfig, working_dir: Path | None = None) -> str:
+     """Build the orchestrator system prompt."""
+     return get_orchestrator_prompt(
+         working_dir=str(working_dir) if working_dir else None
+     )
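
For orientation, a minimal usage sketch of the build_orchestrator() entry point added above; the config file name and the task string are illustrative placeholders, not part of the package:

    # Hedged sketch: assumes a valid zwarm YAML config and a configured executor.
    from pathlib import Path

    from zwarm.orchestrator import build_orchestrator

    orch = build_orchestrator(
        config_path=Path("zwarm.yaml"),  # hypothetical config file name
        task="Refactor the parser module and add tests",
        output_handler=print,  # do() routes plain-text output here
    )
    result = orch.run()  # perceive/invoke/do loop until stopCondition or maxSteps
    print(result["steps"], result.get("stopped_by", "finished"))
    orch.save_state()  # persist messages so a later run can resume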
zwarm/prompts/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """
+ System prompts for zwarm agents.
+ """
+
+ from zwarm.prompts.orchestrator import ORCHESTRATOR_SYSTEM_PROMPT, get_orchestrator_prompt
+
+ __all__ = [
+     "ORCHESTRATOR_SYSTEM_PROMPT",
+     "get_orchestrator_prompt",
+ ]
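
Resuming is the one path where the caller supplies identifiers, since build_orchestrator() only auto-generates an instance_id for fresh runs. A hedged sketch of that flow, with the instance ID a placeholder for one recorded by a previous run:

    # load_state() runs inside build_orchestrator(resume=True) and sanitizes the
    # saved messages; the first run() then injects the stale-session notice via
    # _inject_resume_message() before any step executes.
    orch = build_orchestrator(
        config_path=Path("zwarm.yaml"),  # hypothetical config file name
        resume=True,
        instance_id="<id-from-previous-run>",
    )
    orch.run(task="Continue the refactor from the previous session")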