@wazir-dev/cli 1.2.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +54 -44
- package/README.md +13 -13
- package/assets/demo.cast +47 -0
- package/assets/demo.gif +0 -0
- package/docs/anti-patterns/AP-23-skipping-enabled-workflows.md +28 -0
- package/docs/anti-patterns/AP-24-clarifier-deciding-scope.md +34 -0
- package/docs/concepts/architecture.md +1 -1
- package/docs/concepts/why-wazir.md +1 -1
- package/docs/readmes/INDEX.md +1 -1
- package/docs/readmes/features/expertise/README.md +1 -1
- package/docs/readmes/features/hooks/pre-compact-summary.md +1 -1
- package/docs/reference/hooks.md +1 -0
- package/docs/reference/launch-checklist.md +3 -3
- package/docs/reference/review-loop-pattern.md +3 -2
- package/docs/reference/skill-tiers.md +2 -2
- package/docs/research/2026-03-20-agents/a18fb002157904af5.txt +187 -0
- package/docs/research/2026-03-20-agents/a1d0ac79ac2f11e6f.txt +2 -0
- package/docs/research/2026-03-20-agents/a324079de037abd7c.txt +198 -0
- package/docs/research/2026-03-20-agents/a357586bccfafb0e5.txt +256 -0
- package/docs/research/2026-03-20-agents/a4365394e4d753105.txt +137 -0
- package/docs/research/2026-03-20-agents/a492af28bc52d3613.txt +136 -0
- package/docs/research/2026-03-20-agents/a4984db0b6a8eee07.txt +124 -0
- package/docs/research/2026-03-20-agents/a5b30e59d34bbb062.txt +214 -0
- package/docs/research/2026-03-20-agents/a5cf7829dab911586.txt +165 -0
- package/docs/research/2026-03-20-agents/a607157c30dd97c9e.txt +96 -0
- package/docs/research/2026-03-20-agents/a60b68b1e19d1e16b.txt +115 -0
- package/docs/research/2026-03-20-agents/a722af01c5594aba0.txt +166 -0
- package/docs/research/2026-03-20-agents/a787bdc516faa5829.txt +181 -0
- package/docs/research/2026-03-20-agents/a7c46d1bba1056ed2.txt +132 -0
- package/docs/research/2026-03-20-agents/a7e5abbab2b281a0d.txt +100 -0
- package/docs/research/2026-03-20-agents/a8dbadc66cd0d7d5a.txt +95 -0
- package/docs/research/2026-03-20-agents/a904d9f45d6b86a6d.txt +75 -0
- package/docs/research/2026-03-20-agents/a927659a942ee7f60.txt +102 -0
- package/docs/research/2026-03-20-agents/a962cb569191f7583.txt +125 -0
- package/docs/research/2026-03-20-agents/aab6decea538aac41.txt +148 -0
- package/docs/research/2026-03-20-agents/abd58b853dd938a1b.txt +295 -0
- package/docs/research/2026-03-20-agents/ac009da573eff7f65.txt +100 -0
- package/docs/research/2026-03-20-agents/ac1bc783364405e5f.txt +190 -0
- package/docs/research/2026-03-20-agents/aca5e2b57fde152a0.txt +132 -0
- package/docs/research/2026-03-20-agents/ad849b8c0a7e95b8b.txt +176 -0
- package/docs/research/2026-03-20-agents/adc2b12a4da32c962.txt +258 -0
- package/docs/research/2026-03-20-agents/af97caaaa9a80e4cb.txt +146 -0
- package/docs/research/2026-03-20-agents/afc5faceee368b3ca.txt +111 -0
- package/docs/research/2026-03-20-agents/afdb282d866e3c1e4.txt +164 -0
- package/docs/research/2026-03-20-agents/afe9d1f61c02b1e8d.txt +299 -0
- package/docs/research/2026-03-20-agents/b4hmkwril.txt +1856 -0
- package/docs/research/2026-03-20-agents/b80ptk89g.txt +1856 -0
- package/docs/research/2026-03-20-agents/bf54s1jss.txt +1150 -0
- package/docs/research/2026-03-20-agents/bhd6kq2kx.txt +1856 -0
- package/docs/research/2026-03-20-agents/bmb2fodyr.txt +988 -0
- package/docs/research/2026-03-20-agents/bmmsrij8i.txt +826 -0
- package/docs/research/2026-03-20-agents/bn4t2ywpu.txt +2175 -0
- package/docs/research/2026-03-20-agents/bu22t9f1z.txt +0 -0
- package/docs/research/2026-03-20-agents/bwvl98v2p.txt +738 -0
- package/docs/research/2026-03-20-agents/psych-a3697a7fd06eb64fd.txt +135 -0
- package/docs/research/2026-03-20-agents/psych-a37776fabc870feae.txt +123 -0
- package/docs/research/2026-03-20-agents/psych-a5b1fe05c0589efaf.txt +2 -0
- package/docs/research/2026-03-20-agents/psych-a95c15b1f29424435.txt +76 -0
- package/docs/research/2026-03-20-agents/psych-a9c26f4d9172dde7c.txt +2 -0
- package/docs/research/2026-03-20-agents/psych-aa19c69f0ca2c5ad3.txt +2 -0
- package/docs/research/2026-03-20-agents/psych-aa4e4cb70e1be5ecb.txt +95 -0
- package/docs/research/2026-03-20-agents/psych-ab5b302f26a554663.txt +102 -0
- package/docs/research/2026-03-20-deep-research-complete.md +101 -0
- package/docs/research/2026-03-20-deep-research-status.md +38 -0
- package/docs/research/2026-03-20-enforcement-research.md +107 -0
- package/expertise/antipatterns/process/ai-coding-antipatterns.md +117 -0
- package/expertise/composition-map.yaml +27 -8
- package/expertise/digests/reviewer/ai-coding-digest.md +83 -0
- package/expertise/digests/reviewer/architectural-thinking-digest.md +63 -0
- package/expertise/digests/reviewer/architecture-antipatterns-digest.md +49 -0
- package/expertise/digests/reviewer/code-smells-digest.md +53 -0
- package/expertise/digests/reviewer/coupling-cohesion-digest.md +54 -0
- package/expertise/digests/reviewer/ddd-digest.md +60 -0
- package/expertise/digests/reviewer/dependency-risk-digest.md +40 -0
- package/expertise/digests/reviewer/error-handling-digest.md +55 -0
- package/expertise/digests/reviewer/review-methodology-digest.md +49 -0
- package/exports/hosts/claude/.claude/commands/learn.md +61 -8
- package/exports/hosts/claude/.claude/commands/plan-review.md +3 -1
- package/exports/hosts/claude/.claude/commands/verify.md +30 -1
- package/exports/hosts/claude/.claude/settings.json +7 -6
- package/exports/hosts/claude/export.manifest.json +8 -5
- package/exports/hosts/claude/host-package.json +3 -0
- package/exports/hosts/codex/export.manifest.json +8 -5
- package/exports/hosts/codex/host-package.json +3 -0
- package/exports/hosts/cursor/.cursor/hooks.json +6 -6
- package/exports/hosts/cursor/export.manifest.json +8 -5
- package/exports/hosts/cursor/host-package.json +3 -0
- package/exports/hosts/gemini/export.manifest.json +8 -5
- package/exports/hosts/gemini/host-package.json +3 -0
- package/hooks/definitions/pretooluse_dispatcher.yaml +26 -0
- package/hooks/definitions/pretooluse_pipeline_guard.yaml +22 -0
- package/hooks/definitions/stop_pipeline_gate.yaml +22 -0
- package/hooks/hooks.json +7 -6
- package/hooks/pretooluse-dispatcher +84 -0
- package/hooks/pretooluse-pipeline-guard +9 -0
- package/hooks/stop-pipeline-gate +9 -0
- package/llms-full.txt +48 -18
- package/package.json +2 -3
- package/schemas/decision.schema.json +15 -0
- package/schemas/hook.schema.json +4 -1
- package/schemas/phase-report.schema.json +9 -0
- package/skills/TEMPLATE-3-ZONE.md +160 -0
- package/skills/brainstorming/SKILL.md +137 -21
- package/skills/clarifier/SKILL.md +364 -53
- package/skills/claude-cli/SKILL.md +91 -12
- package/skills/codex-cli/SKILL.md +91 -12
- package/skills/debugging/SKILL.md +133 -38
- package/skills/design/SKILL.md +173 -37
- package/skills/dispatching-parallel-agents/SKILL.md +129 -31
- package/skills/executing-plans/SKILL.md +113 -25
- package/skills/executor/SKILL.md +252 -21
- package/skills/finishing-a-development-branch/SKILL.md +107 -18
- package/skills/gemini-cli/SKILL.md +91 -12
- package/skills/humanize/SKILL.md +92 -13
- package/skills/init-pipeline/SKILL.md +90 -18
- package/skills/prepare-next/SKILL.md +93 -24
- package/skills/receiving-code-review/SKILL.md +90 -16
- package/skills/requesting-code-review/SKILL.md +100 -24
- package/skills/requesting-code-review/code-reviewer.md +29 -17
- package/skills/reviewer/SKILL.md +270 -57
- package/skills/run-audit/SKILL.md +92 -15
- package/skills/scan-project/SKILL.md +93 -14
- package/skills/self-audit/SKILL.md +133 -39
- package/skills/skill-research/SKILL.md +275 -0
- package/skills/subagent-driven-development/SKILL.md +129 -30
- package/skills/subagent-driven-development/code-quality-reviewer-prompt.md +30 -2
- package/skills/subagent-driven-development/implementer-prompt.md +40 -27
- package/skills/subagent-driven-development/spec-reviewer-prompt.md +25 -12
- package/skills/tdd/SKILL.md +125 -20
- package/skills/using-git-worktrees/SKILL.md +118 -28
- package/skills/using-skills/SKILL.md +116 -29
- package/skills/verification/SKILL.md +160 -17
- package/skills/wazir/SKILL.md +750 -120
- package/skills/writing-plans/SKILL.md +134 -28
- package/skills/writing-skills/SKILL.md +91 -13
- package/skills/writing-skills/anthropic-best-practices.md +104 -64
- package/skills/writing-skills/persuasion-principles.md +100 -34
- package/tooling/src/capture/command.js +46 -2
- package/tooling/src/capture/decision.js +40 -0
- package/tooling/src/capture/store.js +33 -0
- package/tooling/src/capture/user-input.js +66 -0
- package/tooling/src/checks/security-sensitivity.js +69 -0
- package/tooling/src/cli.js +28 -26
- package/tooling/src/config/depth-table.js +60 -0
- package/tooling/src/export/compiler.js +7 -8
- package/tooling/src/guards/guardrail-functions.js +131 -0
- package/tooling/src/guards/phase-prerequisite-guard.js +97 -3
- package/tooling/src/hooks/pretooluse-dispatcher.js +300 -0
- package/tooling/src/hooks/pretooluse-pipeline-guard.js +141 -0
- package/tooling/src/hooks/stop-pipeline-gate.js +92 -0
- package/tooling/src/init/auto-detect.js +0 -2
- package/tooling/src/init/command.js +3 -95
- package/tooling/src/learn/pipeline.js +177 -0
- package/tooling/src/state/db.js +251 -2
- package/tooling/src/state/pipeline-state.js +262 -0
- package/tooling/src/status/command.js +6 -1
- package/tooling/src/verify/proof-collector.js +299 -0
- package/wazir.manifest.yaml +3 -0
- package/workflows/learn.md +61 -8
- package/workflows/plan-review.md +3 -1
- package/workflows/verify.md +30 -1
|
@@ -0,0 +1,738 @@
|
|
|
1
|
+
445: for task in self.tasks:
|
|
2
|
+
446- if task.agent is None:
|
|
3
|
+
447- raise PydanticCustomError(
|
|
4
|
+
448- "missing_agent_in_task",
|
|
5
|
+
449- "Sequential process error: Agent is missing in the task with the following description: {description}",
|
|
6
|
+
450- {"description": task.description},
|
|
7
|
+
451- )
|
|
8
|
+
452-
|
|
9
|
+
453- return self
|
|
10
|
+
454-
|
|
11
|
+
455- @model_validator(mode="after")
|
|
12
|
+
456- def validate_end_with_at_most_one_async_task(self) -> Self:
|
|
13
|
+
457- """Validates that the crew ends with at most one asynchronous task."""
|
|
14
|
+
458- final_async_task_count = 0
|
|
15
|
+
459-
|
|
16
|
+
460- # Traverse tasks backward
|
|
17
|
+
461: for task in reversed(self.tasks):
|
|
18
|
+
462: if task.async_execution:
|
|
19
|
+
463- final_async_task_count += 1
|
|
20
|
+
464- else:
|
|
21
|
+
465- break # Stop traversing as soon as a non-async task is encountered
|
|
22
|
+
466-
|
|
23
|
+
467- if final_async_task_count > 1:
|
|
24
|
+
468- raise PydanticCustomError(
|
|
25
|
+
469- "async_task_count",
|
|
26
|
+
470- "The crew must end with at most one asynchronous task.",
|
|
27
|
+
471- {},
|
|
28
|
+
472- )
|
|
29
|
+
473-
|
|
30
|
+
474- return self
|
|
31
|
+
475-
|
|
32
|
+
476- @model_validator(mode="after")
|
|
33
|
+
477- def validate_must_have_non_conditional_task(self) -> Crew:
|
|
34
|
+
478- """Ensure that a crew has at least one non-conditional task."""
|
|
35
|
+
479- if not self.tasks:
|
|
36
|
+
480- return self
|
|
37
|
+
481- non_conditional_count = sum(
|
|
38
|
+
482: 1 for task in self.tasks if not isinstance(task, ConditionalTask)
|
|
39
|
+
483- )
|
|
40
|
+
484- if non_conditional_count == 0:
|
|
41
|
+
485- raise PydanticCustomError(
|
|
42
|
+
486- "only_conditional_tasks",
|
|
43
|
+
487- "Crew must include at least one non-conditional task",
|
|
44
|
+
488- {},
|
|
45
|
+
489- )
|
|
46
|
+
490- return self
|
|
47
|
+
491-
|
|
48
|
+
492- @model_validator(mode="after")
|
|
49
|
+
493- def validate_first_task(self) -> Crew:
|
|
50
|
+
494- """Ensure the first task is not a ConditionalTask."""
|
|
51
|
+
495- if self.tasks and isinstance(self.tasks[0], ConditionalTask):
|
|
52
|
+
496- raise PydanticCustomError(
|
|
53
|
+
497- "invalid_first_task",
|
|
54
|
+
498- "The first task cannot be a ConditionalTask.",
|
|
55
|
+
499- {},
|
|
56
|
+
500- )
|
|
57
|
+
501- return self
|
|
58
|
+
502-
|
|
59
|
+
503- @model_validator(mode="after")
|
|
60
|
+
504- def validate_async_tasks_not_async(self) -> Crew:
|
|
61
|
+
505- """Ensure that ConditionalTask is not async."""
|
|
62
|
+
506: for task in self.tasks:
|
|
63
|
+
507: if task.async_execution and isinstance(task, ConditionalTask):
|
|
64
|
+
508- raise PydanticCustomError(
|
|
65
|
+
509- "invalid_async_conditional_task",
|
|
66
|
+
510- (
|
|
67
|
+
511- "Conditional Task: {description}, cannot be executed asynchronously."
|
|
68
|
+
512- ),
|
|
69
|
+
513- {"description": task.description},
|
|
70
|
+
514- )
|
|
71
|
+
515- return self
|
|
72
|
+
516-
|
|
73
|
+
517- @model_validator(mode="after")
|
|
74
|
+
518- def validate_async_task_cannot_include_sequential_async_tasks_in_context(
|
|
75
|
+
519- self,
|
|
76
|
+
520- ) -> Self:
|
|
77
|
+
521- """
|
|
78
|
+
522- Validates that if a task is set to be executed asynchronously,
|
|
79
|
+
523- it cannot include other asynchronous tasks in its context unless
|
|
80
|
+
524- separated by a synchronous task.
|
|
81
|
+
525- """
|
|
82
|
+
526- for i, task in enumerate(self.tasks):
|
|
83
|
+
527: if task.async_execution and isinstance(task.context, list):
|
|
84
|
+
528- for context_task in task.context:
|
|
85
|
+
529: if context_task.async_execution:
|
|
86
|
+
530- for j in range(i - 1, -1, -1):
|
|
87
|
+
531- if self.tasks[j] == context_task:
|
|
88
|
+
532- raise ValueError(
|
|
89
|
+
533- f"Task '{task.description}' is asynchronous and "
|
|
90
|
+
534- f"cannot include other sequential asynchronous "
|
|
91
|
+
535- f"tasks in its context."
|
|
92
|
+
536- )
|
|
93
|
+
537: if not self.tasks[j].async_execution:
|
|
94
|
+
538- break
|
|
95
|
+
539- return self
|
|
96
|
+
540-
|
|
97
|
+
541- @model_validator(mode="after")
|
|
98
|
+
542- def validate_context_no_future_tasks(self) -> Self:
|
|
99
|
+
543- """Validates that a task's context does not include future tasks."""
|
|
100
|
+
544- task_indices = {id(task): i for i, task in enumerate(self.tasks)}
|
|
101
|
+
545-
|
|
102
|
+
546: for task in self.tasks:
|
|
103
|
+
547- if isinstance(task.context, list):
|
|
104
|
+
548- for context_task in task.context:
|
|
105
|
+
549- if id(context_task) not in task_indices:
|
|
106
|
+
550- continue # Skip context tasks not in the main tasks list
|
|
107
|
+
551- if task_indices[id(context_task)] > task_indices[id(task)]:
|
|
108
|
+
552- raise ValueError(
|
|
109
|
+
553- f"Task '{task.description}' has a context dependency "
|
|
110
|
+
554- f"on a future task '{context_task.description}', "
|
|
111
|
+
555- f"which is not allowed."
|
|
112
|
+
556- )
|
|
113
|
+
557- return self
|
|
114
|
+
558-
|
|
115
|
+
559- @property
|
|
116
|
+
560- def key(self) -> str:
|
|
117
|
+
561- source: list[str] = [agent.key for agent in self.agents] + [
|
|
118
|
+
562: task.key for task in self.tasks
|
|
119
|
+
563- ]
|
|
120
|
+
564- return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
|
|
121
|
+
565-
|
|
122
|
+
566- @property
|
|
123
|
+
567- def fingerprint(self) -> Fingerprint:
|
|
124
|
+
568- """
|
|
125
|
+
569- Get the crew's fingerprint.
|
|
126
|
+
570-
|
|
127
|
+
571- Returns:
|
|
128
|
+
572- Fingerprint: The crew's fingerprint
|
|
129
|
+
573- """
|
|
130
|
+
574- return self.security_config.fingerprint
|
|
131
|
+
575-
|
|
132
|
+
576- def _setup_from_config(self) -> None:
|
|
133
|
+
577- """Initializes agents and tasks from the provided config."""
|
|
134
|
+
578- if self.config is None:
|
|
135
|
+
579- raise ValueError("Config should not be None.")
|
|
136
|
+
580- if not self.config.get("agents") or not self.config.get("tasks"):
|
|
137
|
+
581- raise PydanticCustomError(
|
|
138
|
+
582- "missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {}
|
|
139
|
+
583- )
|
|
140
|
+
584-
|
|
141
|
+
585- self.process = self.config.get("process", self.process)
|
|
142
|
+
586- self.agents = [Agent(**agent) for agent in self.config["agents"]]
|
|
143
|
+
587: self.tasks = [self._create_task(task) for task in self.config["tasks"]]
|
|
144
|
+
588-
|
|
145
|
+
589- def _create_task(self, task_config: dict[str, Any]) -> Task:
|
|
146
|
+
590- """Creates a task instance from its configuration.
|
|
147
|
+
591-
|
|
148
|
+
592- Args:
|
|
149
|
+
593- task_config: The configuration of the task.
|
|
150
|
+
594-
|
|
151
|
+
595- Returns:
|
|
152
|
+
596- A task instance.
|
|
153
|
+
597- """
|
|
154
|
+
598- task_agent = next(
|
|
155
|
+
599- agt for agt in self.agents if agt.role == task_config["agent"]
|
|
156
|
+
600- )
|
|
157
|
+
601- del task_config["agent"]
|
|
158
|
+
602- return Task(**task_config, agent=task_agent)
|
|
159
|
+
603-
|
|
160
|
+
604- def _setup_for_training(self, filename: str) -> None:
|
|
161
|
+
605- """Sets up the crew for training."""
|
|
162
|
+
606- self._train = True
|
|
163
|
+
607-
|
|
164
|
+
608: for task in self.tasks:
|
|
165
|
+
609- task.human_input = True
|
|
166
|
+
610-
|
|
167
|
+
611- for agent in self.agents:
|
|
168
|
+
612- agent.allow_delegation = False
|
|
169
|
+
613-
|
|
170
|
+
614- CrewTrainingHandler(TRAINING_DATA_FILE).initialize_file()
|
|
171
|
+
615- CrewTrainingHandler(filename).initialize_file()
|
|
172
|
+
616-
|
|
173
|
+
617- def train(
|
|
174
|
+
618- self, n_iterations: int, filename: str, inputs: dict[str, Any] | None = None
|
|
175
|
+
619- ) -> None:
|
|
176
|
+
620- """Trains the crew for a given number of iterations."""
|
|
177
|
+
621- inputs = inputs or {}
|
|
178
|
+
622- try:
|
|
179
|
+
623- crewai_event_bus.emit(
|
|
180
|
+
624- self,
|
|
181
|
+
625- CrewTrainStartedEvent(
|
|
182
|
+
626- crew_name=self.name,
|
|
183
|
+
627- n_iterations=n_iterations,
|
|
184
|
+
628- filename=filename,
|
|
185
|
+
629- inputs=inputs,
|
|
186
|
+
630- ),
|
|
187
|
+
631- )
|
|
188
|
+
632- train_crew = self.copy()
|
|
189
|
+
633- train_crew._setup_for_training(filename)
|
|
190
|
+
634-
|
|
191
|
+
635- for n_iteration in range(n_iterations):
|
|
192
|
+
636- train_crew._train_iteration = n_iteration
|
|
193
|
+
637- train_crew.kickoff(inputs=inputs)
|
|
194
|
+
638-
|
|
195
|
+
639- training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
|
196
|
+
640-
|
|
197
|
+
641- for agent in train_crew.agents:
|
|
198
|
+
642- if training_data.get(str(agent.id)):
|
|
199
|
+
643- result = TaskEvaluator(agent).evaluate_training_data( # type: ignore[arg-type]
|
|
200
|
+
644- training_data=training_data, agent_id=str(agent.id)
|
|
201
|
+
645- )
|
|
202
|
+
646- CrewTrainingHandler(filename).save_trained_data(
|
|
203
|
+
647- agent_id=str(agent.role),
|
|
204
|
+
648- trained_data=result.model_dump(),
|
|
205
|
+
649- )
|
|
206
|
+
650-
|
|
207
|
+
651- crewai_event_bus.emit(
|
|
208
|
+
652- self,
|
|
209
|
+
653- CrewTrainCompletedEvent(
|
|
210
|
+
654- crew_name=self.name,
|
|
211
|
+
655- n_iterations=n_iterations,
|
|
212
|
+
656- filename=filename,
|
|
213
|
+
657- ),
|
|
214
|
+
658- )
|
|
215
|
+
--
|
|
216
|
+
677: inputs: Optional input dictionary for task interpolation.
|
|
217
|
+
678- input_files: Optional dict of named file inputs for the crew.
|
|
218
|
+
679-
|
|
219
|
+
680- Returns:
|
|
220
|
+
681- CrewOutput or CrewStreamingOutput if streaming is enabled.
|
|
221
|
+
682- """
|
|
222
|
+
683- get_env_context()
|
|
223
|
+
684- if self.stream:
|
|
224
|
+
685- enable_agent_streaming(self.agents)
|
|
225
|
+
686- ctx = StreamingContext()
|
|
226
|
+
687-
|
|
227
|
+
688- def run_crew() -> None:
|
|
228
|
+
689- """Execute the crew and capture the result."""
|
|
229
|
+
690- try:
|
|
230
|
+
691- self.stream = False
|
|
231
|
+
692- crew_result = self.kickoff(inputs=inputs, input_files=input_files)
|
|
232
|
+
693- if isinstance(crew_result, CrewOutput):
|
|
233
|
+
694- ctx.result_holder.append(crew_result)
|
|
234
|
+
695- except Exception as exc:
|
|
235
|
+
696- signal_error(ctx.state, exc)
|
|
236
|
+
697- finally:
|
|
237
|
+
698- self.stream = True
|
|
238
|
+
699- signal_end(ctx.state)
|
|
239
|
+
700-
|
|
240
|
+
701- streaming_output = CrewStreamingOutput(
|
|
241
|
+
702- sync_iterator=create_chunk_generator(
|
|
242
|
+
703- ctx.state, run_crew, ctx.output_holder
|
|
243
|
+
704- )
|
|
244
|
+
705- )
|
|
245
|
+
706- ctx.output_holder.append(streaming_output)
|
|
246
|
+
707- return streaming_output
|
|
247
|
+
708-
|
|
248
|
+
709- baggage_ctx = baggage.set_baggage(
|
|
249
|
+
710- "crew_context", CrewContext(id=str(self.id), key=self.key)
|
|
250
|
+
711- )
|
|
251
|
+
712- token = attach(baggage_ctx)
|
|
252
|
+
713-
|
|
253
|
+
714- try:
|
|
254
|
+
715- inputs = prepare_kickoff(self, inputs, input_files)
|
|
255
|
+
716-
|
|
256
|
+
717- if self.process == Process.sequential:
|
|
257
|
+
718: result = self._run_sequential_process()
|
|
258
|
+
719- elif self.process == Process.hierarchical:
|
|
259
|
+
720: result = self._run_hierarchical_process()
|
|
260
|
+
721- else:
|
|
261
|
+
722- raise NotImplementedError(
|
|
262
|
+
723- f"The process '{self.process}' is not implemented yet."
|
|
263
|
+
724- )
|
|
264
|
+
725-
|
|
265
|
+
726- for after_callback in self.after_kickoff_callbacks:
|
|
266
|
+
727- result = after_callback(result)
|
|
267
|
+
728-
|
|
268
|
+
729- result = self._post_kickoff(result)
|
|
269
|
+
730-
|
|
270
|
+
731- self.usage_metrics = self.calculate_usage_metrics()
|
|
271
|
+
732-
|
|
272
|
+
733- return result
|
|
273
|
+
734- except Exception as e:
|
|
274
|
+
735- crewai_event_bus.emit(
|
|
275
|
+
736- self,
|
|
276
|
+
737- CrewKickoffFailedEvent(
|
|
277
|
+
738- error=str(e),
|
|
278
|
+
739- crew_name=self.name,
|
|
279
|
+
740- started_event_id=self._kickoff_event_id,
|
|
280
|
+
741- ),
|
|
281
|
+
742- )
|
|
282
|
+
743- raise
|
|
283
|
+
744- finally:
|
|
284
|
+
745- # Ensure all background memory saves complete before returning
|
|
285
|
+
746- if self._memory is not None and hasattr(self._memory, "drain_writes"):
|
|
286
|
+
747- self._memory.drain_writes()
|
|
287
|
+
748- clear_files(self.id)
|
|
288
|
+
749- detach(token)
|
|
289
|
+
750-
|
|
290
|
+
751- def _post_kickoff(self, result: CrewOutput) -> CrewOutput:
|
|
291
|
+
752- return result
|
|
292
|
+
753-
|
|
293
|
+
754- def kickoff_for_each(
|
|
294
|
+
755- self,
|
|
295
|
+
756- inputs: list[dict[str, Any]],
|
|
296
|
+
757- input_files: dict[str, FileInput] | None = None,
|
|
297
|
+
758- ) -> list[CrewOutput | CrewStreamingOutput]:
|
|
298
|
+
759- """Executes the Crew's workflow for each input and aggregates results.
|
|
299
|
+
760-
|
|
300
|
+
761- Args:
|
|
301
|
+
762- inputs: List of input dictionaries, one per execution.
|
|
302
|
+
763- input_files: Optional dict of named file inputs shared across all executions.
|
|
303
|
+
764-
|
|
304
|
+
765- Returns:
|
|
305
|
+
766- List of CrewOutput or CrewStreamingOutput objects.
|
|
306
|
+
767-
|
|
307
|
+
768- If stream=True, returns a list of CrewStreamingOutput objects that must
|
|
308
|
+
769- each be iterated to get stream chunks and access results.
|
|
309
|
+
770- """
|
|
310
|
+
--
|
|
311
|
+
798: inputs: Optional input dictionary for task interpolation.
|
|
312
|
+
799- input_files: Optional dict of named file inputs for the crew.
|
|
313
|
+
800-
|
|
314
|
+
801- Returns:
|
|
315
|
+
802- CrewOutput or CrewStreamingOutput if streaming is enabled.
|
|
316
|
+
803-
|
|
317
|
+
804- If stream=True, returns a CrewStreamingOutput that can be async-iterated
|
|
318
|
+
805- to get stream chunks. After iteration completes, access the final result
|
|
319
|
+
806- via .result.
|
|
320
|
+
807- """
|
|
321
|
+
808- inputs = inputs or {}
|
|
322
|
+
809-
|
|
323
|
+
810- if self.stream:
|
|
324
|
+
811- enable_agent_streaming(self.agents)
|
|
325
|
+
812- ctx = StreamingContext(use_async=True)
|
|
326
|
+
813-
|
|
327
|
+
814- async def run_crew() -> None:
|
|
328
|
+
815- try:
|
|
329
|
+
816- self.stream = False
|
|
330
|
+
817- result = await asyncio.to_thread(self.kickoff, inputs, input_files)
|
|
331
|
+
818- if isinstance(result, CrewOutput):
|
|
332
|
+
819- ctx.result_holder.append(result)
|
|
333
|
+
820- except Exception as e:
|
|
334
|
+
821- signal_error(ctx.state, e, is_async=True)
|
|
335
|
+
822- finally:
|
|
336
|
+
823- self.stream = True
|
|
337
|
+
824- signal_end(ctx.state, is_async=True)
|
|
338
|
+
825-
|
|
339
|
+
826- streaming_output = CrewStreamingOutput(
|
|
340
|
+
827- async_iterator=create_async_chunk_generator(
|
|
341
|
+
828- ctx.state, run_crew, ctx.output_holder
|
|
342
|
+
829- )
|
|
343
|
+
830- )
|
|
344
|
+
831- ctx.output_holder.append(streaming_output)
|
|
345
|
+
832-
|
|
346
|
+
833- return streaming_output
|
|
347
|
+
834-
|
|
348
|
+
835- return await asyncio.to_thread(self.kickoff, inputs, input_files)
|
|
349
|
+
836-
|
|
350
|
+
837- async def kickoff_for_each_async(
|
|
351
|
+
838- self,
|
|
352
|
+
839- inputs: list[dict[str, Any]],
|
|
353
|
+
840- input_files: dict[str, FileInput] | None = None,
|
|
354
|
+
841- ) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput:
|
|
355
|
+
842- """Executes the Crew's workflow for each input asynchronously.
|
|
356
|
+
843-
|
|
357
|
+
844- Args:
|
|
358
|
+
845- inputs: List of input dictionaries, one per execution.
|
|
359
|
+
846- input_files: Optional dict of named file inputs shared across all executions.
|
|
360
|
+
847-
|
|
361
|
+
848- Returns:
|
|
362
|
+
--
|
|
363
|
+
875: inputs: Optional input dictionary for task interpolation.
|
|
364
|
+
876- input_files: Optional dict of named file inputs for the crew.
|
|
365
|
+
877-
|
|
366
|
+
878- Returns:
|
|
367
|
+
879- CrewOutput or CrewStreamingOutput if streaming is enabled.
|
|
368
|
+
880- """
|
|
369
|
+
881- if self.stream:
|
|
370
|
+
882- enable_agent_streaming(self.agents)
|
|
371
|
+
883- ctx = StreamingContext(use_async=True)
|
|
372
|
+
884-
|
|
373
|
+
885- async def run_crew() -> None:
|
|
374
|
+
886- try:
|
|
375
|
+
887- self.stream = False
|
|
376
|
+
888- inner_result = await self.akickoff(inputs, input_files)
|
|
377
|
+
889- if isinstance(inner_result, CrewOutput):
|
|
378
|
+
890- ctx.result_holder.append(inner_result)
|
|
379
|
+
891- except Exception as exc:
|
|
380
|
+
892- signal_error(ctx.state, exc, is_async=True)
|
|
381
|
+
893- finally:
|
|
382
|
+
894- self.stream = True
|
|
383
|
+
895- signal_end(ctx.state, is_async=True)
|
|
384
|
+
896-
|
|
385
|
+
897- streaming_output = CrewStreamingOutput(
|
|
386
|
+
898- async_iterator=create_async_chunk_generator(
|
|
387
|
+
899- ctx.state, run_crew, ctx.output_holder
|
|
388
|
+
900- )
|
|
389
|
+
901- )
|
|
390
|
+
902- ctx.output_holder.append(streaming_output)
|
|
391
|
+
903-
|
|
392
|
+
904- return streaming_output
|
|
393
|
+
905-
|
|
394
|
+
906- baggage_ctx = baggage.set_baggage(
|
|
395
|
+
907- "crew_context", CrewContext(id=str(self.id), key=self.key)
|
|
396
|
+
908- )
|
|
397
|
+
909- token = attach(baggage_ctx)
|
|
398
|
+
910-
|
|
399
|
+
911- try:
|
|
400
|
+
912- inputs = prepare_kickoff(self, inputs, input_files)
|
|
401
|
+
913-
|
|
402
|
+
914- if self.process == Process.sequential:
|
|
403
|
+
915- result = await self._arun_sequential_process()
|
|
404
|
+
916- elif self.process == Process.hierarchical:
|
|
405
|
+
917- result = await self._arun_hierarchical_process()
|
|
406
|
+
918- else:
|
|
407
|
+
919- raise NotImplementedError(
|
|
408
|
+
920- f"The process '{self.process}' is not implemented yet."
|
|
409
|
+
921- )
|
|
410
|
+
922-
|
|
411
|
+
923- for after_callback in self.after_kickoff_callbacks:
|
|
412
|
+
924- result = after_callback(result)
|
|
413
|
+
925-
|
|
414
|
+
--
|
|
415
|
+
1016: if task.async_execution:
|
|
416
|
+
1017- context = self._get_context(
|
|
417
|
+
1018- task, [last_sync_output] if last_sync_output else []
|
|
418
|
+
1019- )
|
|
419
|
+
1020- async_task = asyncio.create_task(
|
|
420
|
+
1021- task.aexecute_sync(
|
|
421
|
+
1022- agent=exec_data.agent,
|
|
422
|
+
1023- context=context,
|
|
423
|
+
1024- tools=exec_data.tools,
|
|
424
|
+
1025- )
|
|
425
|
+
1026- )
|
|
426
|
+
1027- pending_tasks.append((task, async_task, task_index))
|
|
427
|
+
1028- else:
|
|
428
|
+
1029- if pending_tasks:
|
|
429
|
+
1030- task_outputs = await self._aprocess_async_tasks(
|
|
430
|
+
1031- pending_tasks, was_replayed
|
|
431
|
+
1032- )
|
|
432
|
+
1033- pending_tasks.clear()
|
|
433
|
+
1034-
|
|
434
|
+
1035- context = self._get_context(task, task_outputs)
|
|
435
|
+
1036- task_output = await task.aexecute_sync(
|
|
436
|
+
1037- agent=exec_data.agent,
|
|
437
|
+
1038- context=context,
|
|
438
|
+
1039- tools=exec_data.tools,
|
|
439
|
+
1040- )
|
|
440
|
+
1041- task_outputs.append(task_output)
|
|
441
|
+
1042- self._process_task_result(task, task_output)
|
|
442
|
+
1043- self._store_execution_log(task, task_output, task_index, was_replayed)
|
|
443
|
+
1044-
|
|
444
|
+
1045- if pending_tasks:
|
|
445
|
+
1046- task_outputs = await self._aprocess_async_tasks(pending_tasks, was_replayed)
|
|
446
|
+
1047-
|
|
447
|
+
1048- return self._create_crew_output(task_outputs)
|
|
448
|
+
1049-
|
|
449
|
+
1050- async def _ahandle_conditional_task(
|
|
450
|
+
1051- self,
|
|
451
|
+
1052- task: ConditionalTask,
|
|
452
|
+
1053- task_outputs: list[TaskOutput],
|
|
453
|
+
1054- pending_tasks: list[tuple[Task, asyncio.Task[TaskOutput], int]],
|
|
454
|
+
1055- task_index: int,
|
|
455
|
+
1056- was_replayed: bool,
|
|
456
|
+
1057- ) -> TaskOutput | None:
|
|
457
|
+
1058- """Handle conditional task evaluation using native async."""
|
|
458
|
+
1059- if pending_tasks:
|
|
459
|
+
1060- task_outputs = await self._aprocess_async_tasks(pending_tasks, was_replayed)
|
|
460
|
+
1061- pending_tasks.clear()
|
|
461
|
+
1062-
|
|
462
|
+
1063- return check_conditional_skip(
|
|
463
|
+
1064- self, task, task_outputs, task_index, was_replayed
|
|
464
|
+
1065- )
|
|
465
|
+
1066-
|
|
466
|
+
--
|
|
467
|
+
1141: def _run_sequential_process(self) -> CrewOutput:
|
|
468
|
+
1142- """Executes tasks sequentially and returns the final output."""
|
|
469
|
+
1143- return self._execute_tasks(self.tasks)
|
|
470
|
+
1144-
|
|
471
|
+
1145: def _run_hierarchical_process(self) -> CrewOutput:
|
|
472
|
+
1146- """Creates and assigns a manager agent to complete the tasks."""
|
|
473
|
+
1147- self._create_manager_agent()
|
|
474
|
+
1148- return self._execute_tasks(self.tasks)
|
|
475
|
+
1149-
|
|
476
|
+
1150- def _create_manager_agent(self) -> None:
|
|
477
|
+
1151- if self.manager_agent is not None:
|
|
478
|
+
1152- self.manager_agent.allow_delegation = True
|
|
479
|
+
1153- manager = self.manager_agent
|
|
480
|
+
1154- if manager.tools is not None and len(manager.tools) > 0:
|
|
481
|
+
1155- self._logger.log(
|
|
482
|
+
1156- "warning",
|
|
483
|
+
1157- "Manager agent should not have tools",
|
|
484
|
+
1158- color="bold_yellow",
|
|
485
|
+
1159- )
|
|
486
|
+
1160- manager.tools = []
|
|
487
|
+
1161- raise Exception("Manager agent should not have tools")
|
|
488
|
+
1162- else:
|
|
489
|
+
1163- self.manager_llm = create_llm(self.manager_llm)
|
|
490
|
+
1164- i18n = get_i18n(prompt_file=self.prompt_file)
|
|
491
|
+
1165- manager = Agent(
|
|
492
|
+
1166- role=i18n.retrieve("hierarchical_manager_agent", "role"),
|
|
493
|
+
1167- goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
|
|
494
|
+
1168- backstory=i18n.retrieve("hierarchical_manager_agent", "backstory"),
|
|
495
|
+
1169- tools=AgentTools(agents=self.agents).tools(),
|
|
496
|
+
1170- allow_delegation=True,
|
|
497
|
+
1171- llm=self.manager_llm,
|
|
498
|
+
1172- verbose=self.verbose,
|
|
499
|
+
1173- )
|
|
500
|
+
1174- self.manager_agent = manager
|
|
501
|
+
1175- manager.crew = self
|
|
502
|
+
1176-
|
|
503
|
+
1177- def _get_execution_start_index(self, tasks: list[Task]) -> int | None:
|
|
504
|
+
1178- return None
|
|
505
|
+
1179-
|
|
506
|
+
def _execute_tasks(
    self,
    tasks: list[Task],
    start_index: int | None = 0,
    was_replayed: bool = False,
) -> CrewOutput:
    """Executes tasks sequentially, interleaving async tasks, and returns the final output.

    Args:
        tasks (list[Task]): Tasks to execute in order.
        start_index (int | None): Index to start/resume execution from; may be
            overridden by _get_execution_start_index() when that hook returns
            a value.
        was_replayed (bool): Whether this run replays a previous execution
            (forwarded to async processing and the execution log).

    Returns:
        CrewOutput: Final output of the crew
    """
    # A subclass-provided start index takes precedence over the argument.
    custom_start = self._get_execution_start_index(tasks)
    if custom_start is not None:
        start_index = custom_start

    task_outputs: list[TaskOutput] = []
    # In-flight async tasks: (task, future, original index in `tasks`).
    futures: list[tuple[Task, Future[TaskOutput], int]] = []
    last_sync_output: TaskOutput | None = None

    for task_index, task in enumerate(tasks):
        exec_data, task_outputs, last_sync_output = prepare_task_execution(
            self, task, task_index, start_index, task_outputs, last_sync_output
        )
        if exec_data.should_skip:
            continue

        if isinstance(task, ConditionalTask):
            skipped_task_output = self._handle_conditional_task(
                task, task_outputs, futures, task_index, was_replayed
            )
            # A truthy value means the condition failed: record the
            # placeholder output and move on without executing the task.
            if skipped_task_output:
                task_outputs.append(skipped_task_output)
                continue

        if task.async_execution:
            # Async tasks only see the most recent synchronous output as
            # context, not outputs of other still-running async tasks.
            context = self._get_context(
                task, [last_sync_output] if last_sync_output else []
            )
            future = task.execute_async(
                agent=exec_data.agent,
                context=context,
                tools=exec_data.tools,
            )
            futures.append((task, future, task_index))
        else:
            # A synchronous task acts as a barrier: drain pending async
            # work first so its outputs are available as context.
            if futures:
                task_outputs = self._process_async_tasks(futures, was_replayed)
                futures.clear()

            context = self._get_context(task, task_outputs)
            task_output = task.execute_sync(
                agent=exec_data.agent,
                context=context,
                tools=exec_data.tools,
            )
            task_outputs.append(task_output)
            self._process_task_result(task, task_output)
            self._store_execution_log(task, task_output, task_index, was_replayed)

    # Drain any async tasks still pending after the last task.
    if futures:
        task_outputs = self._process_async_tasks(futures, was_replayed)

    return self._create_crew_output(task_outputs)
def _handle_conditional_task(
    self,
    task: ConditionalTask,
    task_outputs: list[TaskOutput],
    futures: list[tuple[Task, Future[TaskOutput], int]],
    task_index: int,
    was_replayed: bool,
) -> TaskOutput | None:
    """Decide whether a conditional task should be skipped.

    Pending async futures are drained first so the condition is evaluated
    against the complete set of prior task outputs.

    Returns:
        The placeholder output when the task is skipped, otherwise None.
    """
    if futures:
        # Flush in-flight async work; its outputs feed the condition check.
        task_outputs = self._process_async_tasks(futures, was_replayed)
        futures.clear()
    return check_conditional_skip(self, task, task_outputs, task_index, was_replayed)
1265- def _prepare_tools(
|
|
592
|
+
1266- self, agent: BaseAgent, task: Task, tools: list[BaseTool]
|
|
593
|
+
1267- ) -> list[BaseTool]:
|
|
594
|
+
1268- # Add delegation tools if agent allows delegation
|
|
595
|
+
1269- if hasattr(agent, "allow_delegation") and getattr(
|
|
596
|
+
--
|
|
597
|
+
1645: for task in self.tasks:
|
|
598
|
+
1646- # description and expected_output might contain e.g. {topic}, {user_name}
|
|
599
|
+
1647- text = f"{task.description or ''} {task.expected_output or ''}"
|
|
600
|
+
1648- required_inputs.update(placeholder_pattern.findall(text))
|
|
601
|
+
1649-
|
|
602
|
+
1650- # Scan agents for inputs
|
|
603
|
+
1651- for agent in self.agents:
|
|
604
|
+
1652- # role, goal, backstory might have placeholders like {role_detail}, etc.
|
|
605
|
+
1653- text = f"{agent.role or ''} {agent.goal or ''} {agent.backstory or ''}"
|
|
606
|
+
1654- required_inputs.update(placeholder_pattern.findall(text))
|
|
607
|
+
1655-
|
|
608
|
+
1656- return required_inputs
|
|
609
|
+
1657-
|
|
610
|
+
def copy(self) -> Crew:  # type: ignore[override]
    """
    Creates a deep copy of the Crew instance.

    Agents and tasks are cloned explicitly (with task context references
    re-pointed at the cloned tasks); the remaining pydantic fields are
    duplicated via model_dump.

    Returns:
        Crew: A new instance with copied components
    """

    # Fields handled explicitly below, plus runtime-only handles (logger,
    # RPM controller, spans, handlers) that must not be duplicated blindly.
    exclude = {
        "id",
        "_rpm_controller",
        "_logger",
        "_execution_span",
        "_file_handler",
        "_cache_handler",
        "_memory",
        "agents",
        "tasks",
        "knowledge_sources",
        "knowledge",
        "manager_agent",
        "manager_llm",
    }

    cloned_agents = [agent.copy() for agent in self.agents]
    manager_agent = self.manager_agent.copy() if self.manager_agent else None
    manager_llm = shallow_copy(self.manager_llm) if self.manager_llm else None

    # Maps each original task's key to its clone so context references can
    # be rewired to the cloned tasks in the second pass below.
    task_mapping: dict[str, Any] = {}

    cloned_tasks = []
    # Knowledge is shared (shallow-copied), not deep-cloned.
    existing_knowledge_sources = shallow_copy(self.knowledge_sources)
    existing_knowledge = shallow_copy(self.knowledge)

    for task in self.tasks:
        cloned_task = task.copy(cloned_agents, task_mapping)
        cloned_tasks.append(cloned_task)
        task_mapping[task.key] = cloned_task

    # Second pass: re-point each clone's context list at the cloned tasks.
    for cloned_task, original_task in zip(cloned_tasks, self.tasks, strict=False):
        if isinstance(original_task.context, list):
            cloned_context = [
                task_mapping[context_task.key]
                for context_task in original_task.context
            ]
            cloned_task.context = cloned_context

    copied_data = self.model_dump(exclude=exclude)
    copied_data = {k: v for k, v in copied_data.items() if v is not None}
    # Carry the live memory object over to the clone when present.
    if getattr(self, "_memory", None):
        copied_data["memory"] = self._memory

    # Defensive: ensure the explicitly cloned components win over anything
    # model_dump may still have emitted for these keys.
    copied_data.pop("agents", None)
    copied_data.pop("tasks", None)

    return Crew(
        **copied_data,
        agents=cloned_agents,
        tasks=cloned_tasks,
        knowledge_sources=existing_knowledge_sources,
        knowledge=existing_knowledge,
        manager_agent=manager_agent,
        manager_llm=manager_llm,
    )
def _set_tasks_callbacks(self) -> None:
    """Apply the crew-level task_callback to every task lacking its own."""
    for task in self.tasks:
        if task.callback:
            continue
        task.callback = self.task_callback
def _interpolate_inputs(self, inputs: dict[str, Any]) -> None:
    """Interpolates the inputs in the tasks and agents.

    Args:
        inputs: Mapping of placeholder names (e.g. {topic}) to replacement
            values, substituted into task and agent text fields.
    """
    # Plain loops, not list comprehensions: these calls are executed purely
    # for their side effects and return None, so building a throwaway list
    # of Nones (as the previous comprehension did) served no purpose.
    for task in self.tasks:
        task.interpolate_inputs_and_add_conversation_history(inputs)
    for agent in self.agents:
        agent.interpolate_inputs(inputs)
def _finish_execution(self, final_string_output: str) -> None:
    """Stop RPM throttling once the crew has produced its final output.

    Args:
        final_string_output: The crew's final rendered output (not used
            here; kept for interface compatibility).
    """
    if not self.max_rpm:
        return
    self._rpm_controller.stop_rpm_counter()
def calculate_usage_metrics(self) -> UsageMetrics:
    """Calculates and returns the usage metrics.

    Aggregates token usage from every agent (and the manager agent, when
    present), stores the total on self.usage_metrics, and returns it.
    """
    total_usage_metrics = UsageMetrics()

    for agent in self.agents:
        if isinstance(agent.llm, BaseLLM):
            llm_usage = agent.llm.get_token_usage_summary()

            total_usage_metrics.add_usage_metrics(llm_usage)
        else:
            # fallback litellm: older agents track tokens on _token_process
            if hasattr(agent, "_token_process"):
                token_sum = agent._token_process.get_summary()
                total_usage_metrics.add_usage_metrics(token_sum)

    if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
        token_sum = self.manager_agent._token_process.get_summary()
        total_usage_metrics.add_usage_metrics(token_sum)

    # NOTE(review): a manager agent exposing BOTH _token_process and an llm
    # with get_token_usage_summary would have its usage added twice (here
    # and above) — confirm these sources are mutually exclusive in practice.
    if (
        self.manager_agent
        and hasattr(self.manager_agent, "llm")
        and hasattr(self.manager_agent.llm, "get_token_usage_summary")
    ):
        if isinstance(self.manager_agent.llm, BaseLLM):
            llm_usage = self.manager_agent.llm.get_token_usage_summary()
        else:
            llm_usage = self.manager_agent.llm._token_process.get_summary()

        total_usage_metrics.add_usage_metrics(llm_usage)

    self.usage_metrics = total_usage_metrics
    return total_usage_metrics
1779- def test(
|
|
732
|
+
1780- self,
|
|
733
|
+
1781- n_iterations: int,
|
|
734
|
+
1782- eval_llm: str | InstanceOf[BaseLLM],
|
|
735
|
+
1783- inputs: dict[str, Any] | None = None,
|
|
736
|
+
1784- ) -> None:
|
|
737
|
+
1785- """Test and evaluate the Crew with the given inputs for n iterations.
|
|
738
|
+
1786-
|