agnt5-0.1.0-cp39-abi3-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agnt5/__init__.py +307 -0
- agnt5/__pycache__/__init__.cpython-311.pyc +0 -0
- agnt5/__pycache__/agent.cpython-311.pyc +0 -0
- agnt5/__pycache__/context.cpython-311.pyc +0 -0
- agnt5/__pycache__/durable.cpython-311.pyc +0 -0
- agnt5/__pycache__/extraction.cpython-311.pyc +0 -0
- agnt5/__pycache__/memory.cpython-311.pyc +0 -0
- agnt5/__pycache__/reflection.cpython-311.pyc +0 -0
- agnt5/__pycache__/runtime.cpython-311.pyc +0 -0
- agnt5/__pycache__/task.cpython-311.pyc +0 -0
- agnt5/__pycache__/tool.cpython-311.pyc +0 -0
- agnt5/__pycache__/tracing.cpython-311.pyc +0 -0
- agnt5/__pycache__/types.cpython-311.pyc +0 -0
- agnt5/__pycache__/workflow.cpython-311.pyc +0 -0
- agnt5/_core.abi3.so +0 -0
- agnt5/agent.py +1086 -0
- agnt5/context.py +406 -0
- agnt5/durable.py +1050 -0
- agnt5/extraction.py +410 -0
- agnt5/llm/__init__.py +179 -0
- agnt5/llm/__pycache__/__init__.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/anthropic.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/azure.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/base.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/google.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/mistral.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/openai.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/together.cpython-311.pyc +0 -0
- agnt5/llm/anthropic.py +319 -0
- agnt5/llm/azure.py +348 -0
- agnt5/llm/base.py +315 -0
- agnt5/llm/google.py +373 -0
- agnt5/llm/mistral.py +330 -0
- agnt5/llm/model_registry.py +467 -0
- agnt5/llm/models.json +227 -0
- agnt5/llm/openai.py +334 -0
- agnt5/llm/together.py +377 -0
- agnt5/memory.py +746 -0
- agnt5/reflection.py +514 -0
- agnt5/runtime.py +699 -0
- agnt5/task.py +476 -0
- agnt5/testing.py +451 -0
- agnt5/tool.py +516 -0
- agnt5/tracing.py +624 -0
- agnt5/types.py +210 -0
- agnt5/workflow.py +897 -0
- agnt5-0.1.0.dist-info/METADATA +93 -0
- agnt5-0.1.0.dist-info/RECORD +49 -0
- agnt5-0.1.0.dist-info/WHEEL +4 -0
agnt5/reflection.py
ADDED
@@ -0,0 +1,514 @@
"""
Reflection capabilities for AGNT5 SDK.

Provides self-reflection, meta-cognition, and performance evaluation
capabilities for agents to improve their responses and learn from experience.
"""

import json
import logging
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple

from .types import Message, MessageRole
from .task import task, OutputFormat
from .tracing import trace_agent_run, span, traced, log, TraceLevel

logger = logging.getLogger(__name__)


class ReflectionType(Enum):
    """Types of reflection."""
    RESPONSE_QUALITY = "response_quality"
    GOAL_ACHIEVEMENT = "goal_achievement"
    PROCESS_EVALUATION = "process_evaluation"
    ERROR_ANALYSIS = "error_analysis"
    PERFORMANCE_REVIEW = "performance_review"


class ReflectionLevel(Enum):
    """Levels of reflection depth."""
    SURFACE = "surface"  # Basic quality check
    ANALYTICAL = "analytical"  # Detailed analysis
    METACOGNITIVE = "metacognitive"  # Deep self-awareness


@dataclass
class ReflectionCriteria:
    """Criteria for reflection evaluation."""
    name: str
    description: str
    weight: float = 1.0
    min_score: float = 0.0
    max_score: float = 10.0


@dataclass
class ReflectionResult:
    """Result of a reflection process."""
    reflection_type: ReflectionType
    level: ReflectionLevel
    overall_score: float
    criteria_scores: Dict[str, float] = field(default_factory=dict)
    insights: List[str] = field(default_factory=list)
    improvements: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)
    timestamp: datetime = field(default_factory=datetime.utcnow)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "reflection_type": self.reflection_type.value,
            "level": self.level.value,
            "overall_score": self.overall_score,
            "criteria_scores": self.criteria_scores,
            "insights": self.insights,
            "improvements": self.improvements,
            "metadata": self.metadata,
            "timestamp": self.timestamp.isoformat()
        }


class ReflectionEngine:
    """Engine for conducting various types of reflection."""

    def __init__(self):
        self.default_criteria = {
            ReflectionType.RESPONSE_QUALITY: [
                ReflectionCriteria("accuracy", "How accurate is the response?", weight=2.0),
                ReflectionCriteria("completeness", "How complete is the response?", weight=1.5),
                ReflectionCriteria("clarity", "How clear and understandable is the response?", weight=1.0),
                ReflectionCriteria("relevance", "How relevant is the response to the question?", weight=2.0),
                ReflectionCriteria("helpfulness", "How helpful is the response to the user?", weight=1.5),
            ],
            ReflectionType.GOAL_ACHIEVEMENT: [
                ReflectionCriteria("goal_alignment", "How well aligned is the response with stated goals?", weight=2.0),
                ReflectionCriteria("task_completion", "How completely was the task accomplished?", weight=2.0),
                ReflectionCriteria("efficiency", "How efficiently was the goal achieved?", weight=1.0),
            ],
            ReflectionType.PROCESS_EVALUATION: [
                ReflectionCriteria("reasoning_quality", "Quality of the reasoning process", weight=2.0),
                ReflectionCriteria("tool_usage", "Appropriateness of tool usage", weight=1.5),
                ReflectionCriteria("information_gathering", "Effectiveness of information gathering", weight=1.0),
                ReflectionCriteria("decision_making", "Quality of decision making", weight=1.5),
            ]
        }

    async def reflect(
        self,
        reflection_type: ReflectionType,
        level: ReflectionLevel,
        context: Dict[str, Any],
        agent: Optional['Agent'] = None,
        custom_criteria: Optional[List[ReflectionCriteria]] = None
    ) -> ReflectionResult:
        """
        Conduct reflection of specified type and level.

        Args:
            reflection_type: Type of reflection to conduct
            level: Depth level of reflection
            context: Context information for reflection
            agent: Agent to use for reflection (if not provided, creates one)
            custom_criteria: Custom criteria for evaluation

        Returns:
            ReflectionResult with scores, insights, and improvements
        """
        with traced(f"reflection.{reflection_type.value}") as reflection_span:
            reflection_span.set_attribute("reflection.type", reflection_type.value)
            reflection_span.set_attribute("reflection.level", level.value)

            # Get criteria for evaluation
            criteria = custom_criteria or self.default_criteria.get(reflection_type, [])

            # Use provided agent or create a reflection-focused one
            if agent is None:
                from .agent import Agent
                agent = Agent(
                    name="reflection_agent",
                    model="gpt-4o",
                    system_prompt="You are a metacognitive AI assistant specialized in reflection and self-evaluation. Provide thoughtful, honest assessments."
                )

            # Conduct reflection based on type and level
            if reflection_type == ReflectionType.RESPONSE_QUALITY:
                return await self._reflect_on_response_quality(context, criteria, level, agent)
            elif reflection_type == ReflectionType.GOAL_ACHIEVEMENT:
                return await self._reflect_on_goal_achievement(context, criteria, level, agent)
            elif reflection_type == ReflectionType.PROCESS_EVALUATION:
                return await self._reflect_on_process(context, criteria, level, agent)
            elif reflection_type == ReflectionType.ERROR_ANALYSIS:
                return await self._reflect_on_errors(context, criteria, level, agent)
            else:
                return await self._reflect_generic(reflection_type, context, criteria, level, agent)

    async def _reflect_on_response_quality(
        self,
        context: Dict[str, Any],
        criteria: List[ReflectionCriteria],
        level: ReflectionLevel,
        agent: 'Agent'
    ) -> ReflectionResult:
        """Reflect on the quality of a response."""
        user_query = context.get("user_query", "")
        agent_response = context.get("agent_response", "")

        prompt = self._build_reflection_prompt(
            reflection_type="response quality",
            level=level,
            criteria=criteria,
            context_description=f"""
User Query: {user_query}
Agent Response: {agent_response}

Evaluate the agent's response quality based on the criteria provided.
""")

        return await self._execute_reflection(
            ReflectionType.RESPONSE_QUALITY,
            level,
            criteria,
            prompt,
            agent,
            context
        )

    async def _reflect_on_goal_achievement(
        self,
        context: Dict[str, Any],
        criteria: List[ReflectionCriteria],
        level: ReflectionLevel,
        agent: 'Agent'
    ) -> ReflectionResult:
        """Reflect on how well goals were achieved."""
        goals = context.get("goals", [])
        actions_taken = context.get("actions_taken", [])
        outcomes = context.get("outcomes", [])

        prompt = self._build_reflection_prompt(
            reflection_type="goal achievement",
            level=level,
            criteria=criteria,
            context_description=f"""
Goals: {goals}
Actions Taken: {actions_taken}
Outcomes: {outcomes}

Evaluate how well the stated goals were achieved.
""")

        return await self._execute_reflection(
            ReflectionType.GOAL_ACHIEVEMENT,
            level,
            criteria,
            prompt,
            agent,
            context
        )

    async def _reflect_on_process(
        self,
        context: Dict[str, Any],
        criteria: List[ReflectionCriteria],
        level: ReflectionLevel,
        agent: 'Agent'
    ) -> ReflectionResult:
        """Reflect on the process used to complete a task."""
        process_steps = context.get("process_steps", [])
        tools_used = context.get("tools_used", [])
        reasoning = context.get("reasoning", "")

        prompt = self._build_reflection_prompt(
            reflection_type="process evaluation",
            level=level,
            criteria=criteria,
            context_description=f"""
Process Steps: {process_steps}
Tools Used: {tools_used}
Reasoning: {reasoning}

Evaluate the process and methodology used.
""")

        return await self._execute_reflection(
            ReflectionType.PROCESS_EVALUATION,
            level,
            criteria,
            prompt,
            agent,
            context
        )

    async def _reflect_on_errors(
        self,
        context: Dict[str, Any],
        criteria: List[ReflectionCriteria],
        level: ReflectionLevel,
        agent: 'Agent'
    ) -> ReflectionResult:
        """Reflect on errors and failures."""
        errors = context.get("errors", [])
        attempted_solutions = context.get("attempted_solutions", [])

        prompt = self._build_reflection_prompt(
            reflection_type="error analysis",
            level=level,
            criteria=criteria or [
                ReflectionCriteria("error_identification", "How well were errors identified?"),
                ReflectionCriteria("root_cause_analysis", "Quality of root cause analysis"),
                ReflectionCriteria("solution_appropriateness", "How appropriate were the attempted solutions?"),
            ],
            context_description=f"""
Errors Encountered: {errors}
Attempted Solutions: {attempted_solutions}

Analyze the errors and the approach to handling them.
""")

        return await self._execute_reflection(
            ReflectionType.ERROR_ANALYSIS,
            level,
            criteria,
            prompt,
            agent,
            context
        )

    async def _reflect_generic(
        self,
        reflection_type: ReflectionType,
        context: Dict[str, Any],
        criteria: List[ReflectionCriteria],
        level: ReflectionLevel,
        agent: 'Agent'
    ) -> ReflectionResult:
        """Generic reflection for custom types."""
        prompt = self._build_reflection_prompt(
            reflection_type=reflection_type.value,
            level=level,
            criteria=criteria,
            context_description=f"Context: {json.dumps(context, indent=2)}"
        )

        return await self._execute_reflection(
            reflection_type,
            level,
            criteria,
            prompt,
            agent,
            context
        )

    def _build_reflection_prompt(
        self,
        reflection_type: str,
        level: ReflectionLevel,
        criteria: List[ReflectionCriteria],
        context_description: str
    ) -> str:
        """Build a prompt for reflection."""
        level_instructions = {
            ReflectionLevel.SURFACE: "Provide a quick, high-level evaluation.",
            ReflectionLevel.ANALYTICAL: "Provide a detailed analysis with specific examples and reasoning.",
            ReflectionLevel.METACOGNITIVE: "Provide deep introspection, considering thinking patterns, biases, and meta-level insights."
        }

        criteria_text = "\n".join([
            f"- {criterion.name}: {criterion.description} (weight: {criterion.weight})"
            for criterion in criteria
        ])

        return f"""You are conducting a {reflection_type} reflection at the {level.value} level.

{level_instructions[level]}

{context_description}

Evaluation Criteria:
{criteria_text}

Please provide:
1. A score (0-10) for each criterion
2. An overall weighted score
3. Key insights about what went well and what could be improved
4. Specific recommendations for improvement
5. Any meta-level observations about the thinking process

Format your response as JSON:
{{
    "criteria_scores": {{"criterion_name": score, ...}},
    "overall_score": weighted_average,
    "insights": ["insight1", "insight2", ...],
    "improvements": ["improvement1", "improvement2", ...],
    "meta_observations": ["observation1", "observation2", ...]
}}

Be honest, constructive, and specific in your evaluation."""

    async def _execute_reflection(
        self,
        reflection_type: ReflectionType,
        level: ReflectionLevel,
        criteria: List[ReflectionCriteria],
        prompt: str,
        agent: 'Agent',
        context: Dict[str, Any]
    ) -> ReflectionResult:
        """Execute the reflection and parse results."""
        try:
            # Get reflection response from agent
            response = await agent.run(prompt)

            # Extract JSON from response
            from .extraction import JSONExtractor
            extractor = JSONExtractor()
            json_results = extractor.extract_json_from_text(response.content)

            if not json_results:
                # Fallback: create a basic reflection result
                return ReflectionResult(
                    reflection_type=reflection_type,
                    level=level,
                    overall_score=5.0,
                    insights=["Unable to parse detailed reflection"],
                    improvements=["Improve reflection response parsing"]
                )

            reflection_data = json_results[0]

            # Calculate overall score if not provided
            criteria_scores = reflection_data.get("criteria_scores", {})
            if "overall_score" not in reflection_data and criteria_scores:
                total_weighted_score = 0
                total_weight = 0

                for criterion in criteria:
                    if criterion.name in criteria_scores:
                        score = criteria_scores[criterion.name]
                        total_weighted_score += score * criterion.weight
                        total_weight += criterion.weight

                overall_score = total_weighted_score / total_weight if total_weight > 0 else 5.0
            else:
                overall_score = reflection_data.get("overall_score", 5.0)

            return ReflectionResult(
                reflection_type=reflection_type,
                level=level,
                overall_score=overall_score,
                criteria_scores=criteria_scores,
                insights=reflection_data.get("insights", []),
                improvements=reflection_data.get("improvements", []),
                metadata={
                    "meta_observations": reflection_data.get("meta_observations", []),
                    "context": context
                }
            )

        except Exception as e:
            logger.error(f"Reflection execution failed: {e}")

            # Return a fallback reflection result
            return ReflectionResult(
                reflection_type=reflection_type,
                level=level,
                overall_score=5.0,
                insights=[f"Reflection failed: {str(e)}"],
                improvements=["Fix reflection execution errors"],
                metadata={"error": str(e)}
            )


# Pre-built reflection tasks
@task(
    name="reflect_on_response",
    description="Reflect on the quality of an agent response",
    output_format=OutputFormat.JSON
)
async def reflect_on_response(
    user_query: str,
    agent_response: str,
    level: str = "analytical",
    agent: Optional['Agent'] = None
) -> Dict[str, Any]:
    """Reflect on response quality."""
    engine = ReflectionEngine()
    level_enum = ReflectionLevel(level) if level in [l.value for l in ReflectionLevel] else ReflectionLevel.ANALYTICAL

    context = {
        "user_query": user_query,
        "agent_response": agent_response
    }

    result = await engine.reflect(
        ReflectionType.RESPONSE_QUALITY,
        level_enum,
        context,
        agent
    )

    return result.to_dict()


@task(
    name="reflect_on_goals",
    description="Reflect on goal achievement",
    output_format=OutputFormat.JSON
)
async def reflect_on_goals(
    goals: List[str],
    actions_taken: List[str],
    outcomes: List[str],
    level: str = "analytical",
    agent: Optional['Agent'] = None
) -> Dict[str, Any]:
    """Reflect on goal achievement."""
    engine = ReflectionEngine()
    level_enum = ReflectionLevel(level) if level in [l.value for l in ReflectionLevel] else ReflectionLevel.ANALYTICAL

    context = {
        "goals": goals,
        "actions_taken": actions_taken,
        "outcomes": outcomes
    }

    result = await engine.reflect(
        ReflectionType.GOAL_ACHIEVEMENT,
        level_enum,
        context,
        agent
    )

    return result.to_dict()


@task(
    name="analyze_errors",
    description="Analyze errors and failures for learning",
    output_format=OutputFormat.JSON
)
async def analyze_errors(
    errors: List[str],
    attempted_solutions: List[str],
    level: str = "analytical",
    agent: Optional['Agent'] = None
) -> Dict[str, Any]:
    """Analyze errors for learning."""
    engine = ReflectionEngine()
    level_enum = ReflectionLevel(level) if level in [l.value for l in ReflectionLevel] else ReflectionLevel.ANALYTICAL

    context = {
        "errors": errors,
        "attempted_solutions": attempted_solutions
    }

    result = await engine.reflect(
        ReflectionType.ERROR_ANALYSIS,
        level_enum,
        context,
        agent
    )

    return result.to_dict()
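
For orientation, a minimal usage sketch of the module above (not shipped in the wheel). It assumes the @task-decorated helpers remain directly awaitable, that provider credentials for the default "gpt-4o" reflection agent are configured, and that it runs under asyncio; the queries and context values are illustrative only.

import asyncio

from agnt5.reflection import (
    ReflectionEngine,
    ReflectionLevel,
    ReflectionType,
    reflect_on_response,
)


async def main() -> None:
    # Pre-built task: score one query/response pair; returns ReflectionResult.to_dict().
    report = await reflect_on_response(
        user_query="How do I rotate an API key?",  # illustrative input
        agent_response="Generate a new key, then revoke the old one.",
        level="surface",
    )
    print(report["overall_score"], report["improvements"])

    # Direct engine call; with agent=None the engine builds its own "reflection_agent".
    engine = ReflectionEngine()
    result = await engine.reflect(
        ReflectionType.PROCESS_EVALUATION,
        ReflectionLevel.METACOGNITIVE,
        context={"process_steps": ["plan", "search", "answer"], "tools_used": ["web_search"]},
    )
    print(result.to_dict()["criteria_scores"])


asyncio.run(main())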