attune-ai: attune_ai-2.1.4-py3-none-any.whl → attune_ai-2.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/cli/__init__.py +3 -55
- attune/cli/commands/batch.py +4 -12
- attune/cli/commands/cache.py +7 -15
- attune/cli/commands/provider.py +17 -0
- attune/cli/commands/routing.py +3 -1
- attune/cli/commands/setup.py +122 -0
- attune/cli/commands/tier.py +1 -3
- attune/cli/commands/workflow.py +31 -0
- attune/cli/parsers/cache.py +1 -0
- attune/cli/parsers/help.py +1 -3
- attune/cli/parsers/provider.py +7 -0
- attune/cli/parsers/routing.py +1 -3
- attune/cli/parsers/setup.py +7 -0
- attune/cli/parsers/status.py +1 -3
- attune/cli/parsers/tier.py +1 -3
- attune/cli_minimal.py +34 -28
- attune/cli_router.py +9 -7
- attune/cli_unified.py +3 -0
- attune/core.py +190 -0
- attune/dashboard/app.py +4 -2
- attune/dashboard/simple_server.py +3 -1
- attune/dashboard/standalone_server.py +7 -3
- attune/mcp/server.py +54 -102
- attune/memory/long_term.py +0 -2
- attune/memory/short_term/__init__.py +84 -0
- attune/memory/short_term/base.py +467 -0
- attune/memory/short_term/batch.py +219 -0
- attune/memory/short_term/caching.py +227 -0
- attune/memory/short_term/conflicts.py +265 -0
- attune/memory/short_term/cross_session.py +122 -0
- attune/memory/short_term/facade.py +655 -0
- attune/memory/short_term/pagination.py +215 -0
- attune/memory/short_term/patterns.py +271 -0
- attune/memory/short_term/pubsub.py +286 -0
- attune/memory/short_term/queues.py +244 -0
- attune/memory/short_term/security.py +300 -0
- attune/memory/short_term/sessions.py +250 -0
- attune/memory/short_term/streams.py +249 -0
- attune/memory/short_term/timelines.py +234 -0
- attune/memory/short_term/transactions.py +186 -0
- attune/memory/short_term/working.py +252 -0
- attune/meta_workflows/cli_commands/__init__.py +3 -0
- attune/meta_workflows/cli_commands/agent_commands.py +0 -4
- attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
- attune/meta_workflows/cli_commands/config_commands.py +0 -5
- attune/meta_workflows/cli_commands/memory_commands.py +0 -5
- attune/meta_workflows/cli_commands/template_commands.py +0 -5
- attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
- attune/meta_workflows/workflow.py +1 -1
- attune/models/adaptive_routing.py +4 -8
- attune/models/auth_cli.py +3 -9
- attune/models/auth_strategy.py +2 -4
- attune/models/provider_config.py +20 -1
- attune/models/telemetry/analytics.py +0 -2
- attune/models/telemetry/backend.py +0 -3
- attune/models/telemetry/storage.py +0 -2
- attune/orchestration/_strategies/__init__.py +156 -0
- attune/orchestration/_strategies/base.py +231 -0
- attune/orchestration/_strategies/conditional_strategies.py +373 -0
- attune/orchestration/_strategies/conditions.py +369 -0
- attune/orchestration/_strategies/core_strategies.py +491 -0
- attune/orchestration/_strategies/data_classes.py +64 -0
- attune/orchestration/_strategies/nesting.py +233 -0
- attune/orchestration/execution_strategies.py +58 -1567
- attune/orchestration/meta_orchestrator.py +1 -3
- attune/project_index/scanner.py +1 -3
- attune/project_index/scanner_parallel.py +7 -5
- attune/socratic_router.py +1 -3
- attune/telemetry/agent_coordination.py +9 -3
- attune/telemetry/agent_tracking.py +16 -3
- attune/telemetry/approval_gates.py +22 -5
- attune/telemetry/cli.py +3 -3
- attune/telemetry/commands/dashboard_commands.py +24 -8
- attune/telemetry/event_streaming.py +8 -2
- attune/telemetry/feedback_loop.py +10 -2
- attune/tools.py +1 -0
- attune/workflow_commands.py +1 -3
- attune/workflows/__init__.py +53 -10
- attune/workflows/autonomous_test_gen.py +160 -104
- attune/workflows/base.py +48 -664
- attune/workflows/batch_processing.py +2 -4
- attune/workflows/compat.py +156 -0
- attune/workflows/cost_mixin.py +141 -0
- attune/workflows/data_classes.py +92 -0
- attune/workflows/document_gen/workflow.py +11 -14
- attune/workflows/history.py +62 -37
- attune/workflows/llm_base.py +2 -4
- attune/workflows/migration.py +422 -0
- attune/workflows/output.py +3 -9
- attune/workflows/parsing_mixin.py +427 -0
- attune/workflows/perf_audit.py +3 -1
- attune/workflows/progress.py +10 -13
- attune/workflows/release_prep.py +5 -1
- attune/workflows/routing.py +0 -2
- attune/workflows/secure_release.py +2 -1
- attune/workflows/security_audit.py +19 -14
- attune/workflows/security_audit_phase3.py +28 -22
- attune/workflows/seo_optimization.py +29 -29
- attune/workflows/test_gen/test_templates.py +1 -4
- attune/workflows/test_gen/workflow.py +0 -2
- attune/workflows/test_gen_behavioral.py +7 -20
- attune/workflows/test_gen_parallel.py +6 -4
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/RECORD +119 -94
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
- attune_healthcare/monitors/monitoring/__init__.py +9 -9
- attune_llm/agent_factory/__init__.py +6 -6
- attune_llm/commands/__init__.py +10 -10
- attune_llm/commands/models.py +3 -3
- attune_llm/config/__init__.py +8 -8
- attune_llm/learning/__init__.py +3 -3
- attune_llm/learning/extractor.py +5 -3
- attune_llm/learning/storage.py +5 -3
- attune_llm/security/__init__.py +17 -17
- attune_llm/utils/tokens.py +3 -1
- attune/cli_legacy.py +0 -3957
- attune/memory/short_term.py +0 -2192
- attune/workflows/manage_docs.py +0 -87
- attune/workflows/test5.py +0 -125
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
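The headline change in 2.2.0 is structural: the 2,192-line `attune/memory/short_term.py` monolith is deleted and replaced by the `attune/memory/short_term/` package (a facade plus feature modules), `attune/orchestration/execution_strategies.py` shrinks by ~1,500 lines into the new `_strategies/` package, and `attune/cli_legacy.py` is removed outright. A minimal smoke test for downstream users, assuming (not confirmed by this diff) that the new package's `__init__.py` re-exports the facade class `RedisShortTermMemory` referenced in the `attune/core.py` type hints:

```python
# Hypothetical smoke test for the short_term refactor -- assumes the new
# package re-exports RedisShortTermMemory from its facade module.
import attune.memory.short_term as short_term

# The module should now be a package (directory), not a flat file.
assert hasattr(short_term, "__path__"), "short_term is still a flat module"

# If the facade re-export holds, old import paths keep working unchanged.
cls = getattr(short_term, "RedisShortTermMemory", None)
print("facade re-export present:", cls is not None)
```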
attune/cli_router.py
CHANGED
@@ -195,9 +195,7 @@ class HybridRouter:
         with open(self.preferences_path, "w") as f:
             yaml.dump(data, f, default_flow_style=False)
 
-    async def route(
-        self, user_input: str, context: dict[str, Any] | None = None
-    ) -> dict[str, Any]:
+    async def route(self, user_input: str, context: dict[str, Any] | None = None) -> dict[str, Any]:
         """Route user input to appropriate command or workflow.
 
         Args:
@@ -242,7 +240,8 @@ class HybridRouter:
                 "args": args,
                 "original": command,
                 "confidence": 1.0,
-                "instruction": f"Use Skill tool with skill='{skill}'"
+                "instruction": f"Use Skill tool with skill='{skill}'"
+                + (f", args='{args}'" if args else ""),
             }
 
     def _infer_command(self, keyword: str) -> dict[str, Any] | None:
@@ -271,7 +270,8 @@
                 "original": keyword,
                 "confidence": pref.confidence,
                 "source": "learned",
-                "instruction": f"Use Skill tool with skill='{pref.skill}'"
+                "instruction": f"Use Skill tool with skill='{pref.skill}'"
+                + (f", args='{pref.args}'" if pref.args else ""),
             }
 
         # Check built-in keyword map
@@ -284,7 +284,8 @@
                 "original": keyword,
                 "confidence": 0.9,
                 "source": "builtin",
-                "instruction": f"Use Skill tool with skill='{skill}'"
+                "instruction": f"Use Skill tool with skill='{skill}'"
+                + (f", args='{args}'" if args else ""),
             }
 
         # Check for hub names (show hub menu)
@@ -329,7 +330,8 @@
             "reasoning": decision.reasoning,
             "original": text,
             "source": "natural_language",
-            "instruction": f"Use Skill tool with skill='{skill}'"
+            "instruction": f"Use Skill tool with skill='{skill}'"
+            + (f", args='{args}'" if args else ""),
         }
 
     def _workflow_to_skill(self, workflow: str) -> tuple[str, str]:
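Every routing branch above gains the same suffix: the Skill-tool instruction now carries the parsed args when present. A minimal sketch of the new string construction (the skill and args values below are illustrative, not taken from the package):

```python
# Sketch of the instruction string built in HybridRouter (attune/cli_router.py).
def build_instruction(skill: str, args: str = "") -> str:
    return f"Use Skill tool with skill='{skill}'" + (f", args='{args}'" if args else "")

print(build_instruction("test-gen"))            # Use Skill tool with skill='test-gen'
print(build_instruction("test-gen", "--fast"))  # Use Skill tool with skill='test-gen', args='--fast'
```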
attune/cli_unified.py
CHANGED
attune/core.py
CHANGED
@@ -25,6 +25,23 @@ if TYPE_CHECKING:
     from .pattern_library import PatternLibrary
 
 
+@dataclass
+class InteractionResponse:
+    """Response from an interaction with EmpathyOS.
+
+    Attributes:
+        level: Empathy level used (1-5)
+        response: The response text
+        confidence: Confidence score (0.0 to 1.0)
+        predictions: Optional list of predictions (for Level 4+)
+    """
+
+    level: int
+    response: str
+    confidence: float = 1.0
+    predictions: list[str] | None = None
+
+
 @dataclass
 class CollaborationState:
     """Stock & Flow model of AI-human collaboration
@@ -68,6 +85,11 @@ class CollaborationState:
         # Track trajectory
         self.trust_trajectory.append(self.trust_level)
 
+    @property
+    def current_level(self) -> float:
+        """Get current trust level (alias for trust_level)."""
+        return self.trust_level
+
 
 class EmpathyOS:
     """Empathy Operating System for AI-Human Collaboration.
@@ -130,6 +152,7 @@
         shared_library: PatternLibrary | None = None,
         short_term_memory: RedisShortTermMemory | None = None,
         access_tier: AccessTier = AccessTier.CONTRIBUTOR,
+        persistence_enabled: bool = True,
     ):
         """Initialize EmpathyOS
 
@@ -146,11 +169,13 @@
                 staging, and conflict resolution.
             access_tier: Access tier for this agent (Observer, Contributor, Validator, Steward).
                 Determines what operations the agent can perform on shared memory.
+            persistence_enabled: Whether to enable pattern/state persistence (default: True)
 
         """
         self.user_id = user_id
         self.target_level = target_level
         self.confidence_threshold = confidence_threshold
+        self.persistence_enabled = persistence_enabled
         self.logger = logger or logging.getLogger(__name__)
         self.shared_library = shared_library
 
@@ -1263,6 +1288,171 @@
         """Reset collaboration state (new session)"""
         self.collaboration_state = CollaborationState()
 
+    def interact(
+        self,
+        user_id: str,
+        user_input: str,
+        context: dict | None = None,
+    ) -> InteractionResponse:
+        """Process a user interaction and return a response.
+
+        This is a synchronous convenience method for simple interactions.
+        For full empathy level control, use the level_X_* async methods.
+
+        Args:
+            user_id: User identifier
+            user_input: The user's input text
+            context: Optional context dictionary
+
+        Returns:
+            InteractionResponse with level, response, confidence, and optional predictions
+
+        Example:
+            >>> empathy = EmpathyOS(user_id="dev_123")
+            >>> response = empathy.interact(
+            ...     user_id="dev_123",
+            ...     user_input="How do I optimize this query?",
+            ...     context={"domain": "database"}
+            ... )
+            >>> print(f"[L{response.level}] {response.response}")
+
+        """
+        context = context or {}
+
+        # Determine appropriate empathy level based on trust and context
+        current_trust = self.collaboration_state.trust_level
+        level = self._determine_interaction_level(current_trust, context)
+
+        # Generate response based on level
+        response_text = self._generate_response(user_input, context, level)
+
+        # For Level 4+, generate predictions
+        predictions = None
+        if level >= 4:
+            predictions = self._generate_predictions(user_input, context)
+
+        # Calculate confidence based on context completeness and trust
+        confidence = self._calculate_confidence(context, current_trust)
+
+        # Update interaction tracking
+        self.collaboration_state.total_interactions += 1
+        self.current_empathy_level = level
+
+        return InteractionResponse(
+            level=level,
+            response=response_text,
+            confidence=confidence,
+            predictions=predictions,
+        )
+
+    def record_success(self, success: bool) -> None:
+        """Record the outcome of an interaction for trust tracking.
+
+        Call this after receiving user feedback on whether an interaction
+        was helpful. Updates the collaboration state's trust level.
+
+        Args:
+            success: True if the interaction was helpful, False otherwise
+
+        Example:
+            >>> response = empathy.interact(...)
+            >>> # After getting user feedback
+            >>> feedback = input("Was this helpful? (y/n): ")
+            >>> empathy.record_success(success=(feedback.lower() == 'y'))
+            >>> print(f"Trust level: {empathy.collaboration_state.trust_level:.0%}")
+
+        """
+        outcome = "success" if success else "failure"
+        self.collaboration_state.update_trust(outcome)
+
+        self.logger.debug(
+            f"Recorded interaction outcome: {outcome}",
+            extra={
+                "user_id": self.user_id,
+                "success": success,
+                "new_trust_level": self.collaboration_state.trust_level,
+            },
+        )
+
+    def _determine_interaction_level(self, trust: float, context: dict) -> int:
+        """Determine appropriate empathy level for interaction.
+
+        Args:
+            trust: Current trust level (0.0 to 1.0)
+            context: Interaction context
+
+        Returns:
+            Empathy level (1-5)
+
+        """
+        # Start conservative, increase with trust
+        if trust < 0.3:
+            return 1  # Reactive only
+        elif trust < 0.5:
+            return 2  # Guided
+        elif trust < 0.7:
+            return min(3, self.target_level)  # Proactive
+        elif trust < 0.85:
+            return min(4, self.target_level)  # Anticipatory
+        else:
+            return min(5, self.target_level)  # Systems
+
+    def _generate_response(self, user_input: str, context: dict, level: int) -> str:
+        """Generate response based on empathy level.
+
+        **Extension Point**: Override this method to implement domain-specific
+        response generation (e.g., using LLMs, templates, or rule engines).
+
+        Args:
+            user_input: User's input text
+            context: Interaction context
+            level: Empathy level to use
+
+        Returns:
+            Response text
+
+        """
+        # Default implementation - override for real logic
+        level_descriptions = {
+            1: "Reactive response",
+            2: "Guided response with clarification",
+            3: "Proactive response anticipating needs",
+            4: "Anticipatory response predicting future needs",
+            5: "Systems-level response addressing root patterns",
+        }
+        return f"[Level {level}] {level_descriptions.get(level, 'Response')}: Processing '{user_input}'"
+
+    def _generate_predictions(self, user_input: str, context: dict) -> list[str]:
+        """Generate predictions for Level 4+ interactions.
+
+        **Extension Point**: Override to implement domain-specific prediction logic.
+
+        Args:
+            user_input: User's input text
+            context: Interaction context
+
+        Returns:
+            List of prediction strings
+
+        """
+        # Default implementation - override for real predictions
+        return ["Potential follow-up: Related topics may include..."]
+
+    def _calculate_confidence(self, context: dict, trust: float) -> float:
+        """Calculate confidence score for response.
+
+        Args:
+            context: Interaction context
+            trust: Current trust level
+
+        Returns:
+            Confidence score (0.0 to 1.0)
+
+        """
+        # Base confidence on context completeness and trust
+        context_score = min(1.0, len(context) * 0.1) if context else 0.5
+        return (context_score + trust) / 2
+
     # =========================================================================
     # SHORT-TERM MEMORY (Redis-backed Multi-Agent Coordination)
     # =========================================================================
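The core.py additions give `EmpathyOS` a synchronous entry point. Putting the new pieces together, following the usage shown in the added docstrings (the `attune.core` import path is inferred from the file location, not confirmed by this diff):

```python
from attune.core import EmpathyOS

empathy = EmpathyOS(user_id="dev_123")

# interact() picks a level from current trust: <0.3 -> 1, <0.5 -> 2,
# <0.7 -> 3, <0.85 -> 4, else 5 -- always capped at target_level.
response = empathy.interact(
    user_id="dev_123",
    user_input="How do I optimize this query?",
    context={"domain": "database"},
)
print(f"[L{response.level}] {response.response} (confidence {response.confidence:.2f})")

# Close the loop: recorded outcomes move trust_level, which moves future levels.
empathy.record_success(success=True)
print(f"Trust level: {empathy.collaboration_state.trust_level:.0%}")
```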
attune/dashboard/app.py
CHANGED
@@ -377,7 +377,9 @@ async def get_underperforming_stages(threshold: float = 0.7):
     all_underperforming = []
 
     for workflow in workflows:
-        underperforming = feedback.get_underperforming_stages(workflow, quality_threshold=threshold)
+        underperforming = feedback.get_underperforming_stages(
+            workflow, quality_threshold=threshold
+        )
         for stage_name, stats in underperforming:
             all_underperforming.append(
                 {
@@ -472,7 +474,7 @@ async def websocket_endpoint(websocket: WebSocket):
     try:
         while True:
             # Receive ping to keep connection alive
-
+            _ = await websocket.receive_text()
 
             # Send updates (in production, this would stream from Redis)
             coordinator = HeartbeatCoordinator()

attune/dashboard/simple_server.py
CHANGED

@@ -378,7 +378,9 @@ class DashboardHandler(BaseHTTPRequestHandler):
         all_underperforming = []
 
         for workflow in workflows:
-            underperforming = feedback.get_underperforming_stages(workflow, quality_threshold=threshold)
+            underperforming = feedback.get_underperforming_stages(
+                workflow, quality_threshold=threshold
+            )
             for stage_name, stats in underperforming:
                 all_underperforming.append(
                     {

attune/dashboard/standalone_server.py
CHANGED

@@ -305,7 +305,11 @@ class StandaloneDashboardHandler(BaseHTTPRequestHandler):
 
             result.append(
                 {
-                    "event_id": entry_id.decode("utf-8") if isinstance(entry_id, bytes) else entry_id,
+                    "event_id": (
+                        entry_id.decode("utf-8")
+                        if isinstance(entry_id, bytes)
+                        else entry_id
+                    ),
                     "event_type": event_type,
                     "timestamp": timestamp,
                     "data": data,
@@ -423,7 +427,7 @@
 
         # Calculate stats
         result = []
-        for group_key, group in feedback_groups.items():
+        for _group_key, group in feedback_groups.items():
            qualities = group["qualities"]
            if qualities:
                avg_quality = sum(qualities) / len(qualities)
@@ -476,7 +480,7 @@
 
         # Find underperforming stages
         result = []
-        for group_key, group in feedback_groups.items():
+        for _group_key, group in feedback_groups.items():
            qualities = group["qualities"]
            if qualities:
                avg_quality = sum(qualities) / len(qualities)
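The `event_id` change in `standalone_server.py` is defensive: redis-py returns `bytes` for stream entry IDs unless the client was created with `decode_responses=True`. A small sketch of the behavior the guard normalizes (assumes a local Redis instance; the stream name is made up):

```python
import redis

r = redis.Redis()  # decode_responses=False by default, so IDs come back as bytes
entry_id = r.xadd("demo-stream", {"event_type": "ping"})

# Same normalization as the diff above
event_id = entry_id.decode("utf-8") if isinstance(entry_id, bytes) else entry_id
print(type(entry_id), event_id)  # <class 'bytes'> 1717171717171-0 (example ID)
```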