deepagents_printshop-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. agents/content_editor/__init__.py +1 -0
  2. agents/content_editor/agent.py +279 -0
  3. agents/content_editor/content_reviewer.py +327 -0
  4. agents/content_editor/versioned_agent.py +455 -0
  5. agents/latex_specialist/__init__.py +1 -0
  6. agents/latex_specialist/agent.py +531 -0
  7. agents/latex_specialist/latex_analyzer.py +510 -0
  8. agents/latex_specialist/latex_optimizer.py +1192 -0
  9. agents/qa_orchestrator/__init__.py +1 -0
  10. agents/qa_orchestrator/agent.py +603 -0
  11. agents/qa_orchestrator/langgraph_workflow.py +733 -0
  12. agents/qa_orchestrator/pipeline_types.py +72 -0
  13. agents/qa_orchestrator/quality_gates.py +495 -0
  14. agents/qa_orchestrator/workflow_coordinator.py +139 -0
  15. agents/research_agent/__init__.py +1 -0
  16. agents/research_agent/agent.py +258 -0
  17. agents/research_agent/llm_report_generator.py +1023 -0
  18. agents/research_agent/report_generator.py +536 -0
  19. agents/visual_qa/__init__.py +1 -0
  20. agents/visual_qa/agent.py +410 -0
  21. deepagents_printshop-0.1.0.dist-info/METADATA +744 -0
  22. deepagents_printshop-0.1.0.dist-info/RECORD +37 -0
  23. deepagents_printshop-0.1.0.dist-info/WHEEL +4 -0
  24. deepagents_printshop-0.1.0.dist-info/entry_points.txt +2 -0
  25. deepagents_printshop-0.1.0.dist-info/licenses/LICENSE +86 -0
  26. tools/__init__.py +1 -0
  27. tools/change_tracker.py +419 -0
  28. tools/content_type_loader.py +171 -0
  29. tools/graph_generator.py +281 -0
  30. tools/latex_generator.py +374 -0
  31. tools/llm_latex_generator.py +678 -0
  32. tools/magazine_layout.py +462 -0
  33. tools/pattern_injector.py +250 -0
  34. tools/pattern_learner.py +477 -0
  35. tools/pdf_compiler.py +386 -0
  36. tools/version_manager.py +346 -0
  37. tools/visual_qa.py +799 -0
@@ -0,0 +1 @@
+ """QA Orchestrator Agent for DeepAgents PrintShop."""
@@ -0,0 +1,603 @@
+ """
+ QA Orchestrator Agent
+
+ Master orchestration agent that coordinates all specialized QA agents in an automated pipeline
+ with quality gates, decision logic, and iterative improvement workflows.
+ """
+
+ import os
+ import sys
+ import json
+ import shutil
+ import uuid
+ from pathlib import Path
+ from typing import Dict, List, Optional
+ from datetime import datetime
+
+ # Add project root to path
+ project_root = Path(__file__).parent.parent.parent
+ sys.path.insert(0, str(project_root))
+
+ from agents.qa_orchestrator.quality_gates import QualityGateManager, QualityThresholds
+ from agents.qa_orchestrator.workflow_coordinator import WorkflowCoordinator
+ from agents.qa_orchestrator.pipeline_types import AgentResult
+ from agents.qa_orchestrator.langgraph_workflow import compile_qa_pipeline
+ from tools.version_manager import VersionManager
+
+
+ class QAOrchestratorAgent:
+     """
+     QA Orchestrator Agent - The master coordinator for multi-agent quality assurance.
+
+     Features:
+     - Multi-agent workflow orchestration
+     - Quality gate enforcement
+     - Iterative improvement cycles
+     - Intelligent escalation logic
+     - Comprehensive reporting
+     """
+
+     def __init__(self, memory_dir: str = ".deepagents/qa_orchestrator/memories",
+                  content_source: str = "research_report"):
+         """
+         Initialize the QA orchestrator agent.
+
+         Args:
+             memory_dir: Directory for storing agent memories
+             content_source: Content source folder (e.g., 'research_report', 'magazine')
+         """
+         self.memory_dir = Path(memory_dir)
+         self.memory_dir.mkdir(parents=True, exist_ok=True)
+         self.content_source = content_source
+
+         # Initialize components
+         self.workflow_coordinator = WorkflowCoordinator(content_source=content_source)
+         self.quality_gate_manager = QualityGateManager()
+         self.version_manager = VersionManager()
+
+         # Paths
+         self.reports_dir = Path("artifacts/agent_reports/orchestration")
+         self.reports_dir.mkdir(parents=True, exist_ok=True)
+
+         # Initialize memory
+         self.init_memory()
+
+     def init_memory(self):
+         """Initialize agent memory files."""
+         memory_files = {
+             "quality_thresholds.md": """# Quality Thresholds and Gates
+
+ ## Content Quality Standards
+ - **Minimum Acceptable**: 80/100 (good baseline quality)
+ - **Good Quality**: 85/100 (professional standard)
+ - **Excellent Quality**: 90/100 (publication ready)
+
+ ## LaTeX Quality Standards
+ - **Minimum Acceptable**: 85/100 (professional formatting)
+ - **Good Quality**: 90/100 (high-quality typography)
+ - **Excellent Quality**: 95/100 (publication perfect)
+
+ ## Component Thresholds (out of 25 each)
+ - **Structure Minimum**: 22/25 (well-organized document)
+ - **Typography Minimum**: 20/25 (professional appearance)
+ - **Tables/Figures Minimum**: 20/25 (clear presentation)
+ - **Best Practices Minimum**: 20/25 (standards compliance)
+
+ ## Workflow Control
+ - **Overall Target**: 85/100 (combined quality threshold)
+ - **Human Handoff**: 90/100 (ready for human review)
+ - **Maximum Iterations**: 3 cycles before escalation
+ - **Convergence Threshold**: <2 points improvement = plateau
+
+ ## Escalation Rules
+ - Quality plateau reached after maximum iterations
+ - Persistent agent failures
+ - Quality regression detected
+ - Time limits exceeded
+ """,
+             "workflow_patterns.md": """# Successful Workflow Patterns
+
+ ## Standard Success Pattern
+ 1. **Content Review**: v0 → v1_content_edited (target: 80+)
+ 2. **LaTeX Optimization**: v1 → v2_latex_optimized (target: 85+)
+ 3. **Quality Assessment**: Overall score 85+ → Human handoff
+
+ ## Iteration Patterns
+ ### Content Quality Issues
+ - Run additional content editor cycles
+ - Focus on grammar, readability, structure
+ - Target: Consistent 5+ point improvements
+
+ ### LaTeX Quality Issues
+ - Re-run LaTeX specialist with higher optimization
+ - Address structure, typography, formatting
+ - Target: Professional formatting standards
+
+ ### Combined Quality Issues
+ - Full pipeline iteration
+ - Coordinate improvements across agents
+ - Balance content quality vs. formatting quality
+
+ ## Escalation Patterns
+ ### Plateau Detection
+ - <2 points improvement over 2 iterations
+ - Quality good enough (85+) but not excellent
+ - → Human review recommended
+
+ ### Persistent Failures
+ - Agent failures after retry attempts
+ - Quality regression between iterations
+ - → Human intervention required
+
+ ### Time/Resource Limits
+ - Maximum iterations reached (3 cycles)
+ - Processing time exceeds limits
+ - → Escalate with current best quality
+ """,
+             "escalation_rules.md": """# Human Escalation Decision Rules
+
+ ## Automatic Escalation Triggers
+ 1. **Quality Achievement**: Score ≥90 → Immediate handoff
+ 2. **Target Achievement**: Score ≥85 at iteration limit → Handoff
+ 3. **Plateau Detection**: <2 point improvement → Review needed
+ 4. **Agent Failures**: Persistent processing failures → Intervention
+
+ ## Escalation Types
+ ### Success Escalation (Quality Achieved)
+ - Quality meets or exceeds targets
+ - Ready for human review and approval
+ - Low priority for human reviewer
+
+ ### Plateau Escalation (Good but Stuck)
+ - Quality acceptable but not improving
+ - May benefit from human insight
+ - Medium priority for human reviewer
+
+ ### Failure Escalation (Issues Detected)
+ - Persistent quality issues
+ - Agent processing failures
+ - Technical problems detected
+ - High priority for human intervention
+
+ ## Escalation Information Package
+ - Complete quality progression report
+ - Version history and change tracking
+ - Agent performance analysis
+ - Specific issues and recommendations
+ - Estimated time for human review
+ """,
+             "pipeline_optimization.md": """# Pipeline Optimization Strategies
+
+ ## Agent Sequencing
+ - **Content First**: Always start with content quality
+ - **LaTeX Second**: Build on solid content foundation
+ - **Quality Gates**: Enforce thresholds at each step
+
+ ## Iteration Strategies
+ ### Focused Iteration
+ - Target specific agent when below threshold
+ - More efficient than full pipeline re-run
+ - Good for isolated quality issues
+
+ ### Full Pipeline Iteration
+ - Re-run complete workflow
+ - Better for systematic quality issues
+ - Necessary when agents are interdependent
+
+ ### Adaptive Thresholds
+ - Lower thresholds for difficult content
+ - Higher thresholds for simple content
+ - Context-aware quality expectations
+
+ ## Performance Optimization
+ - **Version Reuse**: Skip processing if good version exists
+ - **Incremental Processing**: Only process changed content
+ - **Parallel Execution**: Run independent analyses simultaneously
+ - **Resource Management**: Balance quality vs. processing time
+
+ ## Quality vs. Efficiency Trade-offs
+ - **Fast Track**: Single iteration for time-sensitive content
+ - **Standard Track**: 2-3 iterations for normal content
+ - **Premium Track**: Unlimited iterations for critical content
+ """
+         }
+
+         for filename, content in memory_files.items():
+             file_path = self.memory_dir / filename
+             if not file_path.exists():
+                 with open(file_path, 'w', encoding='utf-8') as f:
+                     f.write(content)
+
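The convergence rule seeded above in quality_thresholds.md ("<2 points improvement = plateau", judged over two iterations) and the 85-point overall target reduce to small numeric checks. A minimal sketch of both, with hypothetical helper names; the shipped logic lives in quality_gates.py, which is not shown in this hunk:

```python
from typing import Sequence

def passes_overall_gate(overall_score: float, target: float = 85.0) -> bool:
    """Hypothetical helper: the 'Overall Target: 85/100' gate from the notes."""
    return overall_score >= target

def is_plateau(overall_scores: Sequence[float],
               window: int = 2, min_delta: float = 2.0) -> bool:
    """Hypothetical helper: True when the last `window` iterations improved
    by less than `min_delta` points, per the '<2 points improvement over
    2 iterations' rule in the memory notes."""
    if len(overall_scores) <= window:
        return False  # not enough history to judge convergence
    recent = overall_scores[-(window + 1):]
    return (recent[-1] - recent[0]) < min_delta

# 84 -> 85 -> 85.5 gains only 1.5 points over two iterations: a plateau,
# but the overall gate is met, i.e. the "good but stuck" escalation path.
assert is_plateau([78.0, 84.0, 85.0, 85.5])
assert passes_overall_gate(85.5)
```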
+     def _purge_intermediate_artifacts(self):
+         """Remove intermediate artifacts from previous runs.
+
+         Clears reviewed_content/, version_history/, and agent_reports/ while
+         preserving .gitkeep files. Sample content and .deepagents memory are
+         left untouched.
+         """
+         dirs_to_purge = [
+             Path("artifacts/reviewed_content"),
+             Path("artifacts/version_history"),
+             Path("artifacts/agent_reports"),
+         ]
+
+         for dir_path in dirs_to_purge:
+             if not dir_path.exists():
+                 continue
+             for child in dir_path.iterdir():
+                 if child.name == ".gitkeep":
+                     continue
+                 if child.is_symlink():
+                     child.unlink()
+                 elif child.is_dir():
+                     shutil.rmtree(child)
+                 else:
+                     child.unlink()
+             print(f" Purged {dir_path}/")
+
+         # Re-create report subdirectories
+         self.reports_dir.mkdir(parents=True, exist_ok=True)
+         Path("artifacts/agent_reports/quality").mkdir(parents=True, exist_ok=True)
+
+     def orchestrate_qa_pipeline(self,
+                                 starting_version: str = "v0_original",
+                                 workflow_id: Optional[str] = None,
+                                 quality_thresholds: Optional[QualityThresholds] = None,
+                                 output_dir: Optional[str] = None) -> Dict:
+         """
+         Orchestrate complete QA pipeline from start to finish using LangGraph.
+
+         Args:
+             starting_version: Version to start the pipeline from
+             workflow_id: Unique identifier for this workflow (auto-generated if None)
+             quality_thresholds: Custom quality thresholds (uses defaults if None)
+             output_dir: Output directory for artifacts (defaults to artifacts/output/{workflow_id})
+
+         Returns:
+             Complete pipeline execution results
+         """
+         # Generate workflow ID if not provided
+         if workflow_id is None:
+             import uuid as _uuid
+             workflow_id = _uuid.uuid4().hex[:8]
+
+         # Resolve output directory
+         if output_dir is None:
+             output_dir = f"artifacts/output/{workflow_id}"
+
+         print(f"QA ORCHESTRATOR: Starting Pipeline {workflow_id} (LangGraph)")
+         print(f"Output directory: {output_dir}")
+         print("=" * 70)
+
+         # Purge intermediate artifacts from previous runs
+         self._purge_intermediate_artifacts()
+
+         # Update quality thresholds if provided
+         if quality_thresholds:
+             self.quality_gate_manager.thresholds = quality_thresholds
+
+         # Build and run LangGraph pipeline
+         app = compile_qa_pipeline()
+
+         initial_state = {
+             "workflow_id": workflow_id,
+             "content_source": self.content_source,
+             "starting_version": starting_version,
+             "current_version": starting_version,
+             "current_stage": "initialization",
+             "iterations_completed": 0,
+             "success": False,
+             "human_handoff": False,
+             "escalated": False,
+             "start_time": datetime.now().isoformat(),
+             "end_time": None,
+             "total_processing_time": None,
+             "agent_results": [],
+             "quality_assessments": [],
+             "quality_evaluations": [],
+             "output_dir": output_dir,
+             "agent_context": {},
+         }
+
+         final_state = app.invoke(
+             initial_state,
+             config={
+                 "configurable": {"thread_id": workflow_id},
+                 "recursion_limit": 30,
+             },
+         )
+
+         # Compile results directly from PipelineState
+         results = self.compile_pipeline_results(final_state)
+
+         # Save pipeline report
+         self.save_pipeline_report(results, workflow_id)
+
+         print("=" * 70)
+         print(f"QA ORCHESTRATOR: Pipeline {workflow_id} Complete")
+         self.print_pipeline_summary(results)
+
+         return results
+
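For reference, main() below drives this method in exactly this way; a minimal usage sketch (default thresholds are used because the QualityThresholds field names are not visible in this hunk, and "demo1234" is an illustrative workflow ID):

```python
from agents.qa_orchestrator.agent import QAOrchestratorAgent

# One full LangGraph pipeline pass over the bundled sample content.
# Note that orchestrate_qa_pipeline() purges intermediate artifacts first.
agent = QAOrchestratorAgent(content_source="research_report")
results = agent.orchestrate_qa_pipeline(
    starting_version="v0_original",
    workflow_id="demo1234",  # auto-generated when omitted
)
print(results["pipeline_outcome"]["human_handoff"])
```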
+     def compile_pipeline_results(self, state: Dict) -> Dict:
+         """
+         Compile comprehensive pipeline results from PipelineState dict.
+
+         Args:
+             state: Final PipelineState dict from LangGraph.
+
+         Returns:
+             Complete pipeline results dictionary.
+         """
+         # Reconstruct AgentResults for performance analysis
+         agent_results = [AgentResult.from_dict(r) for r in state.get("agent_results", [])]
+
+         # Get workflow summary from coordinator (reads state dict directly)
+         workflow_summary = self.workflow_coordinator.get_workflow_summary(state)
+
+         # Quality data (already dicts in state)
+         quality_assessments = state.get("quality_assessments", [])
+         quality_evaluations = state.get("quality_evaluations", [])
+         final_assessment = quality_assessments[-1] if quality_assessments else None
+
+         # Compute timing
+         total_processing_time = state.get("total_processing_time")
+         if total_processing_time is None and state.get("end_time") and state.get("start_time"):
+             try:
+                 start = datetime.fromisoformat(state["start_time"])
+                 end = datetime.fromisoformat(state["end_time"])
+                 total_processing_time = (end - start).total_seconds()
+             except (ValueError, TypeError):
+                 total_processing_time = 0.0
+
+         # Version data
+         version_stats = self.version_manager.get_version_stats()
+         final_version = state.get("current_version")
+
+         results = {
+             "pipeline_metadata": {
+                 "workflow_id": state.get("workflow_id", ""),
+                 "execution_timestamp": state.get("start_time"),
+                 "orchestrator_version": "2.0",
+                 "pipeline_duration": total_processing_time or 0.0,
+                 "output_dir": state.get("output_dir"),
+             },
+             "workflow_execution": workflow_summary,
+             "quality_results": {
+                 "final_assessment": final_assessment,
+                 "quality_progression": quality_assessments,
+                 "gate_evaluations": quality_evaluations,
+                 "quality_thresholds": self.quality_gate_manager.thresholds.__dict__,
+             },
+             "version_management": {
+                 "starting_version": state.get("starting_version"),
+                 "final_version": final_version,
+                 "versions_created": [r.version_created for r in agent_results if r.success],
+                 "version_lineage": self.version_manager.get_version_lineage(final_version) if final_version else [],
+                 "repository_stats": version_stats,
+             },
+             "agent_performance": {
+                 "agents_executed": len(agent_results),
+                 "successful_executions": len([r for r in agent_results if r.success]),
+                 "total_processing_time": sum(r.processing_time for r in agent_results),
+                 "average_processing_time": (
+                     sum(r.processing_time for r in agent_results) / len(agent_results)
+                     if agent_results else 0
+                 ),
+                 "agent_details": [r.to_dict() for r in agent_results],
+             },
+             "pipeline_outcome": {
+                 "success": state.get("success", False),
+                 "human_handoff": state.get("human_handoff", False),
+                 "escalated": state.get("escalated", False),
+                 "iterations_completed": state.get("iterations_completed", 0),
+                 "final_stage": state.get("current_stage", "unknown"),
+                 "ready_for_review": state.get("human_handoff", False) or state.get("escalated", False),
+             },
+         }
+
+         return results
+
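The dict assembled above has a fixed top-level shape (pipeline_metadata, workflow_execution, quality_results, version_management, agent_performance, pipeline_outcome). A short sketch of reading the headline numbers back out, with keys taken from the construction above:

```python
# `results` as returned by compile_pipeline_results(final_state)
final = results["quality_results"]["final_assessment"]  # None if no assessments ran
if final is not None:
    print(f"overall: {final.get('overall_score', 'N/A')}/100")

perf = results["agent_performance"]
print(f"{perf['successful_executions']}/{perf['agents_executed']} agents succeeded "
      f"in {perf['total_processing_time']:.2f}s")
print("ready for review:", results["pipeline_outcome"]["ready_for_review"])
```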
+     def save_pipeline_report(self, results: Dict, workflow_id: str):
+         """Save comprehensive pipeline report."""
+         # JSON report
+         json_path = self.reports_dir / f"{workflow_id}_pipeline_report.json"
+         with open(json_path, 'w', encoding='utf-8') as f:
+             json.dump(results, f, indent=2, ensure_ascii=False, default=str)
+
+         # Markdown report
+         md_path = self.reports_dir / f"{workflow_id}_pipeline_summary.md"
+         self.create_pipeline_markdown(results, md_path)
+
+     def create_pipeline_markdown(self, results: Dict, output_path: Path):
+         """Create human-readable pipeline summary."""
+         metadata = results["pipeline_metadata"]
+         workflow = results["workflow_execution"]
+         quality = results["quality_results"]
+         outcome = results["pipeline_outcome"]
+
+         content = f"""# QA Pipeline Report: {metadata["workflow_id"]}
+
+ **Generated:** {metadata["execution_timestamp"]}
+ **Pipeline Duration:** {metadata["pipeline_duration"]:.2f} seconds
+ **Orchestrator Version:** {metadata["orchestrator_version"]}
+
+ ## Pipeline Outcome
+
+ | Metric | Result |
+ |--------|--------|
+ | **Success** | {'Yes' if outcome['success'] else 'No'} |
+ | **Human Handoff** | {'Ready' if outcome['human_handoff'] else 'Not Ready'} |
+ | **Escalated** | {'Yes' if outcome['escalated'] else 'No'} |
+ | **Iterations** | {outcome['iterations_completed']} |
+ | **Final Stage** | {outcome['final_stage']} |
+
+ ## Quality Results
+
+ """
+
+         final_assessment = quality["final_assessment"]
+         if final_assessment:
+             content += f"""### Final Quality Assessment
+ - **Overall Score:** {final_assessment.get('overall_score', 'N/A')}/100
+ - **Content Score:** {final_assessment.get('content_score', 'N/A')}/100
+ - **LaTeX Score:** {final_assessment.get('latex_score', 'N/A')}/100
+
+ ### LaTeX Component Scores
+ - **Structure:** {final_assessment.get('latex_structure', 'N/A')}/25
+ - **Typography:** {final_assessment.get('latex_typography', 'N/A')}/25
+ - **Tables/Figures:** {final_assessment.get('latex_tables_figures', 'N/A')}/25
+ - **Best Practices:** {final_assessment.get('latex_best_practices', 'N/A')}/25
+
+ """
+
+         # Quality progression
+         quality_progression = quality["quality_progression"]
+         if quality_progression:
+             content += f"""### Quality Progression
+ | Iteration | Content Score | LaTeX Score | Overall Score |
+ |-----------|---------------|-------------|---------------|
+ """
+             for i, assessment in enumerate(quality_progression, 1):
+                 content_score = assessment.get('content_score', 'N/A')
+                 latex_score = assessment.get('latex_score', 'N/A')
+                 overall_score = assessment.get('overall_score', 'N/A')
+                 content += f"| {i} | {content_score} | {latex_score} | {overall_score} |\n"
+
+         content += f"""
+ ## Version Management
+
+ ### Version Progression
+ - **Starting Version:** {results["version_management"]["starting_version"]}
+ - **Final Version:** {results["version_management"]["final_version"]}
+ - **Versions Created:** {len(results["version_management"]["versions_created"])}
+
+ ### Version Lineage
+ {' -> '.join(results["version_management"]["version_lineage"])}
+
+ ## Agent Performance
+
+ ### Execution Summary
+ - **Agents Executed:** {results["agent_performance"]["agents_executed"]}
+ - **Successful Executions:** {results["agent_performance"]["successful_executions"]}
+ - **Total Processing Time:** {results["agent_performance"]["total_processing_time"]:.2f}s
+ - **Average Processing Time:** {results["agent_performance"]["average_processing_time"]:.2f}s
+
+ ### Agent Details
+ """
+
+         for agent_detail in results["agent_performance"]["agent_details"]:
+             agent_type = agent_detail["agent_type"]
+             success = "Yes" if agent_detail["success"] else "No"
+             quality_score = agent_detail.get("quality_score", "N/A")
+             processing_time = agent_detail["processing_time"]
+
+             content += f"""
+ #### {agent_type}
+ - **Status:** {success}
+ - **Quality Score:** {quality_score}
+ - **Processing Time:** {processing_time:.2f}s
+ - **Version Created:** {agent_detail["version_created"]}
+ """
+
+         content += f"""
+ ## Recommendations
+
+ """
+
+         if outcome["success"] and outcome["human_handoff"]:
+             content += "**Pipeline Success**: Quality targets achieved. Ready for human review and approval.\n\n"
+         elif outcome["escalated"]:
+             content += "**Human Intervention**: Pipeline escalated. Review required for quality issues or plateau.\n\n"
+         else:
+             content += "**Pipeline Issues**: Processing incomplete. Check agent logs for details.\n\n"
+
+         # Add specific recommendations based on final quality
+         if final_assessment:
+             overall_score = final_assessment.get('overall_score', 0)
+             if overall_score >= 90:
+                 content += "**Excellent Quality**: Document exceeds publication standards.\n"
+             elif overall_score >= 85:
+                 content += "**Good Quality**: Document meets professional standards.\n"
+             elif overall_score >= 80:
+                 content += "**Acceptable Quality**: Document may benefit from additional review.\n"
+             else:
+                 content += "**Quality Concerns**: Document requires significant improvement.\n"
+
+         content += f"""
+ ## Next Steps
+
+ {'1. **Human Review**: Proceed with human reviewer approval process' if outcome['ready_for_review'] else '1. **Technical Review**: Address pipeline issues before proceeding'}
+ 2. **Version Management**: Final version `{results["version_management"]["final_version"]}` ready for use
+ 3. **Quality Tracking**: Monitor quality metrics for future improvements
+ """
+
+         with open(output_path, 'w', encoding='utf-8') as f:
+             f.write(content)
+
+     def print_pipeline_summary(self, results: Dict):
+         """Print concise pipeline summary to console."""
+         outcome = results["pipeline_outcome"]
+         final_assessment = results["quality_results"]["final_assessment"]
+
+         print("PIPELINE SUMMARY")
+         print(f" Status: {'Success' if outcome['success'] else 'Escalated' if outcome['escalated'] else 'Failed'}")
+         print(f" Iterations: {outcome['iterations_completed']}")
+         print(f" Final Version: {results['version_management']['final_version']}")
+
+         if final_assessment:
+             overall_score = final_assessment.get('overall_score', 0)
+             content_score = final_assessment.get('content_score', 0)
+             latex_score = final_assessment.get('latex_score', 0)
+             print(f" Quality Scores: Overall {overall_score}, Content {content_score}, LaTeX {latex_score}")
+
+         print(f" Human Handoff: {'Ready' if outcome['human_handoff'] else 'Not Ready'}")
+         if results["pipeline_metadata"].get("output_dir"):
+             print(f" Output Dir: {results['pipeline_metadata']['output_dir']}")
+         print(f" Report: {self.reports_dir}/{results['pipeline_metadata']['workflow_id']}_pipeline_summary.md")
+
+
+ def main():
+     """Run the QA orchestrator agent."""
+     import argparse
+
+     parser = argparse.ArgumentParser(description='QA Orchestrator Agent')
+     parser.add_argument(
+         '--content', '-c',
+         default='research_report',
+         help='Content source folder (e.g., research_report, magazine)'
+     )
+     args = parser.parse_args()
+
+     content_source = args.content
+     run_id = uuid.uuid4().hex[:8]
+     output_dir = f"artifacts/output/{run_id}"
+
+     print("[*] Starting QA Orchestrator Agent")
+     print("=" * 70)
+     print(f"Content source: {content_source}")
+     print(f"Run ID: {run_id}")
+     print(f"Output directory: {output_dir}")
+
+     # Initialize agent with content source
+     agent = QAOrchestratorAgent(content_source=content_source)
+
+     try:
+         # Run complete QA pipeline
+         results = agent.orchestrate_qa_pipeline(
+             starting_version="v0_original",
+             workflow_id=run_id,
+             output_dir=output_dir,
+         )
+
+         print("\n" + "=" * 70)
+         print(f"QA Orchestration complete! Output: {output_dir}")
+
+     except Exception as e:
+         print(f"Error: {e}")
+         import traceback
+         traceback.print_exc()
+
+
+ if __name__ == "__main__":
+     main()
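With the argparse setup above, a run against the bundled magazine content would look like `python agents/qa_orchestrator/agent.py --content magazine`, assuming the file is executed directly from the project root; the wheel also declares console entry points in entry_points.txt, whose names are not shown in this diff.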