deepagents-printshop 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. agents/content_editor/__init__.py +1 -0
  2. agents/content_editor/agent.py +279 -0
  3. agents/content_editor/content_reviewer.py +327 -0
  4. agents/content_editor/versioned_agent.py +455 -0
  5. agents/latex_specialist/__init__.py +1 -0
  6. agents/latex_specialist/agent.py +531 -0
  7. agents/latex_specialist/latex_analyzer.py +510 -0
  8. agents/latex_specialist/latex_optimizer.py +1192 -0
  9. agents/qa_orchestrator/__init__.py +1 -0
  10. agents/qa_orchestrator/agent.py +603 -0
  11. agents/qa_orchestrator/langgraph_workflow.py +733 -0
  12. agents/qa_orchestrator/pipeline_types.py +72 -0
  13. agents/qa_orchestrator/quality_gates.py +495 -0
  14. agents/qa_orchestrator/workflow_coordinator.py +139 -0
  15. agents/research_agent/__init__.py +1 -0
  16. agents/research_agent/agent.py +258 -0
  17. agents/research_agent/llm_report_generator.py +1023 -0
  18. agents/research_agent/report_generator.py +536 -0
  19. agents/visual_qa/__init__.py +1 -0
  20. agents/visual_qa/agent.py +410 -0
  21. deepagents_printshop-0.1.0.dist-info/METADATA +744 -0
  22. deepagents_printshop-0.1.0.dist-info/RECORD +37 -0
  23. deepagents_printshop-0.1.0.dist-info/WHEEL +4 -0
  24. deepagents_printshop-0.1.0.dist-info/entry_points.txt +2 -0
  25. deepagents_printshop-0.1.0.dist-info/licenses/LICENSE +86 -0
  26. tools/__init__.py +1 -0
  27. tools/change_tracker.py +419 -0
  28. tools/content_type_loader.py +171 -0
  29. tools/graph_generator.py +281 -0
  30. tools/latex_generator.py +374 -0
  31. tools/llm_latex_generator.py +678 -0
  32. tools/magazine_layout.py +462 -0
  33. tools/pattern_injector.py +250 -0
  34. tools/pattern_learner.py +477 -0
  35. tools/pdf_compiler.py +386 -0
  36. tools/version_manager.py +346 -0
  37. tools/visual_qa.py +799 -0
@@ -0,0 +1,477 @@
1
+ """
2
+ Pattern Learner - Milestone 1
3
+
4
+ Mines version history to extract actionable patterns and insights.
5
+ Simple, read-only analysis that generates learned_patterns.json.
6
+ """
7
+
8
import json
import re
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple
13
+
14
+
15
class PatternLearner:
    """
    Learn from version history to identify improvement patterns.

    Milestone 1 Features:
    - Extract common LaTeX fixes
    - Track quality improvements
    - Identify recurring issues
    - Generate simple recommendations

    Analysis is read-only with respect to the version history; the only
    outputs are ``learned_patterns.json`` and a markdown report written
    under ``.deepagents/memories/<document_type>/``.
    """

    def __init__(self, base_dir: str = "artifacts", document_type: str = "research_report"):
        """
        Initialize pattern learner.

        Args:
            base_dir: Base artifacts directory
            document_type: Type of document (e.g., 'research_report', 'article', 'technical_doc')
        """
        self.base_dir = Path(base_dir)
        self.version_dir = self.base_dir / "version_history"
        self.changes_dir = self.version_dir / "changes"
        self.reports_dir = self.base_dir / "agent_reports" / "quality"
        self.document_type = document_type
        # Patterns are stored per document type so different document kinds
        # accumulate independent memories.
        self.output_dir = Path(".deepagents") / "memories" / document_type
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def mine_patterns(self) -> Dict:
        """
        Mine all patterns from version history.

        Returns:
            Dictionary of learned patterns (metadata, common_latex_fixes,
            quality_improvements, recurring_issues, agent_performance,
            insights). Returned even when no history exists yet.
        """
        print("šŸ” Mining version history for patterns...")
        print("=" * 60)

        patterns = {
            "metadata": {
                "generated_at": datetime.now().isoformat(),
                "documents_analyzed": 0,
                "transitions_analyzed": 0
            },
            "common_latex_fixes": {},
            "quality_improvements": [],
            "recurring_issues": [],
            "agent_performance": {},
            "insights": []
        }

        # 1. Load version manifest; without it there is nothing to analyze.
        manifest_path = self.version_dir / "version_manifest.json"
        if not manifest_path.exists():
            print("āš ļø No version manifest found - no history to analyze yet")
            return patterns

        with open(manifest_path, 'r') as f:
            manifest = json.load(f)

        patterns["metadata"]["documents_analyzed"] = len(manifest.get("versions", {}))

        # 2. Analyze each version-transition change file.
        change_files = list(self.changes_dir.glob("*.json"))
        patterns["metadata"]["transitions_analyzed"] = len(change_files)

        print(f"šŸ“Š Found {len(change_files)} version transitions to analyze\n")

        for change_file in change_files:
            self._analyze_change_file(change_file, patterns)

        # 3. Analyze quality reports from agent_reports/quality (optional).
        if self.reports_dir.exists():
            quality_reports = list(self.reports_dir.glob("*_latex_processing_report.md"))
            print(f"šŸ“‘ Found {len(quality_reports)} quality reports to analyze\n")

            for report_file in quality_reports:
                self._analyze_quality_report(report_file, patterns)

        # 4. Distill the raw counts into actionable insights.
        self._generate_insights(patterns)

        print("\nāœ… Pattern mining complete!")
        return patterns

    def _analyze_change_file(self, change_file: Path, patterns: Dict):
        """Analyze a single change file for patterns.

        Updates ``patterns`` in place: fix counts, quality improvements,
        recurring suggestions and per-agent statistics.
        """
        try:
            with open(change_file, 'r') as f:
                change_data = json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            # Best-effort mining: skip unreadable or malformed change files.
            print(f"āš ļø Could not read {change_file.name}: {e}")
            return

        # Extract LaTeX optimizations applied during this transition.
        if "optimizations_applied" in change_data:
            for opt in change_data["optimizations_applied"]:
                fix = patterns["common_latex_fixes"].setdefault(opt, {
                    "count": 0,
                    "scores_before": [],
                    "scores_after": []
                })
                fix["count"] += 1

        # Extract quality improvements.
        if "latex_analysis" in change_data:
            latex_data = change_data["latex_analysis"]

            # Track score improvements between the two versions.
            if "overall_score" in latex_data:
                improvement = {
                    "from_version": change_data.get("parent_version", "unknown"),
                    "to_version": change_data.get("target_version", "unknown"),
                    "score": latex_data["overall_score"],
                    "optimizations_count": change_data.get("optimization_results", {}).get("optimization_count", 0)
                }
                patterns["quality_improvements"].append(improvement)

            # Track recurring issues (deduplicated).
            if "suggestions" in latex_data:
                for suggestion in latex_data["suggestions"]:
                    if suggestion not in patterns["recurring_issues"]:
                        patterns["recurring_issues"].append(suggestion)

        # Track agent performance.
        agent = change_data.get("version_created", {}).get("agent", "unknown")
        if agent != "unknown":
            perf = patterns["agent_performance"].setdefault(agent, {
                "versions_created": 0,
                "avg_quality_score": [],
                "optimizations_applied": 0
            })
            perf["versions_created"] += 1

            if "latex_analysis" in change_data:
                score = change_data["latex_analysis"].get("overall_score")
                # `is not None` so a legitimate score of 0 is still recorded
                # (a plain truthiness test would silently drop zeros).
                if score is not None:
                    perf["avg_quality_score"].append(score)

    def _analyze_quality_report(self, report_file: Path, patterns: Dict):
        """Analyze a quality report markdown file for patterns.

        Parses the overall score, the "Optimizations Applied" section, the
        "Recommendations" section and the agent name, and folds them into
        ``patterns`` in place.
        """
        try:
            with open(report_file, 'r', encoding='utf-8') as f:
                content = f.read()
        except (OSError, UnicodeDecodeError) as e:
            # Best-effort mining: skip unreadable reports.
            print(f"āš ļø Could not read {report_file.name}: {e}")
            return

        # Extract version name from filename (e.g., v2_latex_optimized_latex_processing_report.md)
        version_match = re.search(r'(v\d+_[^_]+(?:_[^_]+)?)', report_file.name)
        version_name = version_match.group(1) if version_match else "unknown"

        # Extract overall score once (e.g., "| **Overall Score** | **89** | **100** |")
        score_match = re.search(r'\*\*Overall Score\*\*.*?\*\*(\d+)\*\*', content)
        score = int(score_match.group(1)) if score_match else None

        if score is not None:
            patterns["quality_improvements"].append({
                "version": version_name,
                "score": score,
                "source": "quality_report"
            })

        # Extract optimizations applied.
        # Pattern: ## Optimizations Applied (5 total)
        #          1. Fixed multiple consecutive spaces
        opt_section_match = re.search(r'## Optimizations Applied \((\d+) total\)\s+((?:\d+\. .+\n?)+)', content)
        if opt_section_match:
            opt_text = opt_section_match.group(2)

            for line in opt_text.strip().split('\n'):
                line = line.strip()
                if line and re.match(r'^\d+\.', line):
                    # Remove numbering (e.g., "1. Fixed ..." -> "Fixed ...")
                    opt = re.sub(r'^\d+\.\s*', '', line)

                    fix = patterns["common_latex_fixes"].setdefault(opt, {
                        "count": 0,
                        "versions": []
                    })
                    fix["count"] += 1
                    # setdefault guards the case where this fix was first seen
                    # in a change file, whose entry has no "versions" list.
                    fix.setdefault("versions", []).append(version_name)

        # Extract recommendations as recurring issues.
        # Pattern: ## Recommendations
        #          - Address 3 formatting warnings...
        rec_match = re.search(r'## Recommendations\s+((?:- .+\n?)+)', content)
        if rec_match:
            for line in rec_match.group(1).strip().split('\n'):
                line = line.strip()
                if line.startswith('- '):
                    recommendation = line[2:].strip()  # Remove "- " prefix
                    if recommendation not in patterns["recurring_issues"]:
                        patterns["recurring_issues"].append(recommendation)

        # Extract agent info; only credited when a score was also found.
        agent_match = re.search(r'\*\*Agent:\*\* (\w+)', content)
        if agent_match and score is not None:
            agent = agent_match.group(1)

            perf = patterns["agent_performance"].setdefault(agent, {
                "versions_created": 0,
                "avg_quality_score": [],
                "optimizations_applied": 0
            })
            perf["versions_created"] += 1
            perf["avg_quality_score"].append(score)

            if opt_section_match:
                perf["optimizations_applied"] += int(opt_section_match.group(1))

    def _generate_insights(self, patterns: Dict):
        """Generate actionable insights from mined patterns (in place)."""
        insights = []

        # Insight 1: Most common fix across all documents.
        if patterns["common_latex_fixes"]:
            top_fix = max(
                patterns["common_latex_fixes"].items(),
                key=lambda x: x[1]["count"]
            )
            insights.append({
                "type": "common_fix",
                "title": "Most Common LaTeX Fix",
                "description": f"'{top_fix[0]}' appears in {top_fix[1]['count']} documents",
                "recommendation": "Consider applying this fix proactively in future documents"
            })

        # Insight 2: Average quality score as a baseline target.
        if patterns["quality_improvements"]:
            scores = [imp["score"] for imp in patterns["quality_improvements"]]
            avg_score = sum(scores) / len(scores)

            insights.append({
                "type": "quality_baseline",
                "title": "Average LaTeX Quality Score",
                "description": f"Average score across {len(scores)} optimizations: {avg_score:.1f}/100",
                "recommendation": f"Target score of {avg_score + 5:.1f} for above-average quality"
            })

        # Insight 3: Per-agent effectiveness.
        for agent, perf in patterns["agent_performance"].items():
            if perf["avg_quality_score"]:
                avg = sum(perf["avg_quality_score"]) / len(perf["avg_quality_score"])

                insights.append({
                    "type": "agent_performance",
                    "title": f"{agent.replace('_', ' ').title()} Performance",
                    "description": f"Processed {perf['versions_created']} versions with avg quality {avg:.1f}",
                    # 85 is the quality threshold for "stable" performance.
                    "recommendation": "Performance is stable" if avg > 85 else "Consider tuning quality thresholds"
                })

        # Insight 4: Recurring suggestions worth fixing proactively.
        unique_issues = len(patterns["recurring_issues"])
        if unique_issues > 0:
            insights.append({
                "type": "recurring_issues",
                "title": "Recurring Suggestions",
                "description": f"Found {unique_issues} recurring suggestions across documents",
                "recommendation": "Review these suggestions for proactive fixes"
            })

        patterns["insights"] = insights

    def save_patterns(self, patterns: Dict, filename: str = "learned_patterns.json") -> str:
        """
        Save learned patterns to file.

        Args:
            patterns: Pattern dictionary
            filename: Output filename

        Returns:
            Path to saved file
        """
        output_path = self.output_dir / filename

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(patterns, f, indent=2, ensure_ascii=False)

        print(f"\nšŸ’¾ Patterns saved to: {output_path}")
        print(f"šŸ“ Document type: {self.document_type}")
        return str(output_path)

    def generate_report(self, patterns: Dict) -> str:
        """
        Generate human-readable pattern report.

        Args:
            patterns: Pattern dictionary

        Returns:
            Path to report file (pattern_learning_report.md in output_dir)
        """
        report_path = self.output_dir / "pattern_learning_report.md"

        report = f"""# Pattern Learning Report

**Generated:** {patterns['metadata']['generated_at']}

## Overview

- **Documents Analyzed:** {patterns['metadata']['documents_analyzed']}
- **Version Transitions:** {patterns['metadata']['transitions_analyzed']}

## Common LaTeX Fixes

The following fixes appear frequently across documents:

"""

        # Sort fixes by frequency, most common first.
        sorted_fixes = sorted(
            patterns["common_latex_fixes"].items(),
            key=lambda x: x[1]["count"],
            reverse=True
        )

        for fix, data in sorted_fixes:
            versions_str = ""
            # "versions" only exists for fixes seen in quality reports.
            if "versions" in data and data["versions"]:
                versions_str = f" (versions: {', '.join(data['versions'])})"
            report += f"- **{fix}** - Applied {data['count']} times{versions_str}\n"

        report += "\n## Quality Improvements\n\n"
        report += f"Total optimizations tracked: {len(patterns['quality_improvements'])}\n\n"

        if patterns['quality_improvements']:
            scores = [imp["score"] for imp in patterns["quality_improvements"]]
            report += f"- Average LaTeX quality score: {sum(scores)/len(scores):.1f}/100\n"
            report += f"- Highest score achieved: {max(scores)}/100\n"
            report += f"- Lowest score achieved: {min(scores)}/100\n\n"

            # Show version-by-version scores.
            report += "**Score by Version:**\n\n"
            for imp in patterns["quality_improvements"]:
                report += f"- {imp.get('version', 'unknown')}: {imp['score']}/100\n"

        report += "\n## Agent Performance\n\n"

        for agent, perf in patterns["agent_performance"].items():
            report += f"### {agent.replace('_', ' ').title()}\n\n"
            report += f"- Versions created: {perf['versions_created']}\n"

            if perf['avg_quality_score']:
                avg = sum(perf['avg_quality_score']) / len(perf['avg_quality_score'])
                report += f"- Average quality score: {avg:.1f}/100\n"

            if perf.get('optimizations_applied', 0) > 0:
                report += f"- Total optimizations applied: {perf['optimizations_applied']}\n"

            report += "\n"

        report += "## Recurring Issues\n\n"

        if patterns["recurring_issues"]:
            for issue in patterns["recurring_issues"]:
                report += f"- {issue}\n"
        else:
            report += "*No recurring issues identified*\n"

        report += "\n## Key Insights\n\n"

        for insight in patterns["insights"]:
            report += f"### {insight['title']}\n\n"
            report += f"**{insight['description']}**\n\n"
            report += f"šŸ’” *Recommendation:* {insight['recommendation']}\n\n"

        report += """
---

*Generated by DeepAgents PrintShop Pattern Learner*
"""

        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)

        print(f"šŸ“„ Report saved to: {report_path}")
        return str(report_path)

    def print_summary(self, patterns: Dict):
        """Print a summary of learned patterns to console."""
        print("\n" + "=" * 60)
        print("šŸ“Š PATTERN LEARNING SUMMARY")
        print("=" * 60)

        print(f"\nšŸ“ˆ Analyzed: {patterns['metadata']['documents_analyzed']} documents, "
              f"{patterns['metadata']['transitions_analyzed']} transitions")

        print(f"\nšŸ”§ Common Fixes ({len(patterns['common_latex_fixes'])}):")
        sorted_fixes = sorted(
            patterns["common_latex_fixes"].items(),
            key=lambda x: x[1]["count"],
            reverse=True
        )[:5]  # Top 5

        for fix, data in sorted_fixes:
            print(f" • {fix}: {data['count']}x")

        print(f"\nšŸ’” Key Insights ({len(patterns['insights'])}):")
        for insight in patterns["insights"][:3]:  # Top 3
            print(f" • {insight['title']}")
            print(f" → {insight['recommendation']}")
def main():
    """Run pattern learning analysis end to end and print a summary."""
    banner = "=" * 60
    print("\n" + banner)
    print("🧠 DeepAgents PrintShop - Pattern Learner")
    print(banner)
    print("\nMilestone 3: Mining version history for improvement patterns\n")

    # Initialize learner with document type
    document_type = "research_report"  # Default document type
    learner = PatternLearner(document_type=document_type)
    print(f"šŸ“„ Learning patterns for document type: {document_type}\n")

    # Mine patterns from the version history.
    patterns = learner.mine_patterns()

    # Persist both the machine-readable JSON and the markdown report.
    learner.save_patterns(patterns)
    learner.generate_report(patterns)

    # Console summary for quick inspection.
    learner.print_summary(patterns)

    print("\n" + banner)
    print("āœ… Pattern learning complete!")
    print(banner)
    print("\nNext steps:")
    print(f" 1. Review: .deepagents/memories/{document_type}/pattern_learning_report.md")
    print(f" 2. Check: .deepagents/memories/{document_type}/learned_patterns.json")
    print(" 3. Use insights to optimize future document generation")
    print()


if __name__ == "__main__":
    main()