deepagents-printshop 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/content_editor/__init__.py +1 -0
- agents/content_editor/agent.py +279 -0
- agents/content_editor/content_reviewer.py +327 -0
- agents/content_editor/versioned_agent.py +455 -0
- agents/latex_specialist/__init__.py +1 -0
- agents/latex_specialist/agent.py +531 -0
- agents/latex_specialist/latex_analyzer.py +510 -0
- agents/latex_specialist/latex_optimizer.py +1192 -0
- agents/qa_orchestrator/__init__.py +1 -0
- agents/qa_orchestrator/agent.py +603 -0
- agents/qa_orchestrator/langgraph_workflow.py +733 -0
- agents/qa_orchestrator/pipeline_types.py +72 -0
- agents/qa_orchestrator/quality_gates.py +495 -0
- agents/qa_orchestrator/workflow_coordinator.py +139 -0
- agents/research_agent/__init__.py +1 -0
- agents/research_agent/agent.py +258 -0
- agents/research_agent/llm_report_generator.py +1023 -0
- agents/research_agent/report_generator.py +536 -0
- agents/visual_qa/__init__.py +1 -0
- agents/visual_qa/agent.py +410 -0
- deepagents_printshop-0.1.0.dist-info/METADATA +744 -0
- deepagents_printshop-0.1.0.dist-info/RECORD +37 -0
- deepagents_printshop-0.1.0.dist-info/WHEEL +4 -0
- deepagents_printshop-0.1.0.dist-info/entry_points.txt +2 -0
- deepagents_printshop-0.1.0.dist-info/licenses/LICENSE +86 -0
- tools/__init__.py +1 -0
- tools/change_tracker.py +419 -0
- tools/content_type_loader.py +171 -0
- tools/graph_generator.py +281 -0
- tools/latex_generator.py +374 -0
- tools/llm_latex_generator.py +678 -0
- tools/magazine_layout.py +462 -0
- tools/pattern_injector.py +250 -0
- tools/pattern_learner.py +477 -0
- tools/pdf_compiler.py +386 -0
- tools/version_manager.py +346 -0
- tools/visual_qa.py +799 -0
tools/content_type_loader.py
ADDED
@@ -0,0 +1,171 @@
"""
Content Type Loader

Loads content type definitions from content_types/{type_id}/type.md files.
Provides structured metadata for DocumentConfig and raw markdown for LLM prompts.
"""

import logging
import re
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional

logger = logging.getLogger(__name__)


@dataclass
class ContentTypeDefinition:
    """A loaded content type definition."""
    type_id: str
    type_md_content: str  # Full type.md - goes to LLM as-is
    document_class: str  # Extracted for DocumentConfig
    default_font_size: str  # Extracted for DocumentConfig
    default_paper_size: str  # Extracted for DocumentConfig

    @property
    def rendering_instructions(self) -> str:
        """Extract the ## Rendering Instructions section text."""
        if not self.type_md_content:
            return ""
        match = re.search(
            r'## Rendering Instructions\s*\n(.*?)(?=\n## |\Z)',
            self.type_md_content,
            re.DOTALL
        )
        return match.group(1).strip() if match else ""

    @property
    def latex_preamble_blocks(self) -> List[str]:
        """Extract all ```latex code blocks from the ## LaTeX Requirements section."""
        if not self.type_md_content:
            return []
        section_match = re.search(
            r'## LaTeX Requirements\s*\n(.*?)(?=\n## |\Z)',
            self.type_md_content,
            re.DOTALL
        )
        if not section_match:
            return []
        section_text = section_match.group(1)
        return re.findall(r'```latex\s*\n(.*?)```', section_text, re.DOTALL)

    @property
    def structure_rules(self) -> str:
        """Extract the ## Structure Rules section text."""
        if not self.type_md_content:
            return ""
        match = re.search(
            r'## Structure Rules\s*\n(.*?)(?=\n## |\Z)',
            self.type_md_content,
            re.DOTALL
        )
        return match.group(1).strip() if match else ""


class ContentTypeLoader:
    """
    Loads content type definitions from content_types/ directory.

    Each content type is a directory containing a type.md file with:
    - Type Metadata section with structured fields
    - Rendering Instructions for LLM consumption
    - LaTeX Requirements for package/preamble info
    - Structure Rules for compilation constraints
    """

    def __init__(self, types_dir: Optional[str] = None):
        if types_dir is not None:
            self.types_dir = Path(types_dir)
        else:
            # Resolve from this file's location so it works regardless of CWD
            self.types_dir = Path(__file__).parent.parent / "content_types"

    def load_type(self, type_id: str) -> ContentTypeDefinition:
        """
        Load a content type definition by ID.

        Args:
            type_id: The type identifier (e.g., 'research_report', 'magazine')

        Returns:
            ContentTypeDefinition with metadata and full markdown content
        """
        type_path = self.types_dir / type_id / "type.md"

        if not type_path.exists():
            logger.warning(
                "Content type '%s' not found at %s, using defaults",
                type_id, type_path
            )
            return ContentTypeDefinition(
                type_id=type_id,
                type_md_content="",
                document_class="article",
                default_font_size="12pt",
                default_paper_size="letterpaper",
            )

        with open(type_path, 'r', encoding='utf-8') as f:
            content = f.read()

        metadata = self._extract_metadata(content)

        return ContentTypeDefinition(
            type_id=type_id,
            type_md_content=content,
            document_class=metadata.get("document_class", "article"),
            default_font_size=metadata.get("default_font_size", "12pt"),
            default_paper_size=metadata.get("default_paper_size", "letterpaper"),
        )

    def list_types(self) -> List[str]:
        """
        List all available content type IDs.

        Returns:
            List of type_id strings
        """
        if not self.types_dir.exists():
            return []

        return sorted(
            d.name for d in self.types_dir.iterdir()
            if d.is_dir() and (d / "type.md").exists()
        )

    def _extract_metadata(self, content: str) -> dict:
        """
        Extract structured metadata from the ## Type Metadata section.

        Parses key-value pairs in the format:
        - key: value

        Returns:
            Dictionary of metadata fields
        """
        metadata = {}

        # Find the Type Metadata section
        in_metadata = False
        for line in content.split('\n'):
            stripped = line.strip()

            if stripped == '## Type Metadata':
                in_metadata = True
                continue

            if in_metadata:
                # Stop at next section header
                if stripped.startswith('## '):
                    break

                # Parse "- key: value" lines
                if stripped.startswith('- ') and ':' in stripped:
                    key_value = stripped[2:].split(':', 1)
                    if len(key_value) == 2:
                        key = key_value[0].strip()
                        value = key_value[1].strip()
                        metadata[key] = value

        return metadata
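Note: the following usage sketch is not part of the package. It only illustrates how ContentTypeLoader could be driven; the type.md layout shown is inferred from the section headers and "- key: value" lines that load_type and _extract_metadata parse, and the tools.content_type_loader import path is assumed from the wheel's top-level tools/ package.

# Hypothetical usage sketch (assumptions noted above; all content is placeholder).
import tempfile
from pathlib import Path

from tools.content_type_loader import ContentTypeLoader  # import path assumed

SAMPLE_TYPE_MD = """\
## Type Metadata
- document_class: article
- default_font_size: 11pt
- default_paper_size: a4paper

## Rendering Instructions
Write in a formal, report-like voice.

## LaTeX Requirements
```latex
\\usepackage{booktabs}
```

## Structure Rules
Every section must compile on its own.
"""

with tempfile.TemporaryDirectory() as tmp:
    # Mimic the expected content_types/<type_id>/type.md layout.
    type_dir = Path(tmp) / "research_report"
    type_dir.mkdir()
    (type_dir / "type.md").write_text(SAMPLE_TYPE_MD, encoding="utf-8")

    loader = ContentTypeLoader(types_dir=tmp)
    definition = loader.load_type("research_report")
    print(definition.document_class)          # article
    print(definition.default_font_size)       # 11pt
    print(definition.rendering_instructions)  # Write in a formal, report-like voice.
    print(definition.latex_preamble_blocks)   # ['\\usepackage{booktabs}\n']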
tools/graph_generator.py
ADDED
@@ -0,0 +1,281 @@
"""Graph Generator for Magazine Data Visualizations."""

import os
from pathlib import Path
from typing import Dict, List, Optional

import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')  # Non-interactive backend

# Set style for magazine-quality charts
plt.style.use('seaborn-v0_8-whitegrid')


class GraphGenerator:
    """Generate publication-quality graphs from CSV data."""

    def __init__(self, output_dir: str = "artifacts/sample_content/magazine/images"):
        """
        Initialize graph generator.

        Args:
            output_dir: Directory to save generated graphs
        """
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Magazine color palette
        self.colors = {
            'primary': '#2E86AB',
            'secondary': '#A23B72',
            'tertiary': '#F18F01',
            'quaternary': '#C73E1D',
            'quinary': '#3B1F2B',
            'light': '#E8E8E8',
            'dark': '#1A1A2E'
        }
        self.color_list = ['#2E86AB', '#A23B72', '#F18F01', '#C73E1D', '#3B1F2B', '#5C946E']

    def generate_adoption_chart(self, csv_path: str, output_name: str = "adoption_chart.png") -> str:
        """
        Generate adoption metrics chart showing year-over-year growth.

        Args:
            csv_path: Path to adoption_metrics.csv
            output_name: Output filename

        Returns:
            Path to generated chart
        """
        df = pd.read_csv(csv_path)

        fig, ax = plt.subplots(figsize=(8, 5), dpi=150)

        years = ['2024', '2025', '2026 Projected']
        x = range(len(years))
        width = 0.2

        metrics = df['Metric'].tolist()
        for i, metric in enumerate(metrics):
            values = df.iloc[i, 1:].values.astype(float)
            bars = ax.bar([xi + i * width for xi in x], values, width,
                          label=metric, color=self.color_list[i % len(self.color_list)])
            # Add value labels on bars
            for bar, val in zip(bars, values):
                ax.annotate(f'{val:.0f}',
                            xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                            ha='center', va='bottom', fontsize=8)

        ax.set_xlabel('Year', fontsize=11, fontweight='bold')
        ax.set_ylabel('Percentage / Minutes', fontsize=11, fontweight='bold')
        ax.set_title('AI Agent Adoption Metrics (2024-2026)', fontsize=14, fontweight='bold', pad=15)
        ax.set_xticks([xi + width * 1.5 for xi in x])
        ax.set_xticklabels(years)
        ax.legend(loc='upper left', fontsize=9, framealpha=0.9)
        ax.set_ylim(0, 100)

        plt.tight_layout()
        output_path = self.output_dir / output_name
        plt.savefig(output_path, dpi=150, bbox_inches='tight', facecolor='white')
        plt.close()

        print(f"[OK] Generated: {output_path}")
        return str(output_path)

    def generate_framework_comparison(self, csv_path: str, output_name: str = "framework_comparison.png") -> str:
        """
        Generate framework comparison bar chart.

        Args:
            csv_path: Path to framework_comparison.csv
            output_name: Output filename

        Returns:
            Path to generated chart
        """
        df = pd.read_csv(csv_path)

        fig, axes = plt.subplots(1, 3, figsize=(12, 4), dpi=150)

        frameworks = df['Framework'].tolist()
        colors = [self.color_list[i % len(self.color_list)] for i in range(len(frameworks))]

        # Latency (lower is better)
        ax1 = axes[0]
        bars1 = ax1.barh(frameworks, df['Latency (ms)'], color=colors)
        ax1.set_xlabel('Latency (ms)', fontsize=10, fontweight='bold')
        ax1.set_title('Response Latency', fontsize=11, fontweight='bold')
        ax1.invert_xaxis()  # Lower is better, so invert
        for bar, val in zip(bars1, df['Latency (ms)']):
            ax1.annotate(f'{val}ms', xy=(val - 5, bar.get_y() + bar.get_height()/2),
                         ha='right', va='center', fontsize=8, color='white', fontweight='bold')

        # Token Efficiency
        ax2 = axes[1]
        bars2 = ax2.barh(frameworks, df['Token Efficiency'], color=colors)
        ax2.set_xlabel('Token Efficiency (%)', fontsize=10, fontweight='bold')
        ax2.set_title('Token Efficiency', fontsize=11, fontweight='bold')
        ax2.set_xlim(70, 100)
        for bar, val in zip(bars2, df['Token Efficiency']):
            ax2.annotate(f'{val}%', xy=(val - 1, bar.get_y() + bar.get_height()/2),
                         ha='right', va='center', fontsize=8, color='white', fontweight='bold')

        # Success Rate
        ax3 = axes[2]
        bars3 = ax3.barh(frameworks, df['Success Rate'], color=colors)
        ax3.set_xlabel('Success Rate (%)', fontsize=10, fontweight='bold')
        ax3.set_title('Success Rate', fontsize=11, fontweight='bold')
        ax3.set_xlim(85, 100)
        for bar, val in zip(bars3, df['Success Rate']):
            ax3.annotate(f'{val}%', xy=(val - 0.5, bar.get_y() + bar.get_height()/2),
                         ha='right', va='center', fontsize=8, color='white', fontweight='bold')

        plt.suptitle('Agent Framework Comparison', fontsize=14, fontweight='bold', y=1.02)
        plt.tight_layout()

        output_path = self.output_dir / output_name
        plt.savefig(output_path, dpi=150, bbox_inches='tight', facecolor='white')
        plt.close()

        print(f"[OK] Generated: {output_path}")
        return str(output_path)

    def generate_model_performance_radar(self, csv_path: str, output_name: str = "model_performance.png") -> str:
        """
        Generate model performance comparison chart.

        Args:
            csv_path: Path to model_performance.csv
            output_name: Output filename

        Returns:
            Path to generated chart
        """
        df = pd.read_csv(csv_path)

        fig, ax = plt.subplots(figsize=(10, 5), dpi=150)

        models = df['Model'].tolist()
        x = range(len(models))
        width = 0.25

        metrics = ['Tool Use Accuracy', 'Multi-Step Planning', 'Code Generation']

        for i, metric in enumerate(metrics):
            values = df[metric].values
            bars = ax.bar([xi + i * width for xi in x], values, width,
                          label=metric, color=self.color_list[i])
            for bar, val in zip(bars, values):
                ax.annotate(f'{val:.1f}',
                            xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                            ha='center', va='bottom', fontsize=7)

        ax.set_xlabel('Model', fontsize=11, fontweight='bold')
        ax.set_ylabel('Score (%)', fontsize=11, fontweight='bold')
        ax.set_title('LLM Performance Comparison for Agent Tasks', fontsize=14, fontweight='bold', pad=15)
        ax.set_xticks([xi + width for xi in x])
        ax.set_xticklabels(models, rotation=15, ha='right')
        ax.legend(loc='lower right', fontsize=9)
        ax.set_ylim(80, 100)

        plt.tight_layout()
        output_path = self.output_dir / output_name
        plt.savefig(output_path, dpi=150, bbox_inches='tight', facecolor='white')
        plt.close()

        print(f"[OK] Generated: {output_path}")
        return str(output_path)

    def generate_cost_comparison(self, csv_path: str, output_name: str = "cost_comparison.png") -> str:
        """
        Generate cost comparison chart.

        Args:
            csv_path: Path to model_performance.csv
            output_name: Output filename

        Returns:
            Path to generated chart
        """
        df = pd.read_csv(csv_path)

        fig, ax = plt.subplots(figsize=(8, 5), dpi=150)

        models = df['Model'].tolist()
        costs = df['Cost per 1M Tokens'].values
        colors = [self.color_list[i % len(self.color_list)] for i in range(len(models))]

        bars = ax.bar(models, costs, color=colors)

        # Add value labels
        for bar, cost in zip(bars, costs):
            ax.annotate(f'${cost:.2f}',
                        xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                        ha='center', va='bottom', fontsize=10, fontweight='bold')

        ax.set_xlabel('Model', fontsize=11, fontweight='bold')
        ax.set_ylabel('Cost per 1M Tokens ($)', fontsize=11, fontweight='bold')
        ax.set_title('LLM Cost Comparison', fontsize=14, fontweight='bold', pad=15)
        plt.xticks(rotation=15, ha='right')

        plt.tight_layout()
        output_path = self.output_dir / output_name
        plt.savefig(output_path, dpi=150, bbox_inches='tight', facecolor='white')
        plt.close()

        print(f"[OK] Generated: {output_path}")
        return str(output_path)

    def generate_all_charts(self, data_dir: str) -> List[str]:
        """
        Generate all charts from data directory.

        Args:
            data_dir: Directory containing CSV files

        Returns:
            List of paths to generated charts
        """
        data_path = Path(data_dir)
        generated = []

        print("Generating magazine charts...")
        print("=" * 50)

        # Adoption metrics
        adoption_csv = data_path / "adoption_metrics.csv"
        if adoption_csv.exists():
            generated.append(self.generate_adoption_chart(str(adoption_csv)))

        # Framework comparison
        framework_csv = data_path / "framework_comparison.csv"
        if framework_csv.exists():
            generated.append(self.generate_framework_comparison(str(framework_csv)))

        # Model performance
        model_csv = data_path / "model_performance.csv"
        if model_csv.exists():
            generated.append(self.generate_model_performance_radar(str(model_csv)))
            generated.append(self.generate_cost_comparison(str(model_csv)))

        print("=" * 50)
        print(f"[OK] Generated {len(generated)} charts")

        return generated


def main():
    """Generate charts for magazine."""
    generator = GraphGenerator()
    data_dir = "artifacts/sample_content/magazine/data"

    charts = generator.generate_all_charts(data_dir)

    print("\nGenerated charts:")
    for chart in charts:
        print(f"  - {chart}")


if __name__ == "__main__":
    main()
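Note: the following driver sketch is likewise not part of the package. It only illustrates the CSV shape that generate_adoption_chart appears to expect, a 'Metric' column followed by one numeric column per year (it reads df['Metric'] and df.iloc[i, 1:]); all metric names and values are placeholders, and the tools.graph_generator import path is assumed from the wheel's top-level tools/ package.

# Hypothetical driver sketch (assumptions and placeholder data noted above).
import tempfile
from pathlib import Path

import pandas as pd

from tools.graph_generator import GraphGenerator  # import path assumed

# Placeholder frame matching the inferred adoption_metrics.csv shape.
placeholder = pd.DataFrame({
    "Metric": ["Metric A", "Metric B", "Metric C", "Metric D"],
    "2024": [20, 35, 15, 40],
    "2025": [45, 60, 30, 65],
    "2026": [70, 85, 55, 90],
})

with tempfile.TemporaryDirectory() as tmp:
    csv_path = Path(tmp) / "adoption_metrics.csv"
    placeholder.to_csv(csv_path, index=False)

    generator = GraphGenerator(output_dir=tmp)  # write the PNG next to the CSV
    chart = generator.generate_adoption_chart(str(csv_path))
    print(chart)  # <tmp>/adoption_chart.png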