supervertaler-1.9.153-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (85)
  1. Supervertaler.py +47886 -0
  2. modules/__init__.py +10 -0
  3. modules/ai_actions.py +964 -0
  4. modules/ai_attachment_manager.py +343 -0
  5. modules/ai_file_viewer_dialog.py +210 -0
  6. modules/autofingers_engine.py +466 -0
  7. modules/cafetran_docx_handler.py +379 -0
  8. modules/config_manager.py +469 -0
  9. modules/database_manager.py +1878 -0
  10. modules/database_migrations.py +417 -0
  11. modules/dejavurtf_handler.py +779 -0
  12. modules/document_analyzer.py +427 -0
  13. modules/docx_handler.py +689 -0
  14. modules/encoding_repair.py +319 -0
  15. modules/encoding_repair_Qt.py +393 -0
  16. modules/encoding_repair_ui.py +481 -0
  17. modules/feature_manager.py +350 -0
  18. modules/figure_context_manager.py +340 -0
  19. modules/file_dialog_helper.py +148 -0
  20. modules/find_replace.py +164 -0
  21. modules/find_replace_qt.py +457 -0
  22. modules/glossary_manager.py +433 -0
  23. modules/image_extractor.py +188 -0
  24. modules/keyboard_shortcuts_widget.py +571 -0
  25. modules/llm_clients.py +1211 -0
  26. modules/llm_leaderboard.py +737 -0
  27. modules/llm_superbench_ui.py +1401 -0
  28. modules/local_llm_setup.py +1104 -0
  29. modules/model_update_dialog.py +381 -0
  30. modules/model_version_checker.py +373 -0
  31. modules/mqxliff_handler.py +638 -0
  32. modules/non_translatables_manager.py +743 -0
  33. modules/pdf_rescue_Qt.py +1822 -0
  34. modules/pdf_rescue_tkinter.py +909 -0
  35. modules/phrase_docx_handler.py +516 -0
  36. modules/project_home_panel.py +209 -0
  37. modules/prompt_assistant.py +357 -0
  38. modules/prompt_library.py +689 -0
  39. modules/prompt_library_migration.py +447 -0
  40. modules/quick_access_sidebar.py +282 -0
  41. modules/ribbon_widget.py +597 -0
  42. modules/sdlppx_handler.py +874 -0
  43. modules/setup_wizard.py +353 -0
  44. modules/shortcut_manager.py +932 -0
  45. modules/simple_segmenter.py +128 -0
  46. modules/spellcheck_manager.py +727 -0
  47. modules/statuses.py +207 -0
  48. modules/style_guide_manager.py +315 -0
  49. modules/superbench_ui.py +1319 -0
  50. modules/superbrowser.py +329 -0
  51. modules/supercleaner.py +600 -0
  52. modules/supercleaner_ui.py +444 -0
  53. modules/superdocs.py +19 -0
  54. modules/superdocs_viewer_qt.py +382 -0
  55. modules/superlookup.py +252 -0
  56. modules/tag_cleaner.py +260 -0
  57. modules/tag_manager.py +333 -0
  58. modules/term_extractor.py +270 -0
  59. modules/termbase_entry_editor.py +842 -0
  60. modules/termbase_import_export.py +488 -0
  61. modules/termbase_manager.py +1060 -0
  62. modules/termview_widget.py +1172 -0
  63. modules/theme_manager.py +499 -0
  64. modules/tm_editor_dialog.py +99 -0
  65. modules/tm_manager_qt.py +1280 -0
  66. modules/tm_metadata_manager.py +545 -0
  67. modules/tmx_editor.py +1461 -0
  68. modules/tmx_editor_qt.py +2784 -0
  69. modules/tmx_generator.py +284 -0
  70. modules/tracked_changes.py +900 -0
  71. modules/trados_docx_handler.py +430 -0
  72. modules/translation_memory.py +715 -0
  73. modules/translation_results_panel.py +2134 -0
  74. modules/translation_services.py +282 -0
  75. modules/unified_prompt_library.py +659 -0
  76. modules/unified_prompt_manager_qt.py +3951 -0
  77. modules/voice_commands.py +920 -0
  78. modules/voice_dictation.py +477 -0
  79. modules/voice_dictation_lite.py +249 -0
  80. supervertaler-1.9.153.dist-info/METADATA +896 -0
  81. supervertaler-1.9.153.dist-info/RECORD +85 -0
  82. supervertaler-1.9.153.dist-info/WHEEL +5 -0
  83. supervertaler-1.9.153.dist-info/entry_points.txt +2 -0
  84. supervertaler-1.9.153.dist-info/licenses/LICENSE +21 -0
  85. supervertaler-1.9.153.dist-info/top_level.txt +2 -0
modules/document_analyzer.py
@@ -0,0 +1,427 @@
"""
Document Analyzer Module

Analyzes loaded document segments to provide context-aware insights and suggestions.
Part of the Phase 2 AI Assistant implementation.

Features:
- Domain detection (medical, legal, technical, etc.)
- Terminology extraction and analysis
- Tone and formality assessment
- Document structure analysis
- Prompt optimization suggestions
"""

import re
from collections import Counter, defaultdict
from typing import Dict, List

class DocumentAnalyzer:
    """Analyzes document content to provide AI-powered insights"""

    # Domain keywords for detection
    DOMAIN_KEYWORDS = {
        'medical': {
            'keywords': ['patient', 'diagnosis', 'treatment', 'medication', 'clinical', 'medical',
                         'hospital', 'doctor', 'surgery', 'therapeutic', 'pharmaceutical', 'disease',
                         'symptom', 'therapy', 'prescription', 'dosage', 'adverse', 'pathology'],
            'patterns': [r'\d+\s*mg', r'\d+\s*ml', r'ICD-\d+', r'[A-Z]{3,}\s+\d+']
        },
        'legal': {
            'keywords': ['contract', 'agreement', 'party', 'clause', 'hereby', 'whereas', 'pursuant',
                         'liability', 'jurisdiction', 'arbitration', 'plaintiff', 'defendant', 'court',
                         'law', 'regulation', 'statute', 'breach', 'damages', 'legal', 'attorney'],
            'patterns': [r'§\s*\d+', r'Article\s+\d+', r'\(\d+\)', r'Section\s+\d+\.\d+']
        },
        'technical': {
            'keywords': ['system', 'configuration', 'parameter', 'interface', 'protocol', 'function',
                         'module', 'component', 'specification', 'operation', 'procedure', 'mechanism',
                         'algorithm', 'implementation', 'hardware', 'software', 'network', 'database'],
            'patterns': [r'\d+\.\d+\.\d+', r'[A-Z]{2,}\d+', r'\w+\(\)', r'[A-Z_]{3,}']
        },
        'patent': {
            'keywords': ['invention', 'claim', 'embodiment', 'apparatus', 'method', 'comprising',
                         'wherein', 'patent', 'prior art', 'novelty', 'utility', 'figure', 'drawing',
                         'applicant', 'inventor', 'chemical', 'compound', 'formula'],
            'patterns': [r'Figure\s+\d+', r'claim\s+\d+', r'Fig\.\s*\d+', r'\([IVX]+\)']
        },
        'marketing': {
            'keywords': ['brand', 'customer', 'product', 'service', 'campaign', 'audience', 'market',
                         'engagement', 'strategy', 'creative', 'promotion', 'sales', 'consumer',
                         'advertising', 'content', 'message', 'value', 'experience'],
            'patterns': [r'®', r'™', r'©', r'\d+%\s+(?:more|less|increase|decrease)']
        },
        'financial': {
            'keywords': ['investment', 'revenue', 'profit', 'asset', 'liability', 'equity', 'financial',
                         'fiscal', 'budget', 'expense', 'income', 'balance', 'statement', 'accounting',
                         'audit', 'dividend', 'portfolio', 'securities', 'capital'],
            'patterns': [r'\$[\d,]+', r'€[\d,]+', r'£[\d,]+', r'\d+\.\d+%', r'Q[1-4]\s+\d{4}']
        }
    }

    def __init__(self):
        """Initialize the document analyzer"""
        self.segments = []
        self.analysis_cache = {}

    def analyze_segments(self, segments: List) -> Dict:
        """
        Comprehensive analysis of loaded document segments.

        Args:
            segments: List of Segment objects from the translation grid

        Returns:
            Dictionary containing analysis results:
            - domain: Detected domain(s)
            - terminology: Key terms and phrases
            - tone: Formality level and style
            - structure: Document organization
            - statistics: Word counts, segment counts, etc.
            - suggestions: Recommended prompt adjustments
        """
        self.segments = segments

        if not segments:
            return {
                'success': False,
                'error': 'No segments to analyze'
            }

        # Extract all source text
        source_texts = [seg.source for seg in segments if hasattr(seg, 'source') and seg.source]

        if not source_texts:
            return {
                'success': False,
                'error': 'No source text found in segments'
            }

        combined_text = ' '.join(source_texts)

        # Perform analysis
        analysis = {
            'success': True,
            'segment_count': len(segments),
            'domain': self._detect_domain(source_texts, combined_text),
            'terminology': self._extract_terminology(source_texts),
            'tone': self._assess_tone(combined_text),
            'structure': self._analyze_structure(segments, source_texts),
            'statistics': self._calculate_statistics(source_texts),
            'special_elements': self._detect_special_elements(combined_text),
            'suggestions': []
        }

        # Generate suggestions based on analysis
        analysis['suggestions'] = self._generate_suggestions(analysis)

        return analysis

    def _detect_domain(self, texts: List[str], combined_text: str) -> Dict:
        """Detect the primary domain(s) of the document"""
        domain_scores = defaultdict(int)

        combined_lower = combined_text.lower()

        for domain, data in self.DOMAIN_KEYWORDS.items():
            # Score based on keyword matches
            for keyword in data['keywords']:
                count = combined_lower.count(keyword)
                domain_scores[domain] += count * 2  # Keywords worth more

            # Score based on pattern matches
            for pattern in data['patterns']:
                matches = re.findall(pattern, combined_text)
                domain_scores[domain] += len(matches)

        # Normalize scores
        total_words = len(combined_text.split())
        if total_words > 0:
            domain_scores = {k: (v / total_words) * 1000 for k, v in domain_scores.items()}

        # Get top domains
        sorted_domains = sorted(domain_scores.items(), key=lambda x: x[1], reverse=True)

        primary = sorted_domains[0] if sorted_domains and sorted_domains[0][1] > 1 else None
        secondary = sorted_domains[1] if len(sorted_domains) > 1 and sorted_domains[1][1] > 0.5 else None

        return {
            'primary': primary[0] if primary else 'general',
            'primary_confidence': round(primary[1], 2) if primary else 0,
            'secondary': secondary[0] if secondary else None,
            'secondary_confidence': round(secondary[1], 2) if secondary else 0,
            'all_scores': dict(sorted_domains[:5])
        }

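    # Worked example (illustrative): for the text "The patient received
    # 50 mg of medication", the medical keywords 'patient' and 'medication'
    # contribute 2 points each and the pattern r'\d+\s*mg' adds 1, giving a
    # raw medical score of 5; at 7 words, the normalized score is
    # (5 / 7) * 1000 ≈ 714.
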
    def _extract_terminology(self, texts: List[str]) -> Dict:
        """Extract and analyze key terminology"""
        # Combine all text
        combined = ' '.join(texts)

        # Extract potential terms (capitalized words, technical terms, etc.)
        # Capitalized words (potential proper nouns or technical terms)
        capitalized = re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', combined)

        # Acronyms (but filter out Roman numerals and single letters)
        acronyms_raw = re.findall(r'\b[A-Z]{2,}\b', combined)
        acronyms = [a for a in acronyms_raw if not re.match(r'^[IVXLCDM]+$', a)]  # Filter Roman numerals

        # Technical terms with numbers (but must have letters too)
        # Pattern: word containing both letters and numbers, or camelCase, or underscore
        technical_raw = re.findall(r'\b(?:[a-zA-Z]+\d+[a-zA-Z\d]*|\d+[a-zA-Z]+[a-zA-Z\d]*|[a-z]+[A-Z][a-zA-Z]*|[a-z]+_[a-z]+)\b', combined)

        # Filter out pure numbers, very short terms (< 3 chars), and common
        # English/Dutch words
        technical = [t for t in technical_raw
                     if len(t) >= 3
                     and not t.isdigit()
                     and t.lower() not in ['the', 'and', 'for', 'are', 'van', 'het', 'een']]

        # Count frequencies
        cap_counter = Counter(capitalized)
        acro_counter = Counter(acronyms)
        tech_counter = Counter(technical)

        return {
            'capitalized_terms': dict(cap_counter.most_common(15)),  # Increased from 10 to 15
            'acronyms': dict(acro_counter.most_common(15)),
            'technical_terms': dict(tech_counter.most_common(15)),
            'total_unique_terms': len(set(capitalized + acronyms + technical))
        }

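    # Worked example (illustrative): in "The parser stores max_size and
    # retryCount before an HTTP request", 'HTTP' is counted as an acronym,
    # 'max_size' and 'retryCount' match the technical-term pattern, and the
    # sentence-initial 'The' is picked up by the capitalized-word pass (a
    # known limitation of this heuristic).
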
    def _assess_tone(self, text: str) -> Dict:
        """Assess the tone and formality of the text"""
        text_lower = text.lower()

        # Formal/Legal indicators (stronger weight for patents/legal)
        formal_indicators = ['hereby', 'pursuant', 'whereas', 'thereof', 'aforementioned',
                             'notwithstanding', 'shall', 'must', 'required', 'specified',
                             'comprising', 'wherein', 'embodiment', 'invention', 'claim']
        formal_count = sum(text_lower.count(ind) for ind in formal_indicators)

        # Informal indicators
        informal_indicators = ["don't", "can't", "won't", "it's", "you'll", "we're",
                               'really', 'just', 'pretty', 'quite', 'maybe', 'probably']
        informal_count = sum(text_lower.count(ind) for ind in informal_indicators)

        # Technical indicators
        technical_indicators = ['algorithm', 'parameter', 'configuration', 'implementation',
                                'interface', 'protocol', 'specification', 'mechanism',
                                'apparatus', 'method', 'device', 'system', 'process']
        technical_count = sum(text_lower.count(ind) for ind in technical_indicators)

        # Conversational indicators (but filter out common words in formal contexts)
        # Only count these if they appear in clearly conversational patterns
        conversational_patterns = [r'\byou can\b', r'\byour \w+\b', r'\bwe recommend\b',
                                   r'\blet\'s\b', r'\bwant to\b', r'\bneed to\b']
        conversational_count = sum(len(re.findall(pattern, text_lower)) for pattern in conversational_patterns)

        # Determine primary tone
        total_words = len(text.split())
        if total_words == 0:
            return {'tone': 'unknown', 'formality': 'unknown'}

        formal_ratio = (formal_count / total_words) * 1000
        informal_ratio = (informal_count / total_words) * 1000
        technical_ratio = (technical_count / total_words) * 1000
        conversational_ratio = (conversational_count / total_words) * 1000

        # Determine formality
        if formal_ratio > 5:
            formality = 'very formal'
        elif formal_ratio > 2 or technical_ratio > 3:
            formality = 'formal'
        elif informal_ratio > 3 or conversational_ratio > 10:
            formality = 'informal'
        else:
            formality = 'neutral'

        # Determine tone
        if technical_ratio > 2:
            tone = 'technical'
        elif conversational_ratio > 8:
            tone = 'conversational'
        elif formal_ratio > 3:
            tone = 'professional'
        else:
            tone = 'neutral'

        return {
            'tone': tone,
            'formality': formality,
            'formal_score': round(formal_ratio, 2),
            'informal_score': round(informal_ratio, 2),
            'technical_score': round(technical_ratio, 2),
            'conversational_score': round(conversational_ratio, 2)
        }

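    # Worked example (illustrative): a 1,000-word patent text in which
    # 'wherein' and 'comprising' occur 6 times in total has a formal_ratio
    # of (6 / 1000) * 1000 = 6, which crosses the 'very formal' threshold
    # (formal_ratio > 5).
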
    def _analyze_structure(self, segments: List, texts: List[str]) -> Dict:
        """Analyze document structure"""
        # Detect list items (bullet markers or numbered markers like "1." or "2)")
        list_items = sum(1 for text in texts if re.match(r'^\s*(?:[-•*]|\d+[.)])\s', text))

        # Detect headings (short segments, possibly all caps or title case)
        potential_headings = sum(1 for text in texts
                                 if len(text.split()) <= 10 and (text.isupper() or text.istitle()))

        # Detect references to figures/tables
        figure_refs = sum(len(re.findall(r'Figure\s+\d+|Fig\.\s*\d+|Table\s+\d+', text))
                          for text in texts)

        # Average segment length
        avg_length = sum(len(text.split()) for text in texts) / len(texts) if texts else 0

        return {
            'list_items': list_items,
            'potential_headings': potential_headings,
            'figure_references': figure_refs,
            'average_segment_length': round(avg_length, 1),
            'has_visual_elements': figure_refs > 0
        }

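    # Worked example (illustrative): segments such as "- install the package"
    # and "1. Introduction" both count as list items, while a short segment
    # like "SAFETY INSTRUCTIONS" counts as a potential heading.
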
    def _calculate_statistics(self, texts: List[str]) -> Dict:
        """Calculate basic statistics"""
        combined = ' '.join(texts)
        words = combined.split()

        return {
            'total_words': len(words),
            'total_characters': len(combined),
            'average_words_per_segment': round(len(words) / len(texts), 1) if texts else 0,
            'unique_words': len(set(word.lower() for word in words))
        }

    def _detect_special_elements(self, text: str) -> Dict:
        """Detect special elements in the text"""
        return {
            'urls': len(re.findall(r'https?://\S+', text)),
            'emails': len(re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', text)),
            'dates': len(re.findall(r'\b\d{1,2}[/-]\d{1,2}[/-]\d{2,4}\b', text)),
            'numbers': len(re.findall(r'\b\d+(?:\.\d+)?\b', text)),
            'percentages': len(re.findall(r'\d+(?:\.\d+)?%', text)),
            'currencies': len(re.findall(r'[$€£¥]\s*\d+', text)),
            'measurements': len(re.findall(r'\d+\s*(?:mm|cm|m|km|mg|g|kg|ml|l|°C|°F)', text))
        }

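    # Worked example (illustrative): "Delivery costs $1,200 (about 15.5% of
    # revenue) and the part measures 25 mm" yields one currency match, one
    # percentage match, and one measurement match.
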
    def _generate_suggestions(self, analysis: Dict) -> List[Dict]:
        """Generate prompt optimization suggestions based on analysis"""
        suggestions = []

        domain = analysis.get('domain', {})
        tone = analysis.get('tone', {})
        structure = analysis.get('structure', {})
        special = analysis.get('special_elements', {})

        # Domain-specific suggestions
        primary_domain = domain.get('primary', 'general')
        if primary_domain != 'general' and domain.get('primary_confidence', 0) > 2:
            suggestions.append({
                'type': 'domain',
                'priority': 'high',
                'title': f'Optimize for {primary_domain.title()} Domain',
                'description': f'Your document appears to be {primary_domain}-related. '
                               f'Consider using a specialized {primary_domain} translation prompt.',
                'action': f'switch_prompt_{primary_domain}'
            })

        # Tone suggestions
        if tone.get('formality') == 'very formal':
            suggestions.append({
                'type': 'tone',
                'priority': 'medium',
                'title': 'Very Formal Language Detected',
                'description': 'This document uses highly formal language. Ensure your prompt '
                               'emphasizes maintaining professional tone and formal register.',
                'action': 'add_formality_instruction'
            })

        # Visual elements
        if structure.get('figure_references', 0) > 0:
            suggestions.append({
                'type': 'visual',
                'priority': 'high',
                'title': 'Figure References Detected',
                'description': f'Found {structure["figure_references"]} references to figures. '
                               'Consider loading visual context in the Images tab.',
                'action': 'load_figure_context'
            })

        # Special elements
        if special.get('measurements', 0) > 5:
            suggestions.append({
                'type': 'formatting',
                'priority': 'medium',
                'title': 'Preserve Measurement Units',
                'description': 'Document contains many measurements. Add an instruction to '
                               'preserve units exactly as written.',
                'action': 'add_measurement_rule'
            })

        if special.get('currencies', 0) > 3:
            suggestions.append({
                'type': 'formatting',
                'priority': 'medium',
                'title': 'Currency Values Present',
                'description': 'Document contains currency values. Ensure the prompt specifies '
                               'how to handle currency symbols and amounts.',
                'action': 'add_currency_rule'
            })

        # Terminology
        terminology = analysis.get('terminology', {})
        unique_terms = terminology.get('total_unique_terms', 0)
        if unique_terms > 20:
            suggestions.append({
                'type': 'terminology',
                'priority': 'medium',
                'title': 'Rich Terminology Detected',
                'description': f'Found {unique_terms} unique technical/specialized terms. '
                               'Consider using a glossary for consistent translation.',
                'action': 'enable_glossary'
            })

        return suggestions

    def get_summary_text(self, analysis: Dict) -> str:
        """Generate human-readable summary of analysis"""
        if not analysis.get('success'):
            return "❌ Unable to analyze document: " + analysis.get('error', 'Unknown error')

        domain = analysis.get('domain', {})
        tone = analysis.get('tone', {})
        stats = analysis.get('statistics', {})
        structure = analysis.get('structure', {})

        summary = f"""📊 Document Analysis Results

📝 **Overview:**
- {analysis['segment_count']} segments
- {stats.get('total_words', 0):,} words total
- {stats.get('average_words_per_segment', 0)} words per segment on average

🎯 **Domain:** {domain.get('primary', 'General').title()}"""

        if domain.get('primary_confidence', 0) > 2:
            summary += f" (confidence: {domain['primary_confidence']:.1f})"

        if domain.get('secondary'):
            summary += f"\n   Secondary: {domain['secondary'].title()}"

        summary += f"""

✍️ **Tone & Style:**
- Formality: {tone.get('formality', 'unknown').title()}
- Style: {tone.get('tone', 'unknown').title()}

📋 **Structure:**
- List items: {structure.get('list_items', 0)}
- Potential headings: {structure.get('potential_headings', 0)}
- Figure references: {structure.get('figure_references', 0)}
"""

        # Add suggestions summary
        suggestions = analysis.get('suggestions', [])
        if suggestions:
            summary += f"\n💡 **Recommendations:** {len(suggestions)} suggestion(s) available"

        return summary
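
For orientation, here is a minimal usage sketch (illustrative, not taken from the package; it assumes the module is importable as modules.document_analyzer, as packaged above, and that segment objects expose a .source attribute, which is all analyze_segments reads):

    from types import SimpleNamespace

    from modules.document_analyzer import DocumentAnalyzer

    # Stand-in segments; the real application passes Segment objects
    # from its translation grid.
    segments = [
        SimpleNamespace(source="The patient received 50 mg of medication twice daily."),
        SimpleNamespace(source="Adverse symptoms must be reported to the clinical team."),
    ]

    analyzer = DocumentAnalyzer()
    analysis = analyzer.analyze_segments(segments)

    if analysis['success']:
        print(analysis['domain']['primary'])        # expected: 'medical'
        print(analyzer.get_summary_text(analysis))  # human-readable report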