cite-agent 1.3.9__py3-none-any.whl → 1.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. cite_agent/__init__.py +13 -13
  2. cite_agent/__version__.py +1 -1
  3. cite_agent/action_first_mode.py +150 -0
  4. cite_agent/adaptive_providers.py +413 -0
  5. cite_agent/archive_api_client.py +186 -0
  6. cite_agent/auth.py +0 -1
  7. cite_agent/auto_expander.py +70 -0
  8. cite_agent/cache.py +379 -0
  9. cite_agent/circuit_breaker.py +370 -0
  10. cite_agent/citation_network.py +377 -0
  11. cite_agent/cli.py +8 -16
  12. cite_agent/cli_conversational.py +113 -3
  13. cite_agent/confidence_calibration.py +381 -0
  14. cite_agent/deduplication.py +325 -0
  15. cite_agent/enhanced_ai_agent.py +689 -371
  16. cite_agent/error_handler.py +228 -0
  17. cite_agent/execution_safety.py +329 -0
  18. cite_agent/full_paper_reader.py +239 -0
  19. cite_agent/observability.py +398 -0
  20. cite_agent/offline_mode.py +348 -0
  21. cite_agent/paper_comparator.py +368 -0
  22. cite_agent/paper_summarizer.py +420 -0
  23. cite_agent/pdf_extractor.py +350 -0
  24. cite_agent/proactive_boundaries.py +266 -0
  25. cite_agent/quality_gate.py +442 -0
  26. cite_agent/request_queue.py +390 -0
  27. cite_agent/response_enhancer.py +257 -0
  28. cite_agent/response_formatter.py +458 -0
  29. cite_agent/response_pipeline.py +295 -0
  30. cite_agent/response_style_enhancer.py +259 -0
  31. cite_agent/self_healing.py +418 -0
  32. cite_agent/similarity_finder.py +524 -0
  33. cite_agent/streaming_ui.py +13 -9
  34. cite_agent/thinking_blocks.py +308 -0
  35. cite_agent/tool_orchestrator.py +416 -0
  36. cite_agent/trend_analyzer.py +540 -0
  37. cite_agent/unpaywall_client.py +226 -0
  38. {cite_agent-1.3.9.dist-info → cite_agent-1.4.3.dist-info}/METADATA +15 -1
  39. cite_agent-1.4.3.dist-info/RECORD +62 -0
  40. cite_agent-1.3.9.dist-info/RECORD +0 -32
  41. {cite_agent-1.3.9.dist-info → cite_agent-1.4.3.dist-info}/WHEEL +0 -0
  42. {cite_agent-1.3.9.dist-info → cite_agent-1.4.3.dist-info}/entry_points.txt +0 -0
  43. {cite_agent-1.3.9.dist-info → cite_agent-1.4.3.dist-info}/licenses/LICENSE +0 -0
  44. {cite_agent-1.3.9.dist-info → cite_agent-1.4.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,350 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ PDF Extraction Service - KILLER FEATURE
4
+ Extracts full text from academic papers so you don't have to read them!
5
+ """
6
+
7
+ import io
8
+ import logging
9
+ import re
10
+ from typing import Dict, List, Optional, Any
11
+ from pathlib import Path
12
+
13
+ try:
14
+ import PyPDF2
15
+ import pdfplumber
16
+ import fitz # PyMuPDF
17
+ PDF_LIBRARIES_AVAILABLE = True
18
+ except ImportError:
19
+ PDF_LIBRARIES_AVAILABLE = False
20
+
21
+ import requests
22
+ from dataclasses import dataclass
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
@dataclass
class ExtractedPaper:
    """Structured result of one PDF extraction run.

    Every content field defaults to None/0 so a partial extraction (or a
    failure, recorded in ``error_message``) is still representable.
    """
    title: Optional[str] = None
    abstract: Optional[str] = None
    introduction: Optional[str] = None
    methodology: Optional[str] = None
    results: Optional[str] = None
    discussion: Optional[str] = None
    conclusion: Optional[str] = None
    # Citation lines detected in the References section (not full text).
    references: Optional[List[str]] = None
    full_text: Optional[str] = None
    # Per-table summaries: page number, row count, first rows of data.
    tables: Optional[List[Dict[str, Any]]] = None
    figures_count: int = 0
    page_count: int = 0
    word_count: int = 0
    # Which backend produced this result: pymupdf / pdfplumber / pypdf2.
    extraction_method: str = "unknown"
    extraction_quality: str = "unknown"  # high, medium, low
    # Set when download or extraction failed; content fields stay None.
    error_message: Optional[str] = None
46
+
47
+
48
+ class PDFExtractor:
49
+ """Extract and analyze full text from academic PDFs"""
50
+
51
+ def __init__(self):
52
+ if not PDF_LIBRARIES_AVAILABLE:
53
+ logger.warning("PDF libraries not installed. Install: pip install pypdf2 pdfplumber pymupdf")
54
+
55
+ self.max_file_size_mb = 50 # Don't download PDFs larger than 50MB
56
+ self.timeout_seconds = 30
57
+
58
+ async def extract_from_url(self, pdf_url: str) -> ExtractedPaper:
59
+ """
60
+ Download and extract full text from PDF URL
61
+
62
+ Args:
63
+ pdf_url: Direct link to PDF file
64
+
65
+ Returns:
66
+ ExtractedPaper with full content
67
+ """
68
+ if not PDF_LIBRARIES_AVAILABLE:
69
+ return ExtractedPaper(
70
+ error_message="PDF extraction libraries not installed",
71
+ extraction_quality="low"
72
+ )
73
+
74
+ try:
75
+ # Download PDF
76
+ logger.info(f"Downloading PDF from {pdf_url}")
77
+ response = requests.get(
78
+ pdf_url,
79
+ timeout=self.timeout_seconds,
80
+ headers={'User-Agent': 'Mozilla/5.0 (Research Bot)'},
81
+ stream=True
82
+ )
83
+ response.raise_for_status()
84
+
85
+ # Check file size
86
+ content_length = response.headers.get('content-length')
87
+ if content_length and int(content_length) > self.max_file_size_mb * 1024 * 1024:
88
+ return ExtractedPaper(
89
+ error_message=f"PDF too large ({int(content_length)/(1024*1024):.1f}MB > {self.max_file_size_mb}MB)",
90
+ extraction_quality="low"
91
+ )
92
+
93
+ pdf_bytes = response.content
94
+
95
+ # Try extraction methods in order of quality
96
+ # 1. PyMuPDF (best quality, fastest)
97
+ extracted = self._extract_with_pymupdf(pdf_bytes)
98
+ if extracted.extraction_quality == "high":
99
+ return extracted
100
+
101
+ # 2. pdfplumber (good for tables and layout)
102
+ extracted = self._extract_with_pdfplumber(pdf_bytes)
103
+ if extracted.extraction_quality in ("high", "medium"):
104
+ return extracted
105
+
106
+ # 3. PyPDF2 (basic fallback)
107
+ extracted = self._extract_with_pypdf2(pdf_bytes)
108
+ return extracted
109
+
110
+ except requests.Timeout:
111
+ return ExtractedPaper(
112
+ error_message="PDF download timeout",
113
+ extraction_quality="low"
114
+ )
115
+ except requests.RequestException as e:
116
+ return ExtractedPaper(
117
+ error_message=f"PDF download failed: {str(e)}",
118
+ extraction_quality="low"
119
+ )
120
+ except Exception as e:
121
+ logger.error(f"PDF extraction error: {e}")
122
+ return ExtractedPaper(
123
+ error_message=f"Extraction error: {str(e)}",
124
+ extraction_quality="low"
125
+ )
126
+
127
+ def _extract_with_pymupdf(self, pdf_bytes: bytes) -> ExtractedPaper:
128
+ """Extract using PyMuPDF (fitz) - fastest and most accurate"""
129
+ try:
130
+ doc = fitz.open(stream=pdf_bytes, filetype="pdf")
131
+
132
+ full_text = ""
133
+ for page in doc:
134
+ full_text += page.get_text()
135
+
136
+ # Parse sections
137
+ sections = self._parse_sections(full_text)
138
+
139
+ # Count stats
140
+ word_count = len(full_text.split())
141
+ page_count = len(doc)
142
+
143
+ # Extract tables (basic)
144
+ tables = []
145
+ for page in doc:
146
+ tabs = page.find_tables()
147
+ if tabs:
148
+ for tab in tabs:
149
+ tables.append({
150
+ 'page': page.number + 1,
151
+ 'rows': len(tab.extract()),
152
+ 'data': tab.extract()[:5] # First 5 rows only
153
+ })
154
+
155
+ doc.close()
156
+
157
+ quality = "high" if word_count > 500 else "medium"
158
+
159
+ return ExtractedPaper(
160
+ full_text=full_text,
161
+ title=sections.get('title'),
162
+ abstract=sections.get('abstract'),
163
+ introduction=sections.get('introduction'),
164
+ methodology=sections.get('methodology'),
165
+ results=sections.get('results'),
166
+ discussion=sections.get('discussion'),
167
+ conclusion=sections.get('conclusion'),
168
+ references=sections.get('references'),
169
+ tables=tables if tables else None,
170
+ page_count=page_count,
171
+ word_count=word_count,
172
+ extraction_method="pymupdf",
173
+ extraction_quality=quality
174
+ )
175
+
176
+ except Exception as e:
177
+ logger.warning(f"PyMuPDF extraction failed: {e}")
178
+ return ExtractedPaper(
179
+ error_message=f"PyMuPDF failed: {str(e)}",
180
+ extraction_quality="low"
181
+ )
182
+
183
+ def _extract_with_pdfplumber(self, pdf_bytes: bytes) -> ExtractedPaper:
184
+ """Extract using pdfplumber - good for tables"""
185
+ try:
186
+ pdf = pdfplumber.open(io.BytesIO(pdf_bytes))
187
+
188
+ full_text = ""
189
+ tables = []
190
+
191
+ for page_num, page in enumerate(pdf.pages, start=1):
192
+ # Extract text
193
+ text = page.extract_text()
194
+ if text:
195
+ full_text += text + "\n"
196
+
197
+ # Extract tables
198
+ page_tables = page.extract_tables()
199
+ if page_tables:
200
+ for table in page_tables:
201
+ tables.append({
202
+ 'page': page_num,
203
+ 'rows': len(table),
204
+ 'data': table[:5] # First 5 rows
205
+ })
206
+
207
+ pdf.close()
208
+
209
+ # Parse sections
210
+ sections = self._parse_sections(full_text)
211
+ word_count = len(full_text.split())
212
+
213
+ quality = "high" if word_count > 500 else "medium"
214
+
215
+ return ExtractedPaper(
216
+ full_text=full_text,
217
+ title=sections.get('title'),
218
+ abstract=sections.get('abstract'),
219
+ introduction=sections.get('introduction'),
220
+ methodology=sections.get('methodology'),
221
+ results=sections.get('results'),
222
+ discussion=sections.get('discussion'),
223
+ conclusion=sections.get('conclusion'),
224
+ references=sections.get('references'),
225
+ tables=tables if tables else None,
226
+ page_count=len(pdf.pages),
227
+ word_count=word_count,
228
+ extraction_method="pdfplumber",
229
+ extraction_quality=quality
230
+ )
231
+
232
+ except Exception as e:
233
+ logger.warning(f"pdfplumber extraction failed: {e}")
234
+ return ExtractedPaper(
235
+ error_message=f"pdfplumber failed: {str(e)}",
236
+ extraction_quality="low"
237
+ )
238
+
239
+ def _extract_with_pypdf2(self, pdf_bytes: bytes) -> ExtractedPaper:
240
+ """Extract using PyPDF2 - basic fallback"""
241
+ try:
242
+ pdf = PyPDF2.PdfReader(io.BytesIO(pdf_bytes))
243
+
244
+ full_text = ""
245
+ for page in pdf.pages:
246
+ text = page.extract_text()
247
+ if text:
248
+ full_text += text + "\n"
249
+
250
+ # Parse sections
251
+ sections = self._parse_sections(full_text)
252
+ word_count = len(full_text.split())
253
+
254
+ quality = "medium" if word_count > 500 else "low"
255
+
256
+ return ExtractedPaper(
257
+ full_text=full_text,
258
+ title=sections.get('title'),
259
+ abstract=sections.get('abstract'),
260
+ introduction=sections.get('introduction'),
261
+ methodology=sections.get('methodology'),
262
+ results=sections.get('results'),
263
+ discussion=sections.get('discussion'),
264
+ conclusion=sections.get('conclusion'),
265
+ references=sections.get('references'),
266
+ page_count=len(pdf.pages),
267
+ word_count=word_count,
268
+ extraction_method="pypdf2",
269
+ extraction_quality=quality
270
+ )
271
+
272
+ except Exception as e:
273
+ logger.warning(f"PyPDF2 extraction failed: {e}")
274
+ return ExtractedPaper(
275
+ error_message=f"PyPDF2 failed: {str(e)}",
276
+ extraction_quality="low"
277
+ )
278
+
279
+ def _parse_sections(self, full_text: str) -> Dict[str, Optional[str]]:
280
+ """
281
+ Parse academic paper sections from full text
282
+ Uses common section headers to split the paper
283
+ """
284
+ sections = {}
285
+
286
+ # Common section patterns (case-insensitive)
287
+ patterns = {
288
+ 'abstract': r'(?i)\bABSTRACT\b',
289
+ 'introduction': r'(?i)\b(INTRODUCTION|1\.\s*INTRODUCTION)\b',
290
+ 'methodology': r'(?i)\b(METHODOLOGY|METHODS|MATERIALS AND METHODS|2\.\s*METHOD)\b',
291
+ 'results': r'(?i)\b(RESULTS|FINDINGS|3\.\s*RESULTS)\b',
292
+ 'discussion': r'(?i)\b(DISCUSSION|4\.\s*DISCUSSION)\b',
293
+ 'conclusion': r'(?i)\b(CONCLUSION|CONCLUSIONS|5\.\s*CONCLUSION)\b',
294
+ 'references': r'(?i)\b(REFERENCES|BIBLIOGRAPHY)\b'
295
+ }
296
+
297
+ # Find all section positions
298
+ section_positions = {}
299
+ for section_name, pattern in patterns.items():
300
+ match = re.search(pattern, full_text)
301
+ if match:
302
+ section_positions[section_name] = match.start()
303
+
304
+ # Sort sections by position
305
+ sorted_sections = sorted(section_positions.items(), key=lambda x: x[1])
306
+
307
+ # Extract text between sections
308
+ for i, (section_name, start_pos) in enumerate(sorted_sections):
309
+ # Get end position (start of next section, or end of text)
310
+ if i + 1 < len(sorted_sections):
311
+ end_pos = sorted_sections[i + 1][1]
312
+ else:
313
+ end_pos = len(full_text)
314
+
315
+ # Extract section text
316
+ section_text = full_text[start_pos:end_pos].strip()
317
+
318
+ # Remove section header from text
319
+ section_text = re.sub(patterns[section_name], '', section_text, count=1).strip()
320
+
321
+ # Limit length (first 3000 chars per section)
322
+ if len(section_text) > 3000:
323
+ section_text = section_text[:3000] + "... [truncated]"
324
+
325
+ sections[section_name] = section_text if section_text else None
326
+
327
+ # Extract title (usually first few lines)
328
+ title_match = re.search(r'^(.+?)(?:\n\n|\n[A-Z])', full_text, re.MULTILINE)
329
+ if title_match:
330
+ title = title_match.group(1).strip()
331
+ # Clean up title
332
+ title = re.sub(r'\s+', ' ', title)
333
+ if len(title) > 200:
334
+ title = title[:200]
335
+ sections['title'] = title
336
+
337
+ # Extract references (last section, list of citations)
338
+ if 'references' in sections and sections['references']:
339
+ ref_text = sections['references']
340
+ # Split by newlines and filter
341
+ refs = [line.strip() for line in ref_text.split('\n') if line.strip()]
342
+ # Keep only lines that look like citations (have year and authors)
343
+ citations = [ref for ref in refs if re.search(r'\b(19|20)\d{2}\b', ref)]
344
+ sections['references'] = citations[:20] # First 20 refs
345
+
346
+ return sections
347
+
348
+
349
# Global instance
# Module-level singleton so importers share one extractor configuration.
pdf_extractor = PDFExtractor()
@@ -0,0 +1,266 @@
1
+ """
2
+ Proactive Action Boundaries
3
+
4
+ Defines what actions the agent can do automatically vs what needs explicit permission
5
+
6
+ PHILOSOPHY: Be proactive with READ operations, cautious with WRITE operations
7
+ """
8
+
9
import re
from typing import Any, Dict, List, Set
11
+
12
+
13
class ProactiveBoundaries:
    """
    Defines safe boundaries for proactive agent behavior

    SAFE TO AUTO-DO (read-only, informational):
    - These enhance user experience without risk

    NEEDS PERMISSION (write/destructive):
    - These could cause problems if done incorrectly
    """

    # Commands/actions that are SAFE to do proactively
    SAFE_AUTO_ACTIONS: Set[str] = {
        # File operations (read-only)
        'list_files',
        'read_file',
        'preview_file',
        'search_in_files',
        'find_files',
        'show_file_info',
        'cat',
        'head',
        'tail',
        'less',
        'grep',
        'find',

        # Directory operations (read-only)
        'list_directory',
        'show_directory_tree',
        'navigate_directory',  # cd is safe
        'pwd',
        'ls',
        'tree',

        # Code analysis (read-only)
        'explain_code',
        'show_functions',
        'analyze_structure',
        'find_definitions',

        # Data operations (read-only)
        'query_api',
        'fetch_data',
        'show_stats',
        'search_papers',
        'get_financial_data',

        # Git operations (read-only)
        'git_status',
        'git_log',
        'git_diff',
        'git_show',
        'git_blame',

        # System info (read-only)
        'show_env',
        'check_dependencies',
        'list_processes',
    }

    # Commands/actions that NEED EXPLICIT PERMISSION
    NEEDS_PERMISSION: Set[str] = {
        # File operations (write/destructive)
        'create_file',
        'delete_file',
        'modify_file',
        'move_file',
        'rename_file',
        'chmod',
        'chown',
        'touch',
        'mkdir',
        'rmdir',
        'rm',
        'mv',
        'cp',  # Can overwrite

        # Code execution (potentially dangerous)
        'run_script',
        'execute_code',
        'eval',
        'exec',

        # Package management
        'install_package',
        'uninstall_package',
        'update_packages',
        'pip',
        'npm',
        'apt',
        'brew',

        # Git operations (write)
        'git_add',
        'git_commit',
        'git_push',
        'git_pull',
        'git_merge',
        'git_rebase',
        'git_reset',

        # Network operations (write/external)
        'send_request',
        'post_data',
        'upload_file',
        'download_file',

        # System operations
        'change_settings',
        'modify_config',
        'kill_process',
        'start_service',
        'stop_service',
    }

    @classmethod
    def is_safe_to_auto_do(cls, action: str) -> bool:
        """
        Check if action is safe to do automatically

        Returns:
            True if safe to do proactively
            False if needs explicit user permission
        """
        action_lower = action.lower()

        # Exact matches take priority over pattern heuristics
        if action_lower in cls.SAFE_AUTO_ACTIONS:
            return True

        if action_lower in cls.NEEDS_PERMISSION:
            return False

        # Check DANGEROUS patterns BEFORE the loose safe-keyword patterns.
        # The safe list matches broad substrings like "list"/"show", so an
        # action such as "delete_list" or "create_list" would otherwise be
        # wrongly approved. For a permission boundary, overlap must resolve
        # to "ask first".
        dangerous_patterns = [
            r'^(rm|mv|cp|touch|mkdir|chmod)',
            r'^git\s+(add|commit|push|pull|merge|rebase|reset)',
            r'(delete|remove|modify|edit|create|install|update)',
            r'^(pip|npm|apt|brew)',
        ]

        for pattern in dangerous_patterns:
            if re.search(pattern, action_lower):
                return False

        # Safe patterns (read-only verbs and commands)
        safe_patterns = [
            r'^(ls|pwd|cd|find|grep|cat|head|tail|less)',
            r'^git\s+(status|log|diff|show|blame)',
            r'search|find|list|show|display|preview|read',
        ]

        for pattern in safe_patterns:
            if re.search(pattern, action_lower):
                return True

        # Default: be conservative - if unsure, ask permission
        return False

    @classmethod
    def get_auto_expansion_for_query(cls, query: str, initial_result: str) -> Dict[str, Any]:
        """
        Determine what automatic expansion to do based on query and initial result

        Returns dict with:
        - should_expand: bool
        - expansion_actions: List[str] - actions to take automatically
        - reason: str - why expanding
        """
        query_lower = query.lower()
        result_lower = initial_result.lower()

        expansions = {
            'should_expand': False,
            'expansion_actions': [],
            'reason': ''
        }

        # Pattern 1: Listed files → preview main one
        if any(word in query_lower for word in ['list', 'show', 'find']) and \
           any(word in query_lower for word in ['file', 'files', 'py', 'js']):

            # Check if result is just a list (short, has bullets/lines, BUT no code/details)
            has_code_block = '```' in initial_result
            has_detailed_descriptions = ' - ' in initial_result or ': ' in result_lower
            is_short_list = len(initial_result) < 300 and ('•' in initial_result or '\n' in initial_result)

            if is_short_list and not has_code_block and not has_detailed_descriptions:
                expansions['should_expand'] = True
                expansions['expansion_actions'] = ['preview_main_file']
                expansions['reason'] = 'Listed files but no content shown - auto-preview main file'

        # Pattern 2: Found papers → show abstracts
        if 'paper' in query_lower and 'found' in result_lower:
            if 'abstract' not in result_lower:
                expansions['should_expand'] = True
                expansions['expansion_actions'] = ['show_paper_abstracts']
                expansions['reason'] = 'Found papers but no abstracts - auto-show summaries'

        # Pattern 3: Code query → show examples
        if any(word in query_lower for word in ['function', 'class', 'code']) and \
           'how' in query_lower:
            if '```' not in initial_result and len(initial_result) < 200:
                expansions['should_expand'] = True
                expansions['expansion_actions'] = ['show_code_examples']
                expansions['reason'] = 'Code explanation without examples - auto-show code'

        # Pattern 4: Data query → show sample/visualization
        if any(word in query_lower for word in ['data', 'revenue', 'metrics', 'stats']):
            if len(initial_result) < 150:  # Just a number, not detailed
                expansions['should_expand'] = True
                expansions['expansion_actions'] = ['show_data_breakdown']
                expansions['reason'] = 'Data query with minimal detail - auto-show breakdown'

        return expansions

    @classmethod
    def validate_proactive_action(cls, action: str, context: Dict) -> Dict[str, Any]:
        """
        Validate if a proactive action should be allowed

        Returns:
            - allowed: bool
            - reason: str
            - requires_confirmation: bool
        """
        is_safe = cls.is_safe_to_auto_do(action)

        if is_safe:
            return {
                'allowed': True,
                'reason': 'Safe read-only operation',
                'requires_confirmation': False
            }
        else:
            return {
                'allowed': False,
                'reason': 'Write/destructive operation requires explicit permission',
                'requires_confirmation': True
            }
254
+ }
255
+
256
+
257
+ # Convenience functions
258
def is_safe_to_auto_do(action: str) -> bool:
    """Module-level shortcut: may this action run without asking the user?"""
    verdict = ProactiveBoundaries.is_safe_to_auto_do(action)
    return verdict
261
+
262
+
263
def should_auto_expand(query: str, result: str) -> bool:
    """Module-level shortcut: should this thin result be expanded automatically?"""
    plan = ProactiveBoundaries.get_auto_expansion_for_query(query, result)
    return plan['should_expand']