arionxiv-1.0.32-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- arionxiv/__init__.py +40 -0
- arionxiv/__main__.py +10 -0
- arionxiv/arxiv_operations/__init__.py +0 -0
- arionxiv/arxiv_operations/client.py +225 -0
- arionxiv/arxiv_operations/fetcher.py +173 -0
- arionxiv/arxiv_operations/searcher.py +122 -0
- arionxiv/arxiv_operations/utils.py +293 -0
- arionxiv/cli/__init__.py +4 -0
- arionxiv/cli/commands/__init__.py +1 -0
- arionxiv/cli/commands/analyze.py +587 -0
- arionxiv/cli/commands/auth.py +365 -0
- arionxiv/cli/commands/chat.py +714 -0
- arionxiv/cli/commands/daily.py +482 -0
- arionxiv/cli/commands/fetch.py +217 -0
- arionxiv/cli/commands/library.py +295 -0
- arionxiv/cli/commands/preferences.py +426 -0
- arionxiv/cli/commands/search.py +254 -0
- arionxiv/cli/commands/settings_unified.py +1407 -0
- arionxiv/cli/commands/trending.py +41 -0
- arionxiv/cli/commands/welcome.py +168 -0
- arionxiv/cli/main.py +407 -0
- arionxiv/cli/ui/__init__.py +1 -0
- arionxiv/cli/ui/global_theme_manager.py +173 -0
- arionxiv/cli/ui/logo.py +127 -0
- arionxiv/cli/ui/splash.py +89 -0
- arionxiv/cli/ui/theme.py +32 -0
- arionxiv/cli/ui/theme_system.py +391 -0
- arionxiv/cli/utils/__init__.py +54 -0
- arionxiv/cli/utils/animations.py +522 -0
- arionxiv/cli/utils/api_client.py +583 -0
- arionxiv/cli/utils/api_config.py +505 -0
- arionxiv/cli/utils/command_suggestions.py +147 -0
- arionxiv/cli/utils/db_config_manager.py +254 -0
- arionxiv/github_actions_runner.py +206 -0
- arionxiv/main.py +23 -0
- arionxiv/prompts/__init__.py +9 -0
- arionxiv/prompts/prompts.py +247 -0
- arionxiv/rag_techniques/__init__.py +8 -0
- arionxiv/rag_techniques/basic_rag.py +1531 -0
- arionxiv/scheduler_daemon.py +139 -0
- arionxiv/server.py +1000 -0
- arionxiv/server_main.py +24 -0
- arionxiv/services/__init__.py +73 -0
- arionxiv/services/llm_client.py +30 -0
- arionxiv/services/llm_inference/__init__.py +58 -0
- arionxiv/services/llm_inference/groq_client.py +469 -0
- arionxiv/services/llm_inference/llm_utils.py +250 -0
- arionxiv/services/llm_inference/openrouter_client.py +564 -0
- arionxiv/services/unified_analysis_service.py +872 -0
- arionxiv/services/unified_auth_service.py +457 -0
- arionxiv/services/unified_config_service.py +456 -0
- arionxiv/services/unified_daily_dose_service.py +823 -0
- arionxiv/services/unified_database_service.py +1633 -0
- arionxiv/services/unified_llm_service.py +366 -0
- arionxiv/services/unified_paper_service.py +604 -0
- arionxiv/services/unified_pdf_service.py +522 -0
- arionxiv/services/unified_prompt_service.py +344 -0
- arionxiv/services/unified_scheduler_service.py +589 -0
- arionxiv/services/unified_user_service.py +954 -0
- arionxiv/utils/__init__.py +51 -0
- arionxiv/utils/api_helpers.py +200 -0
- arionxiv/utils/file_cleanup.py +150 -0
- arionxiv/utils/ip_helper.py +96 -0
- arionxiv-1.0.32.dist-info/METADATA +336 -0
- arionxiv-1.0.32.dist-info/RECORD +69 -0
- arionxiv-1.0.32.dist-info/WHEEL +5 -0
- arionxiv-1.0.32.dist-info/entry_points.txt +4 -0
- arionxiv-1.0.32.dist-info/licenses/LICENSE +21 -0
- arionxiv-1.0.32.dist-info/top_level.txt +1 -0
arionxiv/cli/commands/analyze.py
@@ -0,0 +1,587 @@
+"""Analyze command for ArionXiv CLI"""
+
+import sys
+import asyncio
+import warnings
+import logging
+from pathlib import Path
+from datetime import datetime
+
+warnings.filterwarnings("ignore", category=FutureWarning)
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+backend_path = Path(__file__).parent.parent.parent
+sys.path.insert(0, str(backend_path))
+
+import click
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+from rich.panel import Panel
+from rich.columns import Columns
+from rich.text import Text
+from rich.markdown import Markdown
+from typing import Optional
+
+from ...arxiv_operations.client import arxiv_client
+from ...arxiv_operations.fetcher import arxiv_fetcher
+from ...arxiv_operations.utils import ArxivUtils
+from ...services.unified_pdf_service import pdf_processor
+from ...services.unified_analysis_service import unified_analysis_service
+from ..utils.api_client import api_client, APIClientError
+from ..ui.theme import create_themed_console, print_header, style_text, print_success, print_error, print_warning, get_theme_colors
+from ..utils.command_suggestions import show_command_suggestions
+from ..utils.animations import left_to_right_reveal
+
+console = create_themed_console()
+logger = logging.getLogger(__name__)
+
+# Maximum papers a user can save
+MAX_USER_PAPERS = 10
+
+@click.command()
+@click.argument('query') # Changed from paper_id to query
+@click.option('--analysis-type', '-t', type=click.Choice(['summary', 'detailed', 'technical', 'insights']),
+              default='summary', help='Type of analysis to perform')
+@click.option('--save-results', '-s', is_flag=True, help='Save analysis results to file')
+@click.option('--use-local', '-l', is_flag=True, help='Use local PDF if available')
+def analyze_command(query: str, analysis_type: str, save_results: bool, use_local: bool):
+    """
+    Analyze a research paper with AI
+
+    You can provide either:
+    - arXiv ID (e.g., 2301.07041)
+    - Paper title or keywords (e.g., "attention is all you need")
+
+    Examples:
+    \b
+    arionxiv analyze "attention is all you need"
+    arionxiv analyze 2301.07041 --analysis-type detailed
+    arionxiv analyze "transformer architecture" --save-results
+    arionxiv analyze "neural machine translation" --analysis-type insights
+    """
+    # Run with proper session cleanup
+    async def run_analysis():
+        try:
+            await _analyze_paper(query, analysis_type, save_results, use_local)
+        finally:
+            # Clean up any remaining async sessions
+            try:
+                import gc
+                import asyncio
+                import aiohttp
+
+                # Close all unclosed sessions
+                for obj in gc.get_objects():
+                    if isinstance(obj, aiohttp.ClientSession) and not obj.closed:
+                        try:
+                            await obj.close()
+                        except:
+                            pass
+
+                # Give aiohttp time to clean up connections
+                await asyncio.sleep(0.1)
+                gc.collect()
+            except:
+                pass # Ignore cleanup errors
+
+    try:
+        asyncio.run(run_analysis())
+    except RuntimeError as e:
+        if "cannot be called from a running event loop" in str(e):
+            # Handle already running event loop
+            import nest_asyncio
+            nest_asyncio.apply()
+            asyncio.run(run_analysis())
+        else:
+            console = create_themed_console()
+            colors = get_theme_colors()
+            left_to_right_reveal(console, f"Analysis error: {str(e)}", style=colors['error'])
+    except Exception as e:
+        console = create_themed_console()
+        colors = get_theme_colors()
+        left_to_right_reveal(console, f"Analysis error: {str(e)}", style=colors['error'])
+
+async def _analyze_paper(query: str, analysis_type: str, save_results: bool, use_local: bool):
+    """Execute the paper analysis - handles both arXiv IDs and natural language queries"""
+
+    logger.info(f"Starting analysis: query='{query}', type={analysis_type}, save={save_results}")
+
+    # Get theme colors for consistent styling
+    from ..ui.theme import get_theme_colors
+    colors = get_theme_colors()
+
+    # Determine if query is an arXiv ID or search term
+    import re
+    arxiv_id_pattern = r'^\d{4}\.\d{4,5}(v\d+)?$'
+
+    if re.match(arxiv_id_pattern, query):
+        logger.debug(f"Query recognized as arXiv ID: {query}")
+        # Direct arXiv ID provided
+        clean_paper_id = ArxivUtils.normalize_arxiv_id(query)
+        paper_metadata = None
+    else:
+        # Search query provided - find the most relevant paper
+        left_to_right_reveal(console, f"Searching for papers matching: '{query}'...", style="white")
+        search_results = arxiv_client.search_papers(query, max_results=5)
+
+        if not search_results:
+            left_to_right_reveal(console, f"No papers found matching '{query}'. Please try a different search term.", style=colors['error'])
+            return
+
+        # Show search results and let user choose (for now, auto-select first result)
+        papers = search_results # search_results is already a list of papers
+        selected_paper = papers[0] # Auto-select most relevant
+
+        left_to_right_reveal(console, f"Found paper: {selected_paper.get('title', 'Unknown')}", style=colors['primary'])
+
+        clean_paper_id = ArxivUtils.normalize_arxiv_id(selected_paper.get('arxiv_id', ''))
+        paper_metadata = selected_paper
+
+    with Progress(
+        SpinnerColumn(),
+        TextColumn("[progress.description]{task.description}"),
+        console=console
+    ) as progress:
+
+        try:
+            # Step 1: Get paper metadata (if not already retrieved from search)
+            metadata_task = progress.add_task("Fetching paper metadata...", total=None)
+
+            if paper_metadata is None:
+                paper_metadata = arxiv_client.get_paper_by_id(clean_paper_id)
+                if not paper_metadata:
+                    progress.remove_task(metadata_task)
+                    left_to_right_reveal(console, f"Paper not found: {clean_paper_id}", style=colors['error'])
+                    return
+
+            # Ensure arxiv_id is set correctly in paper_metadata (for later save)
+            paper_metadata['arxiv_id'] = clean_paper_id
+
+            progress.update(metadata_task, description="Metadata fetched")
+            progress.remove_task(metadata_task)
+
+            # Step 2: Get paper content
+            content_task = progress.add_task("Preparing paper content...", total=None)
+
+            paper_text = None
+            pdf_path = None
+            text_file = None
+
+            # Try to use local file first if requested
+            if use_local:
+                downloads_dir = Path(backend_path.parent) / "downloads"
+                local_files = list(downloads_dir.glob(f"{clean_paper_id}*.txt"))
+                if local_files:
+                    text_file = local_files[0]
+                    with open(text_file, 'r', encoding='utf-8') as f:
+                        paper_text = f.read()
+                    progress.update(content_task, description="Using local text file")
+                else:
+                    left_to_right_reveal(console, "No local text file found, downloading PDF...", style=colors['warning'])
+
+            # If no local text, download and extract
+            if not paper_text:
+                # Download PDF
+                pdf_url = f"https://arxiv.org/pdf/{clean_paper_id}.pdf"
+                download_result = await arxiv_fetcher.fetch_paper_pdf(clean_paper_id, pdf_url)
+
+                if not download_result:
+                    progress.remove_task(content_task)
+                    left_to_right_reveal(console, "Failed to download paper: PDF download failed", style=colors['error'])
+                    return
+
+                # Extract text
+                pdf_path = download_result
+                paper_text = await pdf_processor.extract_text(pdf_path)
+
+                if not paper_text:
+                    progress.remove_task(content_task)
+                    left_to_right_reveal(console, "Failed to extract text from PDF", style=colors['error'])
+                    return
+
+                # Save extracted text
+                text_file = Path(pdf_path).with_suffix('.txt')
+                with open(text_file, 'w', encoding='utf-8') as f:
+                    f.write(paper_text)
+
+            progress.update(content_task, description="Content prepared")
+            progress.remove_task(content_task)
+
+            # Step 3: Perform AI analysis
+            analysis_task = progress.add_task(f"Performing {analysis_type} analysis...", total=None)
+
+            # Build analysis prompt based on type
+            analysis_prompt = _build_analysis_prompt(analysis_type, paper_metadata, paper_text)
+
+            # Get AI analysis using LLM client directly for better results
+            from ...services.llm_client import llm_client
+
+            # Create comprehensive content for analysis
+            paper_content = f"""
+Title: {paper_metadata.get('title', 'Unknown Title')}
+
+Authors: {', '.join(paper_metadata.get('authors', []))}
+
+Categories: {', '.join(paper_metadata.get('categories', []))}
+
+Published: {paper_metadata.get('published', 'Unknown')}
+
+Full Paper Content:
+{paper_text}
+"""
+
+            analysis_result = await llm_client.analyze_paper(paper_content)
+
+            progress.update(analysis_task, description="Analysis complete")
+            progress.remove_task(analysis_task)
+
+            if not analysis_result:
+                left_to_right_reveal(console, "Analysis failed: No result returned", style=colors['error'])
+                return
+
+        except Exception as e:
+            progress.stop()
+            from rich.panel import Panel
+            error_message = str(e)
+            error_panel = Panel(
+                f"[bold {colors['error']}]Error:[/bold {colors['error']}] {error_message}\n\n"
+                f"The analysis could not be completed due to the above issue.\n"
+                f"Please address the error and try again.",
+                title="Analysis Failed",
+                border_style=f"bold {colors['error']}"
+            )
+            console.print(error_panel)
+            return
+
+    # Display results
+    _display_analysis_results(paper_metadata, analysis_result, analysis_type)
+
+    # Save results if requested
+    if save_results:
+        _save_analysis_results(clean_paper_id, analysis_result, analysis_type)
+        left_to_right_reveal(console, f"\nAnalysis results saved!", style=colors['primary'])
+
+    # Offer to save paper to library (similar to chat flow)
+    await _offer_save_paper_to_library(console, colors, paper_metadata)
+
+    # Show command suggestions after save prompt
+    show_command_suggestions(console, context='analyze')
+
+    # Cleanup downloaded files after analysis
+    try:
+        from ...utils.file_cleanup import file_cleanup_manager
+        left_to_right_reveal(console, "\nCleaning up downloaded files...", style=colors['primary'])
+        if file_cleanup_manager.cleanup_paper_files(clean_paper_id):
+            left_to_right_reveal(console, "Files cleaned up successfully", style=colors['primary'])
+    except Exception as cleanup_error:
+        left_to_right_reveal(console, f"File cleanup warning: {cleanup_error}", style=colors['warning'])
+
+def _build_analysis_prompt(analysis_type: str, metadata: dict, paper_text: str) -> str:
+    """Build analysis prompt based on type"""
+    from ...prompts import format_prompt
+
+    title = metadata.get('title', 'Unknown')
+    authors = ', '.join(metadata.get('authors', []))
+    categories = ', '.join(metadata.get('categories', []))
+    published = metadata.get('published', 'Unknown')
+
+    if analysis_type == "summary":
+        return format_prompt("summary_analysis",
+                             title=title,
+                             abstract=metadata.get('abstract', ''),
+                             content=paper_text)
+
+    elif analysis_type == "detailed":
+        return format_prompt("detailed_analysis",
+                             title=title,
+                             authors=authors,
+                             categories=categories,
+                             published=published)
+
+    elif analysis_type == "technical":
+        return format_prompt("technical_analysis",
+                             title=title,
+                             authors=authors,
+                             categories=categories,
+                             published=published)
+
+    elif analysis_type == "insights":
+        return format_prompt("insights_analysis",
+                             title=title,
+                             authors=authors,
+                             categories=categories,
+                             published=published)
+
+    return f"Paper Title: {title}\nAuthors: {authors}\nCategories: {categories}\nPublished: {published}"
+
+def _display_animated_panel(console: Console, content: str, title: str, colors: dict):
+    """Display a panel with left-to-right animated text"""
+    # Use left_to_right animation for the content
+    left_to_right_reveal(console, content, style=colors['primary'])
+    # Then display in a panel
+    console.print(Panel(content, border_style=f"bold {colors['primary']}", title=title))
+
+def _display_analysis_results(metadata: dict, analysis_result: dict, analysis_type: str):
+    """Display the analysis results with proper theming"""
+    from ..ui.theme import create_themed_console, get_theme_colors, create_themed_panel
+    from ..ui.logo import display_header
+
+    console = create_themed_console()
+    colors = get_theme_colors()
+
+    # Display header with logo
+    display_header(console)
+
+    # Analysis type header with animation
+    header_text = f"\nAI Analysis Results ({analysis_type.title()})"
+    left_to_right_reveal(console, header_text, style=f"bold {colors['primary']}")
+
+    # Paper details in a box
+    title = metadata.get("title", "Unknown Title")
+    arxiv_id = metadata.get('arxiv_id', 'Unknown')
+    authors = metadata.get('authors', [])
+    authors_str = ', '.join(authors[:3]) + (f" +{len(authors)-3} more" if len(authors) > 3 else "") if authors else "Unknown"
+    categories = metadata.get('categories', [])
+    categories_str = ', '.join(categories) if categories else "Unknown"
+    published = metadata.get('published', 'Unknown')
+    if published and len(published) > 10:
+        published = published[:10]
+
+    paper_info = f"""[bold]Title:[/bold] {title}
+
+[bold]arXiv ID:[/bold] {arxiv_id}
+
+[bold]Authors:[/bold] {authors_str}
+
+[bold]Categories:[/bold] {categories_str}
+
+[bold]Published:[/bold] {published}"""
+
+    console.print(Panel(paper_info, border_style=f"bold {colors['primary']}", title=f"[bold {colors['primary']}]Paper Details[/bold {colors['primary']}]"))
+    console.print()
+
+    # Analysis content - handle both analysis service and direct LLM results
+    if isinstance(analysis_result.get("analysis"), dict):
+        # Handle structured analysis result from analysis service
+        analysis_data = analysis_result["analysis"]
+
+        # Display summary
+        if "summary" in analysis_result:
+            content = analysis_result["summary"]
+            left_to_right_reveal(console, "Summary", style=f"bold {colors['primary']}")
+            left_to_right_reveal(console, content, style=colors['primary'])
+            console.print()
+
+        # Display key findings
+        if "key_ideas" in analysis_data:
+            key_ideas = analysis_data["key_ideas"]
+            if isinstance(key_ideas, list):
+                ideas_text = "\n\n".join([f"• {idea}" for idea in key_ideas])
+            else:
+                ideas_text = str(key_ideas)
+            left_to_right_reveal(console, "Key Ideas", style=f"bold {colors['primary']}")
+            left_to_right_reveal(console, ideas_text, style=colors['primary'])
+            console.print()
+
+        # Display technical approach if available
+        if "technical_approach" in analysis_data:
+            tech = analysis_data["technical_approach"]
+            if isinstance(tech, dict) and "methodology" in tech:
+                content = tech["methodology"]
+                left_to_right_reveal(console, "Methodology", style=f"bold {colors['primary']}")
+                left_to_right_reveal(console, content, style=colors['primary'])
+                console.print()
+
+        # Display significance
+        if "significance_impact" in analysis_data:
+            sig = analysis_data["significance_impact"]
+            if isinstance(sig, dict) and "field_impact" in sig:
+                content = sig["field_impact"]
+                left_to_right_reveal(console, "Impact", style=f"bold {colors['primary']}")
+                left_to_right_reveal(console, content, style=colors['primary'])
+                console.print()
+
+    elif "summary" in analysis_result:
+        # Handle direct LLM analysis results with animated reveal
+        summary_content = analysis_result["summary"]
+        left_to_right_reveal(console, "── Summary ──", style=f"bold {colors['primary']}")
+        left_to_right_reveal(console, summary_content, style="white")
+        console.print()
+
+        if "key_findings" in analysis_result and analysis_result["key_findings"]:
+            findings = analysis_result["key_findings"]
+            if isinstance(findings, list) and findings:
+                findings_text = "\n".join([f"• {finding}" for finding in findings if finding and finding.strip()])
+                if findings_text.strip():
+                    left_to_right_reveal(console, "── Key Findings ──", style=f"bold {colors['primary']}")
+                    left_to_right_reveal(console, findings_text, style="white")
+                    console.print()
+            elif isinstance(findings, str) and findings.strip():
+                left_to_right_reveal(console, "── Key Findings ──", style=f"bold {colors['primary']}")
+                left_to_right_reveal(console, findings, style="white")
+                console.print()
+
+        if "methodology" in analysis_result and analysis_result["methodology"] and analysis_result["methodology"].strip():
+            methodology_content = analysis_result["methodology"]
+            left_to_right_reveal(console, "── Methodology ──", style=f"bold {colors['primary']}")
+            left_to_right_reveal(console, methodology_content, style="white")
+            console.print()
+
+        # Add new comprehensive fields with animated reveal
+        if "technical_details" in analysis_result and analysis_result["technical_details"] and analysis_result["technical_details"].strip():
+            technical_content = analysis_result["technical_details"]
+            left_to_right_reveal(console, "── Technical Details ──", style=f"bold {colors['primary']}")
+            left_to_right_reveal(console, technical_content, style="white")
+            console.print()
+
+        if "broader_impact" in analysis_result and analysis_result["broader_impact"] and analysis_result["broader_impact"].strip():
+            impact_content = analysis_result["broader_impact"]
+            left_to_right_reveal(console, "── Broader Impact & Future Directions ──", style=f"bold {colors['primary']}")
+            left_to_right_reveal(console, impact_content, style="white")
+            console.print()
+
+        if "strengths" in analysis_result and analysis_result["strengths"]:
+            strengths = analysis_result["strengths"]
+            if isinstance(strengths, list) and strengths:
+                strengths_text = "\n".join([f"• {strength}" for strength in strengths if strength and strength.strip()])
+                if strengths_text.strip():
+                    left_to_right_reveal(console, "── Strengths ──", style=f"bold {colors['primary']}")
+                    left_to_right_reveal(console, strengths_text, style="white")
+                    console.print()
+            elif isinstance(strengths, str) and strengths.strip():
+                left_to_right_reveal(console, "── Strengths ──", style=f"bold {colors['primary']}")
+                left_to_right_reveal(console, strengths, style="white")
+                console.print()
+
+        if "limitations" in analysis_result and analysis_result["limitations"]:
+            limitations = analysis_result["limitations"]
+            if isinstance(limitations, list) and limitations:
+                limitations_text = "\n".join([f"• {limitation}" for limitation in limitations if limitation and limitation.strip()])
+                if limitations_text.strip():
+                    left_to_right_reveal(console, "── Limitations ──", style=f"bold {colors['primary']}")
+                    left_to_right_reveal(console, limitations_text, style="white")
+                    console.print()
+            elif isinstance(limitations, str) and limitations.strip():
+                left_to_right_reveal(console, "── Limitations ──", style=f"bold {colors['primary']}")
+                left_to_right_reveal(console, limitations, style="white")
+                console.print()
+
+    else:
+        # Handle simple string analysis
+        analysis_content = analysis_result.get("analysis", "No analysis available")
+        if isinstance(analysis_content, str):
+            # Split analysis into sections if it contains numbered points
+            sections = analysis_content.split('\n\n')
+
+            for i, section in enumerate(sections):
+                if section.strip():
+                    left_to_right_reveal(console, section.strip(), style="white")
+                    console.print()
+        else:
+            content = str(analysis_content)
+            left_to_right_reveal(console, "── Analysis ──", style=f"bold {colors['primary']}")
+            left_to_right_reveal(console, content, style="white")
+            console.print()
+
+
+async def _offer_save_paper_to_library(console: Console, colors: dict, paper_metadata: dict):
+    """Offer to save paper to user's library after analysis via API"""
+    from rich.prompt import Prompt
+    from ...services.unified_user_service import unified_user_service
+
+    # Check auth
+    if not unified_user_service.is_authenticated() and not api_client.is_authenticated():
+        return
+
+    arxiv_id = paper_metadata.get('arxiv_id', '')
+    if not arxiv_id:
+        return
+
+    try:
+        # Check if already in library
+        library_result = await api_client.get_library(limit=100)
+        if library_result.get("success"):
+            papers = library_result.get("papers", [])
+            if any(p.get('arxiv_id') == arxiv_id for p in papers):
+                left_to_right_reveal(console, "\nThis paper is already in your library.",
+                                     style=f"bold {colors['primary']}", duration=1.0)
+                return
+
+            if len(papers) >= MAX_USER_PAPERS:
+                left_to_right_reveal(console, f"\nYou have reached the maximum of {MAX_USER_PAPERS} saved papers.",
+                                     style=f"bold {colors['warning']}", duration=1.0)
+                left_to_right_reveal(console, "Use 'arionxiv settings' to manage your saved papers.",
+                                     style=f"bold {colors['primary']}", duration=1.0)
+                return
+
+        # Ask user if they want to save
+        save_choice = Prompt.ask(
+            f"\n[bold {colors['primary']}]Save this paper to your library for quick access? (y/n)[/bold {colors['primary']}]",
+            choices=["y", "n"],
+            default="y"
+        )
+
+        if save_choice == "y":
+            with Progress(
+                SpinnerColumn(),
+                TextColumn("[progress.description]{task.description}"),
+                console=console,
+                transient=True
+            ) as progress:
+                progress.add_task(f"[{colors['primary']}]Saving paper to library...[/{colors['primary']}]", total=None)
+                result = await api_client.add_to_library(arxiv_id=arxiv_id)
+
+            if result.get("success"):
+                left_to_right_reveal(console, "Paper saved to your library!",
+                                     style=f"bold {colors['primary']}", duration=1.0)
+            else:
+                left_to_right_reveal(console, "Could not save paper at this time.",
+                                     style=f"bold {colors['warning']}", duration=1.0)
+
+    except APIClientError as e:
+        logger.debug(f"API error saving paper: {e.message}")
+    except Exception as e:
+        logger.debug(f"Error saving paper: {e}")
+
+
+def _save_analysis_results(paper_id: str, analysis_result: dict, analysis_type: str):
+    """Save analysis results to file"""
+    try:
+        # Get theme colors for consistent styling
+        from ..ui.theme import get_theme_colors
+        colors = get_theme_colors()
+
+        # Create analysis directory
+        analysis_dir = Path(backend_path.parent) / "analysis_results"
+        analysis_dir.mkdir(exist_ok=True)
+
+        # Generate filename
+        filename = f"{paper_id}_{analysis_type}_analysis.md"
+        file_path = analysis_dir / filename
+
+        # Prepare content
+        content = f"""# Analysis Results
+
+**Paper ID:** {paper_id}
+**Analysis Type:** {analysis_type.title()}
+**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
+
+## Analysis
+
+{analysis_result.get("analysis", "No analysis available")}
+
+## Metadata
+
+{analysis_result.get("metadata", {})}
+"""
+
+        # Save to file
+        with open(file_path, 'w', encoding='utf-8') as f:
+            f.write(content)
+
+        left_to_right_reveal(console, f"Results saved to: {file_path}", style=colors['primary'])
+
+    except Exception as e:
+        left_to_right_reveal(console, f"Error saving results: {str(e)}", style=colors['error'])
+
+# End of file - old next steps function removed