aurelian 0.3.3__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,566 @@
1
+ """
2
+ Tools for the PaperQA agent.
3
+ """
4
+ import os
5
+ import logging
6
+ from pathlib import Path
7
+ from typing import List, Dict, Any, Optional
8
+
9
+ from pydantic_ai import RunContext, ModelRetry
10
+
11
+ from paperqa import Docs, agent_query
12
+ from paperqa.agents.search import get_directory_index
13
+
14
+ from .paperqa_config import PaperQADependencies
15
+
16
+
17
def create_response(success: bool, paper_directory: str, doc_files: dict,
                    indexed_files: Optional[dict] = None, **kwargs) -> dict:
    """Build the standardized response payload used by the PaperQA tools.

    Args:
        success: Whether the operation was successful.
        paper_directory: Path to the paper directory.
        doc_files: Document file names bucketed by type; must contain the
            keys 'all', 'pdf', 'txt', 'html' and 'md'.
        indexed_files: Optional collection of indexed files (a mapping of
            chunk name -> info, or a plain sequence of chunk names).
        **kwargs: Extra key/value pairs merged into the response last, so
            they may override the standard keys.

    Returns:
        A standardized response dictionary.
    """
    # Derive per-type counts from the pre-bucketed lists.
    document_counts = {'total': len(doc_files['all'])}
    for kind in ('pdf', 'txt', 'html', 'md'):
        document_counts[kind] = len(doc_files[kind])

    response = {
        "success": success,
        "paper_directory": paper_directory,
        "document_counts": document_counts,
    }

    if indexed_files is not None:
        response["indexed_chunks_count"] = len(indexed_files)
        # Sequences (e.g. a plain list of chunk names) have no .keys();
        # in that case report an empty paper list, as before.
        keys = getattr(indexed_files, 'keys', None)
        response["indexed_papers"] = list(keys()) if keys else []

    response.update(kwargs)

    return response
52
+
53
+ logger = logging.getLogger(__name__)
54
+
55
+
56
def get_document_files(directory: str) -> Dict[str, List[str]]:
    """
    Collect the names of all indexable document files in a directory.

    Args:
        directory: Directory to search for document files (non-recursive).

    Returns:
        dict: File-name lists keyed by type ('pdf', 'txt', 'html', 'md')
        plus the combined list under 'all'. A missing or non-directory path
        yields empty lists.
    """
    extensions = ('.pdf', '.txt', '.html', '.md')
    base = Path(directory)

    names: List[str] = []
    if base.exists() and base.is_dir():
        # Extensions are matched case-insensitively (e.g. "A.PDF" counts).
        names = [entry.name for entry in base.iterdir()
                 if entry.is_file() and entry.name.lower().endswith(extensions)]

    buckets: Dict[str, List[str]] = {'all': names}
    for ext in extensions:
        buckets[ext[1:]] = [name for name in names if name.lower().endswith(ext)]
    return buckets
81
+
82
+
83
async def search_papers(
    ctx: RunContext[PaperQADependencies],
    query: str,
    max_papers: Optional[int] = None,
) -> Any:
    """
    Search for papers relevant to the query using PaperQA.

    Args:
        ctx: The run context carrying the PaperQA dependencies/settings.
        query: The search query.
        max_papers: Maximum number of papers to return (overrides config).

    Returns:
        The PaperQA agent response, or a dict with a 'message' and an empty
        'papers' list when nothing is indexed yet.

    Raises:
        ModelRetry: If the search fails for a reason other than an empty index.
    """
    no_papers = {
        "message": "No papers are currently indexed. You can add papers using the add_paper function.",
        "papers": []
    }
    try:
        settings = ctx.deps.set_paperqa_settings()

        if max_papers is not None:
            settings.agent.search_count = max_papers

        try:
            index = await get_directory_index(settings=settings, build=False)
            index_files = await index.index_files
            logger.info(f"Found existing index with {len(index_files)} files")
        except Exception as e:
            # If the error is about an empty index, try to build it once.
            if "was empty, please rebuild it" in str(e):
                logger.info("Index is empty, attempting to rebuild...")
                index = await get_directory_index(settings=settings, build=True)
                index_files = await index.index_files

                if not index_files:
                    return no_papers
            else:
                raise

        response = await agent_query(
            query=f"Find scientific papers about: {query}",
            settings=settings
        )

        return response
    except ModelRetry:
        # Already a retryable error from a nested call; propagate unchanged.
        # (Replaces the fragile `"ModelRetry" in str(type(e))` name check.)
        raise
    except Exception as e:
        if "was empty, please rebuild it" in str(e):
            return no_papers

        raise ModelRetry(f"Error searching papers: {str(e)}") from e
141
+
142
+
143
async def query_papers(
    ctx: RunContext[PaperQADependencies],
    query: str,
) -> Any:
    """
    Query the papers to answer a specific question using PaperQA.

    Args:
        ctx: The run context carrying the PaperQA dependencies/settings.
        query: The question to answer based on the papers.

    Returns:
        The full PQASession object with the answer and context, or a dict
        with a 'message' and empty 'papers' list when nothing is indexed.

    Raises:
        ModelRetry: If the query fails for a reason other than an empty index.
    """
    no_papers = {
        "message": "No papers are currently indexed. You can add papers using the add_paper function.",
        "papers": []
    }
    try:
        settings = ctx.deps.set_paperqa_settings()

        try:
            # Probe the existing index without triggering a (slow) build.
            index = await get_directory_index(settings=settings, build=False)
            index_files = await index.index_files

            # The index exists but contains nothing searchable.
            if not index_files:
                return no_papers
        except Exception as e:
            if "was empty, please rebuild it" in str(e):
                return no_papers
            else:
                raise

        response = await agent_query(
            query=query,
            settings=settings
        )

        return response
    except ModelRetry:
        # Already a retryable error; propagate unchanged.
        # (Replaces the fragile `"ModelRetry" in str(type(e))` name check.)
        raise
    except Exception as e:
        if "was empty, please rebuild it" in str(e):
            return no_papers

        raise ModelRetry(f"Error querying papers: {str(e)}") from e
197
+
198
+
199
async def build_index(
    ctx: RunContext[PaperQADependencies],
) -> Any:
    """
    Rebuild the search index for papers in the configured paper directory.

    Args:
        ctx: The run context carrying the PaperQA dependencies/settings.

    Returns:
        A standardized response dict (see create_response) describing the
        indexing outcome; indexing failures are reported with success=False
        rather than raised.

    Raises:
        ModelRetry: If setup (settings/directory scan) fails unexpectedly.
    """
    try:
        settings = ctx.deps.set_paperqa_settings()
        paper_directory = settings.agent.index.paper_directory

        # Make sure the directory exists so a first run doesn't fail.
        os.makedirs(paper_directory, exist_ok=True)

        doc_files = get_document_files(paper_directory)

        if not doc_files['all']:
            return create_response(
                success=True,
                paper_directory=paper_directory,
                doc_files=doc_files,
                indexed_files={},
                message=f"No indexable documents found in {paper_directory}. Add documents (PDF, TXT, HTML, MD) to this directory before indexing."
            )

        try:
            logger.info(f"Building index for {len(doc_files['all'])} documents in {paper_directory}:")
            if doc_files['pdf']:
                logger.info(f" - {len(doc_files['pdf'])} PDF files")
            if doc_files['txt']:
                logger.info(f" - {len(doc_files['txt'])} text files")
            if doc_files['html']:
                logger.info(f" - {len(doc_files['html'])} HTML files")
            if doc_files['md']:
                logger.info(f" - {len(doc_files['md'])} Markdown files")

            index = await get_directory_index(settings=settings, build=True)
            index_files = await index.index_files

            # Documents were present but none produced index chunks —
            # most likely parsing failures; still report success=True so the
            # caller can inspect the counts.
            if not index_files:
                return create_response(
                    success=True,
                    paper_directory=paper_directory,
                    doc_files=doc_files,
                    indexed_files={},
                    documents_found=doc_files,
                    message=f"Found {len(doc_files['all'])} documents but none were successfully indexed. This could be due to parsing issues with the documents."
                )

            return create_response(
                success=True,
                paper_directory=paper_directory,
                doc_files=doc_files,
                indexed_files=index_files,
                message=f"Successfully indexed {len(index_files)} document chunks from {len(doc_files['all'])} files."
            )
        except Exception as e:
            # Indexing errors are returned, not raised, so auto_index callers
            # can report them without aborting.
            return create_response(
                success=False,
                paper_directory=paper_directory,
                doc_files=doc_files,
                message=f"Error indexing documents: {str(e)}",
                error=str(e)
            )
    except ModelRetry:
        # Propagate retryable errors unchanged.
        # (Replaces the fragile `"ModelRetry" in str(type(e))` name check.)
        raise
    except Exception as e:
        raise ModelRetry(f"Error building index: {str(e)}") from e
272
+
273
+
274
async def add_paper(
    ctx: RunContext[PaperQADependencies],
    path: str,
    citation: Optional[str] = None,
    auto_index: bool = True,
) -> Any:
    """
    Add a specific paper to the collection.

    Args:
        ctx: The run context carrying the PaperQA dependencies/settings.
        path: Path to the paper file or URL.
        citation: Optional citation for the paper.
        auto_index: Whether to automatically rebuild the index after adding.

    Returns:
        Information about the added paper (success flag, docname, doc,
        message, and optionally the index_result).

    Raises:
        ModelRetry: If the paper cannot be added.
    """
    try:
        settings = ctx.deps.set_paperqa_settings()

        paper_directory = settings.agent.index.paper_directory
        os.makedirs(paper_directory, exist_ok=True)

        # For URLs: download the PDF into the paper directory (so the
        # directory index can find it later), then process it with Docs.
        if path.startswith(("http://", "https://")):
            import requests
            from urllib.parse import urlparse

            url_parts = urlparse(path)
            file_name = os.path.basename(url_parts.path)
            if not file_name or not file_name.lower().endswith('.pdf'):
                file_name = "paper.pdf"

            target_path = os.path.join(paper_directory, file_name)

            try:
                response = requests.get(path, stream=True)
                response.raise_for_status()

                # Stream to disk in chunks to avoid loading large PDFs in memory.
                with open(target_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

                logger.info(f"Downloaded {path} to {target_path}")

                docs = Docs()
                docname = await docs.aadd(
                    path=target_path,
                    citation=citation,
                    settings=settings,
                )
            except Exception as e:
                # If the direct download fails, let paperqa fetch the URL itself.
                logger.warning(f"Download failed: {str(e)}, falling back to docs.aadd_url")
                docs = Docs()
                docname = await docs.aadd_url(
                    url=path,
                    citation=citation,
                    settings=settings,
                )

                # If aadd_url saved the file elsewhere, copy it into the paper
                # directory so indexing can see it.
                if docname and hasattr(docs, 'docs') and docname in docs.docs:
                    doc = docs.docs[docname]
                    if hasattr(doc, 'filepath') and os.path.exists(doc.filepath):
                        import shutil
                        target_path = os.path.join(paper_directory, f"{docname}.pdf")
                        if not os.path.exists(target_path):
                            shutil.copy2(doc.filepath, target_path)
                            logger.info(f"Copied from {doc.filepath} to {target_path}")
        else:
            # Resolve relative paths against the configured paper directory
            # first, then against the working directory.
            if not os.path.isabs(path):
                full_path = os.path.join(ctx.deps.paper_directory, path)
                if os.path.exists(full_path):
                    path = full_path
                else:
                    full_path = os.path.join(ctx.deps.workdir.location, path)
                    if os.path.exists(full_path):
                        path = full_path

            # If the path is outside the paper directory, copy it there so the
            # index build can pick it up.
            if os.path.exists(path) and paper_directory not in path:
                import shutil
                target_path = os.path.join(paper_directory, os.path.basename(path))
                if not os.path.exists(target_path):
                    shutil.copy2(path, target_path)

            docs = Docs()
            docname = await docs.aadd(
                path=path,
                citation=citation,
                settings=settings,
            )

        if docname:
            doc = next((d for d in docs.docs.values() if d.docname == docname), None)

            result = {
                "success": True,
                "docname": docname,
                "doc": doc,
            }

            if auto_index:
                try:
                    index_result = await build_index(ctx)
                    result["index_result"] = index_result
                    if index_result["success"]:
                        # BUG FIX: build_index responses (via create_response)
                        # carry 'indexed_chunks_count'; the old lookup of
                        # 'indexed_papers_count' always raised KeyError and
                        # misreported success as an indexing failure.
                        chunks = index_result.get("indexed_chunks_count", 0)
                        result["message"] = f"Paper added and indexed successfully. {chunks} document chunks now in the index."
                    else:
                        # 'error' is only present on failure responses; don't
                        # KeyError while formatting the failure message.
                        result["message"] = f"Paper added but indexing failed: {index_result.get('error', 'unknown error')}"
                except Exception as e:
                    result["message"] = f"Paper added but indexing failed: {str(e)}"
            else:
                result["message"] = "Paper added successfully. Use 'aurelian paperqa index' to rebuild the index to make this paper searchable."

            return result
        else:
            return {
                "success": False,
                "message": "Paper was already in the collection."
            }
    except ModelRetry:
        # Propagate retryable errors unchanged.
        # (Replaces the fragile `"ModelRetry" in str(type(e))` name check.)
        raise
    except Exception as e:
        raise ModelRetry(f"Error adding paper: {str(e)}") from e
406
+
407
+
408
async def add_papers(
    ctx: RunContext[PaperQADependencies],
    directory: str,
    citation: Optional[str] = None,
    auto_index: bool = True,
) -> Any:
    """
    Add multiple papers from a directory to the collection.

    Args:
        ctx: The run context carrying the PaperQA dependencies/settings.
        directory: Path to the directory containing papers.
        citation: Optional citation format applied to all papers (the file
            name is appended to it per paper).
        auto_index: Whether to automatically rebuild the index afterwards.

    Returns:
        A standardized response dict with the list of added documents and,
        when auto_index ran, the index_result.

    Raises:
        ModelRetry: If the batch add fails unexpectedly.
    """
    try:
        settings = ctx.deps.set_paperqa_settings()
        paper_directory = settings.agent.index.paper_directory
        os.makedirs(paper_directory, exist_ok=True)

        if not Path(directory).is_dir():
            return create_response(
                success=False,
                paper_directory=paper_directory,
                doc_files={"all": [], "pdf": [], "txt": [], "html": [], "md": []}
            )

        doc_files = get_document_files(directory)

        if not doc_files['all']:
            return create_response(
                success=False,
                paper_directory=paper_directory,
                doc_files=doc_files
            )

        logger.info(f"Found {len(doc_files['all'])} documents in {directory}:")
        if doc_files['pdf']:
            logger.info(f" - {len(doc_files['pdf'])} PDF files")
        if doc_files['txt']:
            logger.info(f" - {len(doc_files['txt'])} text files")
        if doc_files['html']:
            logger.info(f" - {len(doc_files['html'])} HTML files")
        if doc_files['md']:
            logger.info(f" - {len(doc_files['md'])} Markdown files")

        docs = Docs()
        added_papers = []

        for doc_file in doc_files['all']:
            file_path = os.path.join(directory, doc_file)
            try:
                logger.info(f"Adding document: {file_path}")

                doc_citation = None
                if citation:
                    doc_citation = f"{citation} - {doc_file}"

                # Copy into the paper directory so a later index build sees it.
                if Path(file_path).exists() and paper_directory not in file_path:
                    import shutil
                    target_path = os.path.join(paper_directory, os.path.basename(file_path))
                    if not Path(target_path).exists():
                        shutil.copy2(file_path, target_path)
                        logger.info(f"Copied {file_path} to {target_path}")

                # NOTE(review): aadd ingests the original file_path, not the
                # copy made above — presumably intentional (the copy exists
                # only for indexing); confirm before changing.
                docname = await docs.aadd(
                    path=file_path,
                    citation=doc_citation,
                    settings=settings,
                )
                if docname:
                    doc = next((d for d in docs.docs.values() if d.docname == docname), None)
                    added_papers.append({
                        "file": doc_file,
                        "docname": docname,
                        "citation": doc_citation,
                        "doc": doc
                    })
                    logger.info(f"Successfully added document: {doc_file}")
            except Exception as e:
                # Best-effort batch: log and continue with the next file.
                logger.error(f"Error adding {file_path}: {e}")

        index_result = None
        if auto_index and added_papers:
            try:
                index_result = await build_index(ctx)
                logger.info(f"Index rebuilt with {len(index_result.get('indexed_papers', []))} papers")
            except Exception as e:
                logger.error(f"Error rebuilding index: {e}")
                index_result = {"success": False, "error": str(e)}

        response = create_response(
            success=True,
            paper_directory=paper_directory,
            doc_files=doc_files,
            message=f"Successfully added {len(added_papers)} documents out of {len(doc_files['all'])}",
            documents_added=len(added_papers),
            added_documents=added_papers
        )

        if index_result:
            response["index_result"] = index_result

        return response
    except ModelRetry:
        # Propagate retryable errors unchanged.
        # (Replaces the fragile `"ModelRetry" in str(type(e))` name check.)
        raise
    except Exception as e:
        raise ModelRetry(f"Error adding papers: {str(e)}") from e
519
+
520
+
521
async def list_papers(
    ctx: RunContext[PaperQADependencies],
) -> Any:
    """
    List all papers in the current paper directory.

    Args:
        ctx: The run context carrying the PaperQA dependencies/settings.

    Returns:
        A standardized response dict with the files on disk (overall and by
        type) and the count of indexed chunks.

    Raises:
        ModelRetry: If listing fails unexpectedly.
    """
    try:
        settings = ctx.deps.set_paperqa_settings()
        paper_directory = settings.agent.index.paper_directory

        doc_files = get_document_files(paper_directory)

        # A missing/empty index is normal here — fall back to an empty list.
        indexed_files = []
        try:
            index = await get_directory_index(settings=settings, build=False)
            index_files = await index.index_files
            indexed_files = list(index_files.keys())
            logger.info(f"Found {len(indexed_files)} indexed document chunks")
        except Exception:
            logger.info("No index found or index is empty")

        # indexed_files is a list here, so create_response reports its length
        # as indexed_chunks_count and an empty indexed_papers list.
        return create_response(
            success=True,
            paper_directory=paper_directory,
            doc_files=doc_files,
            indexed_files=indexed_files,
            message=f"Found {len(doc_files['all'])} documents and {len(indexed_files)} indexed chunks",
            files_in_directory=doc_files['all'],
            files_by_type={
                "pdf": doc_files['pdf'],
                "txt": doc_files['txt'],
                "html": doc_files['html'],
                "md": doc_files['md']
            },
            note="To search papers, they must be both in the paper directory AND indexed. If there are files in the directory but not indexed, use the CLI command 'aurelian paperqa index -d <directory>' to index them."
        )
    except ModelRetry:
        # Propagate retryable errors unchanged.
        # (Replaces the fragile `"ModelRetry" in str(type(e))` name check.)
        raise
    except Exception as e:
        raise ModelRetry(f"Error listing papers: {str(e)}") from e
aurelian/cli.py CHANGED
@@ -958,6 +958,33 @@ def reaction(ui, query, **kwargs):
958
958
 
959
959
 
960
960
 
961
# CLI entry point for the PaperQA agent; the option decorators are defined
# elsewhere in cli.py and inject their values via **kwargs.
@main.command()
@model_option
@workdir_option
@share_option
@server_port_option
@ui_option
@click.argument("query", nargs=-1, required=False)
def paperqa(ui, query, **kwargs):
    """Start the PaperQA Agent for scientific literature search and analysis.

    The PaperQA Agent helps search, organize, and analyze scientific papers. It can
    find papers on specific topics, add papers to your collection, and answer questions
    based on the papers in your collection.

    Run with a query for direct mode or with --ui for interactive chat mode.

    Use `aurelian paperqa` subcommands for paper management:
    - `aurelian paperqa index` to index papers for searching
    - `aurelian paperqa list` to list papers in your collection
    """
    # Delegate to the shared agent runner; kwargs carry the values collected
    # by the option decorators above (model, workdir, share, server port).
    run_agent("paperqa", "aurelian.agents.paperqa", query=query, ui=ui, **kwargs)
982
+
983
+
984
+ # Import and register PaperQA CLI commands
985
+ from aurelian.agents.paperqa.paperqa_cli import paperqa_cli
986
+ main.add_command(paperqa_cli)
987
+
961
988
  # DO NOT REMOVE THIS LINE
962
989
  # added this for mkdocstrings to work
963
990
  # see https://github.com/bruce-szalwinski/mkdocs-typer/issues/18
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: aurelian
3
- Version: 0.3.3
3
+ Version: 0.3.4
4
4
  Summary: aurelian
5
5
  License: MIT
6
6
  Author: Author 1
@@ -33,6 +33,7 @@ Requires-Dist: markdownify (>=0.14.1)
33
33
  Requires-Dist: markitdown (>=0.0.1a4)
34
34
  Requires-Dist: mcp[cli] (>=1.3.0,<2.0.0)
35
35
  Requires-Dist: oaklib (>=0.6.19)
36
+ Requires-Dist: paper-qa (>=5.20.0,<6.0.0)
36
37
  Requires-Dist: pdfminer-six ; extra == "pdfminer"
37
38
  Requires-Dist: pydantic-ai (>=0.0.29)
38
39
  Requires-Dist: pypaperbot (>=1.4.1)
@@ -43,6 +44,7 @@ Requires-Dist: undetected-chromedriver (>=3.5.5)
43
44
  Requires-Dist: wikipedia (>=1.4.0)
44
45
  Description-Content-Type: text/markdown
45
46
 
47
+ [![DOI](https://zenodo.org/badge/932483388.svg)](https://doi.org/10.5281/zenodo.15299996)
46
48
 
47
49
  # Aurelian: Agentic Universal Research Engine for Literature, Integration, Annotation, and Navigation
48
50
 
@@ -172,6 +172,13 @@ aurelian/agents/ontology_mapper/ontology_mapper_evals.py,sha256=6kympT8LV570rRoz
172
172
  aurelian/agents/ontology_mapper/ontology_mapper_gradio.py,sha256=nzkwPvK3k6Sze8oNS2R8z4k2XeXj4LU2wjV4G_Jn-Ec,1819
173
173
  aurelian/agents/ontology_mapper/ontology_mapper_mcp.py,sha256=YnXCKU2Yzy_RPqEba8hJPWwtb1H07Wo-zlZY-nnKurw,2208
174
174
  aurelian/agents/ontology_mapper/ontology_mapper_tools.py,sha256=S0azD3wR0pTh-9ipjVBUiNRkgsDEb30n1KMZD0qWvyw,4272
175
+ aurelian/agents/paperqa/__init__.py,sha256=Dfff3fyx9PeoeqA4DcvyE_loW-UXegYM281alttwLvI,590
176
+ aurelian/agents/paperqa/paperqa_agent.py,sha256=LimGkiN89gK-kD-AdrBXqIHw9FdC2JkBZTDZAuP1YX4,2615
177
+ aurelian/agents/paperqa/paperqa_cli.py,sha256=A0cwGUUTVcFVO1e_PsXusvBae9N5ElyeCtsZuQh-eGo,10357
178
+ aurelian/agents/paperqa/paperqa_config.py,sha256=CUj3BARcII8AippAYJXAq9MsqrQlkRm36ucHyuCGnII,4554
179
+ aurelian/agents/paperqa/paperqa_gradio.py,sha256=Thz82905TBeUQSiKcT40ZFs9MOqlN3YzYrJ5LNZPjHs,2844
180
+ aurelian/agents/paperqa/paperqa_mcp.py,sha256=ApnH43iZpM4ToPpSpE1KQf2mF4o7HCTXPb_X6tjX0ic,4665
181
+ aurelian/agents/paperqa/paperqa_tools.py,sha256=k0WtWPhkyAndnpPjP7XRF5llkW-Ktkxx2JhChO-SKU4,20238
175
182
  aurelian/agents/phenopackets/__init__.py,sha256=TrXNpK2KDUjxok4nCuhepVPGH4RvALOZJoXjD4S-i1g,73
176
183
  aurelian/agents/phenopackets/phenopackets_agent.py,sha256=R--xoY558TaVcAG2HoQvj7lWwCr_PrpfG7xl33rCIpk,2229
177
184
  aurelian/agents/phenopackets/phenopackets_config.py,sha256=hpc3m2IboYlQoJselVMPf_EK0Z1YJBNp7qbj3TLk7PM,2304
@@ -222,7 +229,7 @@ aurelian/agents/web/web_gradio.py,sha256=T7qzuRuBaWCYckWjpLu3L0LzHPLEKkxUYp2rj-O
222
229
  aurelian/agents/web/web_mcp.py,sha256=3mrUlxBqeMSOmtpnD2wWedsOiRJbtveEnbyJqQdfEXQ,1163
223
230
  aurelian/agents/web/web_tools.py,sha256=BfJJWlHz7tKh9VDjymIwzziahFKrqr2ZUO0QH3IcL6U,4070
224
231
  aurelian/chat.py,sha256=hg9eGKiz_NAjwG5jNGwNqoFrhhx029XX3dWdMRrk-EU,563
225
- aurelian/cli.py,sha256=RvIl2Y4DtyEqXNTsY71n-0t_ZXCK3nTmzWAcnFmMvrE,33532
232
+ aurelian/cli.py,sha256=nRCLPZb__FpMsHqNnDeoYCc0KspESumIKQlrDe97594,34510
226
233
  aurelian/dependencies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
227
234
  aurelian/dependencies/workdir.py,sha256=G_eGlxKpHRjO3EL2hHN8lvtticgSZvJe300KkJP4vZQ,2228
228
235
  aurelian/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -250,8 +257,8 @@ aurelian/utils/pubmed_utils.py,sha256=Gk00lu1Lv0GRSNeF5M4zplp3UMSpe5byCaVKCJimUH
250
257
  aurelian/utils/pytest_report_to_markdown.py,sha256=WH1NlkVYj0UfUqpXjRD1KMpkMgEW3qev3fDdPvZG9Yw,1406
251
258
  aurelian/utils/robot_ontology_utils.py,sha256=aaRe9eyLgJCtj1EfV13v4Q7khFTWzUoFFEE_lizGuGg,3591
252
259
  aurelian/utils/search_utils.py,sha256=9MloT3SzOE4JsElsYlCznp9N6fv_OQK7YWOU8MIy1WU,2818
253
- aurelian-0.3.3.dist-info/LICENSE,sha256=FB6RpUUfbUeKS4goWrvpp1QmOtyywrMiNBsYPMlLT3A,1086
254
- aurelian-0.3.3.dist-info/METADATA,sha256=zuOveEkQXBoEtZe5gOlQeTby9eIGowh4Pzp8QOwbVuc,3339
255
- aurelian-0.3.3.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
256
- aurelian-0.3.3.dist-info/entry_points.txt,sha256=BInUyPfLrHdmH_Yvi71dx21MhkcNCEOPiqvpEIb2U5k,46
257
- aurelian-0.3.3.dist-info/RECORD,,
260
+ aurelian-0.3.4.dist-info/LICENSE,sha256=FB6RpUUfbUeKS4goWrvpp1QmOtyywrMiNBsYPMlLT3A,1086
261
+ aurelian-0.3.4.dist-info/METADATA,sha256=o6MapyVbBF6DMGrYZMYBNc-JKm2q-uv3qjSVCwuXR0E,3471
262
+ aurelian-0.3.4.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
263
+ aurelian-0.3.4.dist-info/entry_points.txt,sha256=BInUyPfLrHdmH_Yvi71dx21MhkcNCEOPiqvpEIb2U5k,46
264
+ aurelian-0.3.4.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.1.2
2
+ Generator: poetry-core 2.1.3
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any