aurelian 0.3.2__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,566 @@
+ """
+ Tools for the PaperQA agent.
+ """
+ import os
+ import logging
+ from pathlib import Path
+ from typing import List, Dict, Any, Optional
+
+ from pydantic_ai import RunContext, ModelRetry
+
+ from paperqa import Docs, agent_query
+ from paperqa.agents.search import get_directory_index
+
+ from .paperqa_config import PaperQADependencies
+
+ logger = logging.getLogger(__name__)
+
+
+ def create_response(success: bool, paper_directory: str, doc_files: dict,
+                     indexed_files: Optional[dict] = None, **kwargs) -> dict:
+     """Create a standardized response dictionary.
+
+     Args:
+         success: Whether the operation was successful
+         paper_directory: Path to the paper directory
+         doc_files: Dictionary of document files grouped by type
+         indexed_files: Optional mapping of indexed files
+         **kwargs: Additional key-value pairs to include in the response
+
+     Returns:
+         A standardized response dictionary
+     """
+     document_counts = {
+         'total': len(doc_files['all']),
+         'pdf': len(doc_files['pdf']),
+         'txt': len(doc_files['txt']),
+         'html': len(doc_files['html']),
+         'md': len(doc_files['md']),
+     }
+
+     response = {
+         "success": success,
+         "paper_directory": paper_directory,
+         "document_counts": document_counts,
+     }
+
+     if indexed_files is not None:
+         response["indexed_chunks_count"] = len(indexed_files)
+         response["indexed_papers"] = list(indexed_files.keys()) if hasattr(indexed_files, 'keys') else []
+
+     response.update(kwargs)
+
+     return response
+
+
+ def get_document_files(directory: str) -> Dict[str, List[str]]:
+     """
+     Get all indexable document files in the given directory.
+
+     Args:
+         directory: Directory to search for document files
+
+     Returns:
+         dict: Dictionary with file lists by type and a combined list
+     """
+     document_extensions = ['.pdf', '.txt', '.html', '.md']
+     all_files = []
+
+     dir_path = Path(directory)
+     if dir_path.exists() and dir_path.is_dir():
+         all_files = [f.name for f in dir_path.iterdir()
+                      if f.is_file() and any(f.name.lower().endswith(ext) for ext in document_extensions)]
+
+     return {
+         'all': all_files,
+         'pdf': [f for f in all_files if f.lower().endswith('.pdf')],
+         'txt': [f for f in all_files if f.lower().endswith('.txt')],
+         'html': [f for f in all_files if f.lower().endswith('.html')],
+         'md': [f for f in all_files if f.lower().endswith('.md')],
+     }
+
+
+ async def search_papers(
+     ctx: RunContext[PaperQADependencies],
+     query: str,
+     max_papers: Optional[int] = None,
+ ) -> Any:
+     """
+     Search for papers relevant to the query using PaperQA.
+
+     Args:
+         ctx: The run context
+         query: The search query
+         max_papers: Maximum number of papers to return (overrides the configured default)
+
+     Returns:
+         A simplified response with paper details and metadata
+     """
+     try:
+         settings = ctx.deps.set_paperqa_settings()
+
+         if max_papers is not None:
+             settings.agent.search_count = max_papers
+
+         try:
+             index = await get_directory_index(settings=settings, build=False)
+             index_files = await index.index_files
+             logger.info(f"Found existing index with {len(index_files)} files")
+         except Exception as e:
+             # If the error is about an empty index, try to build it
+             if "was empty, please rebuild it" in str(e):
+                 logger.info("Index is empty, attempting to rebuild...")
+                 index = await get_directory_index(settings=settings, build=True)
+                 index_files = await index.index_files
+
+                 if not index_files:
+                     return {
+                         "message": "No papers are currently indexed. You can add papers using the add_paper function.",
+                         "papers": []
+                     }
+             else:
+                 raise
+
+         response = await agent_query(
+             query=f"Find scientific papers about: {query}",
+             settings=settings
+         )
+
+         return response
+     except Exception as e:
+         if isinstance(e, ModelRetry):
+             raise
+
+         if "was empty, please rebuild it" in str(e):
+             return {
+                 "message": "No papers are currently indexed. You can add papers using the add_paper function.",
+                 "papers": []
+             }
+
+         raise ModelRetry(f"Error searching papers: {str(e)}")
+
+
+ async def query_papers(
+     ctx: RunContext[PaperQADependencies],
+     query: str,
+ ) -> Any:
+     """
+     Query the papers to answer a specific question using PaperQA.
+
+     Args:
+         ctx: The run context
+         query: The question to answer based on the papers
+
+     Returns:
+         The full PQASession object with the answer and context
+     """
+     try:
+         settings = ctx.deps.set_paperqa_settings()
+
+         try:
+             # First try to get the index without building
+             index = await get_directory_index(settings=settings, build=False)
+             index_files = await index.index_files
+
+             # The index exists; make sure it actually contains files
+             if not index_files:
+                 return {
+                     "message": "No papers are currently indexed. You can add papers using the add_paper function.",
+                     "papers": []
+                 }
+         except Exception as e:
+             if "was empty, please rebuild it" in str(e):
+                 return {
+                     "message": "No papers are currently indexed. You can add papers using the add_paper function.",
+                     "papers": []
+                 }
+             else:
+                 raise
+
+         response = await agent_query(
+             query=query,
+             settings=settings
+         )
+
+         return response
+     except Exception as e:
+         if isinstance(e, ModelRetry):
+             raise
+
+         if "was empty, please rebuild it" in str(e):
+             return {
+                 "message": "No papers are currently indexed. You can add papers using the add_paper function.",
+                 "papers": []
+             }
+
+         raise ModelRetry(f"Error querying papers: {str(e)}")
+
+
+ async def build_index(
+     ctx: RunContext[PaperQADependencies],
+ ) -> Any:
+     """
+     Rebuild the search index for papers.
+
+     Args:
+         ctx: The run context
+
+     Returns:
+         Information about the indexing process
+     """
+     try:
+         settings = ctx.deps.set_paperqa_settings()
+         paper_directory = settings.agent.index.paper_directory
+
+         os.makedirs(paper_directory, exist_ok=True)
+
+         doc_files = get_document_files(paper_directory)
+
+         if not doc_files['all']:
+             return create_response(
+                 success=True,
+                 paper_directory=paper_directory,
+                 doc_files=doc_files,
+                 indexed_files={},
+                 message=f"No indexable documents found in {paper_directory}. Add documents (PDF, TXT, HTML, MD) to this directory before indexing."
+             )
+
+         try:
+             logger.info(f"Building index for {len(doc_files['all'])} documents in {paper_directory}:")
+             if doc_files['pdf']:
+                 logger.info(f"  - {len(doc_files['pdf'])} PDF files")
+             if doc_files['txt']:
+                 logger.info(f"  - {len(doc_files['txt'])} text files")
+             if doc_files['html']:
+                 logger.info(f"  - {len(doc_files['html'])} HTML files")
+             if doc_files['md']:
+                 logger.info(f"  - {len(doc_files['md'])} Markdown files")
+
+             index = await get_directory_index(settings=settings, build=True)
+             index_files = await index.index_files
+
+             if not index_files:
+                 return create_response(
+                     success=True,
+                     paper_directory=paper_directory,
+                     doc_files=doc_files,
+                     indexed_files={},
+                     documents_found=doc_files,
+                     message=f"Found {len(doc_files['all'])} documents but none were successfully indexed. This could be due to parsing issues with the documents."
+                 )
+
+             return create_response(
+                 success=True,
+                 paper_directory=paper_directory,
+                 doc_files=doc_files,
+                 indexed_files=index_files,
+                 message=f"Successfully indexed {len(index_files)} document chunks from {len(doc_files['all'])} files."
+             )
+         except Exception as e:
+             return create_response(
+                 success=False,
+                 paper_directory=paper_directory,
+                 doc_files=doc_files,
+                 message=f"Error indexing documents: {str(e)}",
+                 error=str(e)
+             )
+     except Exception as e:
+         if isinstance(e, ModelRetry):
+             raise
+         raise ModelRetry(f"Error building index: {str(e)}")
+
+
+ async def add_paper(
+     ctx: RunContext[PaperQADependencies],
+     path: str,
+     citation: Optional[str] = None,
+     auto_index: bool = True,
+ ) -> Any:
+     """
+     Add a specific paper to the collection.
+
+     Args:
+         ctx: The run context
+         path: Path to the paper file or URL
+         citation: Optional citation for the paper
+         auto_index: Whether to automatically rebuild the index after adding the paper
+
+     Returns:
+         Information about the added paper
+     """
+     try:
+         settings = ctx.deps.set_paperqa_settings()
+
+         paper_directory = settings.agent.index.paper_directory
+         os.makedirs(paper_directory, exist_ok=True)
+
+         # For URLs, we need to:
+         # 1. Download the PDF
+         # 2. Save it to the paper directory
+         # 3. Process it with Docs
+         if path.startswith(("http://", "https://")):
+             import requests
+             from urllib.parse import urlparse
+
+             url_parts = urlparse(path)
+             file_name = os.path.basename(url_parts.path)
+             if not file_name or not file_name.lower().endswith('.pdf'):
+                 file_name = "paper.pdf"
+
+             target_path = os.path.join(paper_directory, file_name)
+
+             try:
+                 response = requests.get(path, stream=True)
+                 response.raise_for_status()
+
+                 with open(target_path, 'wb') as f:
+                     for chunk in response.iter_content(chunk_size=8192):
+                         f.write(chunk)
+
+                 logger.info(f"Downloaded {path} to {target_path}")
+
+                 docs = Docs()
+                 docname = await docs.aadd(
+                     path=target_path,
+                     citation=citation,
+                     settings=settings,
+                 )
+             except Exception as e:
+                 # If the download fails, fall back to docs.aadd_url
+                 logger.warning(f"Download failed: {str(e)}, falling back to docs.aadd_url")
+                 docs = Docs()
+                 docname = await docs.aadd_url(
+                     url=path,
+                     citation=citation,
+                     settings=settings,
+                 )
+
+                 # If aadd_url succeeded, try to find where it saved the file
+                 if docname and hasattr(docs, 'docs') and docname in docs.docs:
+                     doc = docs.docs[docname]
+                     if hasattr(doc, 'filepath') and os.path.exists(doc.filepath):
+                         import shutil
+                         target_path = os.path.join(paper_directory, f"{docname}.pdf")
+                         if not os.path.exists(target_path):
+                             shutil.copy2(doc.filepath, target_path)
+                             logger.info(f"Copied from {doc.filepath} to {target_path}")
+         else:
+             # For file paths, resolve relative paths against known locations
+             if not os.path.isabs(path):
+                 full_path = os.path.join(ctx.deps.paper_directory, path)
+                 if os.path.exists(full_path):
+                     path = full_path
+                 else:
+                     full_path = os.path.join(ctx.deps.workdir.location, path)
+                     if os.path.exists(full_path):
+                         path = full_path
+
+             # If the path is outside the paper directory, copy it there
+             if os.path.exists(path) and paper_directory not in path:
+                 import shutil
+                 target_path = os.path.join(paper_directory, os.path.basename(path))
+                 if not os.path.exists(target_path):
+                     shutil.copy2(path, target_path)
+
+             docs = Docs()
+             docname = await docs.aadd(
+                 path=path,
+                 citation=citation,
+                 settings=settings,
+             )
+
+         if docname:
+             doc = next((d for d in docs.docs.values() if d.docname == docname), None)
+
+             result = {
+                 "success": True,
+                 "docname": docname,
+                 "doc": doc,
+             }
+
+             if auto_index:
+                 try:
+                     index_result = await build_index(ctx)
+                     result["index_result"] = index_result
+                     if index_result["success"]:
+                         result["message"] = f"Paper added and indexed successfully. {index_result['indexed_chunks_count']} document chunks now in the index."
+                     else:
+                         result["message"] = f"Paper added but indexing failed: {index_result['error']}"
+                 except Exception as e:
+                     result["message"] = f"Paper added but indexing failed: {str(e)}"
+             else:
+                 result["message"] = "Paper added successfully. Use 'aurelian paperqa index' to rebuild the index and make this paper searchable."
+
+             return result
+         else:
+             return {
+                 "success": False,
+                 "message": "Paper was already in the collection."
+             }
+     except Exception as e:
+         if isinstance(e, ModelRetry):
+             raise
+         raise ModelRetry(f"Error adding paper: {str(e)}")
+
+
+ async def add_papers(
+     ctx: RunContext[PaperQADependencies],
+     directory: str,
+     citation: Optional[str] = None,
+     auto_index: bool = True,
+ ) -> Any:
+     """
+     Add multiple papers from a directory to the collection.
+
+     Args:
+         ctx: The run context
+         directory: Path to the directory containing papers
+         citation: Optional citation prefix to use for all papers (each filename is appended)
+         auto_index: Whether to automatically rebuild the index after adding the papers
+
+     Returns:
+         Information about the added papers
+     """
+     try:
+         settings = ctx.deps.set_paperqa_settings()
+         paper_directory = settings.agent.index.paper_directory
+         os.makedirs(paper_directory, exist_ok=True)
+
+         if not Path(directory).is_dir():
+             return create_response(
+                 success=False,
+                 paper_directory=paper_directory,
+                 doc_files={"all": [], "pdf": [], "txt": [], "html": [], "md": []},
+                 message=f"Directory not found: {directory}"
+             )
+
+         doc_files = get_document_files(directory)
+
+         if not doc_files['all']:
+             return create_response(
+                 success=False,
+                 paper_directory=paper_directory,
+                 doc_files=doc_files,
+                 message=f"No indexable documents found in {directory}."
+             )
+
+         logger.info(f"Found {len(doc_files['all'])} documents in {directory}:")
+         if doc_files['pdf']:
+             logger.info(f"  - {len(doc_files['pdf'])} PDF files")
+         if doc_files['txt']:
+             logger.info(f"  - {len(doc_files['txt'])} text files")
+         if doc_files['html']:
+             logger.info(f"  - {len(doc_files['html'])} HTML files")
+         if doc_files['md']:
+             logger.info(f"  - {len(doc_files['md'])} Markdown files")
+
+         docs = Docs()
+         added_papers = []
+
+         for doc_file in doc_files['all']:
+             file_path = os.path.join(directory, doc_file)
+             try:
+                 logger.info(f"Adding document: {file_path}")
+
+                 doc_citation = None
+                 if citation:
+                     doc_citation = f"{citation} - {doc_file}"
+
+                 if Path(file_path).exists() and paper_directory not in file_path:
+                     import shutil
+                     target_path = os.path.join(paper_directory, os.path.basename(file_path))
+                     if not Path(target_path).exists():
+                         shutil.copy2(file_path, target_path)
+                         logger.info(f"Copied {file_path} to {target_path}")
+
+                 docname = await docs.aadd(
+                     path=file_path,
+                     citation=doc_citation,
+                     settings=settings,
+                 )
+                 if docname:
+                     doc = next((d for d in docs.docs.values() if d.docname == docname), None)
+                     added_papers.append({
+                         "file": doc_file,
+                         "docname": docname,
+                         "citation": doc_citation,
+                         "doc": doc
+                     })
+                     logger.info(f"Successfully added document: {doc_file}")
+             except Exception as e:
+                 logger.error(f"Error adding {file_path}: {e}")
+
+         index_result = None
+         if auto_index and added_papers:
+             try:
+                 index_result = await build_index(ctx)
+                 logger.info(f"Index rebuilt with {len(index_result.get('indexed_papers', []))} papers")
+             except Exception as e:
+                 logger.error(f"Error rebuilding index: {e}")
+                 index_result = {"success": False, "error": str(e)}
+
+         response = create_response(
+             success=True,
+             paper_directory=paper_directory,
+             doc_files=doc_files,
+             message=f"Successfully added {len(added_papers)} documents out of {len(doc_files['all'])}",
+             documents_added=len(added_papers),
+             added_documents=added_papers
+         )
+
+         if index_result:
+             response["index_result"] = index_result
+
+         return response
+     except Exception as e:
+         if isinstance(e, ModelRetry):
+             raise
+         raise ModelRetry(f"Error adding papers: {str(e)}")
+
+
+ async def list_papers(
+     ctx: RunContext[PaperQADependencies],
+ ) -> Any:
+     """
+     List all papers in the current paper directory.
+
+     Args:
+         ctx: The run context
+
+     Returns:
+         Information about all papers in the paper directory
+     """
+     try:
+         settings = ctx.deps.set_paperqa_settings()
+         paper_directory = settings.agent.index.paper_directory
+
+         doc_files = get_document_files(paper_directory)
+
+         indexed_files = []
+         try:
+             index = await get_directory_index(settings=settings, build=False)
+             index_files = await index.index_files
+             indexed_files = list(index_files.keys())
+             logger.info(f"Found {len(indexed_files)} indexed document chunks")
+         except Exception:
+             logger.info("No index found or index is empty")
+
+         return create_response(
+             success=True,
+             paper_directory=paper_directory,
+             doc_files=doc_files,
+             indexed_files=indexed_files,
+             message=f"Found {len(doc_files['all'])} documents and {len(indexed_files)} indexed chunks",
+             files_in_directory=doc_files['all'],
+             files_by_type={
+                 "pdf": doc_files['pdf'],
+                 "txt": doc_files['txt'],
+                 "html": doc_files['html'],
+                 "md": doc_files['md']
+             },
+             note="To search papers, they must be both in the paper directory AND indexed. If there are files in the directory but not indexed, use the CLI command 'aurelian paperqa index -d <directory>' to index them."
+         )
+     except Exception as e:
+         if isinstance(e, ModelRetry):
+             raise
+         raise ModelRetry(f"Error listing papers: {str(e)}")
@@ -0,0 +1,17 @@
+ #!/usr/bin/env python3
+ """
+ Main entry point to run the talisman agent.
+ """
+ import os
+ import sys
+ from pydantic_ai import chat
+
+ # Add the parent directory to the path for absolute imports
+ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
+
+ from aurelian.agents.talisman.talisman_agent import talisman_agent
+ from aurelian.agents.talisman.talisman_config import get_config
+
+ if __name__ == "__main__":
+     config = get_config()
+     chat(talisman_agent, deps=config)
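Note that the chat helper imported above may not be available in every pydantic_ai release; Agent.run_sync is the stable entry point. A minimal fallback sketch, assuming talisman_agent is a regular pydantic_ai Agent whose deps_type matches get_config():

# Fallback sketch, not part of the package: a plain REPL over Agent.run_sync.
from aurelian.agents.talisman.talisman_agent import talisman_agent
from aurelian.agents.talisman.talisman_config import get_config

def main() -> None:
    config = get_config()
    while True:
        try:
            prompt = input("talisman> ")
        except EOFError:
            break
        if prompt.strip():
            result = talisman_agent.run_sync(prompt, deps=config)
            print(result.output)  # named `.data` on older pydantic_ai releases

if __name__ == "__main__":
    main()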
@@ -0,0 +1,70 @@
+ """
+ CLI output formatting for the talisman agent.
+ """
+ import logging
+ import re
+
+ def format_talisman_output(result):
+     """Format the talisman output to ensure it always has all three sections."""
+     logging.info("Post-processing talisman output")
+
+     # Check if the output already has the expected sections
+     has_narrative = re.search(r'^\s*##\s*Narrative', result, re.MULTILINE) is not None
+     has_functional_terms = re.search(r'^\s*##\s*Functional Terms Table', result, re.MULTILINE) is not None
+     has_gene_summary = re.search(r'^\s*##\s*Gene Summary Table', result, re.MULTILINE) is not None
+
+     # If all sections are present, return as is
+     if has_narrative and has_functional_terms and has_gene_summary:
+         return result
+
+     # Otherwise reconstruct the output.
+     # Extract the gene summary table if it exists (up to a blank line or end of text)
+     gene_table_match = re.search(r'^\s*##\s*Gene Summary Table\s*\n(.*?)(?=\n\n|\Z)',
+                                  result, re.MULTILINE | re.DOTALL)
+
+     if gene_table_match:
+         gene_table = gene_table_match.group(0)
+
+         # Whatever text remains may serve as the narrative
+         narrative_text = result.replace(gene_table, '').strip()
+
+         # Create a proper narrative section if missing
+         if not has_narrative and narrative_text:
+             narrative_section = "## Narrative\n" + narrative_text + "\n\n"
+         else:
+             narrative_section = "## Narrative\nThese genes may have related functions as indicated in the gene summary table.\n\n"
+
+         # Create a functional terms section if missing
+         if not has_functional_terms:
+             # Extract gene IDs from the gene table (skip header and separator rows)
+             gene_ids = []
+             for line in gene_table.split('\n'):
+                 if '|' in line and not line.strip().startswith('|--') and 'ID |' not in line:
+                     parts = line.split('|')
+                     if len(parts) > 1:
+                         gene_id = parts[1].strip()
+                         if gene_id and gene_id != 'ID':
+                             gene_ids.append(gene_id)
+
+             # Create a simple functional terms table
+             functional_terms = "## Functional Terms Table\n"
+             functional_terms += "| Functional Term | Genes | Source |\n"
+             functional_terms += "|-----------------|-------|--------|\n"
+             functional_terms += f"| Gene set | {', '.join(gene_ids)} | Analysis |\n\n"
+         else:
+             # Find and extract the existing functional terms section
+             ft_match = re.search(r'^\s*##\s*Functional Terms Table\s*\n(.*?)(?=^\s*##\s*|\Z)',
+                                  result, re.MULTILINE | re.DOTALL)
+             functional_terms = ft_match.group(0) if ft_match else ""
+
+         # Reconstruct the output with all sections
+         formatted_output = "# Gene Set Analysis\n\n" + narrative_section + functional_terms + gene_table
+         return formatted_output
+
+     # If no gene table was found, return the original result
+     return result
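To illustrate the reshaping this helper performs, a small made-up example: a response containing only prose and a gene summary table comes back with the two missing sections filled in.

# Hypothetical input: narrative prose plus a bare Gene Summary Table.
raw = (
    "TP53 and MDM2 form a negative feedback loop.\n\n"
    "## Gene Summary Table\n"
    "| ID | Description |\n"
    "|----|-------------|\n"
    "| TP53 | Tumor suppressor |\n"
    "| MDM2 | E3 ubiquitin ligase |\n"
)
print(format_talisman_output(raw))
# Produces a "# Gene Set Analysis" report containing a ## Narrative section
# (the prose above), a generated ## Functional Terms Table listing TP53 and
# MDM2, and the original ## Gene Summary Table.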
@@ -0,0 +1,18 @@
+ #!/usr/bin/env python3
+ """
+ Standalone script to run the talisman agent directly.
+ """
+ import os
+ import sys
+ from pydantic_ai import chat
+
+ # Add the src directory to the path for imports
+ src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../"))
+ sys.path.insert(0, src_dir)
+
+ from aurelian.agents.talisman.talisman_agent import talisman_agent
+ from aurelian.agents.talisman.talisman_config import get_config
+
+ if __name__ == "__main__":
+     config = get_config()
+     chat(talisman_agent, deps=config)