npcsh 1.0.25__py3-none-any.whl → 1.0.27__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
Files changed (50)
  1. npcsh/_state.py +105 -105
  2. npcsh/alicanto.py +88 -88
  3. npcsh/corca.py +423 -81
  4. npcsh/guac.py +110 -107
  5. npcsh/mcp_helpers.py +45 -45
  6. npcsh/mcp_server.py +16 -17
  7. npcsh/npc.py +16 -17
  8. npcsh/npc_team/jinxs/bash_executer.jinx +1 -1
  9. npcsh/npc_team/jinxs/edit_file.jinx +6 -6
  10. npcsh/npc_team/jinxs/image_generation.jinx +5 -5
  11. npcsh/npc_team/jinxs/screen_cap.jinx +2 -2
  12. npcsh/npcsh.py +5 -2
  13. npcsh/plonk.py +8 -8
  14. npcsh/routes.py +110 -90
  15. npcsh/spool.py +13 -13
  16. npcsh/wander.py +37 -37
  17. npcsh/yap.py +72 -72
  18. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/bash_executer.jinx +1 -1
  19. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/edit_file.jinx +6 -6
  20. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/image_generation.jinx +5 -5
  21. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/screen_cap.jinx +2 -2
  22. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/METADATA +12 -6
  23. npcsh-1.0.27.dist-info/RECORD +73 -0
  24. npcsh-1.0.25.dist-info/RECORD +0 -73
  25. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  26. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/alicanto.png +0 -0
  27. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/corca.npc +0 -0
  28. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/corca.png +0 -0
  29. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/foreman.npc +0 -0
  30. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/frederic.npc +0 -0
  31. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/frederic4.png +0 -0
  32. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/guac.png +0 -0
  33. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
  34. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  35. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  36. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  37. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  38. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonk.npc +0 -0
  39. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonk.png +0 -0
  40. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  41. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  42. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
  43. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  44. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/sibiji.png +0 -0
  45. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/spool.png +0 -0
  46. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/yap.png +0 -0
  47. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/WHEEL +0 -0
  48. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/entry_points.txt +0 -0
  49. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/licenses/LICENSE +0 -0
  50. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/top_level.txt +0 -0
npcsh/alicanto.py CHANGED
@@ -28,9 +28,9 @@ def generate_random_npcs(num_npcs: int,
  Generate a diverse set of NPCs with different expertise and perspectives
  related to the research request.
  """
- # For single NPC, use a simpler approach to avoid unnecessary LLM calls
+
  if num_npcs == 1:
- # Generate directly without complex JSON parsing
+
  name = f"Expert Researcher on {request}"
  expertise = "Interdisciplinary semantic theory researcher"
  background = "Extensive experience in linguistics, cognitive science, and NLP"
@@ -60,7 +60,7 @@ def generate_random_npcs(num_npcs: int,
  npc.system_prompt = system_prompt
  return [npc]
 
- # Generate diverse expert personas based on the research topic
+
  prompt = f"""
  For the research topic: "{request}"
 
@@ -80,24 +80,24 @@ def generate_random_npcs(num_npcs: int,
  prompt=prompt,
  model=model,
  provider=provider,
- format="json" # Directly request JSON format
+ format="json"
  )
 
- # Response will be properly structured JSON from get_llm_response
+
  experts_data = response.get('response', [])
 
- # Create NPC instances from expert data
+
  npcs = []
 
- # Handle experts_data safely whether it's a list or not
+
  if isinstance(experts_data, list):
  experts_to_process = experts_data[:num_npcs]
  else:
- # If not a list, try to convert or use as a single item
+
  if isinstance(experts_data, dict):
  experts_to_process = [experts_data]
  else:
- # Create a basic expert as fallback
+
  experts_to_process = [{
  "name": f"Expert_1",
  "expertise": "Interdisciplinary researcher",
@@ -110,7 +110,7 @@ def generate_random_npcs(num_npcs: int,
  for expert in experts_to_process:
  name = expert.get("name", f"Expert_{len(npcs)}")
 
- # Create a system prompt that defines this NPC's expertise and perspective
+
  system_prompt = f"""
  You are {name}, {expert.get('expertise', 'an expert researcher')}.
 
@@ -131,7 +131,7 @@ def generate_random_npcs(num_npcs: int,
  the essence of your insights in the most efficient way possible.
  """
 
- # Create NPC with name and primary_directive (required parameters)
+
  npc = NPC(name=name, primary_directive=f"Research expert on {request}")
  npc.system_prompt = system_prompt
  npcs.append(npc)
@@ -165,7 +165,7 @@ def generate_research_chain(request: str,
  """
  chain = []
 
- # Initial research prompt
+
  initial_prompt = f"""
  Research request: {request}
 
@@ -186,12 +186,12 @@
 
  chain.append(initial_findings)
 
- # For each level of depth, continue the research
+
  for i in range(1, depth):
- # Get recent memory to include as context
+
  memory_context = "\n\n".join(chain[-memory:]) if len(chain) > 0 else ""
 
- # Simple follow-up prompt without specific research modes
+
  next_prompt = f"""
  Research request: {request}
 
@@ -234,22 +234,22 @@ def simulate_experiments(research: Dict[str, Any],
  Returns:
  Dictionary mapping experiment titles to experiment data
  """
- # Prepare context with key facts
+
  facts_context = ""
 
- # Add facts from thematic groups
+
  if "fact_groups" in research:
- for group, facts in list(research["fact_groups"].items())[:5]: # Use top 5 groups
+ for group, facts in list(research["fact_groups"].items())[:5]:
  facts_context += f"\n\nThematic Group: {group}\n"
  facts_context += format_facts_list(facts)
 
- # Add insights from combinations
+
  if "combination_insights" in research:
  facts_context += "\n\nEmergent Insights:\n"
- for combo in research["combination_insights"][:3]: # Use top 3 insights
+ for combo in research["combination_insights"][:3]:
  facts_context += f"• {combo.get('emergent_insight', '')}\n"
 
- # Create prompt to design experiments
+
  prompt = f"""
  You are a creative research scientist exploring the topic: "{request}"
 
@@ -287,9 +287,9 @@
  format="json")
  experiments = response.get("response", {})
 
- # Limit experiments if needed
+
  if max_experiments and isinstance(experiments, dict) and len(experiments) > max_experiments:
- # Sort by title length (approximating complexity/interestingness)
+
  sorted_exps = sorted(experiments.items(), key=lambda x: len(x[0]), reverse=True)
  experiments = dict(sorted_exps[:max_experiments])
 
@@ -334,17 +334,17 @@ def alicanto(request: str,
  Returns:
  Dictionary with research results
  """
- # Use default model/provider if not specified
+
  if model is None:
  model = NPCSH_CHAT_MODEL
  if provider is None:
  provider = NPCSH_CHAT_PROVIDER
 
- # Generate researcher NPCs with diverse expertise
+
  print(f"Generating {num_npcs} diverse researcher NPCs...")
  researchers = generate_random_npcs(num_npcs, model, provider, request)
 
- # Generate research chains for each NPC
+
  print(f"Generating research chains (depth={depth})...")
  research_chains = {}
  facts_by_researcher = {}
@@ -364,18 +364,18 @@
  )
  research_chains[npc.name] = chain
 
- # Extract facts from chain
+
  print(f" Extracting facts from {npc.name}'s research...")
  facts = extract_facts("\n\n".join(chain), model=model, provider=provider, npc=npc, context=request)
 
- # Limit facts if specified
+
  if max_facts_per_chain is not None and len(facts) > max_facts_per_chain:
  facts = facts[:max_facts_per_chain]
 
  facts_by_researcher[npc.name] = facts
  print({"fact_list": facts})
 
- # Identify thematic groups across all research
+
  print("Identifying thematic groups across all research insights...")
  all_facts = []
  for researcher_facts in facts_by_researcher.values():
@@ -383,11 +383,11 @@
 
  groups = identify_groups(all_facts, model=model, provider=provider)
 
- # Limit number of groups if specified
+
  if max_thematic_groups is not None and len(groups) > max_thematic_groups:
  groups = groups[:max_thematic_groups]
 
- # Assign facts to groups
+
  fact_groups = {group: [] for group in groups}
  for fact in all_facts:
  group_assignments = assign_groups_to_fact(fact, groups, model=model, provider=provider)
@@ -396,7 +396,7 @@
  if group in fact_groups:
  fact_groups[group].append(fact)
 
- # Evaluate thematic groups
+
  print("Evaluating thematic groups for quality and risk...")
  group_evaluations = evaluate_thematic_groups(
  fact_groups,
@@ -406,7 +406,7 @@
  max_criticisms=max_criticisms_per_group
  )
 
- # Generate group summaries
+
  group_summaries = {}
  for group_name, facts in fact_groups.items():
  if not facts:
@@ -433,7 +433,7 @@
 
  group_summaries[group_name] = summary
 
- # Generate conceptual combinations to spark novel ideas
+
  print("Generating conceptual combinations to spark novel insights...")
  fact_lists = list(facts_by_researcher.values())
  combinations = generate_conceptual_combinations(
@@ -442,7 +442,7 @@
  num_combinations=max_conceptual_combinations if max_conceptual_combinations is not None else 5
  )
 
- # Analyze combinations for emergent insights
+
  print("Analyzing conceptual combinations for emergent insights...")
  combination_insights = analyze_conceptual_combinations(
  combinations,
@@ -451,31 +451,31 @@
  provider=provider
  )
 
- # Identify meta-patterns
+
  print("Identifying meta-patterns across research approaches...")
  meta_patterns = identify_patterns_across_chains(research_chains, model=model, provider=provider)
 
- # Generate consolidated research summary
+
  print("Consolidating research into comprehensive synthesis...")
 
- # Extract key points for integration
+
  integration_points = []
 
- # Add top facts from each thematic group
+
  for group, facts in fact_groups.items():
  if facts:
  integration_points.append(f"From thematic group '{group}':")
- for fact in facts[:3]: # Top 3 facts per group
+ for fact in facts[:3]:
  integration_points.append(f"- {fact}")
 
- # Add insights from combinations
- for insight in combination_insights[:3]: # Top 3 insights
+
+ for insight in combination_insights[:3]:
  integration_points.append(f"Emergent insight: {insight.get('emergent_insight', '')}")
 
- # Add key points from meta-analysis
+
  integration_points.append(f"Meta-analysis insight: {meta_patterns.get('meta_analysis', '')[:300]}...")
 
- # Generate integration
+
  integration_prompt = f"""
  Consolidate these diverse research findings into a comprehensive, integrative analysis of the topic:
  "{request}"
@@ -498,7 +498,7 @@
  if isinstance(integration, (list, dict)) or hasattr(integration, '__iter__') and not isinstance(integration, (str, bytes)):
  integration = ''.join([str(chunk) for chunk in integration])
 
- # Create concise summary
+
  summary_prompt = f"""
  Create a concise executive summary (150 words max) of this research on:
  "{request}"
@@ -514,7 +514,7 @@
  if isinstance(ideas_summarized, (list, dict)) or hasattr(ideas_summarized, '__iter__') and not isinstance(ideas_summarized, (str, bytes)):
  ideas_summarized = ''.join([str(chunk) for chunk in ideas_summarized])
 
- # Simulate experiments
+
  print("Generating simulated experiments...")
  research_results = {
  "research_request": request,
@@ -536,12 +536,12 @@
  max_experiments=max_experiments
  )
 
- # Generate PDF report if requested
+
  pdf_path = None
  if generate_pdf:
  pdf_path = generate_pdf_report(request, model, provider, research_results, experiments)
 
- # Final research results
+
  research_results["experiments"] = experiments
  research_results["pdf_path"] = pdf_path
 
@@ -603,7 +603,7 @@ def evaluate_thematic_groups(fact_groups: Dict[str, List[str]], request: str, mo
  if isinstance(eval_text, (list, dict)) or hasattr(eval_text, '__iter__') and not isinstance(eval_text, (str, bytes)):
  eval_text = ''.join([str(chunk) for chunk in eval_text])
 
- # Parse scores
+
  scores = {}
  criticisms = []
  in_criticisms = False
@@ -618,12 +618,12 @@
  continue
 
  if in_criticisms:
- # Parse criticisms
+
  if line[0].isdigit() and line[1:].startswith('. '):
  criticism = line[line.find(' ')+1:].strip()
  criticisms.append(criticism)
  else:
- # Parse scores
+
  if ':' in line:
  metric, score_str = line.split(':', 1)
  metric = metric.strip()
@@ -633,7 +633,7 @@
  except ValueError:
  pass
 
- # Apply criticism limit if specified
+
  if max_criticisms is not None and len(criticisms) > max_criticisms:
  criticisms = criticisms[:max_criticisms]
 
@@ -656,13 +656,13 @@ def generate_conceptual_combinations(fact_lists: List[List[str]], sample_size: i
  Returns:
  List of dictionaries containing the combinations and generated insights
  """
- # Flatten facts with researcher ID
+
  all_facts_with_source = []
  for i, facts in enumerate(fact_lists):
  for fact in facts:
  all_facts_with_source.append((i, fact))
 
- # Generate random combinations
+
  combinations = []
  for _ in range(num_combinations):
  if len(all_facts_with_source) <= sample_size:
@@ -738,7 +738,7 @@ def identify_patterns_across_chains(chains: Dict[str, List[str]], model: str = N
  Returns:
  Dictionary with meta-analysis results
  """
- # Prepare a summary of each research chain
+
  chain_summaries = {}
  for name, chain in chains.items():
  full_text = "\n\n".join(chain)
@@ -759,7 +759,7 @@
 
  chain_summaries[name] = summary
 
- # Generate meta-analysis across all chains
+
  all_summaries = "\n\n".join([f"[{name}]\n{summary}" for name, summary in chain_summaries.items()])
 
  meta_analysis_prompt = f"""
@@ -783,7 +783,7 @@ def identify_patterns_across_chains(chains: Dict[str, List[str]], model: str = N
  if isinstance(meta_analysis, (list, dict)) or hasattr(meta_analysis, '__iter__') and not isinstance(meta_analysis, (str, bytes)):
  meta_analysis = ''.join([str(chunk) for chunk in meta_analysis])
 
- # Generate innovative research directions
+
  directions_prompt = f"""
  Based on this meta-analysis of research approaches to the topic:
 
@@ -825,11 +825,11 @@ def preprocess_content_for_pdf(content: str, model: str = None, provider: str =
  Returns:
  Formatted content ready for PDF generation
  """
- # Handle non-string content
+
  if not isinstance(content, str):
  content = str(content)
 
- # If in concise mode, create a drastically shortened version
+
  if concise_mode:
 
  if model is None:
@@ -852,7 +852,7 @@ def preprocess_content_for_pdf(content: str, model: str = None, provider: str =
  response = get_llm_response(prompt=concise_prompt, model=model, provider=provider)
  content = response.get('response', '')
 
- # Basic cleanup for any problematic characters that cause PDF issues
+
  for char, replacement in {
  '%': '',
  '#': '-',
@@ -865,7 +865,7 @@ def preprocess_content_for_pdf(content: str, model: str = None, provider: str =
  }.items():
  content = content.replace(char, replacement)
 
- # Apply word count limit if the content is too long
+
  words = content.split()
  if len(words) > max_words:
  content = ' '.join(words[:max_words]) + '... [truncated]'
@@ -897,13 +897,13 @@ def generate_pdf_report(request: str,
  if output_path is None:
  output_path = os.getcwd()
 
- # Create filename
+
  sanitized_request = "".join(c for c in request if c.isalnum() or c.isspace()).strip()
  sanitized_request = sanitized_request.replace(" ", "_")[:50]
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
  filename = f"{sanitized_request}_{timestamp}"
 
- # Check for LaTeX installation
+
  try:
  subprocess.run(["which", "pdflatex"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  except subprocess.CalledProcessError:
@@ -915,32 +915,32 @@
  except subprocess.CalledProcessError as e:
  print(f"Error installing LaTeX: {str(e)}")
  return None
- # Create chart for thematic groups using matplotlib
+
  chart_path = None
  try:
  if "group_evaluations" in research and research["group_evaluations"]:
- # Create basic folder for figures
+
  figures_dir = os.path.join(output_path, "figures")
  os.makedirs(figures_dir, exist_ok=True)
 
  fig, ax = plt.subplots(figsize=(7.5, 4))
- plt.style.use('ggplot') # Clean style without seaborn
+ plt.style.use('ggplot')
 
  groups = []
  scores = []
 
  for group_name, eval_data in research["group_evaluations"].items():
- groups.append(group_name[:30]) # Truncate long names
+ groups.append(group_name[:30])
  quality_score = (eval_data.get("Novelty", 5) + eval_data.get("Depth", 5) +
  eval_data.get("Practicality", 5) + eval_data.get("Evidence", 5)) / 4
  scores.append(quality_score)
 
- # Sort by score
+
  sorted_data = sorted(zip(groups, scores), key=lambda x: x[1], reverse=True)
  groups = [x[0] for x in sorted_data]
  scores = [x[1] for x in sorted_data]
 
- # Create horizontal bar chart
+
  y_pos = range(len(groups))
  ax.barh(y_pos, scores, color='steelblue')
  ax.set_yticks(y_pos)
@@ -949,24 +949,24 @@
  ax.set_title('Thematic Groups by Quality Score')
  plt.tight_layout()
 
- # Save chart
+
  chart_path = os.path.join(figures_dir, f"thematic_groups.pdf")
  plt.savefig(chart_path, dpi=300, bbox_inches='tight', format='pdf')
  plt.close()
  except Exception as e:
  print(f"Warning: Could not generate chart: {str(e)}")
 
- # Create LaTeX document
+
  latex_content = generate_latex_document(request, model, provider, research, experiments, chart_path, max_pages)
 
- # Write LaTeX to file
+
  tex_path = os.path.join(output_path, f"{filename}.tex")
  with open(tex_path, "w") as f:
  f.write(latex_content)
 
- # Use subprocess to run pdflatex without check=True to prevent exceptions
+
  try:
- # First run
+
  result = subprocess.run(
  ["pdflatex", "-interaction=nonstopmode", "-output-directory", output_path, tex_path],
  stdout=subprocess.PIPE,
@@ -975,9 +975,9 @@
 
  if result.returncode != 0:
  print(f"Warning: First LaTeX run had issues (exit code {result.returncode})")
- # Still continue - sometimes the second run fixes things
+
 
- # Second run for references
+
  result = subprocess.run(
  ["pdflatex", "-interaction=nonstopmode", "-output-directory", output_path, tex_path],
  stdout=subprocess.PIPE,
@@ -986,7 +986,7 @@
 
  if result.returncode != 0:
  print(f"Warning: Second LaTeX run had issues (exit code {result.returncode})")
- # Write LaTeX log for debugging
+
  log_path = os.path.join(output_path, f"{filename}.log")
  if os.path.exists(log_path):
  print(f"Check LaTeX log for details: {log_path}")
@@ -994,14 +994,14 @@
  print(f"Error during LaTeX compilation: {str(e)}")
  return None
 
- # Clean up temporary files
+
  for ext in [".aux", ".out", ".toc"]:
  try:
  os.remove(os.path.join(output_path, f"{filename}{ext}"))
  except OSError:
  pass
 
- # Check if PDF was generated successfully
+
  pdf_path = os.path.join(output_path, f"{filename}.pdf")
  if os.path.exists(pdf_path):
  print(f"PDF report successfully generated using LaTeX: {pdf_path}")
@@ -1025,14 +1025,14 @@ def generate_latex_document(request: str, model, provider, research: Dict[str, A
  Returns:
  LaTeX document content as a string
  """
- # Collect experiment images that might be available
+
  figure_paths = {}
  if chart_path:
- # Use relative path instead of absolute path for figure
+
  figure_paths["thematic_groups"] = os.path.basename(chart_path)
 
- # Check for experiment images in the current directory
- # Ensure experiments is a dictionary before trying to get keys
+
+
  if isinstance(experiments, dict):
  for title in experiments.keys():
  sanitized_title = title.replace(" ", "_")
@@ -1040,12 +1040,12 @@ def generate_latex_document(request: str, model, provider, research: Dict[str, A
  if os.path.exists(potential_image):
  figure_paths[sanitized_title] = potential_image
 
- # Describe available figures to the LLM
+
  figure_path_description_dict = {}
  for name, path in figure_paths.items():
  figure_path_description_dict[name] = path
 
- # Create the prompt for generating LaTeX content
+
  prompt = f'''
  Generate a LaTeX document for a research report on the topic: "{request}"
  Here is the summary of the research: {research}
@@ -1068,14 +1068,14 @@ def generate_latex_document(request: str, model, provider, research: Dict[str, A
  latex_response = get_llm_response(prompt=prompt, model=model, provider=provider )
  latex_content = latex_response.get('response', '')
 
- # Post-process the LaTeX content to fix common issues
+
  latex_content = latex_content.replace('\\bibliography{references}', '')
  latex_content = latex_content.replace('\\bibliographystyle{plain}', '')
 
- # Replace absolute figure paths with relative paths
+
  latex_content = latex_content.replace('/home/caug/npcww/npcsh/figures/', 'figures/')
 
- # Add a simple bibliography if none exists
+
  if '\\begin{thebibliography}' not in latex_content and '\\end{document}' in latex_content:
  bibliography = """
  \\begin{thebibliography}{9}