bioguider 0.2.33__tar.gz → 0.2.34__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bioguider might be problematic. Click here for more details.
- {bioguider-0.2.33 → bioguider-0.2.34}/PKG-INFO +1 -1
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/llm_content_generator.py +292 -153
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/llm_injector.py +60 -7
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/suggestion_extractor.py +26 -26
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/managers/generation_manager.py +14 -57
- {bioguider-0.2.33 → bioguider-0.2.34}/pyproject.toml +1 -1
- {bioguider-0.2.33 → bioguider-0.2.34}/LICENSE +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/README.md +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/__init__.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/__init__.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/agent_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/agent_tools.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/agent_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/collection_execute_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/collection_observe_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/collection_plan_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/collection_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/collection_task_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/common_agent.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/common_agent_2step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/common_conversation.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/common_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/consistency_collection_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/consistency_evaluation_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/consistency_evaluation_task_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/consistency_observe_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/consistency_query_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/dockergeneration_execute_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/dockergeneration_observe_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/dockergeneration_plan_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/dockergeneration_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/dockergeneration_task_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_installation_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_readme_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_submission_requirements_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_tutorial_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_tutorial_task_prompts.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_userguide_prompts.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/evaluation_userguide_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/identification_execute_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/identification_observe_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/identification_plan_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/identification_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/identification_task_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/peo_common_step.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/prompt_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/python_ast_repl_tool.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/agents/rag_collection_task.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/conversation.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/database/code_structure_db.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/database/summarized_file_db.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/__init__.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/change_planner.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/document_renderer.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/llm_cleaner.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/models.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/output_manager.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/repo_reader.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/report_loader.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/style_analyzer.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/generation/test_metrics.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/managers/evaluation_manager.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/managers/generation_test_manager.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/rag/__init__.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/rag/config.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/rag/data_pipeline.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/rag/embedder.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/rag/rag.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/settings.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/code_structure_builder.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/constants.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/default.gitignore +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/file_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/gitignore_checker.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/notebook_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/pyphen_utils.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/python_file_handler.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/r_file_handler.py +0 -0
- {bioguider-0.2.33 → bioguider-0.2.34}/bioguider/utils/utils.py +0 -0
|
@@ -3,6 +3,7 @@ from __future__ import annotations
|
|
|
3
3
|
from typing import Dict
|
|
4
4
|
import json
|
|
5
5
|
import re
|
|
6
|
+
import os
|
|
6
7
|
from langchain_openai.chat_models.base import BaseChatOpenAI
|
|
7
8
|
|
|
8
9
|
from bioguider.agents.common_conversation import CommonConversation
|
|
@@ -19,7 +20,6 @@ INPUTS (use only what is provided; never invent)
|
|
|
19
20
|
- suggestion_category: {suggestion_category}
|
|
20
21
|
- anchor_title: {anchor_title}
|
|
21
22
|
- guidance: {guidance}
|
|
22
|
-
- evidence_from_evaluation: {evidence}
|
|
23
23
|
- repo_context_excerpt (analyze tone/formatting; do not paraphrase it blindly): <<{context}>>
|
|
24
24
|
|
|
25
25
|
CRITICAL REQUIREMENTS
|
|
@@ -33,7 +33,7 @@ CRITICAL REQUIREMENTS
|
|
|
33
33
|
- ABSOLUTELY FORBIDDEN: Do NOT add summary sections, notes, conclusions, or any text at the end of documents
|
|
34
34
|
- ABSOLUTELY FORBIDDEN: Do NOT wrap content in markdown code fences (```markdown). Return pure content only.
|
|
35
35
|
- ABSOLUTELY FORBIDDEN: Do NOT add phrases like "Happy analyzing!", "Ensure all dependencies are up-to-date", or any concluding statements
|
|
36
|
-
- ALWAYS use the specific guidance provided above to create concrete, actionable content
|
|
36
|
+
- ALWAYS use the specific guidance provided above to create concrete, actionable content
|
|
37
37
|
|
|
38
38
|
STYLE & CONSTRAINTS
|
|
39
39
|
- Fix obvious errors in the content.
|
|
@@ -79,7 +79,7 @@ LLM_FULLDOC_PROMPT = """
|
|
|
79
79
|
You are "BioGuider," a documentation rewriter with enhanced capabilities for complex documents.
|
|
80
80
|
|
|
81
81
|
GOAL
|
|
82
|
-
Rewrite a complete target document
|
|
82
|
+
Rewrite a complete target document by enhancing the existing content while maintaining the EXACT original structure, sections, and flow. Use only the provided evaluation report signals and repository context excerpts. Output a full, ready-to-publish markdown file that follows the original document structure precisely while incorporating improvements. You now have increased token capacity to handle complex documents comprehensively.
|
|
83
83
|
|
|
84
84
|
INPUTS (authoritative)
|
|
85
85
|
- evaluation_report (structured JSON excerpts): <<{evaluation_report}>>
|
|
@@ -94,11 +94,12 @@ This file requires improvements from {total_suggestions} separate evaluation sug
|
|
|
94
94
|
4. **Write the document ONCE** with all improvements incorporated throughout
|
|
95
95
|
|
|
96
96
|
INTEGRATION STRATEGY
|
|
97
|
-
-
|
|
98
|
-
-
|
|
99
|
-
-
|
|
97
|
+
- **CRITICAL**: Follow the EXACT structure of the original document. Do NOT create new sections.
|
|
98
|
+
- Identify which suggestions target existing sections in the original document
|
|
99
|
+
- Apply improvements ONLY to existing sections - do NOT create new sections
|
|
100
|
+
- For tutorial files: Enhance existing sections with relevant suggestions, maintain original section order
|
|
100
101
|
- For documentation files: Merge suggestions into existing structure, avoid redundant sections
|
|
101
|
-
- Result: ONE enhanced document that addresses all {total_suggestions} suggestions
|
|
102
|
+
- Result: ONE enhanced document that follows the original structure and addresses all {total_suggestions} suggestions
|
|
102
103
|
|
|
103
104
|
CAPACITY AND SCOPE
|
|
104
105
|
- You have enhanced token capacity to handle complex documents comprehensively
|
|
@@ -107,23 +108,24 @@ CAPACITY AND SCOPE
|
|
|
107
108
|
- Comprehensive documents: Full capacity for complete documentation with all necessary sections
|
|
108
109
|
|
|
109
110
|
STRICT CONSTRAINTS
|
|
111
|
+
- **CRITICAL**: Follow the EXACT structure and sections of the original document. Do NOT create new sections or reorganize content.
|
|
110
112
|
- Base the content solely on the evaluation report and repo context. Do not invent features, data, or claims not supported by these sources.
|
|
111
113
|
- CRITICAL: NEVER invent technical specifications including:
|
|
112
114
|
* Hardware requirements (RAM, CPU, disk space) unless explicitly stated in guidance/context
|
|
113
115
|
* Version numbers for dependencies unless explicitly stated in guidance/context
|
|
114
116
|
* Performance metrics, benchmarks, or timing estimates
|
|
115
|
-
* Biological/computational parameters or thresholds without
|
|
117
|
+
* Biological/computational parameters or thresholds without substantiation
|
|
116
118
|
* Installation commands or package names not found in the repo context
|
|
117
|
-
-
|
|
118
|
-
-
|
|
119
|
-
- CRITICAL: Preserve the original document structure, sections, and flow. Only enhance existing content and add missing information.
|
|
120
|
-
- For tutorial files, maintain all original sections while improving clarity and adding missing details based on evaluation suggestions.
|
|
119
|
+
- **CRITICAL**: Preserve the original document structure, sections, and flow EXACTLY. Only enhance existing content and add missing information based on evaluation suggestions.
|
|
120
|
+
- For tutorial files, maintain ALL original sections in their original order while improving clarity and adding missing details based on evaluation suggestions.
|
|
121
121
|
- Fix obvious errors; improve structure and readability per report suggestions.
|
|
122
|
-
- Include ONLY sections
|
|
122
|
+
- Include ONLY sections that exist in the original document - do not add unnecessary sections.
|
|
123
123
|
- Avoid redundancy: do not duplicate information across multiple sections.
|
|
124
|
-
- ABSOLUTELY
|
|
125
|
-
- ABSOLUTELY
|
|
126
|
-
- ABSOLUTELY
|
|
124
|
+
- **ABSOLUTELY CRITICAL**: Do NOT add ANY conclusion, summary, or closing paragraph at the end
|
|
125
|
+
- **ABSOLUTELY CRITICAL**: Do NOT wrap the entire document inside markdown code fences (```markdown). Do NOT start with ```markdown or end with ```. Return pure content suitable for copy/paste.
|
|
126
|
+
- **ABSOLUTELY CRITICAL**: Do NOT add phrases like "Happy analyzing!", "This vignette demonstrates...", "By following the steps outlined...", or ANY concluding statements
|
|
127
|
+
- **ABSOLUTELY CRITICAL**: Stop writing IMMEDIATELY after the last content section from the original document. Do NOT add "## Conclusion", "## Summary", or any final paragraphs
|
|
128
|
+
- **CRITICAL**: Do NOT reorganize, rename, or create new sections. Follow the original document structure exactly.
|
|
127
129
|
- Keep links well-formed; keep neutral, professional tone; concise, skimmable formatting.
|
|
128
130
|
- Preserve file-specific formatting (e.g., YAML frontmatter, code fence syntax) and do not wrap content in extra code fences.
|
|
129
131
|
|
|
@@ -182,12 +184,47 @@ OUTPUT
|
|
|
182
184
|
- Return only the full README.md content. No commentary, no fences.
|
|
183
185
|
"""
|
|
184
186
|
|
|
187
|
+
# Continuation prompt template - used when document generation is truncated
|
|
188
|
+
LLM_CONTINUATION_PROMPT = """
|
|
189
|
+
You are "BioGuider," continuing a truncated documentation generation task.
|
|
190
|
+
|
|
191
|
+
IMPORTANT: This is STRICT CONTINUATION ONLY. You are NOT creating new content.
|
|
192
|
+
You are NOT adding conclusions or summaries. You are ONLY completing the missing sections from the original document.
|
|
193
|
+
|
|
194
|
+
PREVIOUS CONTENT (do not repeat this):
|
|
195
|
+
```
|
|
196
|
+
{existing_content_tail}
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
STRICT CONTINUATION RULES:
|
|
200
|
+
- Examine the previous content above and identify what section it ends with
|
|
201
|
+
- Continue IMMEDIATELY after that section with the next missing section from the original document
|
|
202
|
+
- Use the EXACT same structure, style, and tone as the existing content
|
|
203
|
+
- Add ONLY the specific content that should logically follow from the last section
|
|
204
|
+
- Do NOT add ANY conclusions, summaries, additional resources, or wrap-up content
|
|
205
|
+
- Do NOT add phrases like "For further guidance", "Additional Resources", or "In conclusion"
|
|
206
|
+
|
|
207
|
+
MISSING CONTENT TO ADD:
|
|
208
|
+
Based on typical RMarkdown vignette structure, if the document ends with "Common Pitfalls", you should add:
|
|
209
|
+
- SCT integration example (SCTransform section)
|
|
210
|
+
- Session info section
|
|
211
|
+
- Details section (if present in original)
|
|
212
|
+
- STOP after these sections - do NOT add anything else
|
|
213
|
+
|
|
214
|
+
CRITICAL: STOP IMMEDIATELY after completing the missing sections from the original document.
|
|
215
|
+
Do NOT add "## Additional Resources" or any final sections.
|
|
216
|
+
|
|
217
|
+
OUTPUT:
|
|
218
|
+
- Return ONLY the continuation content that completes the original document structure
|
|
219
|
+
- No commentary, no fences, no conclusions, no additional content
|
|
220
|
+
"""
|
|
221
|
+
|
|
185
222
|
|
|
186
223
|
class LLMContentGenerator:
|
|
187
224
|
def __init__(self, llm: BaseChatOpenAI):
|
|
188
225
|
self.llm = llm
|
|
189
226
|
|
|
190
|
-
def _detect_truncation(self, content: str, target_file: str) -> bool:
|
|
227
|
+
def _detect_truncation(self, content: str, target_file: str, original_content: str = None) -> bool:
|
|
191
228
|
"""
|
|
192
229
|
Detect if content appears to be truncated based on common patterns.
|
|
193
230
|
Universal detection for all file types.
|
|
@@ -195,6 +232,7 @@ class LLMContentGenerator:
|
|
|
195
232
|
Args:
|
|
196
233
|
content: Generated content to check
|
|
197
234
|
target_file: Target file path for context
|
|
235
|
+
original_content: Original content for comparison (if available)
|
|
198
236
|
|
|
199
237
|
Returns:
|
|
200
238
|
True if content appears truncated, False otherwise
|
|
@@ -202,19 +240,27 @@ class LLMContentGenerator:
|
|
|
202
240
|
if not content or len(content.strip()) < 100:
|
|
203
241
|
return True
|
|
204
242
|
|
|
205
|
-
# 1.
|
|
206
|
-
|
|
207
|
-
|
|
243
|
+
# 1. Compare to original length if available (most reliable indicator)
|
|
244
|
+
if original_content:
|
|
245
|
+
original_len = len(original_content)
|
|
246
|
+
generated_len = len(content)
|
|
247
|
+
# If generated content is significantly shorter than original (< 80%), likely truncated
|
|
248
|
+
if generated_len < original_len * 0.8:
|
|
249
|
+
return True
|
|
250
|
+
|
|
251
|
+
# 2. Check for very short content (applies to all files)
|
|
252
|
+
# Only flag as truncated if content is very short (< 500 chars)
|
|
253
|
+
if len(content) < 500:
|
|
208
254
|
return True
|
|
209
255
|
|
|
210
|
-
#
|
|
256
|
+
# 3. Check for incomplete code blocks (any language)
|
|
211
257
|
# Count opening and closing code fences
|
|
212
258
|
code_fence_count = content.count('```')
|
|
213
259
|
if code_fence_count > 0 and code_fence_count % 2 != 0:
|
|
214
260
|
# Unbalanced code fences suggest truncation
|
|
215
261
|
return True
|
|
216
262
|
|
|
217
|
-
#
|
|
263
|
+
# 4. Check for specific language code blocks
|
|
218
264
|
if target_file.endswith('.Rmd'):
|
|
219
265
|
# R chunks should be complete
|
|
220
266
|
r_chunks_open = re.findall(r'```\{r[^}]*\}', content)
|
|
@@ -278,14 +324,91 @@ class LLMContentGenerator:
|
|
|
278
324
|
|
|
279
325
|
return False
|
|
280
326
|
|
|
281
|
-
def
|
|
327
|
+
def _find_continuation_point(self, content: str, original_content: str = None) -> str:
|
|
328
|
+
"""
|
|
329
|
+
Find a better continuation point than just the last 1000 characters.
|
|
330
|
+
Looks for the last complete section or code block to continue from.
|
|
331
|
+
|
|
332
|
+
Args:
|
|
333
|
+
content: The generated content so far
|
|
334
|
+
original_content: The original content for comparison
|
|
335
|
+
|
|
336
|
+
Returns:
|
|
337
|
+
A suitable continuation point, or None if not found
|
|
282
338
|
"""
|
|
283
|
-
|
|
339
|
+
if not content:
|
|
340
|
+
return None
|
|
341
|
+
|
|
342
|
+
lines = content.split('\n')
|
|
343
|
+
if len(lines) < 10: # Too short to find good continuation point
|
|
344
|
+
return None
|
|
345
|
+
|
|
346
|
+
# Strategy 1: Find the last complete section (header with content after it)
|
|
347
|
+
for i in range(len(lines) - 1, -1, -1):
|
|
348
|
+
line = lines[i].strip()
|
|
349
|
+
if line.startswith('## ') and i + 1 < len(lines):
|
|
350
|
+
# Check if there's content after this header
|
|
351
|
+
next_lines = []
|
|
352
|
+
for j in range(i + 1, min(i + 10, len(lines))): # Look at next 10 lines
|
|
353
|
+
if lines[j].strip() and not lines[j].strip().startswith('##'):
|
|
354
|
+
next_lines.append(lines[j])
|
|
355
|
+
else:
|
|
356
|
+
break
|
|
357
|
+
|
|
358
|
+
if next_lines: # Found header with content after it
|
|
359
|
+
# Return from this header onwards
|
|
360
|
+
return '\n'.join(lines[i:])
|
|
361
|
+
|
|
362
|
+
# Strategy 2: Find the last complete code block
|
|
363
|
+
in_code_block = False
|
|
364
|
+
code_block_start = -1
|
|
365
|
+
|
|
366
|
+
for i in range(len(lines) - 1, -1, -1):
|
|
367
|
+
line = lines[i].strip()
|
|
368
|
+
if line.startswith('```') and not in_code_block:
|
|
369
|
+
in_code_block = True
|
|
370
|
+
code_block_start = i
|
|
371
|
+
elif line.startswith('```') and in_code_block:
|
|
372
|
+
# Found complete code block
|
|
373
|
+
return '\n'.join(lines[code_block_start:])
|
|
374
|
+
|
|
375
|
+
# Strategy 3: Find last complete paragraph (ends with period)
|
|
376
|
+
for i in range(len(lines) - 1, -1, -1):
|
|
377
|
+
line = lines[i].strip()
|
|
378
|
+
if line and line.endswith('.') and not line.startswith('#') and not line.startswith('```'):
|
|
379
|
+
# Found a complete sentence, return from there
|
|
380
|
+
return '\n'.join(lines[i:])
|
|
381
|
+
|
|
382
|
+
# Strategy 4: If original content is available, find where the generated content diverges
|
|
383
|
+
if original_content:
|
|
384
|
+
# Simple approach: find the longest common suffix
|
|
385
|
+
min_len = min(len(content), len(original_content))
|
|
386
|
+
common_length = 0
|
|
387
|
+
|
|
388
|
+
for i in range(1, min_len + 1):
|
|
389
|
+
if content[-i:] == original_content[-i:]:
|
|
390
|
+
common_length = i
|
|
391
|
+
else:
|
|
392
|
+
break
|
|
393
|
+
|
|
394
|
+
if common_length > 100: # Found significant common ending
|
|
395
|
+
return content[-(common_length + 100):] # Include some context
|
|
396
|
+
|
|
397
|
+
return None
|
|
398
|
+
|
|
399
|
+
def _appears_complete(self, content: str, target_file: str, original_content: str = None) -> bool:
|
|
400
|
+
"""
|
|
401
|
+
Check if content appears to be complete based on structure, patterns, AND original length.
|
|
284
402
|
Universal completion check for all file types.
|
|
285
403
|
|
|
404
|
+
CRITICAL: If original_content is provided, generated content MUST be at least 90% of original length
|
|
405
|
+
to be considered complete, regardless of other heuristics. This prevents the LLM from fooling us
|
|
406
|
+
with fake conclusions.
|
|
407
|
+
|
|
286
408
|
Args:
|
|
287
409
|
content: Generated content to check
|
|
288
410
|
target_file: Target file path for context
|
|
411
|
+
original_content: Original content for length comparison (optional but recommended)
|
|
289
412
|
|
|
290
413
|
Returns:
|
|
291
414
|
True if content appears complete, False if it needs continuation
|
|
@@ -293,6 +416,15 @@ class LLMContentGenerator:
|
|
|
293
416
|
if not content or len(content.strip()) < 100:
|
|
294
417
|
return False
|
|
295
418
|
|
|
419
|
+
# CRITICAL: If original content is provided, check length ratio first
|
|
420
|
+
# This prevents the LLM from fooling us with fake conclusions
|
|
421
|
+
if original_content and isinstance(original_content, str):
|
|
422
|
+
generated_len = len(content)
|
|
423
|
+
original_len = len(original_content)
|
|
424
|
+
if generated_len < original_len * 0.9:
|
|
425
|
+
# Generated content is too short compared to original - NOT complete
|
|
426
|
+
return False
|
|
427
|
+
|
|
296
428
|
# 1. Check for balanced code blocks (applies to all files)
|
|
297
429
|
code_block_count = content.count('```')
|
|
298
430
|
if code_block_count > 0 and code_block_count % 2 != 0:
|
|
@@ -441,61 +573,14 @@ class LLMContentGenerator:
|
|
|
441
573
|
elif "suggestions" in evaluation_report and isinstance(evaluation_report["suggestions"], list):
|
|
442
574
|
total_suggestions = len(evaluation_report["suggestions"])
|
|
443
575
|
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
PREVIOUS CONTENT (do not repeat this):
|
|
453
|
-
```
|
|
454
|
-
{existing_content[-1000:]} # Last 1000 chars for context
|
|
455
|
-
```
|
|
456
|
-
|
|
457
|
-
TASK
|
|
458
|
-
Continue the document naturally from the last complete section. Maintain the same style,
|
|
459
|
-
structure, and flow as the previous content. Complete all remaining sections that should
|
|
460
|
-
be in this document.
|
|
461
|
-
|
|
462
|
-
CAPACITY AND SCOPE
|
|
463
|
-
- You have enhanced token capacity to handle complex documents comprehensively
|
|
464
|
-
- Tutorial documents: Enhanced capacity for step-by-step content, code examples, and comprehensive explanations
|
|
465
|
-
- Complex documents: Increased capacity for multiple sections, detailed explanations, and extensive content
|
|
466
|
-
- Comprehensive documents: Full capacity for complete documentation with all necessary sections
|
|
467
|
-
|
|
468
|
-
INPUTS
|
|
469
|
-
- evaluation_report (contains {total_suggestions} suggestions to integrate): {json.dumps(evaluation_report)[:4000]}
|
|
470
|
-
- context: {context[:2000]}
|
|
471
|
-
|
|
472
|
-
REMINDER: SINGLE DOCUMENT APPROACH
|
|
473
|
-
- The evaluation report contains {total_suggestions} SEPARATE suggestions
|
|
474
|
-
- These should be integrated into ONE cohesive continuation
|
|
475
|
-
- Do NOT create {total_suggestions} separate sections for each suggestion
|
|
476
|
-
- Group related suggestions (e.g., setup, reproducibility, performance) and integrate them naturally
|
|
477
|
-
|
|
478
|
-
REQUIREMENTS
|
|
479
|
-
- Continue seamlessly from the previous content
|
|
480
|
-
- Maintain the same tone and style
|
|
481
|
-
- Complete all sections that should be in this document
|
|
482
|
-
- Preserve file-specific formatting (e.g., YAML frontmatter, code block syntax appropriate to the language)
|
|
483
|
-
- Do not repeat content already generated
|
|
484
|
-
- Return only the continuation content, not the full document
|
|
485
|
-
- Use the increased token capacity to provide thorough, complete content
|
|
486
|
-
- NEVER invent technical specifications (hardware, versions, performance) unless explicitly in evaluation report or context
|
|
487
|
-
- ABSOLUTELY FORBIDDEN: Do NOT wrap content in markdown code fences (```markdown). Return pure content only.
|
|
488
|
-
- ABSOLUTELY FORBIDDEN: Do NOT add summary sections, notes, conclusions, or any text at the end of documents
|
|
489
|
-
|
|
490
|
-
COMPLETENESS REQUIREMENTS
|
|
491
|
-
- Generate complete, comprehensive content that addresses all remaining evaluation suggestions
|
|
492
|
-
- For complex documents, ensure all sections are fully developed and detailed
|
|
493
|
-
- For tutorial documents, include complete step-by-step instructions with examples
|
|
494
|
-
- Use the increased token capacity to provide thorough, useful documentation
|
|
495
|
-
|
|
496
|
-
OUTPUT
|
|
497
|
-
Return only the continuation content that should be appended to the existing content.
|
|
498
|
-
"""
|
|
576
|
+
# Use the centralized continuation prompt template
|
|
577
|
+
continuation_prompt = LLM_CONTINUATION_PROMPT.format(
|
|
578
|
+
target_file=target_file,
|
|
579
|
+
existing_content_tail=existing_content[-1000:], # Last 1000 chars for context
|
|
580
|
+
total_suggestions=total_suggestions,
|
|
581
|
+
evaluation_report_excerpt=json.dumps(evaluation_report)[:4000],
|
|
582
|
+
context_excerpt=context[:2000],
|
|
583
|
+
)
|
|
499
584
|
|
|
500
585
|
content, token_usage = conv.generate(
|
|
501
586
|
system_prompt=continuation_prompt,
|
|
@@ -514,14 +599,13 @@ Return only the continuation content that should be appended to the existing con
|
|
|
514
599
|
section=section_name,
|
|
515
600
|
anchor_title=section_name,
|
|
516
601
|
suggestion_category=suggestion.category,
|
|
517
|
-
evidence=(suggestion.source.get("evidence", "") if suggestion.source else ""),
|
|
518
602
|
context=context[:2500],
|
|
519
603
|
guidance=(suggestion.content_guidance or "").strip(),
|
|
520
604
|
)
|
|
521
605
|
content, token_usage = conv.generate(system_prompt=system_prompt, instruction_prompt="Write the section content now.")
|
|
522
606
|
return content.strip(), token_usage
|
|
523
607
|
|
|
524
|
-
def generate_full_document(self, target_file: str, evaluation_report: dict, context: str = "") -> tuple[str, dict]:
|
|
608
|
+
def generate_full_document(self, target_file: str, evaluation_report: dict, context: str = "", original_content: str = None) -> tuple[str, dict]:
|
|
525
609
|
# Create LLM (uses 16k tokens by default - enough for any document)
|
|
526
610
|
from bioguider.agents.agent_utils import get_llm
|
|
527
611
|
import os
|
|
@@ -560,6 +644,11 @@ Return only the continuation content that should be appended to the existing con
|
|
|
560
644
|
with open(debug_file, 'w', encoding='utf-8') as f:
|
|
561
645
|
json.dump(debug_info, f, indent=2, ensure_ascii=False)
|
|
562
646
|
|
|
647
|
+
# Debug: Save raw evaluation_report to see what's being serialized
|
|
648
|
+
eval_report_file = os.path.join(debug_dir, f"{safe_filename}_raw_eval_report.json")
|
|
649
|
+
with open(eval_report_file, 'w', encoding='utf-8') as f:
|
|
650
|
+
json.dump(evaluation_report, f, indent=2, ensure_ascii=False)
|
|
651
|
+
|
|
563
652
|
# Use comprehensive README prompt for README.md files
|
|
564
653
|
if target_file.endswith("README.md"):
|
|
565
654
|
system_prompt = LLM_README_COMPREHENSIVE_PROMPT.format(
|
|
@@ -590,14 +679,24 @@ Return only the continuation content that should be appended to the existing con
|
|
|
590
679
|
f.write(system_prompt)
|
|
591
680
|
f.write("\n\n=== INSTRUCTION PROMPT ===\n")
|
|
592
681
|
f.write("Write the full document now.")
|
|
593
|
-
|
|
594
|
-
f.write(json.dumps(evaluation_report, indent=2))
|
|
595
|
-
f.write("\n\n=== CONTEXT ===\n")
|
|
596
|
-
f.write(context[:2000] + "..." if len(context) > 2000 else context)
|
|
682
|
+
# Context is already embedded in system prompt; avoid duplicating here
|
|
597
683
|
|
|
598
684
|
# Initial generation
|
|
599
|
-
|
|
600
|
-
|
|
685
|
+
# If the original document is long (RMarkdown > 8k chars), avoid truncation by chunked rewrite
|
|
686
|
+
# Lower threshold from 12k to 8k to catch more documents that would otherwise truncate
|
|
687
|
+
use_chunked = bool(target_file.endswith('.Rmd') and isinstance(original_content, str) and len(original_content) > 8000)
|
|
688
|
+
if use_chunked:
|
|
689
|
+
content, token_usage = self._generate_full_document_chunked(
|
|
690
|
+
target_file=target_file,
|
|
691
|
+
evaluation_report=evaluation_report,
|
|
692
|
+
context=context,
|
|
693
|
+
original_content=original_content or "",
|
|
694
|
+
debug_dir=debug_dir,
|
|
695
|
+
safe_filename=safe_filename,
|
|
696
|
+
)
|
|
697
|
+
else:
|
|
698
|
+
content, token_usage = conv.generate(system_prompt=system_prompt, instruction_prompt="Write the full document now.")
|
|
699
|
+
content = content.strip()
|
|
601
700
|
|
|
602
701
|
# Save initial generation for debugging
|
|
603
702
|
generation_file = os.path.join(debug_dir, f"{safe_filename}_generation_0.txt")
|
|
@@ -605,7 +704,9 @@ Return only the continuation content that should be appended to the existing con
|
|
|
605
704
|
f.write(f"=== INITIAL GENERATION ===\n")
|
|
606
705
|
f.write(f"Tokens: {token_usage}\n")
|
|
607
706
|
f.write(f"Length: {len(content)} characters\n")
|
|
608
|
-
|
|
707
|
+
if original_content:
|
|
708
|
+
f.write(f"Original length: {len(original_content)} characters\n")
|
|
709
|
+
f.write(f"Truncation detected: {self._detect_truncation(content, target_file, original_content)}\n")
|
|
609
710
|
f.write(f"\n=== CONTENT ===\n")
|
|
610
711
|
f.write(content)
|
|
611
712
|
|
|
@@ -613,79 +714,39 @@ Return only the continuation content that should be appended to the existing con
|
|
|
613
714
|
max_continuations = 3 # Limit to prevent infinite loops
|
|
614
715
|
continuation_count = 0
|
|
615
716
|
|
|
616
|
-
while (self._detect_truncation(content, target_file) and
|
|
717
|
+
while (not use_chunked and self._detect_truncation(content, target_file, original_content) and
|
|
617
718
|
continuation_count < max_continuations):
|
|
618
719
|
|
|
619
720
|
# Additional check: if content appears complete, don't continue
|
|
620
|
-
|
|
721
|
+
# Pass original_content so we can check length ratio
|
|
722
|
+
if self._appears_complete(content, target_file, original_content):
|
|
621
723
|
break
|
|
622
724
|
continuation_count += 1
|
|
623
725
|
|
|
624
|
-
#
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
{content[-1000:]} # Last 1000 chars for context
|
|
637
|
-
```
|
|
638
|
-
|
|
639
|
-
TASK
|
|
640
|
-
Continue the document naturally from the last complete section. Maintain the same style,
|
|
641
|
-
structure, and flow as the previous content. Complete all remaining sections that should
|
|
642
|
-
be in this document.
|
|
643
|
-
|
|
644
|
-
CRITICAL REQUIREMENTS:
|
|
645
|
-
- Do NOT repeat any content already generated above
|
|
646
|
-
- Do NOT duplicate sections, headers, or code blocks that already exist
|
|
647
|
-
- Generate ONLY new, unique content that continues from where the previous content ended
|
|
648
|
-
- If the previous content appears complete, add complementary sections that enhance the document
|
|
649
|
-
- Focus on adding missing sections, examples, or explanations that weren't covered
|
|
650
|
-
|
|
651
|
-
CAPACITY AND SCOPE
|
|
652
|
-
- You have enhanced token capacity to handle complex documents comprehensively
|
|
653
|
-
- Tutorial documents: Enhanced capacity for step-by-step content, code examples, and comprehensive explanations
|
|
654
|
-
- Complex documents: Increased capacity for multiple sections, detailed explanations, and extensive content
|
|
655
|
-
- Comprehensive documents: Full capacity for complete documentation with all necessary sections
|
|
656
|
-
|
|
657
|
-
INPUTS
|
|
658
|
-
- evaluation_report (contains {total_suggestions} suggestions to integrate): {json.dumps(evaluation_report)[:4000]}
|
|
659
|
-
- context: {context[:2000]}
|
|
660
|
-
|
|
661
|
-
REMINDER: SINGLE DOCUMENT APPROACH
|
|
662
|
-
- The evaluation report contains {total_suggestions} SEPARATE suggestions
|
|
663
|
-
- These should be integrated into ONE cohesive continuation
|
|
664
|
-
- Do NOT create {total_suggestions} separate sections for each suggestion
|
|
665
|
-
- Group related suggestions (e.g., setup, reproducibility, performance) and integrate them naturally
|
|
666
|
-
|
|
667
|
-
REQUIREMENTS
|
|
668
|
-
- Continue seamlessly from the previous content
|
|
669
|
-
- Maintain the same tone and style
|
|
670
|
-
- Complete all sections that should be in this document
|
|
671
|
-
- Preserve file-specific formatting (e.g., YAML frontmatter, code block syntax appropriate to the language)
|
|
672
|
-
- Do not repeat content already generated
|
|
673
|
-
- Return only the continuation content, not the full document
|
|
674
|
-
- Use the increased token capacity to provide thorough, complete content
|
|
675
|
-
- NEVER invent technical specifications (hardware, versions, performance) unless explicitly in evaluation report or context
|
|
676
|
-
- ABSOLUTELY FORBIDDEN: Do NOT wrap content in markdown code fences (```markdown). Return pure content only.
|
|
677
|
-
- ABSOLUTELY FORBIDDEN: Do NOT add summary sections, notes, conclusions, or any text at the end of documents
|
|
678
|
-
|
|
679
|
-
COMPLETENESS REQUIREMENTS
|
|
680
|
-
- Generate complete, comprehensive content that addresses all remaining evaluation suggestions
|
|
681
|
-
- For complex documents, ensure all sections are fully developed and detailed
|
|
682
|
-
- For tutorial documents, include complete step-by-step instructions with examples
|
|
683
|
-
- Use the increased token capacity to provide thorough, useful documentation
|
|
726
|
+
# Calculate total suggestions for debugging info
|
|
727
|
+
total_suggestions = 1
|
|
728
|
+
if isinstance(evaluation_report, dict):
|
|
729
|
+
if "total_suggestions" in evaluation_report:
|
|
730
|
+
total_suggestions = evaluation_report["total_suggestions"]
|
|
731
|
+
elif "suggestions" in evaluation_report and isinstance(evaluation_report["suggestions"], list):
|
|
732
|
+
total_suggestions = len(evaluation_report["suggestions"])
|
|
733
|
+
|
|
734
|
+
# Find better continuation point - look for last complete section
|
|
735
|
+
continuation_point = self._find_continuation_point(content, original_content)
|
|
736
|
+
if not continuation_point:
|
|
737
|
+
continuation_point = content[-1000:] # Fallback to last 1000 chars
|
|
684
738
|
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
739
|
+
# Generate continuation prompt using centralized template
|
|
740
|
+
continuation_prompt = LLM_CONTINUATION_PROMPT.format(
|
|
741
|
+
target_file=target_file,
|
|
742
|
+
existing_content_tail=continuation_point,
|
|
743
|
+
total_suggestions=total_suggestions,
|
|
744
|
+
evaluation_report_excerpt=json.dumps(evaluation_report)[:4000],
|
|
745
|
+
context_excerpt=context[:2000],
|
|
746
|
+
)
|
|
688
747
|
|
|
748
|
+
# Save continuation prompt for debugging
|
|
749
|
+
continuation_prompt_file = os.path.join(debug_dir, f"{safe_filename}_continuation_{continuation_count}_prompt.txt")
|
|
689
750
|
with open(continuation_prompt_file, 'w', encoding='utf-8') as f:
|
|
690
751
|
f.write(continuation_prompt)
|
|
691
752
|
|
|
@@ -768,4 +829,82 @@ OUTPUT
|
|
|
768
829
|
|
|
769
830
|
return '\n'.join(cleaned_lines)
|
|
770
831
|
|
|
832
|
+
def _split_rmd_into_chunks(self, content: str) -> list[dict]:
|
|
833
|
+
chunks = []
|
|
834
|
+
if not content:
|
|
835
|
+
return chunks
|
|
836
|
+
lines = content.split('\n')
|
|
837
|
+
n = len(lines)
|
|
838
|
+
i = 0
|
|
839
|
+
if n >= 3 and lines[0].strip() == '---':
|
|
840
|
+
j = 1
|
|
841
|
+
while j < n and lines[j].strip() != '---':
|
|
842
|
+
j += 1
|
|
843
|
+
if j < n and lines[j].strip() == '---':
|
|
844
|
+
chunks.append({"type": "yaml", "content": '\n'.join(lines[0:j+1])})
|
|
845
|
+
i = j + 1
|
|
846
|
+
buffer = []
|
|
847
|
+
in_code = False
|
|
848
|
+
for k in range(i, n):
|
|
849
|
+
line = lines[k]
|
|
850
|
+
if line.strip().startswith('```'):
|
|
851
|
+
if in_code:
|
|
852
|
+
buffer.append(line)
|
|
853
|
+
chunks.append({"type": "code", "content": '\n'.join(buffer)})
|
|
854
|
+
buffer = []
|
|
855
|
+
in_code = False
|
|
856
|
+
else:
|
|
857
|
+
if buffer and any(s.strip() for s in buffer):
|
|
858
|
+
chunks.append({"type": "text", "content": '\n'.join(buffer)})
|
|
859
|
+
buffer = [line]
|
|
860
|
+
in_code = True
|
|
861
|
+
else:
|
|
862
|
+
buffer.append(line)
|
|
863
|
+
if buffer and any(s.strip() for s in buffer):
|
|
864
|
+
chunks.append({"type": "code" if in_code else "text", "content": '\n'.join(buffer)})
|
|
865
|
+
return chunks
|
|
866
|
+
|
|
867
|
+
def _generate_text_chunk(self, conv: CommonConversation, evaluation_report: dict, context: str, chunk_text: str) -> tuple[str, dict]:
    """Ask the LLM to refine a single prose chunk of an RMarkdown document.

    The prompt inputs are truncated (report 4000, context 1500, chunk 6000
    chars) to bound prompt size.  Returns the stripped refined chunk and
    the token-usage dict reported by the conversation.
    """
    prompt_template = (
        "You are BioGuider improving a single markdown chunk of a larger RMarkdown document.\n\n"
        "GOAL\nRefine ONLY the given chunk's prose per evaluation suggestions while preserving structure.\n"
        "Do not add conclusions or new sections.\n\n"
        "INPUTS\n- evaluation_report: <<{evaluation_report}>>\n- repo_context_excerpt: <<{context}>>\n- original_chunk:\n<<<\n{chunk}\n>>>\n\n"
        "RULES\n- Preserve headers/formatting in this chunk.\n- Do not invent technical specs.\n- Output ONLY the refined chunk (no fences)."
    )
    prompt_fields = {
        "evaluation_report": json.dumps(evaluation_report)[:4000],
        "context": context[:1500],
        "chunk": chunk_text[:6000],
    }
    refined, usage = conv.generate(
        system_prompt=prompt_template.format(**prompt_fields),
        instruction_prompt="Rewrite this chunk now.",
    )
    return refined.strip(), usage
|
|
882
|
+
|
|
883
|
+
def _generate_full_document_chunked(self, target_file: str, evaluation_report: dict, context: str, original_content: str, debug_dir: str, safe_filename: str) -> tuple[str, dict]:
    """Rewrite a long RMarkdown document chunk-by-chunk to avoid truncation.

    YAML front matter and fenced code blocks pass through verbatim; each
    prose chunk is refined via ``_generate_text_chunk``.  Refined chunks
    are also written to ``debug_dir`` for inspection, and token usage is
    summed best-effort across LLM calls.  Returns the reassembled document
    and the aggregated usage dict.
    """
    from datetime import datetime

    conversation = CommonConversation(self.llm)
    rebuilt_parts: list[str] = []
    usage_totals = {"total_tokens": 0, "prompt_tokens": 0, "completion_tokens": 0}

    for position, piece in enumerate(self._split_rmd_into_chunks(original_content)):
        # Structure-bearing chunks are preserved exactly as written.
        if piece["type"] in ("yaml", "code"):
            rebuilt_parts.append(piece["content"])
            continue

        refined, usage = self._generate_text_chunk(conversation, evaluation_report, context, piece["content"])
        if not refined:
            refined = piece["content"]  # fall back to the original prose on empty output
        rebuilt_parts.append(refined)

        try:
            # Accumulate in the same key order as reported usage dicts.
            usage_totals["total_tokens"] += int(usage.get("total_tokens", 0))
            usage_totals["prompt_tokens"] += int(usage.get("prompt_tokens", 0))
            usage_totals["completion_tokens"] += int(usage.get("completion_tokens", 0))
        except Exception:
            pass  # usage accounting is best-effort only

        debug_path = os.path.join(debug_dir, f"{safe_filename}_chunk_{position}.txt")
        with open(debug_path, 'w', encoding='utf-8') as fh:
            fh.write(f"=== CHUNK {position} ({piece['type']}) at {datetime.now().isoformat()} ===\n")
            fh.write(refined)

    return '\n'.join(rebuilt_parts), usage_totals
|
|
909
|
+
|
|
771
910
|
|