bioguider 0.2.15__py3-none-any.whl → 0.2.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of bioguider might be problematic.
- bioguider/agents/agent_utils.py +38 -54
- bioguider/agents/collection_observe_step.py +1 -1
- bioguider/agents/common_agent.py +3 -25
- bioguider/agents/common_agent_2step.py +1 -1
- bioguider/agents/common_conversation.py +43 -0
- bioguider/agents/dockergeneration_observe_step.py +2 -1
- bioguider/agents/evaluation_installation_task.py +68 -99
- bioguider/agents/evaluation_readme_task.py +280 -182
- bioguider/agents/evaluation_submission_requirements_task.py +69 -54
- bioguider/agents/evaluation_task.py +1 -1
- bioguider/agents/identification_observe_step.py +1 -1
- bioguider/agents/prompt_utils.py +4 -2
- bioguider/utils/constants.py +86 -1
- bioguider/utils/utils.py +45 -1
- {bioguider-0.2.15.dist-info → bioguider-0.2.16.dist-info}/METADATA +1 -1
- {bioguider-0.2.15.dist-info → bioguider-0.2.16.dist-info}/RECORD +18 -17
- {bioguider-0.2.15.dist-info → bioguider-0.2.16.dist-info}/LICENSE +0 -0
- {bioguider-0.2.15.dist-info → bioguider-0.2.16.dist-info}/WHEEL +0 -0
@@ -1,13 +1,18 @@
 
-
-from pydantic import BaseModel, Field
+
 from bioguider.agents.agent_utils import try_parse_json_object, try_parse_with_llm
 from bioguider.agents.evaluation_task import EvaluationTask
 from bioguider.agents.collection_task import CollectionTask
 from bioguider.agents.identification_task import IdentificationTask
 from bioguider.agents.prompt_utils import CollectionGoalItemEnum
-from bioguider.
-
+from bioguider.utils.constants import (
+    DEFAULT_TOKEN_USAGE,
+    EvaluationInstallationResult,
+    EvaluationREADMEResult,
+    SoftwarePackageContentResult,
+    DemoInstructionsResult,
+    EvaluationSubmissionRequirementsResult,
+)
 
 DEMO_INSTRUCTION_GOAL = """
 1. Identify if it provides the instructions to run on provided data
@@ -18,11 +23,6 @@ DEMO_INSTRUCTION_GOAL = """
 DEMO_INSTRUCTION_FINAL_ANSWER = \
 '{{"run_on_data_instruction": <True or False>, "run_on_custom_instruction": <True or False>, "expected_output_description": <True Or False>}}'
 
-class DemoInstructionsResult(BaseModel):
-    run_on_data_instruction: Optional[bool] = Field(description="A boolean value. Does it provide instructions on how to run on provided data?")
-    run_on_custom_instruction: Optional[bool] = Field(description="A boolean value. Does it provide instructions on how to run on custom data?")
-    expected_output_description: Optional[bool] = Field(description="A boolean value. Does it provide the description of expected output?")
-
 class EvaluationSubmissionRequirementsTask(EvaluationTask):
     def __init__(
         self,
@@ -32,8 +32,8 @@ class EvaluationSubmissionRequirementsTask(EvaluationTask):
         meta_data = None,
         step_callback = None,
         summarized_files_db = None,
-        readme_files_evaluation: dict | None = None,
-        installation_evaluation:
+        readme_files_evaluation: dict[str, EvaluationREADMEResult] | None = None,
+        installation_evaluation: EvaluationInstallationResult | None = None,
         installation_files: list[str] | None = None
     ):
         super().__init__(llm, repo_path, gitignore_path, meta_data, step_callback, summarized_files_db)
@@ -59,24 +59,24 @@ class EvaluationSubmissionRequirementsTask(EvaluationTask):
 
         return files
 
-    def _evaluate_software_package_content(self):
+    def _evaluate_software_package_content(self) -> tuple[SoftwarePackageContentResult, list[str]]:
         files = self._collect_software_package_content()
         if len(files) == 3:
-            return
-
-
-
-
+            return SoftwarePackageContentResult(
+                compiled_standalone_software=files[0].strip().lower() != "n/a",
+                source_code=files[1].strip().lower() != "n/a",
+                demo_dataset=files[2].strip().lower() != "n/a",
+            ), files
         else:
-            return
-
-
-
-
+            return SoftwarePackageContentResult(
+                compiled_standalone_software=False,
+                source_code=False,
+                demo_dataset=False,
+            ), files
 
-    def _evaluatie_demo_instructions(self):
+    def _evaluatie_demo_instructions(self) -> tuple[DemoInstructionsResult | None, list[str]]:
         readme_files = [f for f in self.readme_files_evaluation.keys() \
-            if self.readme_files_evaluation[f]
+            if self.readme_files_evaluation[f].project_level]
         installation_files = self.installation_files if self.installation_files is not None else []
         provided_files = readme_files + installation_files
         provided_files = provided_files if len(provided_files) > 0 else None
@@ -99,43 +99,58 @@ class EvaluationSubmissionRequirementsTask(EvaluationTask):
         parsed_obj = self._parse_demo_instruction_result(final_answer)
         return parsed_obj, provided_files
 
-    def _parse_demo_instruction_result(self, result: str | dict):
+    def _parse_demo_instruction_result(self, result: str | dict) -> DemoInstructionsResult:
+        parsed_obj = None
         if isinstance(result, dict):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            parsed_obj = result
+        else:
+            parsed_obj = try_parse_json_object(result)
+            if parsed_obj is None:
+                parsed_obj, token_usage = try_parse_with_llm(
+                    llm=self.llm,
+                    input_text=result,
+                    schema=DemoInstructionsResult,
+                )
+                parsed_obj = vars(parsed_obj) if parsed_obj is not None else parsed_obj
+                self.print_step(token_usage=token_usage)
+        self.print_step(step_output=str(parsed_obj))
+
+        return DemoInstructionsResult(
+            run_on_data_instruction = parsed_obj["run_on_data_instruction"] \
+                if "run_on_data_instruction" in parsed_obj else False,
+            run_on_custom_instruction = parsed_obj["run_on_custom_instruction"] \
+                if "run_on_custom_instruction" in parsed_obj else False,
+            expected_output_description = parsed_obj["expected_output_description"] \
+                if "expected_output_description" in parsed_obj else False,
+        )
 
     def _combine_evaluation(
         self,
-        software_evaluation:
-        demo_evaluation:
-    ):
+        software_evaluation: SoftwarePackageContentResult,
+        demo_evaluation: DemoInstructionsResult,
+    ) -> EvaluationSubmissionRequirementsResult:
         readme_files = [f for f in self.readme_files_evaluation.keys() \
-            if self.readme_files_evaluation[f]
-        structured_install_evaluation
-            self.installation_evaluation["structured_evaluation"]
+            if self.readme_files_evaluation[f].project_level]
+        structured_install_evaluation = self.installation_evaluation.structured_evaluation
         software_dependency = structured_install_evaluation.dependency_number > 0
         install_tutorial = structured_install_evaluation.install_tutorial
-        license = any([
-
-
-
-
-
-
-
-
+        license = any([
+            self.readme_files_evaluation[f].structured_evaluation.license_score \
+                if self.readme_files_evaluation[f].structured_evaluation is not None \
+                else False for f in readme_files
+        ])
+        return EvaluationSubmissionRequirementsResult(
+            compiled_standalone_software=software_evaluation.compiled_standalone_software,
+            source_code=software_evaluation.source_code,
+            demo_dataset=software_evaluation.demo_dataset,
+            run_on_data_instruction=demo_evaluation.run_on_data_instruction,
+            run_on_custom_instruction=demo_evaluation.run_on_custom_instruction,
+            expected_output_description=demo_evaluation.expected_output_description,
+            complete_readme=len(readme_files) > 0,
+            software_dependency=software_dependency,
+            install_tutorial=install_tutorial,
+            license=license,
+        )
 
     def _evaluate(self, files):
 
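The reworked `_parse_demo_instruction_result` maps whatever the JSON/LLM parsing produced onto the `DemoInstructionsResult` model that 0.2.16 moves into `bioguider.utils.constants`, with absent keys falling back to False. A minimal sketch of that mapping, assuming the package is installed; the `raw` dict here is a hypothetical parser output, not taken from the release:

```python
from bioguider.utils.constants import DemoInstructionsResult

# Hypothetical parser output: only one key was recovered from the final answer.
raw = {"run_on_data_instruction": True}

# Missing keys default to False, mirroring the "key in parsed_obj else False" pattern above.
result = DemoInstructionsResult(
    run_on_data_instruction=raw.get("run_on_data_instruction", False),
    run_on_custom_instruction=raw.get("run_on_custom_instruction", False),
    expected_output_description=raw.get("expected_output_description", False),
)
print(result)  # the two unspecified flags come out as False
```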
@@ -11,7 +11,7 @@ from bioguider.agents.agent_utils import read_file
 from bioguider.agents.prompt_utils import EVALUATION_INSTRUCTION
 from bioguider.database.summarized_file_db import SummarizedFilesDb
 from bioguider.utils.constants import DEFAULT_TOKEN_USAGE, ProjectMetadata
-from .
+from .common_conversation import CommonConversation
 from ..utils.pyphen_utils import PyphenReadability
 
 logger = logging.getLogger(__name__)
@@ -79,7 +79,7 @@ class IdentificationObserveStep(PEOCommonStep):
     def _execute_directly(self, state: IdentificationWorkflowState):
         step_count = state["step_count"]
         instruction = "Now, we have reached max recursion limit, please give me the **final answer** based on the current information" \
-            if step_count == MAX_STEP_COUNT - 2 else "Now, Let's begin."
+            if step_count == MAX_STEP_COUNT/3 - 2 else "Now, Let's begin."
         system_prompt = self._prepare_system_prompt(state)
         agent = CommonAgentTwoSteps(llm=self.llm)
         res, _, token_usage, reasoning_process = agent.go(
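Assuming the `MAX_STEP_COUNT` used here is the constant redefined as `3*10` in `bioguider/utils/constants.py` (see the constants.py hunk below), the new condition makes the "final answer" instruction fire at a much earlier step. A quick check of the new threshold:

```python
MAX_STEP_COUNT = 3 * 10  # value introduced in 0.2.16

# New condition in the observe step: step_count == MAX_STEP_COUNT / 3 - 2
threshold = MAX_STEP_COUNT / 3 - 2
print(threshold)        # 8.0 (true division yields a float)
print(8 == threshold)   # True, so an integer step_count of 8 triggers the final-answer prompt
```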
bioguider/agents/prompt_utils.py CHANGED

@@ -209,9 +209,11 @@ If **any one** of these is present, the document should be classified as Contrib
     "plan_important_instructions": """ - A comiled standalone software file is non-textual and appears to be in an executable format (e.g., `.exe`, `.dll`, `.so`, `.bin`, `.elf`).
  - A comiled standalone software file **is not a script or compiled library**, that is, It is not a wrapper script (e.g., shell, Python, Python notebook or Rmd) nor a dynamic/shared library meant for linking.
  So, when you are identifying a binary file, **do not** use any tools (our tools don't work for binary file), you need to figure out if it is compiled standalone software file by the file name and extension on your own.
- -
- -
+ - **Source code files** are determined by their **extensions** or **file names** (e.g., `.py`, `.R`, `.ipynb`, `.ts`, `.js`). **Do not open or summarize their content.**
+ - **Example data files** are identified by typical data extensions (e.g., `.dat`, `.csv`, `.fastq`) or names like `example_*.txt`.
+ If extension/name is ambiguous, use summarize_file_tool to summarize file content to decide, **do not** read the file content.
  - **Note**: You **only need to detect** whether at least **one** compiled standalone software file, **one** source code file and **one** example data file exist — no need to list all such files.
+ - **Note**: When identifying **compiled standalone software** or **example data files**, **ignore** any **image files** (e.g., `.png`, `.jpg`, `.jpeg`, `.gif`, `.svg`) and **image folders** (directories containing primarily images).
 """,
     "observe_important_instructions": """ - A comiled standalone software file is non-textual and appears to be in an executable format (e.g., `.exe`, `.dll`, `.so`, `.bin`, `.elf`).
  - A comiled standalone software file **is not a script or compiled library**, that is, It is not a wrapper script (e.g., shell, Python, Python notebook or Rmd) nor a dynamic/shared library meant for linking.
bioguider/utils/constants.py CHANGED

@@ -2,6 +2,8 @@
 from enum import Enum
 from typing import Optional
 
+from pydantic import BaseModel, Field
+
 DEFAULT_TOKEN_USAGE = {
     "total_tokens": 0,
     "completion_tokens": 0,
@@ -40,4 +42,87 @@ class ProjectMetadata:
 
 MAX_FILE_LENGTH=10 *1024 # 10K
 MAX_SENTENCE_NUM=20
-MAX_STEP_COUNT=
+MAX_STEP_COUNT=3*10
+
+class ProjectLevelEvaluationREADMEResult(BaseModel):
+    project_level: Optional[bool]=Field(description="A boolean value specifying if the README file is **project-level** README. TRUE: project-level, FALSE, folder-level")
+
+class StructuredEvaluationREADMEResult(BaseModel):
+    available_score: Optional[bool]=Field(description="A boolean value, Is the README accessible and present?")
+    readability_score: Optional[str]=Field(description="A string value, could be `Poor`, `Fair`, `Good`, or `Excellent`")
+    readability_suggestions: Optional[str]=Field(description="Suggestions to improve readability if necessary")
+    project_purpose_score: Optional[bool]=Field(description="A boolean value. Is the project's goal or function clearly stated?")
+    project_purpose_suggestions: Optional[str]=Field(description="Suggestions if not clear")
+    hardware_and_software_spec_score: Optional[str]=Field(description="A string value, could be `Poor`, `Fair`, `Good`, or `Excellent`")
+    hardware_and_software_spec_suggestions: Optional[str]=Field(description="Suggestions if not clear")
+    dependency_score: Optional[str]=Field(description="A string value, could be `Poor`, `Fair`, `Good`, or `Excellent`")
+    dependency_suggestions: Optional[str]=Field(description="Suggestions if dependencies are not clearly stated")
+    license_score: Optional[bool]=Field(description="A boolean value, Are contributor or maintainer details provided?")
+    license_suggestions: Optional[str]=Field(description="Suggestions to improve license information")
+    contributor_author_score: Optional[bool]=Field(description="A boolean value. are contributors or author included?")
+    overall_score: str=Field(description="A overall scroll for the README quality, could be `Poor`, `Fair`, `Good`, or `Excellent`")
+
+class FreeProjectLevelEvaluationREADMEResult(BaseModel):
+    available: Optional[str]=Field(description="A string including assessment and suggestion for the availability of the README file")
+    readability: Optional[str]=Field(description="A string including assessment and suggestion for the readability of the README file")
+    project_purpose: Optional[str]=Field(description="A string including assessment and suggestion for the project purpose of the README file")
+    hardware_and_software_spec: Optional[str]=Field(description="A string including assessment and suggestion for the hardware and software spec and compatibility description of the README file")
+    dependency: Optional[str]=Field(description="A string including assessment and suggestion for the dependencies of the README file")
+    license: Optional[str]=Field(description="A string including assessment and suggestion for the license information of the README file")
+    contributor_author: Optional[str]=Field(description="A string including assessment and suggestion for the contributor and author information of the README file")
+
+class FreeFolderLevelEvaluationREADMEResult(BaseModel):
+    score: Optional[str]=Field(description="An overall score")
+    key_strengths: Optional[str]=Field(description="A string specifying the key strengths of README file.")
+    overall_improvement_suggestions: Optional[list[str]]=Field(description="A list of overall improvement suggestions")
+
+class EvaluationREADMEResult(BaseModel):
+    project_level: bool
+    structured_evaluation: StructuredEvaluationREADMEResult | None
+    free_evaluation: FreeProjectLevelEvaluationREADMEResult | FreeFolderLevelEvaluationREADMEResult | None
+    structured_reasoning_process: str | None
+    free_reasoning_process: str | None
+
+
+class StructuredEvaluationInstallationResult(BaseModel):
+    install_available: Optional[bool]=Field(description="A boolean value. Is the installation documents accessible and present?")
+    install_tutorial: Optional[bool]=Field(description="A boolean value. Is the installation tutorial provided?")
+    dependency_number: Optional[int]=Field(description="A number. It is the number of dependencies that are required to install.")
+    dependency_suggestions: Optional[str]=Field(description="A string value. It is the specific improvements if necessary, such as missing dependencies")
+    compatible_os: Optional[bool]=Field(description="A boolean value. Is compatible operating system described?")
+    overall_score: Optional[str]=Field(description="A overall scroll for the installation quality, could be `Poor`, `Fair`, `Good`, or `Excellent`")
+
+class FreeEvaluationInstallationResult(BaseModel):
+    ease_of_access: Optional[str]=Field(description="A string including assessment and suggestions for the ease of access of the installation information")
+    clarity_of_dependency: Optional[str]=Field(description="A string including assessment and suggestions for the clarity of dependency specification")
+    hardware_requirements: Optional[str]=Field(description="A string including assessment and suggestions for the hardware requirements")
+    installation_guide: Optional[str]=Field(description="A string including assessment and suggestions for the installation guide")
+    compatible_os: Optional[str]=Field(description="A string including assessment and suggestions for the compatible operating system")
+
+class EvaluationInstallationResult(BaseModel):
+    structured_evaluation: StructuredEvaluationInstallationResult | None
+    free_evaluation: FreeEvaluationInstallationResult | None
+    structured_reasoning_process: str | None
+    free_reasoning_process: str | None
+
+class SoftwarePackageContentResult(BaseModel):
+    compiled_standalone_software: Optional[bool] = Field(description="A boolean value. Does it provide the compiled standalone software?")
+    source_code: Optional[bool] = Field(description="A boolean value. Does it provide the source code?")
+    demo_dataset: Optional[bool] = Field(description="A boolean value. Does it provide the demo dataset?")
+
+class DemoInstructionsResult(BaseModel):
+    run_on_data_instruction: Optional[bool] = Field(description="A boolean value. Does it provide instructions on how to run on provided data?")
+    run_on_custom_instruction: Optional[bool] = Field(description="A boolean value. Does it provide instructions on how to run on custom data?")
+    expected_output_description: Optional[bool] = Field(description="A boolean value. Does it provide the description of expected output?")
+
+class EvaluationSubmissionRequirementsResult(BaseModel):
+    compiled_standalone_software: bool
+    source_code: bool
+    demo_dataset: bool
+    run_on_data_instruction: bool
+    run_on_custom_instruction: bool
+    expected_output_description: bool
+    complete_readme: bool
+    software_dependency: bool
+    install_tutorial: bool
+    license: bool
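These new result models are plain pydantic containers that the evaluation tasks now pass around instead of loose dicts; the nesting mirrors how `_combine_evaluation` reads `installation_evaluation.structured_evaluation.dependency_number` in the hunk above. A minimal construction sketch, assuming bioguider 0.2.16 is installed; the field values are made up for illustration:

```python
from bioguider.utils.constants import (
    EvaluationInstallationResult,
    StructuredEvaluationInstallationResult,
)

# Illustrative values only; a real run fills these from the installation evaluation task.
structured = StructuredEvaluationInstallationResult(
    install_available=True,
    install_tutorial=True,
    dependency_number=4,
    dependency_suggestions=None,
    compatible_os=True,
    overall_score="Good",
)
installation = EvaluationInstallationResult(
    structured_evaluation=structured,
    free_evaluation=None,
    structured_reasoning_process=None,
    free_reasoning_process=None,
)

# Attribute access replaces the old dict-style "structured_evaluation" lookup.
print(installation.structured_evaluation.dependency_number > 0)  # True
```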
bioguider/utils/utils.py CHANGED

@@ -1,5 +1,10 @@
 import logging
+import re
+import subprocess
+from typing import Optional
 import tiktoken
+
+from bioguider.utils.constants import DEFAULT_TOKEN_USAGE
 logger = logging.getLogger(__name__)
 
 def count_tokens(text: str, local_ollama: bool = False) -> int:
@@ -24,4 +29,43 @@ def count_tokens(text: str, local_ollama: bool = False) -> int:
         # Fallback to a simple approximation if tiktoken fails
         logger.warning(f"Error counting tokens with tiktoken: {e}")
         # Rough approximation: 4 characters per token
-        return len(text) // 4
+        return len(text) // 4
+
+
+def run_command(command: list, cwd: str = None, timeout: int = None):
+    """
+    Run a shell command with optional timeout and return stdout, stderr, and return code.
+    """
+    try:
+        result = subprocess.run(
+            command,
+            cwd=cwd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            timeout=timeout
+        )
+        return result.stdout, result.stderr, result.returncode
+    except subprocess.TimeoutExpired as e:
+        return e.stdout or "", e.stderr or f"Command timed out after {timeout} seconds", -1
+
+def escape_braces(text: str) -> str:
+    # First replace single } not part of }} with }}
+    text = re.sub(r'(?<!})}(?!})', '}}', text)
+    # Then replace single { not part of {{
+    text = re.sub(r'(?<!{){(?!{)', '{{', text)
+    return text
+
+def increase_token_usage(
+    token_usage: Optional[dict] = None,
+    incremental: dict = {**DEFAULT_TOKEN_USAGE},
+):
+    if token_usage is None:
+        token_usage = {**DEFAULT_TOKEN_USAGE}
+    token_usage["total_tokens"] += incremental["total_tokens"]
+    token_usage["completion_tokens"] += incremental["completion_tokens"]
+    token_usage["prompt_tokens"] += incremental["prompt_tokens"]
+
+    return token_usage
+
+
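The new helpers in `bioguider/utils/utils.py` are self-contained; a short usage sketch of the two pure functions, with values chosen only for illustration:

```python
from bioguider.utils.utils import escape_braces, increase_token_usage

# escape_braces doubles lone braces so the text survives str.format-style prompt templating.
print(escape_braces('{"key": 1}'))      # {{"key": 1}}
print(escape_braces('already {{ok}}'))  # already {{ok}}  (doubled braces are left alone)

# increase_token_usage accumulates usage dicts; passing None starts from DEFAULT_TOKEN_USAGE.
usage = increase_token_usage(
    None,
    {"total_tokens": 12, "completion_tokens": 5, "prompt_tokens": 7},
)
print(usage)  # total_tokens -> 12, completion_tokens -> 5, prompt_tokens -> 7
```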
@@ -2,31 +2,32 @@ bioguider/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bioguider/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bioguider/agents/agent_task.py,sha256=TL0Zx8zOmiAVslmNbfMPQ38qTQ73QospY6Dwrwf8POg,2890
 bioguider/agents/agent_tools.py,sha256=r21wHV6a-Ic2T0dk4YzA-_d7PodHPM3GzRxJqv-llSw,7286
-bioguider/agents/agent_utils.py,sha256=
+bioguider/agents/agent_utils.py,sha256=Om1xwAmRdHFC4jkbtaBM3p309z5KFjMX1a6BFVPXX68,14449
 bioguider/agents/collection_execute_step.py,sha256=Ev4BLjjmBdsc52M1zrq7QK8g7fsffDkSxu-jN2rvedw,5614
-bioguider/agents/collection_observe_step.py,sha256=
+bioguider/agents/collection_observe_step.py,sha256=n863HrbVANQVeltffjS2zXv-AfVErC8ZEMfb_78hafk,5140
 bioguider/agents/collection_plan_step.py,sha256=Nn0f8AOkEDCDtnhaqE7yCQoi7PVpsHmiUcsIqC0T0dQ,5956
 bioguider/agents/collection_task.py,sha256=MjpTYiQQYUpmQf2UOn-dOCZU3kxypc4uOnzd15wb1Ow,7882
 bioguider/agents/collection_task_utils.py,sha256=_e2EebYhl-UYjZ0rHNf2-p32YlstBSffv32suiuT9LI,5386
-bioguider/agents/common_agent.py,sha256=
-bioguider/agents/common_agent_2step.py,sha256=
+bioguider/agents/common_agent.py,sha256=TpfxbYskwuwWrjs1g9RaG7sdA5rOLdiVac7If7uK2sg,4558
+bioguider/agents/common_agent_2step.py,sha256=rGiDzUkmmUIFnmJJxzXK5M5BfIyINHXLZ0pmPRUVqQg,7911
+bioguider/agents/common_conversation.py,sha256=zSRiIbUlif8VMC5ZuFfDTyyf1olg3YyWSnYj_E0_SXg,1649
 bioguider/agents/common_step.py,sha256=GdOCbmj1pwh4etg-futVFYVDQuoUG89DnIrw-B6QbzM,2594
 bioguider/agents/dockergeneration_execute_step.py,sha256=F92jDlkc6KjAvTkX7q1FsCYP8J15SCaNgmwh3YPqfDo,6500
-bioguider/agents/dockergeneration_observe_step.py,sha256=
+bioguider/agents/dockergeneration_observe_step.py,sha256=Bo5Td0fzMYLbLki0FvwamzqRFOy4eu3AvIUa8oFApE4,6131
 bioguider/agents/dockergeneration_plan_step.py,sha256=SB8tQM9PkIKsD2o1DFD7bedcxz6r6hSy8n_EVK60Fz0,7235
 bioguider/agents/dockergeneration_task.py,sha256=mYmorLKnJ-Jku3Qq_Y_kcSTsbYIo3RiVdD0puxqXY5Q,6221
 bioguider/agents/dockergeneration_task_utils.py,sha256=v7emqrJlVW-A5ZdLmPSdiaMSKCR8uzy9UYzx_1cgzyo,9041
-bioguider/agents/evaluation_installation_task.py,sha256=
-bioguider/agents/evaluation_readme_task.py,sha256=
-bioguider/agents/evaluation_submission_requirements_task.py,sha256=
-bioguider/agents/evaluation_task.py,sha256=
+bioguider/agents/evaluation_installation_task.py,sha256=mbPRZK6gVEPt-XjDl76_Q71meqFkofFnGPDMq5csL7I,9496
+bioguider/agents/evaluation_readme_task.py,sha256=l0QJAFEE0eBcF2WTDO3S4qPHCJujHPPkfi29tTZf9nI,25223
+bioguider/agents/evaluation_submission_requirements_task.py,sha256=hlEB3_XTgKiwYLiGaWo1kHLFdYKx4NK3EX73zUzeIQY,7584
+bioguider/agents/evaluation_task.py,sha256=4UrcUCy8UIVLd1NpBpqGgmudqeSK3wWI3Jm8LEYySbY,12661
 bioguider/agents/identification_execute_step.py,sha256=w3IjL8f2WiHCyiLjVSoySnIAXpi1-hK1DLKCnXbAN2Y,5587
-bioguider/agents/identification_observe_step.py,sha256=
+bioguider/agents/identification_observe_step.py,sha256=Me5mhEM4e7FGnVFcluNtqfhIxzng6guGIu39xi1TrS8,4341
 bioguider/agents/identification_plan_step.py,sha256=owsTK1NZIuiZL7QPVknJyp9TBRK-mhnuf2RwK4YzaxU,5442
 bioguider/agents/identification_task.py,sha256=bTbovxxQVpO1TcdcQAxDxwPISuAcXndO7zsvHpJSb64,10147
 bioguider/agents/identification_task_utils.py,sha256=Lf0Rj0L0KSiyJmPAgeSz0vLUFQr6TSFuzgufimEN4H0,630
 bioguider/agents/peo_common_step.py,sha256=iw2c1h7X11WJzSE2tSRg0UAoXH0QOlQDxW9CCzSVMOY,2677
-bioguider/agents/prompt_utils.py,sha256=
+bioguider/agents/prompt_utils.py,sha256=EuR7NkB7PcCgNroeX91fHUSK2X6mLS6vK7RnzyQHAHI,17593
 bioguider/agents/python_ast_repl_tool.py,sha256=o7-4P1h8jS8ikhGSA4CI_OWQ2a0Eg5tEdmuAp_qrO-0,2519
 bioguider/agents/rag_collection_task.py,sha256=r_jPAMjQcC7dIydKxX77UuMqjJ3MiVKswNZ-yNw7yx8,5199
 bioguider/conversation.py,sha256=DIvk_d7pz_guuORByK1eaaF09FAK-8shcNTrbSUHz9Y,1779
@@ -38,13 +39,13 @@ bioguider/rag/data_pipeline.py,sha256=bkJ2IUCgPx_OL2uZtPd6cIBor2VFZEIfGd5kVlmiPj
 bioguider/rag/embedder.py,sha256=jofR8hOj3Aj2IyBQ9y6FeAc84tgq5agbIfCGyFxYpJ8,650
 bioguider/rag/rag.py,sha256=JFPwrJlKDSyd3U3Gce_NSxI5343eNUbqPG9Fs5Pfoq0,4696
 bioguider/settings.py,sha256=BD_iz9aYarxmWUl0XaKl4-D4oTXMhFzljsXLNn2phis,3143
-bioguider/utils/constants.py,sha256=
+bioguider/utils/constants.py,sha256=0e08fByA7a6V7umHyTc7dbOg9axZT6eIgxX_zh4NXQ0,7728
 bioguider/utils/default.gitignore,sha256=XjPdyO2KV8z8iyuqluaNR_70tBQftMpyKL8HboVNyeI,1605
 bioguider/utils/file_utils.py,sha256=9VfAHsz1UkFPtzAmvWZvPl1TMaKIYNjNlLgsfB8tNjg,3683
 bioguider/utils/gitignore_checker.py,sha256=pOYUwsS9D5014LxcZb0cj3s2CAYaD2uF_pYJpaNKcho,6532
 bioguider/utils/pyphen_utils.py,sha256=cdZc3qphkvMDeL5NiZ8Xou13M_uVNP7ifJ-FwxO-0BE,2680
-bioguider/utils/utils.py,sha256=
-bioguider-0.2.
-bioguider-0.2.
-bioguider-0.2.
-bioguider-0.2.
+bioguider/utils/utils.py,sha256=aWtgdvB04gEiJTfQNK4aQPO1mxv2zRZTbDaGUBy9DFc,2275
+bioguider-0.2.16.dist-info/LICENSE,sha256=qzkvZcKwwA5DuSuhXMOm2LcO6BdEr4V7jwFZVL2-jL4,1065
+bioguider-0.2.16.dist-info/METADATA,sha256=_aiIJAMXdRo04JI5rH-YRDUQT_E3PqvLy4l2YkPKKVk,1868
+bioguider-0.2.16.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+bioguider-0.2.16.dist-info/RECORD,,