versionhq 1.2.3.8__py3-none-any.whl → 1.2.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +3 -2
- versionhq/_prompt/auto_feedback.py +103 -0
- versionhq/_prompt/constants.py +30 -0
- versionhq/_prompt/model.py +218 -0
- versionhq/_utils/__init__.py +1 -0
- versionhq/_utils/is_valid_url.py +15 -0
- versionhq/agent/model.py +33 -67
- versionhq/agent_network/formation.py +18 -10
- versionhq/agent_network/model.py +2 -6
- versionhq/knowledge/source_docling.py +3 -19
- versionhq/llm/model.py +8 -12
- versionhq/task/evaluation.py +1 -1
- versionhq/task/model.py +142 -142
- versionhq/task/structured_response.py +3 -1
- versionhq/task_graph/draft.py +11 -19
- versionhq/task_graph/model.py +90 -34
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/METADATA +11 -16
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/RECORD +21 -17
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/WHEEL +1 -1
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/LICENSE +0 -0
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -17,7 +17,7 @@ from versionhq.clients.workflow.model import MessagingWorkflow, MessagingCompone
 from versionhq.knowledge.model import Knowledge, KnowledgeStorage
 from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
 from versionhq.knowledge.source_docling import DoclingSource
-from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType
+from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType, ReformTriggerEvent
 from versionhq.task.model import Task, TaskOutput, ResponseField, TaskExecutionType
 from versionhq.task.evaluation import Evaluation, EvaluationItem
 from versionhq.tool.model import Tool, ToolSet
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow


-__version__ = "1.2.3.8"
+__version__ = "1.2.4.2"
 __all__ = [
     "Agent",

@@ -70,6 +70,7 @@ __all__ = [
     "DependencyType",
     "Condition",
     "ConditionType",
+    "ReformTriggerEvent",

     "Task",
     "TaskOutput",
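From a consumer's perspective, the visible effect of this change is the version bump and one new top-level export. A minimal sketch (the members of ReformTriggerEvent are not shown in this diff, so only the attribute itself is referenced):

```python
# Sketch: versionhq 1.2.4.2 re-exports ReformTriggerEvent at the package root.
import versionhq as vhq

print(vhq.__version__)         # "1.2.4.2"
print(vhq.ReformTriggerEvent)  # re-exported from versionhq.task_graph.model
```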
versionhq/_prompt/auto_feedback.py
ADDED
@@ -0,0 +1,103 @@
+from typing import List, Optional, Dict
+from typing_extensions import Self
+
+from pydantic import InstanceOf, Field
+
+from versionhq.agent.model import Agent
+from versionhq.task.model import Task
+from versionhq.task_graph.model import TaskGraph, Node, DependencyType, ReformTriggerEvent
+from versionhq._prompt.model import Prompt
+from versionhq._prompt.constants import REFLECT, INTEGRATE, parameter_sets
+
+
+class PromptFeedbackGraph(TaskGraph):
+    """A Pydantic class to handle auto prompt feedback cycle."""
+
+    _times_iteration: int = 0
+    user_prompts: Optional[Dict[str, str]] = Field(default_factory=dict) # { "0": "...", "1": "..."}
+    dev_prompts: Optional[Dict[str, str]] = Field(default_factory=dict)
+    prompts: Optional[Dict[str, InstanceOf[Prompt]]] = Field(default_factory=dict)
+
+
+    def __init__(self, prompt: InstanceOf[Prompt] = None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        if prompt:
+            user_prompt, dev_prompt, _ = prompt.format_core()
+            self.prompts = { self.key: prompt }
+            self.user_prompts = { self.key: user_prompt }
+            self.dev_prompts = { self.key: dev_prompt }
+
+
+    def _fetch_latest_prompt(self) -> InstanceOf[Prompt] | None:
+        return self.prompts[self.key] if self.key in self.prompts else None
+
+
+    def _generate_agents(self) -> List[Agent] | None:
+        agents = []
+        prompt = self._fetch_latest_prompt()
+
+        if not prompt:
+            return None
+
+        agent = prompt.agent
+        agent_params = agent.model_dump(exclude={"id", "llm", "llm_config", "self_learning"})
+        for params in parameter_sets:
+            agent = Agent(**agent_params, llm=agent.llm.model, llm_config={**params}, self_learning=True)
+            agents.append(agent)
+        return agents
+
+
+    def _reflect(self, original_response: str) -> Task:
+        description = REFLECT.format(original_prompt=self.original_prompt, original_response=original_response)
+        return Task(description=description)
+
+
+    def set_up_graph(self, **attributes) -> Self:
+        """Sets up a TaskGraph object with nodes and edges."""
+
+        prompt = self._fetch_latest_prompt()
+        base_task = prompt.task if prompt else None
+        base_agent = prompt.agent if prompt else None
+
+        if not base_task or not base_agent:
+            return None
+
+        agents = self._generate_agents()
+        if not agents:
+            return None
+
+        self.concl_template = base_task.pydantic_output if base_task.pydantic_output else base_task.response_fields if base_task.response_fields else None
+        base_agent.callbacks.append(self._reflect)
+        init_node = Node(task=base_task, assigned_to=base_agent)
+        self.add_node(init_node)
+
+        final_task = Task(description=INTEGRATE.format(original_prompt=self.original_prompt, responses=""))
+        final_node = Node(task=final_task, agent=base_agent)
+        self.add_node(node=final_node)
+
+        for agent in agents:
+            node = Node(task=base_task, assigned_to=agent)
+            self.add_node(node=node)
+            self.add_dependency(source=init_node.identifier, target=node.identifier, dependency_type=DependencyType.FINISH_TO_START, required=True)
+            self.add_dependency(source=node.identifier, target=final_node.identifier, dependency_type=DependencyType.FINISH_TO_START, required=True)
+
+        if attributes:
+            for k, v in attributes.items():
+                if hasattr(self, k):
+                    setattr(self, k, v)
+
+        return self
+
+    @property
+    def index(self) -> str:
+        """Returns an index to add new item."""
+        return str(len([k for k in self.user_prompts.keys()]))
+
+    @property
+    def original_prompt(self) -> str:
+        return str(self.user_prompts["0"]) + str(self.dev_prompts["0"])
+
+    @property
+    def key(self):
+        return str(self._times_iteration)
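A plausible way to drive the new feedback cycle, read off the class above. The agent and task values are illustrative, and this assumes TaskGraph can be instantiated without further required fields:

```python
# Hypothetical usage sketch for the new PromptFeedbackGraph (not taken from the diff itself).
import versionhq as vhq
from versionhq._prompt.model import Prompt
from versionhq._prompt.auto_feedback import PromptFeedbackGraph

agent = vhq.Agent(role="Researcher", goal="Summarize the given article")
task = vhq.Task(description="Summarize the article in three bullet points.")

# __init__ seeds prompts / user_prompts / dev_prompts under key "0" via format_core().
graph = PromptFeedbackGraph(prompt=Prompt(task=task, agent=agent, context=None))

# set_up_graph() fans the base task out to agent variants built from parameter_sets
# and joins their outputs back through a final INTEGRATE node.
graph = graph.set_up_graph()
```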
versionhq/_prompt/constants.py
ADDED
@@ -0,0 +1,30 @@
+REFLECT = "Here is the orignal prompt: {original_prompt}\nHere is the original response: {original_response}\nAnalyze the original prompt and repsonse, check for any pontential issue, and create an improved response."
+
+INTEGRATE = "Here is the original prompt: {original_prompt}\nHere are responses: {responses}. Help integrate them as a single response."
+
+parameter_sets = [
+    {
+        "temperature": 0.2,
+        "top_p": 0.5,
+        "max_tokens": 5000,
+        "frequency_penalty": 0.5,
+        "presence_penalty": 0.5,
+        "stop": ["\n\n", "###"],
+    },
+    {
+        "temperature": 0.7,
+        "top_p": 0.8,
+        "max_tokens": 8000,
+        "frequency_penalty": 0.3,
+        "presence_penalty": 0.3,
+        "stop": ["\n\n"],
+    },
+    {
+        "temperature": 1.0,
+        "top_p": 0.95,
+        "max_tokens": 12000,
+        "frequency_penalty": 0.0,
+        "presence_penalty": 0.0,
+        "stop": [],
+    }
+]
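For orientation, a quick sketch of how these constants are consumed; the filled-in values are made up:

```python
# Illustrative expansion of the REFLECT template and one parameter set.
from versionhq._prompt.constants import REFLECT, parameter_sets

print(REFLECT.format(
    original_prompt="Summarize the article in three bullets.",
    original_response="The article argues that ...",
))

# Each dict becomes the llm_config of one agent variant in
# PromptFeedbackGraph._generate_agents(); parameter_sets[0] is the most
# conservative (low temperature, tight stop sequences).
print(parameter_sets[0]["temperature"])  # 0.2
```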
versionhq/_prompt/model.py
ADDED
@@ -0,0 +1,218 @@
+
+from typing import Dict, List, Tuple, Any
+from textwrap import dedent
+
+from pydantic import InstanceOf
+
+from versionhq._utils import is_valid_url
+
+
+class Prompt:
+    """A class to format, store, and manage a prompt."""
+
+    task: Any = None
+    agent: Any = None
+    context: Any = None
+
+
+    def __init__(self, task, agent, context):
+        from versionhq.agent.model import Agent
+        from versionhq.task.model import Task
+
+        self.task = task if isinstance(task, Task) else Task(description=str(task))
+        self.agent = agent if isinstance(agent, Agent) else Agent(role=str(agent))
+        self.context = context
+
+
+    def _draft_output_prompt(self) -> str:
+        """Drafts prompt for output either from `pydantic_output` or `response_fields`"""
+
+        from versionhq.llm.model import DEFAULT_MODEL_PROVIDER_NAME
+
+        output_prompt = ""
+        model_provider = self.agent.llm.provider if self.agent else DEFAULT_MODEL_PROVIDER_NAME
+
+        if self.task.pydantic_output:
+            output_prompt, output_formats_to_follow = "", dict()
+            response_format = str(self.task._structure_response_format(model_provider=model_provider))
+            for k, v in self.task.pydantic_output.model_fields.items():
+                output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
+
+            output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+Response format: {response_format}
+Ref. Output image: {output_formats_to_follow}
+"""
+        elif self.task.response_fields:
+            output_prompt, output_formats_to_follow = "", dict()
+            response_format = str(self.task._structure_response_format(model_provider=model_provider))
+            for item in self.task.response_fields:
+                if item:
+                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+
+            output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+Response format: {response_format}
+Ref. Output image: {output_formats_to_follow}
+"""
+        else:
+            output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
+
+        return dedent(output_prompt)
+
+
+    def _draft_context_prompt(self, context: Any = None) -> str:
+        """
+        Create a context prompt from the given context in any format: a task object, task output object, list, dict.
+        """
+        from versionhq.task.model import Task, TaskOutput
+
+        context_to_add = None
+        if not context:
+            return context_to_add
+
+        match context:
+            case str():
+                context_to_add = context
+
+            case Task():
+                if not context.output:
+                    res = context.execute()
+                    context_to_add = res._to_context_prompt()
+
+                else:
+                    context_to_add = context.output.raw
+
+            case TaskOutput():
+                context_to_add = context._to_context_prompt()
+
+
+            case dict():
+                context_to_add = str(context)
+
+            case list():
+                res = ", ".join([self._draft_context_prompt(context=item) for item in context])
+                context_to_add = res
+
+            case _:
+                pass
+
+        return dedent(context_to_add)
+
+
+    def _format_content_prompt(self) -> Dict[str, str]:
+        """Formats content (file, image, audio) prompt message."""
+
+        import base64
+        from pathlib import Path
+
+        content_messages = {}
+
+        if self.task.image:
+            with open(self.task.image, "rb") as file:
+                content = file.read()
+                if content:
+                    encoded_file = base64.b64encode(content).decode("utf-8")
+                    img_url = f"data:image/jpeg;base64,{encoded_file}"
+                    content_messages.update({ "type": "image_url", "image_url": { "url": img_url }})
+
+        if self.task.file:
+            if is_valid_url(self.task.file):
+                content_messages.update({ "type": "image_url", "image_url": self.file })
+
+        if self.task.audio and self.agent.llm.provider == "gemini":
+            audio_bytes = Path(self.task.audio).read_bytes()
+            encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
+            content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
+
+        return content_messages
+
+
+    def _find_rag_tools(self) -> List[InstanceOf[Any]]:
+        """Find RAG tools from the agent and task object."""
+
+        from versionhq.tool.rag_tool import RagTool
+
+        tools = []
+        if self.task.tools:
+            [tools.append(item) for item in self.task.tools if isinstance(item, RagTool)]
+
+        if self.agent.tools and self.task.can_use_agent_tools:
+            [tools.append(item) for item in self.agent.tools if isinstance(item, RagTool)]
+
+        return tools
+
+
+    def draft_user_prompt(self) -> str:
+        """Draft task prompts from its description and context."""
+
+        output_prompt = self._draft_output_prompt()
+        task_slices = [self.task.description, output_prompt, ]
+
+        if self.context:
+            context_prompt = self._draft_context_prompt(context=self.context)
+            task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")
+
+        return "\n".join(task_slices)
+
+
+    def format_core(self, rag_tools: List[Any] = None) -> Tuple[str, str, List[Dict[str, str]]]:
+        """Formats prompt messages sent to the LLM, then returns task prompt, developer prompt, and messages."""
+
+        from versionhq.knowledge._utils import extract_knowledge_context
+        from versionhq.memory.contextual_memory import ContextualMemory
+
+        user_prompt = self.draft_user_prompt()
+        rag_tools = rag_tools if rag_tools else self._find_rag_tools()
+
+        if self.agent._knowledge:
+            agent_knowledge = self.agent._knowledge.query(query=[user_prompt,], limit=5)
+            if agent_knowledge:
+                agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
+                if agent_knowledge_context:
+                    user_prompt += agent_knowledge_context
+
+        if rag_tools:
+            for item in rag_tools:
+                rag_tool_context = item.run(agent=self.agent, query=self.task.description)
+                if rag_tool_context:
+                    user_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
+
+        if self.agent.with_memory == True:
+            contextual_memory = ContextualMemory(
+                memory_config=self.agent.memory_config, stm=self.agent.short_term_memory, ltm=self.agent.long_term_memory, um=self.agent.user_memory
+            )
+            context_str = self._draft_context_prompt(context=self.context)
+            query = f"{self.task.description} {context_str}".strip()
+            memory = contextual_memory.build_context_for_task(query=query)
+            if memory.strip() != "":
+                user_prompt += memory.strip()
+
+
+        ## comment out - training
+        # if self.agent.networks and self.agent.networks._train:
+        #     user_prompt = self.agent._training_handler(user_prompt=user_prompt)
+        # else:
+        #     user_prompt = self.agent._use_trained_data(user_prompt=user_prompt)
+
+
+        content_prompt = self._format_content_prompt()
+
+        messages = []
+        if content_prompt:
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": user_prompt
+                        },
+                        content_prompt,
+                    ]
+                })
+        else:
+            messages.append({ "role": "user", "content": user_prompt })
+
+        if self.agent.use_developer_prompt:
+            messages.append({ "role": "developer", "content": self.agent.backstory })
+
+        return user_prompt, self.agent.backstory if self.agent.use_developer_prompt else None, messages
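A rough sketch of how the new Prompt class is consumed, mirroring the call site added to Agent.execute_task below; the task, agent, and context values are illustrative:

```python
# Sketch: turning a task + agent + context into LLM-ready messages.
import versionhq as vhq
from versionhq._prompt.model import Prompt

agent = vhq.Agent(role="Analyst")
task = vhq.Task(description="List three risks of the proposed rollout.")

prompt = Prompt(task=task, agent=agent, context="Rollout is planned for Q3.")
user_prompt, dev_prompt, messages = prompt.format_core()

# messages is what Agent._invoke() now receives directly, e.g.
# [{"role": "user", "content": ...}, {"role": "developer", "content": ...}]
```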
versionhq/_utils/__init__.py
CHANGED
versionhq/_utils/is_valid_url.py
ADDED
@@ -0,0 +1,15 @@
+from urllib.parse import urlparse
+
+
+def is_valid_url(url: str) -> bool:
+    try:
+        result = urlparse(url)
+        return all(
+            [
+                result.scheme in ("http", "https", "gs"),
+                result.netloc,
+                len(result.netloc.split(".")) >= 2,  # Ensure domain has TLD
+            ]
+        )
+    except Exception:
+        return False
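A few illustrative calls against the helper above:

```python
# Illustrative checks. The "gs" scheme is accepted alongside http/https,
# and the netloc must contain at least one dot.
from versionhq._utils.is_valid_url import is_valid_url

assert is_valid_url("https://example.com/report.pdf") is True
assert is_valid_url("ftp://example.com/file") is False    # scheme not allowed
assert is_valid_url("https://localhost/file") is False    # netloc lacks a TLD
```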
versionhq/agent/model.py
CHANGED
@@ -10,10 +10,8 @@ from pydantic_core import PydanticCustomError
 from versionhq.agent.rpm_controller import RPMController
 from versionhq.tool.model import Tool, ToolSet, BaseTool
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
-from versionhq.memory.contextual_memory import ContextualMemory
 from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
-from versionhq._utils
-from versionhq._utils.process_config import process_config
+from versionhq._utils import Logger, process_config, is_valid_url


 load_dotenv(override=True)
@@ -33,6 +31,7 @@ class Agent(BaseModel):
     _logger_config: Dict[str, Any] = PrivateAttr(default=dict(verbose=True, info_file_save=True))

     api_key: Optional[str] = Field(default=None)
+    self_learn: bool = Field(default=False)
     config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
@@ -217,7 +216,7 @@ class Agent(BaseModel):
             if isinstance(item, BaseKnowledgeSource):
                 knowledge_sources.append(item)

-            elif isinstance(item, str) and "http" in item and
+            elif isinstance(item, str) and "http" in item and is_valid_url(url=item) == True:
                 docling_fp.append(item)

             elif isinstance(item, str):
@@ -338,25 +337,6 @@ class Agent(BaseModel):
         return llm


-    def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
-        """
-        Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
-        """
-
-        if not llm and not llm_config:
-            Logger(**self._logger_config, filename=self.key).log(level="error", message="Missing llm or llm_config values to update", color="red")
-            pass
-
-        self.llm = llm
-        if llm_config:
-            if self.llm_config:
-                self.llm_config.update(llm_config)
-            else:
-                self.llm_config = llm_config
-
-        return self.set_up_llm()
-
-
     def _train(self) -> Self:
         """
         Fine-tuned the base model using OpenAI train framework.
@@ -369,7 +349,7 @@ class Agent(BaseModel):

     def _invoke(
         self,
-
+        messages: List[Dict[str, str]] = None,
         response_format: Optional[Dict[str, Any]] = None,
         tools: Optional[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = None,
         tool_res_as_final: bool = False,
@@ -385,11 +365,6 @@ class Agent(BaseModel):
         iterations = 0
         raw_response = None

-        messages = []
-        messages.append({ "role": "user", "content": prompts })
-        if self.use_developer_prompt:
-            messages.append({ "role": "developer", "content": self.backstory })
-
         try:
             if self._rpm_controller and self.max_rpm:
                 self._rpm_controller.check_or_wait()
@@ -428,6 +403,25 @@ class Agent(BaseModel):
         raise ValueError("Invalid response from LLM call - None or empty.")


+    def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
+        """
+        Updates llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
+        """
+
+        if not llm and not llm_config:
+            Logger(**self._logger_config, filename=self.key).log(level="error", message="Missing llm or llm_config values to update", color="red")
+            pass
+
+        self.llm = llm
+        if llm_config:
+            if self.llm_config:
+                self.llm_config.update(llm_config)
+            else:
+                self.llm_config = llm_config
+
+        return self.set_up_llm()
+
+
     def update(self, **kwargs) -> Self:
         """
         Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
@@ -480,7 +474,7 @@ class Agent(BaseModel):
         return self


-    def start(self, context: Any = None, tool_res_as_final: bool = False) -> Any | None:
+    def start(self, context: Any = None, tool_res_as_final: bool = False, image: str = None, file: str = None, audio: str = None) -> Any | None:
         """
         Defines and executes a task when it is not given and returns TaskOutput object.
         """
@@ -498,19 +492,20 @@ class Agent(BaseModel):
             description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
             pydantic_output=Output,
             tool_res_as_final=tool_res_as_final,
+            image=image, #REFINEME - query memory/knowledge or self create
+            file=file,
+            audio=audio,
         )
         res = task.execute(agent=self, context=context)
         return res


     def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
-        """
-        Format a task prompt, adding context from knowledge and memory (if given), and invoke LLM.
-        """
+        """Handling task execution."""

         from versionhq.task.model import Task
         from versionhq.tool.rag_tool import RagTool
-        from versionhq.
+        from versionhq._prompt.model import Prompt

         task: InstanceOf[Task] = task
         all_tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
@@ -520,41 +515,12 @@ class Agent(BaseModel):
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()

-
-
-        if self._knowledge:
-            agent_knowledge = self._knowledge.query(query=[task_prompt,], limit=5)
-            if agent_knowledge:
-                agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
-                if agent_knowledge_context:
-                    task_prompt += agent_knowledge_context
-
-        if rag_tools:
-            for item in rag_tools:
-                rag_tool_context = item.run(agent=self, query=task.description)
-                if rag_tool_context:
-                    task_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
-
-        if self.with_memory == True:
-            contextual_memory = ContextualMemory(
-                memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
-            )
-            context_str = task._draft_context_prompt(context=context)
-            query = f"{task.description} {context_str}".strip()
-            memory = contextual_memory.build_context_for_task(query=query)
-            if memory.strip() != "":
-                task_prompt += memory.strip()
-
-        ## comment out for now
-        # if self.networks and self.networks._train:
-        #     task_prompt = self._training_handler(task_prompt=task_prompt)
-        # else:
-        #     task_prompt = self._use_trained_data(task_prompt=task_prompt)
+        user_prompt, dev_prompt, messages = Prompt(task=task, agent=self, context=context).format_core(rag_tools=rag_tools)

         try:
             self._times_executed += 1
             raw_response = self._invoke(
-
+                messages=messages,
                 response_format=task._structure_response_format(model_provider=self.llm.provider),
                 tools=tools,
                 tool_res_as_final=task.tool_res_as_final,
@@ -564,7 +530,7 @@ class Agent(BaseModel):
         except Exception as e:
             self._times_executed += 1
             Logger(**self._logger_config, filename=self.key).log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
-            raw_response = self.execute_task(task, context, task_tools)
+            user_prompt, dev_prompt, raw_response = self.execute_task(task, context, task_tools)

             if self._times_executed > self.max_retry_limit:
                 Logger(**self._logger_config, filename=self.key).log(level="error", message=f"Max retry limit has exceeded.", color="red")
@@ -573,7 +539,7 @@ class Agent(BaseModel):
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller.stop_rpm_counter()

-        return raw_response
+        return user_prompt, dev_prompt, raw_response


     @property