versionhq 1.2.4.1__py3-none-any.whl → 1.2.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +3 -2
- versionhq/_prompt/auto_feedback.py +103 -0
- versionhq/_prompt/constants.py +30 -0
- versionhq/_prompt/model.py +134 -63
- versionhq/agent/model.py +25 -77
- versionhq/agent_network/formation.py +6 -5
- versionhq/task/model.py +92 -145
- versionhq/task_graph/draft.py +3 -13
- versionhq/task_graph/model.py +90 -34
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/METADATA +3 -8
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/RECORD +14 -12
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/WHEEL +1 -1
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/LICENSE +0 -0
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -17,7 +17,7 @@ from versionhq.clients.workflow.model import MessagingWorkflow, MessagingCompone
 from versionhq.knowledge.model import Knowledge, KnowledgeStorage
 from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
 from versionhq.knowledge.source_docling import DoclingSource
-from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType
+from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType, ReformTriggerEvent
 from versionhq.task.model import Task, TaskOutput, ResponseField, TaskExecutionType
 from versionhq.task.evaluation import Evaluation, EvaluationItem
 from versionhq.tool.model import Tool, ToolSet
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow


-__version__ = "1.2.4.1"
+__version__ = "1.2.4.2"
 __all__ = [
     "Agent",

@@ -70,6 +70,7 @@ __all__ = [
     "DependencyType",
     "Condition",
     "ConditionType",
+    "ReformTriggerEvent",

     "Task",
     "TaskOutput",
versionhq/_prompt/auto_feedback.py
ADDED
@@ -0,0 +1,103 @@
+from typing import List, Optional, Dict
+from typing_extensions import Self
+
+from pydantic import InstanceOf, Field
+
+from versionhq.agent.model import Agent
+from versionhq.task.model import Task
+from versionhq.task_graph.model import TaskGraph, Node, DependencyType, ReformTriggerEvent
+from versionhq._prompt.model import Prompt
+from versionhq._prompt.constants import REFLECT, INTEGRATE, parameter_sets
+
+
+class PromptFeedbackGraph(TaskGraph):
+    """A Pydantic class to handle auto prompt feedback cycle."""
+
+    _times_iteration: int = 0
+    user_prompts: Optional[Dict[str, str]] = Field(default_factory=dict) # { "0": "...", "1": "..."}
+    dev_prompts: Optional[Dict[str, str]] = Field(default_factory=dict)
+    prompts: Optional[Dict[str, InstanceOf[Prompt]]] = Field(default_factory=dict)
+
+
+    def __init__(self, prompt: InstanceOf[Prompt] = None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        if prompt:
+            user_prompt, dev_prompt, _ = prompt.format_core()
+            self.prompts = { self.key: prompt }
+            self.user_prompts = { self.key: user_prompt }
+            self.dev_prompts = { self.key: dev_prompt }
+
+
+    def _fetch_latest_prompt(self) -> InstanceOf[Prompt] | None:
+        return self.prompts[self.key] if self.key in self.prompts else None
+
+
+    def _generate_agents(self) -> List[Agent] | None:
+        agents = []
+        prompt = self._fetch_latest_prompt()
+
+        if not prompt:
+            return None
+
+        agent = prompt.agent
+        agent_params = agent.model_dump(exclude={"id", "llm", "llm_config", "self_learning"})
+        for params in parameter_sets:
+            agent = Agent(**agent_params, llm=agent.llm.model, llm_config={**params}, self_learning=True)
+            agents.append(agent)
+        return agents
+
+
+    def _reflect(self, original_response: str) -> Task:
+        description = REFLECT.format(original_prompt=self.original_prompt, original_response=original_response)
+        return Task(description=description)
+
+
+    def set_up_graph(self, **attributes) -> Self:
+        """Sets up a TaskGraph object with nodes and edges."""
+
+        prompt = self._fetch_latest_prompt()
+        base_task = prompt.task if prompt else None
+        base_agent = prompt.agent if prompt else None
+
+        if not base_task or not base_agent:
+            return None
+
+        agents = self._generate_agents()
+        if not agents:
+            return None
+
+        self.concl_template = base_task.pydantic_output if base_task.pydantic_output else base_task.response_fields if base_task.response_fields else None
+        base_agent.callbacks.append(self._reflect)
+        init_node = Node(task=base_task, assigned_to=base_agent)
+        self.add_node(init_node)
+
+        final_task = Task(description=INTEGRATE.format(original_prompt=self.original_prompt, responses=""))
+        final_node = Node(task=final_task, agent=base_agent)
+        self.add_node(node=final_node)
+
+        for agent in agents:
+            node = Node(task=base_task, assigned_to=agent)
+            self.add_node(node=node)
+            self.add_dependency(source=init_node.identifier, target=node.identifier, dependency_type=DependencyType.FINISH_TO_START, required=True)
+            self.add_dependency(source=node.identifier, target=final_node.identifier, dependency_type=DependencyType.FINISH_TO_START, required=True)
+
+        if attributes:
+            for k, v in attributes.items():
+                if hasattr(self, k):
+                    setattr(self, k, v)
+
+        return self
+
+    @property
+    def index(self) -> str:
+        """Returns an index to add new item."""
+        return str(len([k for k in self.user_prompts.keys()]))
+
+    @property
+    def original_prompt(self) -> str:
+        return str(self.user_prompts["0"]) + str(self.dev_prompts["0"])
+
+    @property
+    def key(self):
+        return str(self._times_iteration)
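For orientation, a minimal usage sketch of the new auto feedback cycle. This is not part of the diff; the task and agent values are made up for illustration, and only class and method names shown in the file above are assumed.

    from versionhq.agent.model import Agent
    from versionhq.task.model import Task
    from versionhq._prompt.model import Prompt
    from versionhq._prompt.auto_feedback import PromptFeedbackGraph

    # Hypothetical objects purely for illustration.
    agent = Agent(role="Researcher")
    task = Task(description="Summarize the key changes in this release.")

    # The graph captures the original prompt, then fans the task out to
    # agent variants built from parameter_sets and merges their answers.
    graph = PromptFeedbackGraph(prompt=Prompt(task=task, agent=agent, context=None))
    graph = graph.set_up_graph()  # returns None if the prompt lacks a task or an agent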
versionhq/_prompt/constants.py
ADDED
@@ -0,0 +1,30 @@
+REFLECT = "Here is the orignal prompt: {original_prompt}\nHere is the original response: {original_response}\nAnalyze the original prompt and repsonse, check for any pontential issue, and create an improved response."
+
+INTEGRATE = "Here is the original prompt: {original_prompt}\nHere are responses: {responses}. Help integrate them as a single response."
+
+parameter_sets = [
+    {
+        "temperature": 0.2,
+        "top_p": 0.5,
+        "max_tokens": 5000,
+        "frequency_penalty": 0.5,
+        "presence_penalty": 0.5,
+        "stop": ["\n\n", "###"],
+    },
+    {
+        "temperature": 0.7,
+        "top_p": 0.8,
+        "max_tokens": 8000,
+        "frequency_penalty": 0.3,
+        "presence_penalty": 0.3,
+        "stop": ["\n\n"],
+    },
+    {
+        "temperature": 1.0,
+        "top_p": 0.95,
+        "max_tokens": 12000,
+        "frequency_penalty": 0.0,
+        "presence_penalty": 0.0,
+        "stop": [],
+    }
+]
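A quick sketch of how these constants feed the cycle above. Illustrative only; the placeholder strings are made up, and the comment describes the behavior shown in auto_feedback.py.

    from versionhq._prompt.constants import REFLECT, INTEGRATE, parameter_sets

    reflect_task = REFLECT.format(original_prompt="<prompt>", original_response="<response>")
    merge_task = INTEGRATE.format(original_prompt="<prompt>", responses="<response A>, <response B>")

    # Each dict in parameter_sets is passed as llm_config to a cloned agent,
    # yielding a conservative, a balanced, and an exploratory variant of the same model.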
versionhq/_prompt/model.py
CHANGED
@@ -1,34 +1,50 @@
-
+
+from typing import Dict, List, Tuple, Any
 from textwrap import dedent

+from pydantic import InstanceOf
+
+from versionhq._utils import is_valid_url
+

 class Prompt:
     """A class to format, store, and manage a prompt."""

     task: Any = None
     agent: Any = None
-    context:
+    context: Any = None


-    def __init__(self, task, agent):
-        from versionhq.task.model import Task
+    def __init__(self, task, agent, context):
         from versionhq.agent.model import Agent
+        from versionhq.task.model import Task

         self.task = task if isinstance(task, Task) else Task(description=str(task))
         self.agent = agent if isinstance(agent, Agent) else Agent(role=str(agent))
+        self.context = context


     def _draft_output_prompt(self) -> str:
+        """Drafts prompt for output either from `pydantic_output` or `response_fields`"""
+
+        from versionhq.llm.model import DEFAULT_MODEL_PROVIDER_NAME
+
         output_prompt = ""
+        model_provider = self.agent.llm.provider if self.agent else DEFAULT_MODEL_PROVIDER_NAME

         if self.task.pydantic_output:
-            output_prompt =
-
-
+            output_prompt, output_formats_to_follow = "", dict()
+            response_format = str(self.task._structure_response_format(model_provider=model_provider))
+            for k, v in self.task.pydantic_output.model_fields.items():
+                output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"

+            output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+Response format: {response_format}
+Ref. Output image: {output_formats_to_follow}
+"""
         elif self.task.response_fields:
             output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self.task._structure_response_format(model_provider=
+            response_format = str(self.task._structure_response_format(model_provider=model_provider))
             for item in self.task.response_fields:
                 if item:
                     output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
@@ -38,43 +54,42 @@ Response format: {response_format}
 Ref. Output image: {output_formats_to_follow}
 """
         else:
-            output_prompt = "You MUST
+            output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."

         return dedent(output_prompt)


-    def _draft_context_prompt(self) -> str:
+    def _draft_context_prompt(self, context: Any = None) -> str:
         """
         Create a context prompt from the given context in any format: a task object, task output object, list, dict.
         """
         from versionhq.task.model import Task, TaskOutput

         context_to_add = None
-        if not
-
-            return
+        if not context:
+            return context_to_add

-        match
+        match context:
             case str():
-                context_to_add =
+                context_to_add = context

             case Task():
-                if not
-                    res =
+                if not context.output:
+                    res = context.execute()
                     context_to_add = res._to_context_prompt()

                 else:
-                    context_to_add =
+                    context_to_add = context.output.raw

             case TaskOutput():
-                context_to_add =
+                context_to_add = context._to_context_prompt()


             case dict():
-                context_to_add = str(
+                context_to_add = str(context)

             case list():
-                res = ", ".join([self._draft_context_prompt(context=item) for item in
+                res = ", ".join([self._draft_context_prompt(context=item) for item in context])
                 context_to_add = res

             case _:
@@ -83,65 +98,121 @@ Ref. Output image: {output_formats_to_follow}
         return dedent(context_to_add)


-    def
+    def _format_content_prompt(self) -> Dict[str, str]:
+        """Formats content (file, image, audio) prompt message."""
+
+        import base64
+        from pathlib import Path
+
+        content_messages = {}
+
+        if self.task.image:
+            with open(self.task.image, "rb") as file:
+                content = file.read()
+                if content:
+                    encoded_file = base64.b64encode(content).decode("utf-8")
+                    img_url = f"data:image/jpeg;base64,{encoded_file}"
+                    content_messages.update({ "type": "image_url", "image_url": { "url": img_url }})
+
+        if self.task.file:
+            if is_valid_url(self.task.file):
+                content_messages.update({ "type": "image_url", "image_url": self.file })
+
+        if self.task.audio and self.agent.llm.provider == "gemini":
+            audio_bytes = Path(self.task.audio).read_bytes()
+            encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
+            content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
+
+        return content_messages
+
+
+    def _find_rag_tools(self) -> List[InstanceOf[Any]]:
+        """Find RAG tools from the agent and task object."""
+
+        from versionhq.tool.rag_tool import RagTool
+
+        tools = []
+        if self.task.tools:
+            [tools.append(item) for item in self.task.tools if isinstance(item, RagTool)]
+
+        if self.agent.tools and self.task.can_use_agent_tools:
+            [tools.append(item) for item in self.agent.tools if isinstance(item, RagTool)]
+
+        return tools
+
+
+    def draft_user_prompt(self) -> str:
+        """Draft task prompts from its description and context."""
+
         output_prompt = self._draft_output_prompt()
         task_slices = [self.task.description, output_prompt, ]

         if self.context:
-            context_prompt = self._draft_context_prompt()
+            context_prompt = self._draft_context_prompt(context=self.context)
             task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")

         return "\n".join(task_slices)


-    def
-        """Formats
+    def format_core(self, rag_tools: List[Any] = None) -> Tuple[str, str, List[Dict[str, str]]]:
+        """Formats prompt messages sent to the LLM, then returns task prompt, developer prompt, and messages."""

-        from versionhq._utils import
+        from versionhq.knowledge._utils import extract_knowledge_context
+        from versionhq.memory.contextual_memory import ContextualMemory

-
+        user_prompt = self.draft_user_prompt()
+        rag_tools = rag_tools if rag_tools else self._find_rag_tools()

-        if self.
-
-
-
-
+        if self.agent._knowledge:
+            agent_knowledge = self.agent._knowledge.query(query=[user_prompt,], limit=5)
+            if agent_knowledge:
+                agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
+                if agent_knowledge_context:
+                    user_prompt += agent_knowledge_context

-        if
-
-
-
-
+        if rag_tools:
+            for item in rag_tools:
+                rag_tool_context = item.run(agent=self.agent, query=self.task.description)
+                if rag_tool_context:
+                    user_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)

-        if self.
-
-
+        if self.agent.with_memory == True:
+            contextual_memory = ContextualMemory(
+                memory_config=self.agent.memory_config, stm=self.agent.short_term_memory, ltm=self.agent.long_term_memory, um=self.agent.user_memory
+            )
+            context_str = self._draft_context_prompt(context=self.context)
+            query = f"{self.task.description} {context_str}".strip()
+            memory = contextual_memory.build_context_for_task(query=query)
+            if memory.strip() != "":
+                user_prompt += memory.strip()

-            audio_bytes = Path(self.audio_file_path).read_bytes()
-            encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
-            content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})

-
+        ## comment out - training
+        # if self.agent.networks and self.agent.networks._train:
+        #     user_prompt = self.agent._training_handler(user_prompt=user_prompt)
+        # else:
+        #     user_prompt = self.agent._use_trained_data(user_prompt=user_prompt)
+

-    @property
-    def messages(self) -> List[Dict[str, str]]:
-        user_prompt = self._draft_user_prompt()
         content_prompt = self._format_content_prompt()

         messages = []
-
-
-
-
-
-
-
-
-
-
-
-
-
-            messages.append({ "role": "
-
-
+        if content_prompt:
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": user_prompt
+                        },
+                        content_prompt,
+                    ]
+                })
+        else:
+            messages.append({ "role": "user", "content": user_prompt })
+
+        if self.agent.use_developer_prompt:
+            messages.append({ "role": "developer", "content": self.agent.backstory })
+
+        return user_prompt, self.agent.backstory if self.agent.use_developer_prompt else None, messages
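The net effect of this file is that prompt assembly now lives in one place. A hedged sketch of the new call path follows; the names come from the diff above, while the surrounding task, agent, and context objects are assumed.

    from versionhq._prompt.model import Prompt

    prompt = Prompt(task=task, agent=agent, context="prior findings to keep in mind")
    user_prompt, dev_prompt, messages = prompt.format_core()

    # messages is the chat payload handed to the LLM; dev_prompt is the agent
    # backstory when agent.use_developer_prompt is set, otherwise None.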
versionhq/agent/model.py
CHANGED
@@ -10,7 +10,6 @@ from pydantic_core import PydanticCustomError
 from versionhq.agent.rpm_controller import RPMController
 from versionhq.tool.model import Tool, ToolSet, BaseTool
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
-from versionhq.memory.contextual_memory import ContextualMemory
 from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
 from versionhq._utils import Logger, process_config, is_valid_url

@@ -32,6 +31,7 @@ class Agent(BaseModel):
     _logger_config: Dict[str, Any] = PrivateAttr(default=dict(verbose=True, info_file_save=True))

     api_key: Optional[str] = Field(default=None)
+    self_learn: bool = Field(default=False)
     config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
@@ -337,25 +337,6 @@
         return llm


-    def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
-        """
-        Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
-        """
-
-        if not llm and not llm_config:
-            Logger(**self._logger_config, filename=self.key).log(level="error", message="Missing llm or llm_config values to update", color="red")
-            pass
-
-        self.llm = llm
-        if llm_config:
-            if self.llm_config:
-                self.llm_config.update(llm_config)
-            else:
-                self.llm_config = llm_config
-
-        return self.set_up_llm()
-
-
     def _train(self) -> Self:
         """
         Fine-tuned the base model using OpenAI train framework.
@@ -422,6 +403,25 @@
         raise ValueError("Invalid response from LLM call - None or empty.")


+    def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
+        """
+        Updates llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
+        """
+
+        if not llm and not llm_config:
+            Logger(**self._logger_config, filename=self.key).log(level="error", message="Missing llm or llm_config values to update", color="red")
+            pass
+
+        self.llm = llm
+        if llm_config:
+            if self.llm_config:
+                self.llm_config.update(llm_config)
+            else:
+                self.llm_config = llm_config
+
+        return self.set_up_llm()
+
+
     def update(self, **kwargs) -> Self:
         """
         Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
@@ -501,13 +501,11 @@


     def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
-        """
-        Format a task prompt, adding context from knowledge and memory (if given), and invoke LLM.
-        """
+        """Handling task execution."""

         from versionhq.task.model import Task
         from versionhq.tool.rag_tool import RagTool
-        from versionhq.
+        from versionhq._prompt.model import Prompt

         task: InstanceOf[Task] = task
         all_tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
@@ -517,57 +515,7 @@
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()

-        user_prompt = task
-
-        if self._knowledge:
-            agent_knowledge = self._knowledge.query(query=[user_prompt,], limit=5)
-            if agent_knowledge:
-                agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
-                if agent_knowledge_context:
-                    user_prompt += agent_knowledge_context
-
-        if rag_tools:
-            for item in rag_tools:
-                rag_tool_context = item.run(agent=self, query=task.description)
-                if rag_tool_context:
-                    user_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
-
-        if self.with_memory == True:
-            contextual_memory = ContextualMemory(
-                memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
-            )
-            context_str = task._draft_context_prompt(context=context)
-            query = f"{task.description} {context_str}".strip()
-            memory = contextual_memory.build_context_for_task(query=query)
-            if memory.strip() != "":
-                user_prompt += memory.strip()
-
-        ## comment out for now
-        # if self.networks and self.networks._train:
-        #     user_prompt = self._training_handler(user_prompt=user_prompt)
-        # else:
-        #     user_prompt = self._use_trained_data(user_prompt=user_prompt)
-
-        content_prompt = task._format_content_prompt()
-
-        messages = []
-        if content_prompt:
-            messages.append(
-                {
-                    "role": "user",
-                    "content": [
-                        {
-                            "type": "text",
-                            "text": user_prompt
-                        },
-                        content_prompt,
-                    ]
-                })
-        else:
-            messages.append({ "role": "user", "content": user_prompt })
-
-        if self.use_developer_prompt:
-            messages.append({ "role": "developer", "content": self.backstory })
+        user_prompt, dev_prompt, messages = Prompt(task=task, agent=self, context=context).format_core(rag_tools=rag_tools)

         try:
             self._times_executed += 1
@@ -582,7 +530,7 @@
         except Exception as e:
             self._times_executed += 1
             Logger(**self._logger_config, filename=self.key).log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
-            raw_response = self.execute_task(task, context, task_tools)
+            user_prompt, dev_prompt, raw_response = self.execute_task(task, context, task_tools)

             if self._times_executed > self.max_retry_limit:
                 Logger(**self._logger_config, filename=self.key).log(level="error", message=f"Max retry limit has exceeded.", color="red")
@@ -591,7 +539,7 @@
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller.stop_rpm_counter()

-        return raw_response
+        return user_prompt, dev_prompt, raw_response


     @property
versionhq/agent_network/formation.py
CHANGED
@@ -93,7 +93,7 @@ def form_agent_network
 
     network_tasks = []
     members = []
-    leader = str(res.pydantic.leader_agent) if res.pydantic else str(res.json_dict["leader_agent"])
+    leader = str(res.pydantic.leader_agent) if res.pydantic and hasattr(res.pydantic, "leader_agent") else str(res.json_dict["leader_agent"]) if "leader_agent" in res.json_dict else None

     agent_roles = res.pydantic.agent_roles if res.pydantic else res.json_dict["agent_roles"]
     created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles]
@@ -139,13 +139,13 @@

     if len(created_tasks) <= len(created_agents):
         for i in range(len(created_tasks)):
-            is_manager = bool(created_agents[i].role.lower() == leader.lower())
+            is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
             member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
             members.append(member)

         for i in range(len(created_tasks), len(created_agents)):
             try:
-                is_manager = bool(created_agents[i].role.lower() == leader.lower())
+                is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
                 member_w_o_task = Member(agent=created_agents[i], is_manager=is_manager)
                 members.append(member_w_o_task)
             except:
@@ -153,7 +153,7 @@

     elif len(created_tasks) > len(created_agents):
         for i in range(len(created_agents)):
-            is_manager = bool(created_agents[i].role.lower() == leader.lower())
+            is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
             member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
             members.append(member)

@@ -161,7 +161,8 @@


     if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
-
+        role = leader if leader else "Leader"
+        manager = Member(agent=Agent(role=role), is_manager=True)
         members.append(manager)

     members.sort(key=lambda x: x.is_manager == False)