versionhq 1.2.4.1__py3-none-any.whl → 1.2.4.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the changes between those versions as they appear in the supported public registries.
versionhq/agent/model.py CHANGED
@@ -10,9 +10,8 @@ from pydantic_core import PydanticCustomError
  from versionhq.agent.rpm_controller import RPMController
  from versionhq.tool.model import Tool, ToolSet, BaseTool
  from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
- from versionhq.memory.contextual_memory import ContextualMemory
  from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
- from versionhq._utils import Logger, process_config, is_valid_url
+ from versionhq._utils import Logger, process_config, is_valid_url, ErrorType


  load_dotenv(override=True)
@@ -32,6 +31,7 @@ class Agent(BaseModel):
      _logger_config: Dict[str, Any] = PrivateAttr(default=dict(verbose=True, info_file_save=True))

      api_key: Optional[str] = Field(default=None)
+     self_learn: bool = Field(default=False)
      config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")

      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
@@ -337,25 +337,6 @@
          return llm


-     def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
-         """
-         Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
-         """
-
-         if not llm and not llm_config:
-             Logger(**self._logger_config, filename=self.key).log(level="error", message="Missing llm or llm_config values to update", color="red")
-             pass
-
-         self.llm = llm
-         if llm_config:
-             if self.llm_config:
-                 self.llm_config.update(llm_config)
-             else:
-                 self.llm_config = llm_config
-
-         return self.set_up_llm()
-
-
      def _train(self) -> Self:
          """
          Fine-tuned the base model using OpenAI train framework.
@@ -392,16 +373,17 @@

              if tool_res_as_final:
                  raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
-                 task._tokens = self.func_calling_llm._tokens
+                 task._usage.record_token_usage(token_usages=self.func_calling_llm._usages)
              else:
                  raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
-                 task._tokens = self.llm._tokens
+                 task._usage.record_token_usage(token_usages=self.llm._usages)

              task_execution_counter += 1
              Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Agent response: {raw_response}", color="green")
              return raw_response

          except Exception as e:
+             task._usage.record_errors(type=ErrorType.API)
              Logger(**self._logger_config, filename=self.key).log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")

          while not raw_response and task_execution_counter <= self.max_retry_limit:
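Token accounting moves from a single integer (`task._tokens` copied from `LLM._tokens`) to a `task._usage` object that is fed the per-call usage payloads collected in `LLM._usages` and that also counts API errors via `ErrorType`. The `_usage` implementation itself is not part of this diff; the sketch below is only a hypothetical illustration of how such a tracker could aggregate those payloads, with method and field names taken from the call sites visible in these hunks.

```python
# Hypothetical sketch of the usage tracker implied by the call sites above
# (record_token_usage, record_errors, successful_requests, total_tokens,
# latency). The real versionhq class is not shown in this diff and may differ.
from enum import Enum
from typing import Dict, List


class ErrorType(str, Enum):
    API = "api"  # the only member referenced in this diff


class UsageMetrics:
    def __init__(self) -> None:
        self.total_tokens: int = 0
        self.successful_requests: int = 0
        self.latency: float = 0.0
        self.errors: Dict[str, int] = {}

    def record_token_usage(self, token_usages: List[Dict[str, int]]) -> None:
        # Each item mirrors a litellm `usage` payload appended to LLM._usages.
        for usage in token_usages:
            self.total_tokens += int(usage.get("total_tokens", 0))

    def record_errors(self, type: ErrorType) -> None:
        self.errors[type.value] = self.errors.get(type.value, 0) + 1
```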
@@ -422,6 +404,25 @@
              raise ValueError("Invalid response from LLM call - None or empty.")


+     def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
+         """
+         Updates llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
+         """
+
+         if not llm and not llm_config:
+             Logger(**self._logger_config, filename=self.key).log(level="error", message="Missing llm or llm_config values to update", color="red")
+             pass
+
+         self.llm = llm
+         if llm_config:
+             if self.llm_config:
+                 self.llm_config.update(llm_config)
+             else:
+                 self.llm_config = llm_config
+
+         return self.set_up_llm()
+
+
      def update(self, **kwargs) -> Self:
          """
          Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
@@ -501,13 +502,11 @@


      def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
-         """
-         Format a task prompt, adding context from knowledge and memory (if given), and invoke LLM.
-         """
+         """Handling task execution."""

          from versionhq.task.model import Task
          from versionhq.tool.rag_tool import RagTool
-         from versionhq.knowledge._utils import extract_knowledge_context
+         from versionhq._prompt.model import Prompt

          task: InstanceOf[Task] = task
          all_tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
@@ -517,57 +516,7 @@
          if self.max_rpm and self._rpm_controller:
              self._rpm_controller._reset_request_count()

-         user_prompt = task._user_prompt(model_provider=self.llm.provider, context=context)
-
-         if self._knowledge:
-             agent_knowledge = self._knowledge.query(query=[user_prompt,], limit=5)
-             if agent_knowledge:
-                 agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
-                 if agent_knowledge_context:
-                     user_prompt += agent_knowledge_context
-
-         if rag_tools:
-             for item in rag_tools:
-                 rag_tool_context = item.run(agent=self, query=task.description)
-                 if rag_tool_context:
-                     user_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
-
-         if self.with_memory == True:
-             contextual_memory = ContextualMemory(
-                 memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
-             )
-             context_str = task._draft_context_prompt(context=context)
-             query = f"{task.description} {context_str}".strip()
-             memory = contextual_memory.build_context_for_task(query=query)
-             if memory.strip() != "":
-                 user_prompt += memory.strip()
-
-         ## comment out for now
-         # if self.networks and self.networks._train:
-         #     user_prompt = self._training_handler(user_prompt=user_prompt)
-         # else:
-         #     user_prompt = self._use_trained_data(user_prompt=user_prompt)
-
-         content_prompt = task._format_content_prompt()
-
-         messages = []
-         if content_prompt:
-             messages.append(
-                 {
-                     "role": "user",
-                     "content": [
-                         {
-                             "type": "text",
-                             "text": user_prompt
-                         },
-                         content_prompt,
-                     ]
-                 })
-         else:
-             messages.append({ "role": "user", "content": user_prompt })
-
-         if self.use_developer_prompt:
-             messages.append({ "role": "developer", "content": self.backstory })
+         user_prompt, dev_prompt, messages = Prompt(task=task, agent=self, context=context).format_core(rag_tools=rag_tools)

          try:
              self._times_executed += 1
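The inline prompt assembly that used to live in `execute_task` (knowledge query, RAG tool output, contextual memory, multimodal content, developer prompt) is consolidated into `versionhq._prompt.model.Prompt.format_core`, which returns the user prompt, the developer prompt, and the message list in one call. The class itself is not shown in this diff; the condensed sketch below only restates part of the removed inline logic as a method and should be read as an assumption about `format_core`, not its actual implementation.

```python
# Condensed, assumed sketch of Prompt.format_core, reconstructed from the
# inline code removed above; the real versionhq._prompt.model.Prompt may differ.
from typing import Any, Dict, List, Optional, Tuple


class Prompt:
    def __init__(self, task, agent, context: Optional[Any] = None) -> None:
        self.task, self.agent, self.context = task, agent, context

    def format_core(self, rag_tools: Optional[List[Any]] = None) -> Tuple[str, str, List[Dict[str, Any]]]:
        # Base user prompt from the task, enriched with RAG tool output
        # (knowledge and memory enrichment omitted here for brevity).
        user_prompt = self.task._user_prompt(model_provider=self.agent.llm.provider, context=self.context)
        for tool in rag_tools or []:
            res = tool.run(agent=self.agent, query=self.task.description)
            if res:
                user_prompt += ",".join(res) if isinstance(res, list) else str(res)

        # Developer prompt comes from the agent's backstory when enabled.
        dev_prompt = self.agent.backstory if self.agent.use_developer_prompt else ""

        messages: List[Dict[str, Any]] = [{"role": "user", "content": user_prompt}]
        if dev_prompt:
            messages.append({"role": "developer", "content": dev_prompt})
        return user_prompt, dev_prompt, messages
```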
@@ -578,11 +527,13 @@
                  tool_res_as_final=task.tool_res_as_final,
                  task=task
              )
+             if raw_response:
+                 task._usage.successful_requests += 1

          except Exception as e:
              self._times_executed += 1
              Logger(**self._logger_config, filename=self.key).log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
-             raw_response = self.execute_task(task, context, task_tools)
+             user_prompt, dev_prompt, raw_response = self.execute_task(task, context, task_tools)

          if self._times_executed > self.max_retry_limit:
              Logger(**self._logger_config, filename=self.key).log(level="error", message=f"Max retry limit has exceeded.", color="red")
@@ -591,7 +542,7 @@
          if self.max_rpm and self._rpm_controller:
              self._rpm_controller.stop_rpm_counter()

-         return raw_response
+         return user_prompt, dev_prompt, raw_response


      @property
@@ -93,10 +93,11 @@ def form_agent_network(

      network_tasks = []
      members = []
-     leader = str(res.pydantic.leader_agent) if res.pydantic else str(res.json_dict["leader_agent"])
-
-     agent_roles = res.pydantic.agent_roles if res.pydantic else res.json_dict["agent_roles"]
-     created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles]
+     leader = res._fetch_value_of(key="leader_agent")
+     agent_roles = res._fetch_value_of(key="agent_roles")
+     created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles] if agent_roles else []
+     task_descriptions = res._fetch_value_of(key="task_descriptions")
+     task_outcomes = res._fetch_value_of(key="task_outcomes")

      if agents:
          for i, agent in enumerate(created_agents):
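`form_agent_network` no longer branches on `res.pydantic` versus `res.json_dict`; it reads each key through `res._fetch_value_of`, which presumably checks the structured output first, falls back to the JSON dict, and returns `None` when the key is missing, which is why the hunks below gain `if not leader` and `if agent_roles` guards. A hedged sketch of such a helper:

```python
# Assumed behavior of TaskOutput._fetch_value_of, inferred from the call sites
# in this hunk; the actual versionhq implementation may differ.
from typing import Any, Optional


def _fetch_value_of(output, key: str) -> Optional[Any]:
    if getattr(output, "pydantic", None) is not None and hasattr(output.pydantic, key):
        return getattr(output.pydantic, key)
    json_dict = getattr(output, "json_dict", None) or {}
    return json_dict.get(key)  # None when the key is absent
```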
@@ -108,9 +109,9 @@

      created_tasks = []

-     if res.pydantic:
-         for i, item in enumerate(res.pydantic.task_outcomes):
-             if len(res.pydantic.task_descriptions) > i and res.pydantic.task_descriptions[i]:
+     if task_outcomes:
+         for i, item in enumerate(task_outcomes):
+             if len(task_descriptions) > i and task_descriptions[i]:
                  fields = {}
                  for ob in item:
                      try:
@@ -119,33 +120,18 @@
                      except:
                          pass
                  output = create_model("Output", **fields) if fields else None
-                 _task = Task(description=res.pydantic.task_descriptions[i], pydantic_output=output)
+                 _task = Task(description=task_descriptions[i], pydantic_output=output)
                  created_tasks.append(_task)

-     elif res.json_dict:
-         for i, item in enumerate(res["task_outcomes"]):
-             if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
-                 fields = {}
-                 for ob in item:
-                     try:
-                         field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
-                         fields[field_name] = (str, Field(default=None))
-                     except:
-                         pass
-                 output = create_model("Output", **fields) if fields else None
-                 _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
-                 created_tasks.append(_task)
-
-
      if len(created_tasks) <= len(created_agents):
          for i in range(len(created_tasks)):
-             is_manager = bool(created_agents[i].role.lower() == leader.lower())
+             is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
              member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
              members.append(member)

          for i in range(len(created_tasks), len(created_agents)):
              try:
-                 is_manager = bool(created_agents[i].role.lower() == leader.lower())
+                 is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
                  member_w_o_task = Member(agent=created_agents[i], is_manager=is_manager)
                  members.append(member_w_o_task)
              except:
@@ -153,15 +139,15 @@

      elif len(created_tasks) > len(created_agents):
          for i in range(len(created_agents)):
-             is_manager = bool(created_agents[i].role.lower() == leader.lower())
+             is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
              member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
              members.append(member)

      network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])

-
      if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
-         manager = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
+         role = leader if leader else "Leader"
+         manager = Member(agent=Agent(role=role), is_manager=True)
          members.append(manager)

      members.sort(key=lambda x: x.is_manager == False)
@@ -90,7 +90,6 @@ class AgentNetwork(BaseModel):

      cache: bool = Field(default=True)
      execution_logs: List[Dict[str, Any]] = Field(default_factory=list, description="list of execution logs of the tasks handled by members")
-     # usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")


      def __name__(self) -> str:
versionhq/llm/model.py CHANGED
@@ -69,7 +69,7 @@ class LLM(BaseModel):

      _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
      _init_model_name: str = PrivateAttr(default=None)
-     _tokens: int = PrivateAttr(default=0) # aggregate number of tokens consumed
+     _usages: list[Dict[str, int]] = PrivateAttr(default_factory=list)

      model: str = Field(default=None)
      provider: Optional[str] = Field(default=None, description="model provider")
@@ -181,8 +181,6 @@
          """
          litellm.drop_params = True

-         self._tokens = 0
-
          if self.callbacks:
              self._set_callbacks(self.callbacks)

@@ -319,7 +317,7 @@
              if not tools:
                  params = self._create_valid_params(config=config)
                  res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
-                 self._tokens += int(res["usage"]["total_tokens"])
+                 self._usages.append(res["usage"])
                  return res["choices"][0]["message"]["content"]

              else:
@@ -384,12 +382,11 @@
              else:
                  pass

-
              if tool_res_as_final:
                  return tool_res
              else:
                  res = litellm.completion(model=self.model, messages=messages, **params, **cred)
-                 self._tokens += int(res["usage"]["total_tokens"])
+                 self._usages.append(res["usage"])
                  return res.choices[0].message.content


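On the LLM side, the single `_tokens` counter becomes `_usages`, a list that stores the raw `usage` payload from each `litellm.completion` response (typically `prompt_tokens`, `completion_tokens`, and `total_tokens`). Keeping the raw payloads lets callers such as `task._usage.record_token_usage` aggregate them later. A minimal illustration of that consumption pattern, using made-up numbers:

```python
# Minimal illustration of consuming the usage payloads collected in LLM._usages.
# The values are made up; the exact key set depends on the model provider, with
# total_tokens being the common denominator.
from typing import Dict, List

usages: List[Dict[str, int]] = [
    {"prompt_tokens": 812, "completion_tokens": 164, "total_tokens": 976},
    {"prompt_tokens": 97, "completion_tokens": 41, "total_tokens": 138},
]

total_tokens = sum(int(u.get("total_tokens", 0)) for u in usages)
print(total_tokens)  # 1114
```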
@@ -147,8 +147,8 @@ class TaskOutputStorageHandler:
              description=str(task.description),
              raw=str(task.output.raw),
              responsible_agents=str(task.processed_agents),
-             tokens=task.output._tokens,
-             latency=task.output.latency,
+             tokens=task._usage.total_tokens,
+             latency=task._usage.latency,
              score=task.output.aggregate_score if task.output.aggregate_score else "None",
          )
          self.storage.add(task=task, output=output_to_store, inputs=inputs)