versionhq 1.1.6.3__py3-none-any.whl → 1.1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/team/model.py CHANGED
@@ -56,39 +56,29 @@ class TaskHandlingProcess(str, Enum):
     """
     Class representing the different processes that can be used to tackle multiple tasks.
     """
-
    sequential = "sequential"
    hierarchical = "hierarchical"
    consensual = "consensual"


 class TeamOutput(BaseModel):
-    """Class that represents the result of a team."""
+    """
+    Store the outputs of the tasks handled by the team.
+    `json_dict` and `raw` store the overall output of the tasks handled by the team,
+    while `task_output_list` stores a TaskOutput instance for each task handled by the team members.
+    Note that `raw` and `json_dict` take precedence over `task_output_list` when the TeamOutput is referenced.
+    """

-    team_id: UUID4 = Field(
-        default_factory=uuid.uuid4,
-        frozen=True,
-        description="store the team ID that generate the TeamOutput",
-    )
-    raw: str = Field(default="", description="raw output")
-    pydantic: Optional[BaseModel] = Field(default=None, description="pydantic output")
-    json_dict: Optional[Dict[str, Any]] = Field(
-        default=None, description="JSON dict output"
-    )
-    task_output_list: list[TaskOutput] = Field(
-        default=list,
-        description="store output of all the tasks that the team has executed",
-    )
-    token_usage: UsageMetrics = Field(
-        default=dict, description="processed token summary"
-    )
+    team_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store the ID of the team that generated the TeamOutput")
+    raw: str = Field(default="", description="raw output of the lead task handled by the team leader")
+    pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the given pydantic model")
+    json_dict: Optional[Dict[str, Any]] = Field(default=None, description="`raw` converted to a dictionary")
+    task_output_list: list[TaskOutput] = Field(default_factory=list, description="store outputs of all the tasks that the team has executed")
+    token_usage: UsageMetrics = Field(default_factory=dict, description="processed token summary")

     def __str__(self):
-        return (
-            str(self.pydantic)
-            if self.pydantic
-            else str(self.json_dict) if self.json_dict else self.raw
-        )
+        return (str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw)
+

     def __getitem__(self, key):
         if self.pydantic and hasattr(self.pydantic, key):
@@ -98,6 +88,7 @@ class TeamOutput(BaseModel):
         else:
             raise KeyError(f"Key '{key}' not found in the team output.")

+
     @property
     def json(self) -> Optional[str]:
         if self.tasks_output[-1].output_format != TaskOutputFormat.JSON:
@@ -106,6 +97,7 @@ class TeamOutput(BaseModel):
             )
         return json.dumps(self.json_dict)

+
     def to_dict(self) -> Dict[str, Any]:
         """
         Convert json_output and pydantic_output to a dictionary.
@@ -116,15 +108,11 @@ class TeamOutput(BaseModel):
         elif self.pydantic:
             output_dict.update(self.pydantic.model_dump())
         else:
-            output_dict.update({"raw", self.raw})
+            output_dict.update({"output": self.raw})
         return output_dict

     def return_all_task_outputs(self) -> List[Dict[str, Any]]:
-        res = []
-        for output in self.task_output_list:
-            if output is not None:
-                res.append(output.to_dict())
-
+        res = [output.json_dict for output in self.task_output_list]
         return res

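For a concrete view of the fallback behavior above: a minimal sketch of the precedence rule that `TeamOutput.__str__` applies, extracted into a hypothetical standalone helper for illustration; it is not part of the package.

```python
from typing import Any, Dict, Optional

# Hypothetical mirror of TeamOutput.__str__: `pydantic` wins over
# `json_dict`, which wins over `raw`.
def team_output_str(pydantic: Optional[Any], json_dict: Optional[Dict[str, Any]], raw: str) -> str:
    return str(pydantic) if pydantic else str(json_dict) if json_dict else raw

assert team_output_str(None, {"summary": "done"}, "fallback") == "{'summary': 'done'}"
assert team_output_str(None, None, "fallback") == "fallback"
```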
@@ -149,71 +137,42 @@ class Team(BaseModel):

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     name: Optional[str] = Field(default=None)
-    members: List[TeamMember] = Field(
-        default_factory=list,
-        description="store agents' uuids and bool if it is manager",
-    )
+    members: List[TeamMember] = Field(default_factory=list, description="store agents' uuids and bool if it is manager")

     # work as a team
-    team_tasks: Optional[List[Task]] = Field(
-        default_factory=list, description="optional tasks for the team"
-    )
-    planning_llm: Optional[Any] = Field(
-        default=None,
-        description="llm to handle the planning of the team tasks (if any)",
-    )
-    function_calling_llm: Optional[Any] = Field(
-        default=None,
-        description="llm to execute func after all agent execution (if any)",
-    )
-    prompt_file: str = Field(
-        default="", description="path to the prompt json file to be used by the team."
-    )
+    team_tasks: Optional[List[Task]] = Field(default_factory=list, description="optional tasks for the team")
+    planning_llm: Optional[Any] = Field(default=None, description="llm to handle the planning of the team tasks (if any)")
+    function_calling_llm: Optional[Any] = Field(default=None, description="llm to execute func after all agent execution (if any)")
+    prompt_file: str = Field(default="", description="path to the prompt json file to be used by the team.")
     process: TaskHandlingProcess = Field(default=TaskHandlingProcess.sequential)

     # callbacks
-    before_kickoff_callbacks: List[
-        Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
-    ] = Field(
+    before_kickoff_callbacks: List[Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]] = Field(
         default_factory=list,
-        description="list of callback functions to be executed before the team kickoff. i.e., adjust inputs",
+        description="list of callback functions to be executed before the team kickoff. i.e., adjust inputs"
     )
     after_kickoff_callbacks: List[Callable[[TeamOutput], TeamOutput]] = Field(
         default_factory=list,
-        description="list of callback functions to be executed after the team kickoff. i.e., store the result in repo",
-    )
-    task_callback: Optional[Any] = Field(
-        default=None,
-        description="callback to be executed after each task for all agents execution",
-    )
-    step_callback: Optional[Any] = Field(
-        default=None,
-        description="callback to be executed after each step for all agents execution",
+        description="list of callback functions to be executed after the team kickoff. i.e., store the result in repo"
     )
+    task_callback: Optional[Any] = Field(default=None, description="callback to be executed after each task for all agents execution")
+    step_callback: Optional[Any] = Field(default=None, description="callback to be executed after each step for all agents execution")

     verbose: bool = Field(default=True)
     cache: bool = Field(default=True)
-    memory: bool = Field(
-        default=False,
-        description="whether the team should use memory to store memories of its execution",
-    )
-    execution_logs: List[Dict[str, Any]] = Field(
-        default=[], description="list of execution logs for tasks"
-    )
-    usage_metrics: Optional[UsageMetrics] = Field(
-        default=None, description="usage metrics for all the llm executions"
-    )
+    memory: bool = Field(default=False, description="whether the team should use memory to store memories of its execution")
+    execution_logs: List[Dict[str, Any]] = Field(default=[], description="list of execution logs for tasks")
+    usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")

     def __name__(self) -> str:
-        return self.name if self.name is not None else self.id
+        return self.name if self.name is not None else str(self.id)

     @property
     def key(self) -> str:
-        source = [member.agent.key for member in self.members] + [
-            task.key for task in self.tasks
-        ]
+        source = [str(member.agent.id) for member in self.members] + [str(task.id) for task in self.tasks]
         return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

+
     @property
     def manager_agent(self) -> Agent:
         manager_agent = [
@@ -221,6 +180,7 @@ class Team(BaseModel):
         ]
         return manager_agent[0] if len(manager_agent) > 0 else None

+
     @property
     def manager_task(self) -> Task:
         """
@@ -230,6 +190,7 @@ class Team(BaseModel):
         task = [member.task for member in self.members if member.is_manager == True]
         return task[0] if len(task) > 0 else None

+
     @property
     def tasks(self):
         """
@@ -253,9 +214,7 @@ class Team(BaseModel):
     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
         """Prevent manual setting of the 'id' field by users."""
         if v:
-            raise PydanticCustomError(
-                "may_not_set_field", "The 'id' field cannot be set by the user.", {}
-            )
+            raise PydanticCustomError("may_not_set_field", "The 'id' field cannot be set by the user.", {})

     # @field_validator("config", mode="before")
     # @classmethod
@@ -286,6 +245,7 @@ class Team(BaseModel):
         )
         return self

+
     @model_validator(mode="after")
     def validate_tasks(self):
         """
@@ -381,13 +341,16 @@ class Team(BaseModel):
             return skipped_task_output
         return None

-    def _create_team_output(self, task_outputs: List[TaskOutput]) -> TeamOutput:
-        if len(task_outputs) != 1:
-            raise ValueError(
-                "Something went wrong. Kickoff should return only one task output."
-            )

-        final_task_output = task_outputs[0]
+    def _create_team_output(self, task_outputs: List[TaskOutput], lead_task_output: TaskOutput = None) -> TeamOutput:
+        """
+        Take the output of the first task or the lead task output as the team output `raw` value.
+        Note that `tasks` are already sorted by importance.
+        """
+        if len(task_outputs) < 1:
+            raise ValueError("Something went wrong. Kickoff should return at least one task output.")
+
+        final_task_output = lead_task_output if lead_task_output is not None else task_outputs[0]
         # final_string_output = final_task_output.raw
         # self._finish_execution(final_string_output)
         token_usage = self._calculate_usage_metrics()
@@ -395,12 +358,13 @@ class Team(BaseModel):
         return TeamOutput(
             team_id=self.id,
             raw=final_task_output.raw,
-            pydantic=final_task_output.pydantic,
             json_dict=final_task_output.json_dict,
-            task_output_list=[task.output for task in self.tasks if task.output],
+            pydantic=final_task_output.pydantic,
+            task_output_list=task_outputs,
             token_usage=token_usage,
         )

+
     def _calculate_usage_metrics(self) -> UsageMetrics:
         """
         Calculate and return the usage metrics consumed by the team.
@@ -420,12 +384,8 @@ class Team(BaseModel):
         self.usage_metrics = total_usage_metrics
         return total_usage_metrics

-    def _execute_tasks(
-        self,
-        tasks: List[Task],
-        start_index: Optional[int] = 0,
-        was_replayed: bool = False,
-    ) -> TeamOutput:
+
+    def _execute_tasks(self, tasks: List[Task], start_index: Optional[int] = 0, was_replayed: bool = False) -> TeamOutput:
         """
         Executes tasks sequentially and returns the final output in TeamOutput class.
         When we have a manager agent, we will start from executing manager agent's tasks.
@@ -434,6 +394,7 @@ class Team(BaseModel):
         """

         task_outputs: List[TaskOutput] = []
+        lead_task_output: TaskOutput = None
         futures: List[Tuple[Task, Future[TaskOutput], int]] = []
         last_sync_output: Optional[TaskOutput] = None

@@ -449,77 +410,44 @@ class Team(BaseModel):

             responsible_agent = self._get_responsible_agent(task)
             if responsible_agent is None:
-                responsible_agent = self.members[
-                    0
-                ].agent  #! REFINEME - select a suitable agent for the task
+                responsible_agent = self.manager_agent if self.manager_agent else self.members[0].agent

             # self._prepare_agent_tools(task)
             # self._log_task_start(task, responsible_agent)

             if isinstance(task, ConditionalTask):
-                skipped_task_output = self._handle_conditional_task(
-                    task, task_outputs, futures, task_index, was_replayed
-                )
+                skipped_task_output = self._handle_conditional_task(task, task_outputs, futures, task_index, was_replayed)
                 if skipped_task_output:
                     continue

             if task.async_execution:
-                context = create_raw_outputs(
-                    tasks=[
-                        task,
-                    ],
-                    task_outputs=(
-                        [
-                            last_sync_output,
-                        ]
-                        if last_sync_output
-                        else []
-                    ),
-                )
-                future = task.execute_async(
-                    agent=responsible_agent,
-                    context=context,
-                    # tools=responsible_agent.tools,
-                )
+                context = create_raw_outputs(tasks=[task,], task_outputs=([last_sync_output,] if last_sync_output else []))
+                future = task.execute_async(agent=responsible_agent, context=context,
+                    # tools=responsible_agent.tools
+                )
                 futures.append((task, future, task_index))
             else:
                 if futures:
                     task_outputs = self._process_async_tasks(futures, was_replayed)
                     futures.clear()

-                context = create_raw_outputs(
-                    tasks=[
-                        task,
-                    ],
-                    task_outputs=(
-                        [
-                            last_sync_output,
-                        ]
-                        if last_sync_output
-                        else []
-                    ),
-                )
-                task_output = task.execute_sync(
-                    agent=responsible_agent,
-                    context=context,
-                    # tools=responsible_agent.tools,
-                )
-                task_outputs = [
-                    task_output,
-                ]
+                context = create_raw_outputs(tasks=[task,], task_outputs=([last_sync_output,] if last_sync_output else []))
+                task_output = task.execute_sync(agent=responsible_agent, context=context,
+                    # tools=responsible_agent.tools
+                )
+                if responsible_agent is self.manager_agent:
+                    lead_task_output = task_output
+
+                task_outputs.append(task_output)
                 # self._process_task_result(task, task_output)
                 # self._store_execution_log(task, task_output, task_index, was_replayed)

         # if futures:
         #     task_outputs = self._process_async_tasks(futures, was_replayed)
+        return self._create_team_output(task_outputs, lead_task_output)

-        return self._create_team_output(task_outputs)

-    def kickoff(
-        self,
-        kwargs_before: Optional[Dict[str, str]] = None,
-        kwargs_after: Optional[Dict[str, Any]] = None,
-    ) -> TeamOutput:
+    def kickoff(self, kwargs_before: Optional[Dict[str, str]] = None, kwargs_after: Optional[Dict[str, Any]] = None) -> TeamOutput:
         """
         Kickoff the team:
         0. Plan the team action if we have `team_tasks` using `planning_llm`.
@@ -553,7 +481,6 @@ class Team(BaseModel):

         for member in self.members:
             agent = member.agent
-            # agent.i18n = i18n
             agent.team = self

             # add the team's common callbacks to each agent.
@@ -574,9 +501,7 @@ class Team(BaseModel):
         for after_callback in self.after_kickoff_callbacks:
             result = after_callback(result, **kwargs_after)

-        metrics += [
-            member.agent._token_process.get_summary() for member in self.members
-        ]
+        metrics += [member.agent._token_process.get_summary() for member in self.members]

         self.usage_metrics = UsageMetrics()
         for metric in metrics:
@@ -36,13 +36,13 @@ class TeamPlanner:
         task_to_handle = Task(
             description=f"""
         Based on the following task summaries, create the most descriptive plan that the team can execute most efficiently. Take all the task summaries - task's description and tools available - into consideration. Your answer only contains a dictionary.
-
+
         Task summaries: {" ".join(task_summary_list)}
         """,
             expected_output_json=False,
             expected_output_pydantic=True,
             output_field_list=[
-                ResponseField(title=f"{task.id}", type="str", required=True)
+                ResponseField(title=f"{task.id}", type=str, required=True)
                 for task in self.tasks
             ],
         )
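The `_execute_tasks` and `_create_team_output` changes above make the manager's task output, when present, the team's final `raw` result. A minimal sketch of that selection logic, using hypothetical stand-in types rather than the package's own classes:

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class FakeTaskOutput:
    # stand-in for the package's TaskOutput
    raw: str

def pick_final_output(task_outputs: List[FakeTaskOutput],
                      lead_task_output: Optional[FakeTaskOutput] = None) -> FakeTaskOutput:
    # Mirrors _create_team_output: the lead (manager) output wins;
    # otherwise fall back to the first task output.
    if len(task_outputs) < 1:
        raise ValueError("Kickoff should return at least one task output.")
    return lead_task_output if lead_task_output is not None else task_outputs[0]

outputs = [FakeTaskOutput(raw="member result"), FakeTaskOutput(raw="another member result")]
assert pick_final_output(outputs, FakeTaskOutput(raw="manager result")).raw == "manager result"
assert pick_final_output(outputs).raw == "member result"
```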
versionhq/tool/model.py CHANGED
@@ -35,14 +35,12 @@ class Tool(ABC, BaseModel):
     @property
     def description(self):
         args_schema = {
-            name: {
-                "description": field.description,
-                "type": Tool._get_arg_annotations(field.annotation),
-            }
+            name: {"description": field.description, "type": Tool._get_arg_annotations(field.annotation)}
             for name, field in self.args_schema.model_fields.items()
         }
         return f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nTool Description: {self.description}"

+
     @field_validator("args_schema", mode="before")
     @classmethod
     def _default_args_schema(cls, v: Type[BaseModel]) -> Type[BaseModel]:
@@ -188,21 +186,13 @@ class ToolCalled(BaseModel):
     Store the tool called and any kwargs used.
     """

-    tool: InstanceOf[Tool] = Field(
-        ..., description="store the tool instance to be called."
-    )
-    arguments: Optional[Dict[str, Any]] = Field(
-        ..., description="kwargs passed to the tool"
-    )
+    tool: InstanceOf[Tool] = Field(..., description="store the tool instance to be called.")
+    arguments: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")


 class InstructorToolCalled(BaseModel):
-    tool: InstanceOf[Tool] = Field(
-        ..., description="store the tool instance to be called."
-    )
-    arguments: Optional[Dict[str, Any]] = Field(
-        ..., description="kwargs passed to the tool"
-    )
+    tool: InstanceOf[Tool] = Field(..., description="store the tool instance to be called.")
+    arguments: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")


 class CacheTool(BaseModel):
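For reference, the `ToolCalled` shape after the reformat above can be mirrored with a hypothetical plain-pydantic sketch; the concrete `Tool` subclass API is not shown in this diff, so a stand-in model is used:

```python
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field

class FakeTool(BaseModel):
    # stand-in for a concrete Tool subclass
    name: str = "search"

class FakeToolCalled(BaseModel):
    tool: FakeTool = Field(..., description="store the tool instance to be called.")
    arguments: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")

called = FakeToolCalled(tool=FakeTool(), arguments={"query": "latest release"})
print(called.model_dump())  # {'tool': {'name': 'search'}, 'arguments': {'query': 'latest release'}}
```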
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: versionhq
-Version: 1.1.6.3
+Version: 1.1.7.0
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -52,7 +52,8 @@ Requires-Dist: wheel>=0.45.1

 # Overview

-![python ver](https://img.shields.io/badge/Python-3.13.1-blue) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
+![MIT license](https://img.shields.io/badge/License-MIT-green) ![PyPi](https://img.shields.io/badge/pypi-v1.1.6.3-blue)
+![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)


 An LLM orchestration framework for multi-agent systems with RAG to autopilot outbound workflows.
@@ -64,10 +65,10 @@ Messaging workflows are created at individual level, and will be deployed on thi

 **Visit:**

-- Marketing: [Landing page](https://home.versi0n.io)
-- Client app: [Production](https://versi0n.io/)
-- Backend: [Orchestration + multi-agent RAG system](https://github.com/versionHQ/multi-agent-system)
-- Test CI: [Test client app (React)](https://github.com/versionHQ/test-client-app)
+- [Landing page](https://home.versi0n.io)
+- [Client app](https://versi0n.io/)
+- [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
+- [Test client app](https://github.com/versionHQ/test-client-app)


 ## Mindmap
@@ -75,7 +76,7 @@ Messaging workflows are created at individual level, and will be deployed on thi
 LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete the `task` given by the client or the system.

 <p align="center">
-   <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1733556715/pj_m_home/urwte15at3h0dr8mdlyo.png" alt="mindmap" width="1000">
+   <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1733556715/pj_m_home/urwte15at3h0dr8mdlyo.png" alt="mindmap" width="400">
 </p>

 <hr />
@@ -89,8 +90,7 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
 - [Project Structure](#project-structure)
 - [Setup](#setup)
 - [Usage](#usage)
-  - [Installing as a Package Module (Alpha)](#installing-as-a-package-module-alpha)
-  - [Contributing & Customizing](#contributing--customizing)
+- [Contributing](#contributing)
 - [Customizing AI Agents](#customizing-ai-agents)
 - [Modifying RAG Functionality](#modifying-rag-functionality)
 - [Package Management with uv](#package-management-with-uv)
@@ -111,7 +111,7 @@ The `agent` is model agnostic. The default model is set Chat GTP 4o. We ask the
 Multiple `agents` can form a `team` to complete complex tasks together.

 **1. Analysis**
-- Professional `agents` handle the analysis `tasks` on each client, customer, and product.
+- Professional `agents` handle the analysis `tasks` on each client, customer, and product.

 **2. Messaging Workflow Creation**
 - Several `teams` receive the analysis and design initial messaging workflow with several layers.
@@ -122,6 +122,8 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 - Responsible `agents` or `teams` autopilot executing and refining the messaging workflow.


+<hr />
+
 ## Technologies Used
 **Schema, Database, Data Validation**
 - [Pydantic](https://docs.pydantic.dev/latest/): Data validation and serialization library for Python
@@ -164,14 +166,16 @@ src/
 │      └── ...

 └──tests/
-      └── cli/
+      └── cli/
       └── team/
       └── ...
-
+
 └── uploads/   # Uploaded files for the project

 ```

+<hr />
+
 ## Setup

 1. Install the `uv` package manager:
@@ -204,53 +208,67 @@ src/
    COMPOSIO_CLI_KEY=your-composio-cli-key
    ```

-## Usage
+<hr />

-1. Add features.
+## Usage

-2. Test the features using the `tests` directory.
+1. Install the `versionhq` package:
+   ```
+   uv pip install versionhq
+   ```

-   - Add a file to the `tests` directory.
-   - Run a test.
+2. You can use the `versionhq` module in your Python app.
    ```
-   uv run <your file name>
+   from versionhq.agent.model import Agent
+   agent = Agent(llm="your-llm", ...)
    ```
-   * All the `.py` files' names in the `tests` have to be ended with `_test.py`.

+   For more details:

-3. Run a React demo app: [React demo app](https://github.com/versionHQ/test-client-app) to check it on the client endpoint.
-   ```
-   npm i
-   npm start
-   ```
-   The frontend will be available at `http://localhost:3000`.
+   [PyPi package](https://pypi.org/project/versionhq/)

-4. `production` is available at `https://versi0n.io`. Currently, we are running beta.
+<hr />

+## Contributing

-## Installing as a Package Module (Alpha)
+1. Fork the repository

-1. Open another terminal, set your repository as root, and run
-   ```
-   uv pip install git+https://github.com/versionHQ/multi-agent-system.git#egg=versionhq
-   ```
+2. Create your feature branch (`git checkout -b feature/your-amazing-feature`)

-2. You can use the `versionhq` module in your Python app.
-   ```
-   from versionhq.agent.model import Agent
-   agent = Agent(llm="your-llm"...)
-   ```
+3. Create amazing features

-## Contributing & Customizing
+4. Test the features using the `tests` directory.

-1. Fork the repository
-2. Create your feature branch (`git checkout -b feature/your-amazing-feature`)
-3. Pull the latest version of source code from the main branch (`git pull origin main`) *Address conflicts if any.
-4. Commit your changes (`git add .` / `git commit -m 'Add your-amazing-feature'`)
-5. Push to the branch (`git push origin feature/your-amazing-feature`)
-6. Open a pull request
+   - Add a test function to respective components in the `tests` directory.
+   - Add your `LITELLM_API_KEY` and `OPENAI_API_KEY` to the Github `repository secrets` @ settings > secrets & variables > Actions.
+   - Run a test.
+     ```
+     uv run pytest tests -vv
+     ```
+
+   **pytest**
+
+   * When adding a new file to `tests`, name the file ended with `_test.py`.
+   * When adding a new feature to the file, name the feature started with `test_`.
+
+5. Pull the latest version of source code from the main branch (`git pull origin main`) *Address conflicts if any.
+6. Commit your changes (`git add .` / `git commit -m 'Add your-amazing-feature'`)
+7. Push to the branch (`git push origin feature/your-amazing-feature`)
+8. Open a pull request
+
+
+**Optional**
+* Flag with `#! REFINEME` for any improvements needed and `#! FIXME` for any errors.
+
+* Run a React demo app: [React demo app](https://github.com/versionHQ/test-client-app) to check it on the client endpoint.
+  ```
+  npm i
+  npm start
+  ```
+  The frontend will be available at `http://localhost:3000`.
+
+* `production` is available at `https://versi0n.io`. Currently, we are running an alpha test.

-0. Flag with `#! REFINEME` for any improvements and `#! FIXME` for any errors.


 ### Customizing AI Agents
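The contributing steps above encode two pytest conventions: test files end with `_test.py` and test functions start with `test_`. A hypothetical minimal file that satisfies both (the path and assertion are placeholders, not package code):

```python
# tests/team/model_test.py  (hypothetical path; the file name ends with `_test.py`)

def test_placeholder():
    # the function name starts with `test_`, so pytest collects it
    assert 2 + 2 == 4
```

Run it with `uv run pytest tests -vv`, as in step 4 above.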
@@ -299,16 +317,20 @@ Pre-commit hooks help maintain code quality by running checks for formatting, li
    git commit --no-verify -m "your-commit-message"
    ```

+<hr />
+
 ## Trouble Shooting

 Common issues and solutions:
 - API key errors: Ensure all API keys in the `.env` file are correct and up to date. Make sure to add `load_dotenv()` on the top of the python file to apply the latest environment values.
 - Database connection issues: Check if the Chroma DB is properly initialized and accessible.
 - Memory errors: If processing large contracts, you may need to increase the available memory for the Python process.
-- Issues related to dependencies: `rm -rf .venv uv.lock`, `uv cache clean` and run `uv run pip install -r requirements.txt -v`.
+- Issues related to dependencies: `rm -rf uv.lock`, `uv cache clean`, `uv venv`, and run `uv pip install -r requirements.txt -v`.
 - Issues related to the AI agents or RAG system: Check the `output.log` file for detailed error messages and stack traces.
 - Issues related to `Python quit unexpectedly`: Check [this stackoverflow article](https://stackoverflow.com/questions/59888499/macos-catalina-python-quit-unexpectedly-error).
+- `reportMissingImports` error from pyright after installing the package: This might occur when installing new libraries while VSCode is running. Open the command palette (Ctrl + Shift + P) and run the `Python: Restart Language Server` command.

+<hr />

 ## Frequently Asked Questions (FAQ)
 **Q. Where can I see if the agent is working?**