versionhq 1.1.7.0__py3-none-any.whl → 1.1.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/agent/TEMPLATES/Backstory.py +0 -4
- versionhq/agent/model.py +2 -2
- versionhq/task/model.py +93 -27
- versionhq/team/model.py +9 -9
- {versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/METADATA +51 -37
- {versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/RECORD +10 -10
- {versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/LICENSE +0 -0
- {versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/WHEEL +0 -0
- {versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
versionhq/agent/TEMPLATES/Backstory.py
CHANGED
@@ -1,7 +1,3 @@
 BACKSTORY="""You are a {role} with deep understanding of {knowledge} and highly skilled in {skillsets}.
 You have access to call the RAG tools that can {rag_tool_overview}. By leveraging these tools, your knowledge, and skillsets, you can identify competitive strategies that have been proven effective to achieve the goal: {goal}. Take these into consideration, create innovative solutions.
 """
-
-
-# developing strategies to improve cohort retention and customer lifetime value
-# ingest past successful campaigns
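Note: the backstory template above is a plain Python format string. A minimal, hypothetical sketch of how it could be rendered (the placeholder values are illustrative; in the package the real values come from the agent's configured fields):

```python
from versionhq.agent.TEMPLATES.Backstory import BACKSTORY

# Illustrative values only; not taken from the package.
backstory = BACKSTORY.format(
    role="growth marketer",
    knowledge="cohort analysis",
    skillsets="SQL, campaign design",
    rag_tool_overview="query past campaign data",
    goal="improve customer lifetime value",
)
print(backstory)
```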
versionhq/agent/model.py
CHANGED
@@ -367,8 +367,8 @@ class Agent(ABC, BaseModel):
         """

         task_prompt = task.prompt()
-
-
+        if context:
+            task_prompt += context

         tool_results = []
         if task.tools_called:
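The net change here is small: when a `context` string is passed to the agent's task-execution path, it is appended to the prompt built by `task.prompt()`. A toy mirror of that behavior, not the package's actual method:

```python
from typing import Optional

def build_agent_prompt(task_prompt: str, context: Optional[str] = None) -> str:
    # Mirrors the added lines above: append the context string only when one is provided.
    if context:
        task_prompt += context
    return task_prompt

print(build_agent_prompt("Summarize the campaign results.", "\nContext: output of the prior task"))
```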
versionhq/task/model.py
CHANGED
@@ -18,21 +18,44 @@ class ResponseField(BaseModel):
     """
     Field class to use in the response schema for the JSON response.
     """
+
     title: str = Field(default=None)
     type: Type = Field(default=str)
     required: bool = Field(default=True)

+
     def _annotate(self, value: Any) -> Annotated:
         """
         Address `create_model`
         """
         return Annotated[self.type, value] if isinstance(value, self.type) else Annotated[str, str(value)]

+
+    def _convert(self, value: Any) -> Any:
+        try:
+            if self.type is Any:
+                pass
+            elif self.type is int:
+                return int(value)
+            elif self.type is float:
+                return float(value)
+            elif self.type is list or self.type is dict:
+                return json.loads(value)
+            else:
+                return value
+        except:
+            return value
+
+
     def create_pydantic_model(self, result: Dict, base_model: Union[BaseModel | Any]) -> Any:
         for k, v in result.items():
-            if k is not self.title
+            if k is not self.title:
                 pass
-
+            elif type(v) is not self.type:
+                v = self._convert(v)
+                setattr(base_model, k, v)
+            else:
+                setattr(base_model, k, v)
         return base_model

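The new `ResponseField._convert` helper coerces a raw value into the field's declared type and falls back to the original value when coercion fails. A quick usage sketch (the constructor arguments follow the README usage example quoted later in this diff):

```python
from versionhq.task.model import ResponseField

field = ResponseField(title="cohort_timeframe", type=int, required=True)
print(field._convert("30"))            # 30: the string is coerced to the declared int type
print(field._convert("not a number"))  # "not a number": the bare except returns the raw value unchanged
```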
@@ -43,12 +66,14 @@ class AgentOutput(BaseModel):
     """
     customer_id: str = Field(default=None, max_length=126, description="customer uuid")
     customer_analysis: str = Field(default=None, max_length=256, description="analysis of the customer")
-
-
+    product_overview: str = Field(default=None, max_length=256, description="analysis of the client's business")
+    usp: str = Field()
+    cohort_timeframe: int = Field(default=None, max_length=256, description="suitable cohort timeframe in days")
     kpi_metrics: List[str] = Field(default=list, description="Ideal KPIs to be tracked")
     assumptions: List[Dict[str, Any]] = Field(default=list, description="assumptions to test")


+
 class TaskOutput(BaseModel):
     """
     Store the final output of the task in TaskOutput class.
@@ -57,8 +82,8 @@ class TaskOutput(BaseModel):
 
     task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
     raw: str = Field(default="", description="Raw output of the task")
-    pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
     json_dict: Union[Dict[str, Any]] = Field(default=None, description="`raw` converted to dictionary")
+    pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")

     def __str__(self) -> str:
         return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw
@@ -75,16 +100,31 @@ class TaskOutput(BaseModel):
         )
         return json.dumps(self.json_dict)

+
     def to_dict(self) -> Dict[str, Any]:
-        """
+        """
+        Convert pydantic / raw output into dict and return the dict.
+        When we only have `raw` output, return `{ output: raw }` to avoid an error
+        """
+
         output_dict = {}
         if self.json_dict:
             output_dict.update(self.json_dict)
         elif self.pydantic:
             output_dict.update(self.pydantic.model_dump())
+        else:
+            output_dict.upate({ "output": self.raw })
         return output_dict


+    def context_prompting(self) -> str:
+        """
+        When the task is called as context, return its output in concise string to add it to the prompt
+        """
+        return json.dumps(self.json_dict) if self.json_dict else self.raw[0: 127]
+
+
+
 class Task(BaseModel):
     """
     Task to be executed by the agent or the team.
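For orientation, a toy mirror of the two `TaskOutput` helpers added above (this stub is not the package class; it only reproduces the branch order and returns fresh dicts instead of mutating one):

```python
import json
from typing import Any, Dict, Optional

class StubTaskOutput:
    """Illustrative stand-in: prefer json_dict, then a pydantic dump, then wrap the raw string."""
    def __init__(self, raw: str = "", json_dict: Optional[Dict[str, Any]] = None, pydantic: Any = None):
        self.raw, self.json_dict, self.pydantic = raw, json_dict, pydantic

    def to_dict(self) -> Dict[str, Any]:
        if self.json_dict:
            return dict(self.json_dict)
        if self.pydantic:
            return self.pydantic.model_dump()
        return {"output": self.raw}

    def context_prompting(self) -> str:
        # JSON when structured output exists, otherwise the first ~128 characters of the raw text.
        return json.dumps(self.json_dict) if self.json_dict else self.raw[0:127]

print(StubTaskOutput(raw="plain text only").to_dict())                     # {'output': 'plain text only'}
print(StubTaskOutput(json_dict={"kpi": "retention"}).context_prompting())  # {"kpi": "retention"}
```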
@@ -102,7 +142,10 @@ class Task(BaseModel):
     # output
     expected_output_json: bool = Field(default=True)
     expected_output_pydantic: bool = Field(default=False)
-    output_field_list:
+    output_field_list: List[ResponseField] = Field(
+        default=[ResponseField(title="output", type=str, required=False)],
+        description="provide output key and data type. this will be cascaded to the agent via task.prompt()"
+    )
     output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")

     # task setup
@@ -123,18 +166,18 @@ class Task(BaseModel):
 
 
     @property
-    def output_prompt(self):
+    def output_prompt(self) -> str:
         """
         Draft prompts on the output format by converting `output_field_list` to dictionary.
         """

-        output_prompt,
+        output_prompt, output_formats_to_follow = "", dict()
         for item in self.output_field_list:
-
+            output_formats_to_follow[item.title] = f"<Return your answer in {item.type.__name__}>"

         output_prompt = f"""
-
-
+        Your outputs MUST adhere to the following format and should NOT include any irrelevant elements:
+        {output_formats_to_follow}
         """
         return output_prompt

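A standalone sketch of what the revised `output_prompt` property produces (the field names and types here are illustrative; in the package they come from `output_field_list`):

```python
# Build the per-field format instructions the same way the property now does.
fields = [("test1", str), ("test2", list)]
output_formats_to_follow = {title: f"<Return your answer in {t.__name__}>" for title, t in fields}

output_prompt = f"""
Your outputs MUST adhere to the following format and should NOT include any irrelevant elements:
{output_formats_to_follow}
"""
print(output_prompt)
# prints the instruction header followed by:
# {'test1': '<Return your answer in str>', 'test2': '<Return your answer in list>'}
```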
@@ -228,16 +271,25 @@ class Task(BaseModel):
         return self


-    def prompt(self, customer
+    def prompt(self, customer: str = None, product_overview: str = None) -> str:
         """
-        Format the task prompt.
+        Format the task prompt and cascade it to the agent.
+        When the task has context, add context prompting of all the tasks in the context.
+        When we have cusotmer/product info, add them to the prompt.
         """
-
-
-
-
-
-
+
+        task_slices = [self.description, f"{self.output_prompt}"]
+
+        if self.context:
+            context_outputs = "\n".join([task.output.context_prompting() if hasattr(task, "output") else "" for task in self.context])
+            task_slices.insert(1, f"Take the following context into consideration: {context_outputs}")
+
+        if customer:
+            task_slices.insert(1, f"customer overview: {customer}")
+
+        if product_overview:
+            task_slices.insert(1, f"Product overview: {product_overview}")
+
         return "\n".join(task_slices)


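Because every optional slice is inserted at index 1, the assembled prompt reads: description, then product overview, then customer overview, then context, then the output-format instructions. A self-contained mirror of that ordering (not the package's method; argument values are made up):

```python
from typing import Optional

def build_task_prompt(description: str, output_prompt: str,
                      context_outputs: Optional[str] = None,
                      customer: Optional[str] = None,
                      product_overview: Optional[str] = None) -> str:
    task_slices = [description, output_prompt]
    if context_outputs:
        task_slices.insert(1, f"Take the following context into consideration: {context_outputs}")
    if customer:
        task_slices.insert(1, f"customer overview: {customer}")
    if product_overview:
        task_slices.insert(1, f"Product overview: {product_overview}")
    return "\n".join(task_slices)

print(build_task_prompt("Draft a campaign.", "Return JSON.",
                        context_outputs="prior task output",
                        customer="ACME Corp", product_overview="B2B SaaS"))
```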
@@ -273,10 +325,10 @@ class Task(BaseModel):
 
     def create_pydantic_output(self, output_json_dict: Dict[str, Any], raw_result: Any = None) -> Optional[Any]:
         """
-        Create pydantic output from the raw result.
+        Create pydantic output from the `raw` result.
         """

-        output_pydantic = None
+        output_pydantic = None
         if isinstance(raw_result, BaseModel):
             output_pydantic = raw_result

@@ -285,8 +337,14 @@ class Task(BaseModel):
 
         else:
             output_pydantic = create_model("PydanticTaskOutput", __base__=BaseModel)
-
-            item.
+            try:
+                for item in self.output_field_list:
+                    value = output_json_dict[item.title] if hasattr(output_json_dict, item.title) else None
+                    if value and type(value) is not item.type:
+                        value = item._convert(value)
+                    setattr(output_pydantic, item.title, value)
+            except:
+                setattr(output_pydantic, "output", output_json_dict)

         return output_pydantic

@@ -307,11 +365,19 @@ class Task(BaseModel):
         self.description = self._original_description.format(**inputs)
         # self.expected_output = self._original_expected_output.format(**inputs)

+
     # task execution
     def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
         """
         Execute the task synchronously.
+        When the task has context, make sure we have executed all the tasks in the context first.
         """
+
+        if self.context:
+            for task in self.context:
+                if task.output is None:
+                    task._execute_core(agent, context)
+
         return self._execute_core(agent, context)


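The new pre-step in `execute_sync` runs any context task that has not yet produced an output before the current task executes. A toy, self-contained mirror of that ordering (stub classes only; the real ones live in `versionhq.task.model`):

```python
from dataclasses import dataclass, field
from typing import Any, List, Optional

@dataclass
class StubTask:
    name: str
    context: List["StubTask"] = field(default_factory=list)
    output: Optional[str] = None

    def _execute_core(self, agent: Any, context: Optional[str]) -> str:
        self.output = f"output of {self.name}"
        return self.output

    def execute_sync(self, agent: Any, context: Optional[str] = None) -> str:
        # New in 1.1.7.1: context tasks without an output run first.
        for task in self.context:
            if task.output is None:
                task._execute_core(agent, context)
        return self._execute_core(agent, context)

upstream = StubTask(name="research")
main = StubTask(name="outreach", context=[upstream])
main.execute_sync(agent=None)
print(upstream.output)  # "output of research" was produced before the main task ran
```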
@@ -341,12 +407,12 @@ class Task(BaseModel):
         """

         self.prompt_context = context
-
-        output_json_dict = self.create_json_output(raw_result=
+        output_raw = agent.execute_task(task=self, context=context)
+        output_json_dict = self.create_json_output(raw_result=output_raw)
         output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
         task_output = TaskOutput(
             task_id=self.id,
-            raw=
+            raw=output_raw,
             pydantic=output_pydantic,
             json_dict=output_json_dict
         )
versionhq/team/model.py
CHANGED
@@ -100,26 +100,27 @@ class TeamOutput(BaseModel):
 
     def to_dict(self) -> Dict[str, Any]:
         """
-        Convert
+        Convert pydantic / raw output into dict and return the dict.
+        When we only have `raw` output, return `{ output: raw }` to avoid an error
         """
+
         output_dict = {}
         if self.json_dict:
             output_dict.update(self.json_dict)
         elif self.pydantic:
             output_dict.update(self.pydantic.model_dump())
         else:
-            output_dict.
+            output_dict.upate({ "output": self.raw })
         return output_dict

+
     def return_all_task_outputs(self) -> List[Dict[str, Any]]:
         res = [output.json_dict for output in self.task_output_list]
         return res


 class TeamMember(ABC, BaseModel):
-    agent: Agent | None = Field(
-        default=None, description="store the agent to be a member"
-    )
+    agent: Agent | None = Field(default=None, description="store the agent to be a member")
     is_manager: bool = Field(default=False)
     task: Task | None = Field(default=None)

@@ -164,9 +165,11 @@ class Team(BaseModel):
     execution_logs: List[Dict[str, Any]] = Field(default=[], description="list of execution logs for tasks")
     usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")

+
     def __name__(self) -> str:
         return self.name if self.name is not None else self.id.__str__

+
     @property
     def key(self) -> str:
         source = [str(member.agent.id.__str__) for member in self.members] + [str(task.id.__str__) for task in self.tasks]
@@ -175,9 +178,7 @@ class Team(BaseModel):
 
     @property
     def manager_agent(self) -> Agent:
-        manager_agent = [
-            member.agent for member in self.members if member.is_manager == True
-        ]
+        manager_agent = [member.agent for member in self.members if member.is_manager == True]
         return manager_agent[0] if len(manager_agent) > 0 else None


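The reformatted `manager_agent` property is behaviorally unchanged: it returns the first member flagged as a manager, or `None` when no member is. A minimal illustration with plain data (not the package classes):

```python
from typing import List, Optional

members = [
    {"agent": "researcher", "is_manager": False},
    {"agent": "lead", "is_manager": True},
]

def manager_agent(members: List[dict]) -> Optional[str]:
    # Same shape as the one-liner above: filter on is_manager, take the first hit.
    managers = [m["agent"] for m in members if m["is_manager"] is True]
    return managers[0] if managers else None

print(manager_agent(members))  # "lead"
```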
@@ -251,7 +252,6 @@ class Team(BaseModel):
         """
         Every team member should have a task to handle.
         """
-
         if self.process == TaskHandlingProcess.sequential:
             for member in self.members:
                 if member.task is None:
{versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: versionhq
-Version: 1.1.7.0
+Version: 1.1.7.1
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -52,8 +52,7 @@ Requires-Dist: wheel>=0.45.1
 
 # Overview
 
-
-
+[](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
 
 
 An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -70,6 +69,7 @@ Messaging workflows are created at individual level, and will be deployed on thi
 - [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
 - [Test client app](https://github.com/versionHQ/test-client-app)
 
+<hr />
 
 ## Mindmap
 
@@ -86,10 +86,10 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
 
 - [Key Features](#key-features)
+- [Usage](#usage)
 - [Technologies Used](#technologies-used)
 - [Project Structure](#project-structure)
 - [Setup](#setup)
-- [Usage](#usage)
 - [Contributing](#contributing)
 - [Customizing AI Agents](#customizing-ai-agents)
 - [Modifying RAG Functionality](#modifying-rag-functionality)
@@ -121,6 +121,52 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 **3. Autopiloting**
 - Responsible `agents` or `teams` autopilot executing and refining the messaging workflow.
 
+<hr />
+
+## Usage
+
+1. Install `versionhq` package:
+   ```
+   uv pip install versionhq
+   ```
+
+2. You can use the `versionhq` module in your Python app.
+
+   - **i.e.,** Make LLM-based agent execute the task and return JSON dict.
+
+   ```
+   from versionhq.agent.model import Agent
+   from versionhq.task.model import Task, ResponseField
+
+   agent = Agent(
+     role="demo",
+     goal="amazing project goal",
+     skillsets=["skill_1", "skill_2", ],
+     llm="llm-of-choice"
+   )
+
+   task = Task(
+     description="Amazing task",
+     expected_output_json=True,
+     expected_output_pydantic=False,
+     output_field_list=[
+       ResponseField(title="test1", type=str, required=True),
+       ResponseField(title="test2", type=list, required=True),
+     ],
+     context=["amazing context",],
+     tools=["amazing tool"],
+     callback=None,
+   )
+
+   res = task.execute_sync(agent=agent)
+
+   return res.to_dict()
+
+   ```
+
+   For more details:
+
+   [PyPi package](https://pypi.org/project/versionhq/)
 
 <hr />
 
@@ -145,6 +191,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 - [pre-commit](https://pre-commit.com/): Manage and maintain pre-commit hooks
 - [setuptools](https://pypi.org/project/setuptools/): Build python modules
 
+<hr />
 
 ## Project Structure
 
@@ -210,25 +257,6 @@ src/
 
 <hr />
 
-## Usage
-
-1. Install `versionhq` package:
-   ```
-   uv pip install versionhq
-   ```
-
-2. You can use the `versionhq` module in your Python app.
-   ```
-   from versionhq.agent.model import Agent
-   agent = Agent(llm="your-llm", ...)
-   ```
-
-   For more details:
-
-   [PyPi package](https://pypi.org/project/versionhq/)
-
-   <hr />
-
 ## Contributing
 
 1. Fork the repository
@@ -359,17 +387,3 @@ Common issues and solutions:
 > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.
 
 > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.
-
-
-
-<--- Remaining tasks --->
-
-- llm handling - agent
-- more llms integration
-- simpler prompting
-- broader knowledge
-
-- utils - log
-- utils - time
-
-- end to end client app test
{versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=u_mFdT53z-xk8SM6MzaqIiM3dnGxuBJjwNpWDBVOsGQ,773
 versionhq/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/_utils/cache_handler.py,sha256=zDQKzIn7vp-M2-uepHFxgJstjfftZS5mzXKL_-4uVvI,370
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
@@ -7,9 +7,9 @@ versionhq/_utils/process_config.py,sha256=ogrhovLbwe0ocQlcohRgBBRtww7C3pk9hikjvg
 versionhq/_utils/rpm_controller.py,sha256=T7waIGeblu5K58erY4lqVLcPsWM7W9UFdU3DG9Dsk0w,2214
 versionhq/_utils/usage_metrics.py,sha256=c33a_28y8ECUgflsKN3mkNm0fNkWgZmXwybMwIqoKXA,1098
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=Jf-mHPgg12B-JIc2A2fOsKfksLxS2lAivaZEvbWhu6Q,18257
 versionhq/agent/parser.py,sha256=GhoNQo4WloVM3vGnAmt9lnEOTARX7nWMhJE55rF_5Rs,5500
-versionhq/agent/TEMPLATES/Backstory.py,sha256=
+versionhq/agent/TEMPLATES/Backstory.py,sha256=cdngBx1GEv7nroR46FEhnysnBJ9mEVL763_9np6Skkc,395
 versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -24,16 +24,16 @@ versionhq/llm/llm_vars.py,sha256=YZoXqFBW7XpclUZ14_AAz7WOjoyCXnGcI959GSpX2q0,534
 versionhq/llm/model.py,sha256=PdwisrlrsDqd6gXwXCyGbGTRTeGZ8SXpt_gfua8qunk,8266
 versionhq/task/__init__.py,sha256=g4mCATnn1mUXxsfQ5p6IpPawr8O421wVIT8kMKEcxQw,180
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=LMCN_7s5weKbUZpEi01Yw3b16XazfxpWCTqLkvPXVRc,16837
 versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/team/model.py,sha256=
+versionhq/team/model.py,sha256=pMCt0ZCVgE_pRH2qXlWj-PEZrI77JoRVVzUOZWY1Rws,20140
 versionhq/team/team_planner.py,sha256=B1UOn_DYVVterUn2CAd80jfO4sViJCCXPJA3abSSugg,2143
 versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/tool/decorator.py,sha256=Y-j4jkoujD5LUvpe8uf3p5Zagk2XVaRKC9rkIE-2geo,1189
 versionhq/tool/model.py,sha256=JZOEcZRIEfcrjL8DgrFYDt4YNgMF8rXS26RK6D2x9mc,6906
 versionhq/tool/tool_handler.py,sha256=e-2VfG9zFpfPG_oMoPXye93GDovs7FuUASWQwUTLrJ0,1498
-versionhq-1.1.7.
-versionhq-1.1.7.
-versionhq-1.1.7.
-versionhq-1.1.7.
-versionhq-1.1.7.
+versionhq-1.1.7.1.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
+versionhq-1.1.7.1.dist-info/METADATA,sha256=WINOZxhTC1Cf6tuh8a7hjzjhjwQgDLCw6Tiv4rAv3XI,14278
+versionhq-1.1.7.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+versionhq-1.1.7.1.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.1.7.1.dist-info/RECORD,,
{versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/LICENSE
File without changes
{versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/WHEEL
File without changes
{versionhq-1.1.7.0.dist-info → versionhq-1.1.7.1.dist-info}/top_level.txt
File without changes