versionhq 1.2.1.15__py3-none-any.whl → 1.2.1.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/agent/inhouse_agents.py +2 -2
- versionhq/agent/model.py +118 -119
- versionhq/task/model.py +2 -2
- versionhq/task_graph/draft.py +2 -2
- versionhq/tool/composio_tool.py +1 -2
- {versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/METADATA +1 -1
- {versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/RECORD +11 -11
- {versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/LICENSE +0 -0
- {versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/WHEEL +0 -0
- {versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -10,7 +10,7 @@ vhq_client_manager = Agent(
     role="vhq-Client Manager",
     goal="Efficiently communicate with the client on the task progress",
     llm=DEFAULT_MODEL_NAME,
-
+    with_memory=True,
 )
 
 
@@ -21,7 +21,7 @@ vhq_task_evaluator = Agent(
     llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
     maxit=1,
     max_retry_limit=1,
-
+    with_memory=True # refer past eval records of similar tasks
 )
 
 
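Both in-house agents now enable memory at construction time. Below is a minimal user-side sketch of the new flag, assuming Agent is importable from versionhq.agent.model as elsewhere in this diff; the role, goal, and model strings are illustrative only:

# Hedged sketch mirroring the constructor calls above; not from the package docs.
from versionhq.agent.model import Agent

agent = Agent(
    role="Demo Manager",                        # illustrative role
    goal="Report task progress to the client",  # illustrative goal
    llm="gpt-4o-mini",                          # any model name the LLM wrapper accepts (assumption)
    with_memory=True,                           # new flag in 1.2.1.16
)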
versionhq/agent/model.py
CHANGED
@@ -60,7 +60,6 @@ class Agent(BaseModel):
     """
 
     __hash__ = object.__hash__
-    _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
     _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
     _request_within_rpm_limit: Any = PrivateAttr(default=None)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -76,39 +75,38 @@ class Agent(BaseModel):
 
     # knowledge
     knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
+    embedder_config: Optional[Dict[str, Any]] = Field(default=None, description="embedder configuration for knowledge sources")
     _knowledge: Optional[Knowledge] = PrivateAttr(default=None)
 
     # memory
-
-    memory_config: Optional[Dict[str, Any]] = Field(default=None, description="
+    with_memory: bool = Field(default=False, description="whether to use memories during the task execution")
+    memory_config: Optional[Dict[str, Any]] = Field(default=None, description="memory config. needs to store user_id for UserMemory to work")
     short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(default=None)
     long_term_memory: Optional[InstanceOf[LongTermMemory]] = Field(default=None)
     user_memory: Optional[InstanceOf[UserMemory]] = Field(default=None)
-    embedder_config: Optional[Dict[str, Any]] = Field(default=None, description="embedder configuration for the agent's knowledge")
 
     # prompting
     use_developer_prompt: Optional[bool] = Field(default=True, description="Use developer prompt when calling the llm")
-    developer_propmt_template: Optional[str] = Field(default=None, description="
-    user_prompt_template: Optional[str] = Field(default=None, description="user prompt template")
+    developer_propmt_template: Optional[str] = Field(default=None, description="abs. file path to developer prompt template")
+    user_prompt_template: Optional[str] = Field(default=None, description="abs. file path to user prompt template")
 
     # task execution rules
     network: Optional[List[Any]] = Field(default=None, description="store a list of agent networks that the agent belong as a member")
-    allow_delegation: bool = Field(default=False,description="
-    max_retry_limit: int = Field(default=2
-    maxit: Optional[int] = Field(default=25,description="max. number of total optimization loops conducted when an error occurs")
+    allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
+    max_retry_limit: int = Field(default=2, description="max. number of task retries when an error occurs")
+    maxit: Optional[int] = Field(default=25, description="max. number of total optimization loops conducted when an error occurs")
     callbacks: Optional[List[Callable]] = Field(default_factory=list, description="callback functions to execute after any task execution")
 
     # llm settings cascaded to the LLM model
     llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
-
-    respect_context_window: bool = Field(default=True,description="
-
-    max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
+    func_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
+    respect_context_window: bool = Field(default=True,description="keep messages under the context window size")
+    max_execution_time: Optional[int] = Field(default=None, description="max. task execution time in seconds")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute")
     llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM model")
 
-    # cache, error, ops handling
-    formatting_errors: int = Field(default=0, description="number of formatting errors.")
+    # # cache, error, ops handling
+    # formatting_errors: int = Field(default=0, description="number of formatting errors.")
 
 
     @field_validator("id", mode="before")
@@ -136,18 +134,18 @@ class Agent(BaseModel):
     @model_validator(mode="after")
     def set_up_llm(self) -> Self:
         """
-        Set up `llm` and `
+        Set up `llm` and `func_calling_llm` as valid LLM objects using the given kwargs.
         """
         self.llm = self._convert_to_llm_object(llm=self.llm)
 
-
-
-        if
-        self.
+        func_calling_llm = self.func_calling_llm if self.func_calling_llm else self.llm if self.llm else None
+        func_calling_llm = self._convert_to_llm_object(llm=func_calling_llm)
+        if func_calling_llm._supports_function_calling():
+            self.func_calling_llm = func_calling_llm
         elif self.llm._supports_function_calling():
-            self.
+            self.func_calling_llm = self.llm
         else:
-            self.
+            self.func_calling_llm = self._convert_to_llm_object(llm=LLM(model=DEFAULT_MODEL_NAME))
         return self
 
 
@@ -179,7 +177,7 @@ class Agent(BaseModel):
         model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
         llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
         llm_params = {
-            "max_tokens": (getattr(llm, "max_tokens") or
+            "max_tokens": (getattr(llm, "max_tokens") or 3000),
             "timeout": getattr(llm, "timeout", self.max_execution_time),
            "callbacks": getattr(llm, "callbacks", None),
             "temperature": getattr(llm, "temperature", None),
@@ -222,7 +220,7 @@ class Agent(BaseModel):
 
 
         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
-        llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+        # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
 
         if llm.provider is None:
             provider_name = llm.model.split("/")[0]
@@ -262,7 +260,7 @@ class Agent(BaseModel):
                 tool_list.append(item)
 
             else:
-
+                Logger().log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
                 raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})
 
         self.tools = tool_list
@@ -346,7 +344,7 @@ class Agent(BaseModel):
                 self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)
 
         except:
-
+            Logger().log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")
 
         return self
 
@@ -357,7 +355,7 @@ class Agent(BaseModel):
         Set up memories: stm, ltm, and um
         """
 
-        # if self.
+        # if self.with_memory == True:
         self.long_term_memory = self.long_term_memory if self.long_term_memory else LongTermMemory()
         self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config)
 
@@ -371,21 +369,13 @@ class Agent(BaseModel):
         return self
 
 
-    def
-        """
-        Fine-tuned the base model using OpenAI train framework.
-        """
-        if not isinstance(self.llm, LLM):
-            pass
-
-
-    def update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
+    def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
         """
         Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
         """
 
         if not llm and not llm_config:
-
+            Logger().log(level="error", message="Missing llm or llm_config values to update", color="red")
             pass
 
         self.llm = llm
@@ -398,59 +388,15 @@ class Agent(BaseModel):
         return self.set_up_llm()
 
 
-    def
+    def _train(self) -> Self:
         """
-
+        Fine-tuned the base model using OpenAI train framework.
         """
-
-
-        self._logger.log(level="error", message="Missing values to update", color="red")
-        return self
-
-        for k, v in kwargs.items():
-            match k:
-                case "tools":
-                    self.tools = kwargs.get(k, self.tools)
-                    self.set_up_tools()
-
-                case "role" | "goal":
-                    self.role = kwargs.get("role", self.role)
-                    self.goal = kwargs.get("goal", self.goal)
-                    if not self.backstory:
-                        self.set_up_backstory()
-
-                    if self.backstory:
-                        self.backstory += f"new role: {self.role}, new goal: {self.goal}"
-
-                case "max_rpm":
-                    self.max_rpm = kwargs.get(k, self.max_rpm)
-                    self.set_up_rpm()
-
-                case "knowledge_sources":
-                    self.knowledge_sources = kwargs.get("knowledge_sources", self.knowledge_sources)
-                    self.set_up_knowledge()
-
-                case "use_memory" | "memory_config":
-                    self.use_memory = kwargs.get("use_memory", self.use_memory)
-                    self.memory_config = kwargs.get("memory_config", self.memory_config)
-                    self.set_up_memory()
-
-                case "llm" | "llm_config":
-                    self.llm = kwargs.get("llm", self.llm)
-                    self.llm_config = kwargs.get("llm_config", self.llm_config)
-                    self.update_llm(llm=self.llm, llm_config=self.llm_config)
-
-                case _:
-                    try:
-                        setattr(self, k, v)
-                    except Exception as e:
-                        self._logger.log(level="error", message=f"Failed to update the key: {k} We'll skip. Error: {str(e)}", color="red")
-                        pass
-
-        return self
+        if not isinstance(self.llm, LLM):
+            pass
 
 
-    def
+    def _invoke(
         self,
         prompts: str,
         response_format: Optional[Dict[str, Any]] = None,
@@ -477,21 +423,21 @@ class Agent(BaseModel):
             if self._rpm_controller and self.max_rpm:
                 self._rpm_controller.check_or_wait()
 
-
+            Logger().log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
 
             if tool_res_as_final:
-                raw_response = self.
-                task.tokens = self.
+                raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+                task.tokens = self.func_calling_llm._tokens
             else:
                 raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
                 task.tokens = self.llm._tokens
 
             task_execution_counter += 1
-
+            Logger().log(level="info", message=f"Agent response: {raw_response}", color="green")
             return raw_response
 
         except Exception as e:
-
+            Logger().log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")
 
         while not raw_response and task_execution_counter <= self.max_retry_limit:
             while (not raw_response or raw_response == "" or raw_response is None) and iterations < self.maxit:
@@ -503,14 +449,89 @@ class Agent(BaseModel):
                 iterations += 1
 
             task_execution_counter += 1
-
+            Logger().log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
            return raw_response
 
         if not raw_response:
-
+            Logger().log(level="error", message="Received None or empty response from the model", color="red")
             raise ValueError("Invalid response from LLM call - None or empty.")
 
 
+    def update(self, **kwargs) -> Self:
+        """
+        Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
+        """
+
+        if not kwargs:
+            Logger().log(level="error", message="Missing values to update", color="red")
+            return self
+
+        for k, v in kwargs.items():
+            match k:
+                case "tools":
+                    self.tools = kwargs.get(k, self.tools)
+                    self.set_up_tools()
+
+                case "role" | "goal":
+                    self.role = kwargs.get("role", self.role)
+                    self.goal = kwargs.get("goal", self.goal)
+                    if not self.backstory:
+                        self.set_up_backstory()
+
+                    if self.backstory:
+                        self.backstory += f"new role: {self.role}, new goal: {self.goal}"
+
+                case "max_rpm":
+                    self.max_rpm = kwargs.get(k, self.max_rpm)
+                    self.set_up_rpm()
+
+                case "knowledge_sources":
+                    self.knowledge_sources = kwargs.get("knowledge_sources", self.knowledge_sources)
+                    self.set_up_knowledge()
+
+                case "with_memory" | "memory_config":
+                    self.with_memory = kwargs.get("with_memory", self.with_memory)
+                    self.memory_config = kwargs.get("memory_config", self.memory_config)
+                    self.set_up_memory()
+
+                case "llm" | "llm_config":
+                    self.llm = kwargs.get("llm", self.llm)
+                    self.llm_config = kwargs.get("llm_config", self.llm_config)
+                    self._update_llm(llm=self.llm, llm_config=self.llm_config)
+
+                case _:
+                    try:
+                        setattr(self, k, v)
+                    except Exception as e:
+                        Logger().log(level="error", message=f"Failed to update the field: {k} We'll skip it. Error: {str(e)}", color="red")
+                        pass
+
+        return self
+
+
+    def start(self, context: Any = None, tool_res_as_final: bool = False) -> Any | None:
+        """
+        Defines and executes a task when it is not given and returns TaskOutput object.
+        """
+
+        if not self.goal or not self.role:
+            return None
+
+        from versionhq.task.model import Task
+
+        class Output(BaseModel):
+            result: str
+            steps: list[str]
+
+        task = Task(
+            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
+            pydantic_output=Output,
+            tool_res_as_final=tool_res_as_final,
+        )
+        res = task.execute(agent=self, context=context)
+        return res
+
+
     def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
         """
         Format a task prompt, adding context from knowledge and memory (if given), and invoke LLM.
@@ -534,7 +555,7 @@ class Agent(BaseModel):
         if agent_knowledge_context:
             task_prompt += agent_knowledge_context
 
-        if self.
+        if self.with_memory == True:
            contextual_memory = ContextualMemory(
                 memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
             )
@@ -553,7 +574,7 @@ class Agent(BaseModel):
 
         try:
             self._times_executed += 1
-            raw_response = self.
+            raw_response = self._invoke(
                 prompts=task_prompt,
                 response_format=task._structure_response_format(model_provider=self.llm.provider),
                 tools=tools,
@@ -563,11 +584,11 @@ class Agent(BaseModel):
 
         except Exception as e:
             self._times_executed += 1
-
+            Logger().log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
             raw_response = self.execute_task(task, context, task_tools)
 
             if self._times_executed > self.max_retry_limit:
-
+                Logger().log(level="error", message=f"Max retry limit has exceeded.", color="red")
                 raise e
 
         if self.max_rpm and self._rpm_controller:
@@ -576,27 +597,5 @@ class Agent(BaseModel):
         return raw_response
 
 
-    def start(self, context: Any = None) -> Any | None:
-        """
-        Defines and executes a task when it is not given and returns TaskOutput object.
-        """
-
-        if not self.goal or not self.role:
-            return None
-
-        from versionhq.task.model import Task, ResponseField
-
-        class Output(BaseModel):
-            result: str
-            steps: list[str]
-
-        task = Task(
-            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
-            pydantic_output=Output
-        )
-        res = task.execute(agent=self, context=context)
-        return res
-
-
     def __repr__(self):
         return f"Agent(role={self.role}, goal={self.goal}"
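In sum, this file adds func_calling_llm resolution in set_up_llm(), renames update_llm() to the private _update_llm(), relocates update() and start() below _invoke(), and gates contextual memory on the new with_memory flag in execute_task(). A hedged sketch of the resulting public surface, grounded only in the method signatures visible above; the role, goal, model, and context values are illustrative:

# Sketch only; assumes Agent is importable as shown elsewhere in this diff.
from versionhq.agent.model import Agent

agent = Agent(
    role="Researcher",                           # illustrative
    goal="Draft a one-sentence market summary",  # illustrative
    llm="gpt-4o-mini",                           # set_up_llm() derives func_calling_llm from this when unset
)

# update() routes known keys (tools, role/goal, max_rpm, knowledge_sources,
# with_memory/memory_config, llm/llm_config) through their set_up_x() or
# _update_llm() paths, and falls back to setattr() for anything else.
agent = agent.update(with_memory=True, max_rpm=10)

# start() self-assembles a Task from role/goal and executes it; the new
# tool_res_as_final parameter is forwarded to the generated Task.
res = agent.start(context="Q3 revenue grew 12%")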
versionhq/task/model.py
CHANGED
@@ -530,9 +530,9 @@ Ref. Output image: {output_formats_to_follow}
         from versionhq.agent.model import Agent
         from versionhq.memory.model import ShortTermMemory, MemoryMetadata, LongTermMemory
 
-        agent = agent if isinstance(agent, Agent) else Agent(role=str(agent), goal=str(agent),
+        agent = agent if isinstance(agent, Agent) else Agent(role=str(agent), goal=str(agent), with_memory=True)
 
-        if agent.
+        if agent.with_memory == False:
             return None
 
         try:
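The hunk above makes the memory-write path fall back to a memory-enabled agent and bail out when the flag is off. A condensed sketch of that guard follows; the enclosing function name and second parameter are assumptions, since the diff does not show them:

# Hypothetical wrapper name; only the guard logic appears in the diff.
def _store_memories(agent, task_output):
    from versionhq.agent.model import Agent

    # Coerce non-Agent inputs into a memory-enabled Agent.
    agent = agent if isinstance(agent, Agent) else Agent(role=str(agent), goal=str(agent), with_memory=True)

    if agent.with_memory == False:
        return None  # skip all ShortTermMemory / LongTermMemory writes

    # ... memory writes follow, per the imports shown above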
versionhq/task_graph/draft.py
CHANGED
@@ -14,7 +14,7 @@ from versionhq.task_graph.model import TaskGraph, Task, DependencyType, Node
 from versionhq._utils.logger import Logger
 
 
-def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = False,
+def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = False, with_memory: bool = False) -> TaskGraph | None:
     """
     Generate a TaskGraph object to generate the givne final_output most resource-efficiently.
     """
@@ -43,7 +43,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
             ", ".join([k for k in DependencyType._member_map_.keys()]),
         ],
         llm="gemini-2.0",
-
+        with_memory=with_memory,
         maxit=1,
         max_retry_limit=1,
     )
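With the new keyword, callers can propagate memory use into the drafting agent. A minimal sketch, assuming workflow() is importable from versionhq.task_graph.draft as the diff header indicates; the output schema and context string are illustrative:

# Sketch only; CampaignPlan is an invented example schema.
from pydantic import BaseModel
from versionhq.task_graph.draft import workflow

class CampaignPlan(BaseModel):
    headline: str
    steps: list[str]

# with_memory now flows through to the Agent constructed inside workflow().
task_graph = workflow(final_output=CampaignPlan, context="B2B SaaS launch", with_memory=True)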
versionhq/tool/composio_tool.py
CHANGED
@@ -2,14 +2,13 @@ import os
 import uuid
 from abc import ABC
 from dotenv import load_dotenv
-from typing import Any,
+from typing import Any, Optional, Tuple, Dict
 from typing_extensions import Self
 
 from pydantic import BaseModel, Field, model_validator, field_validator, UUID4, PrivateAttr
 from pydantic_core import PydanticCustomError
 
 from composio import ComposioToolSet
-from composio_langchain import action
 
 from versionhq.tool.composio_tool_vars import ComposioAppName, ComposioAuthScheme, composio_app_set, ComposioStatus, ComposioAction
 from versionhq.tool.cache_handler import CacheHandler
{versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=X8no15wt1eRD5MvPgotKJer6Zl8fu7CEm2X85NX0VMc,2883
 versionhq/_utils/__init__.py,sha256=dzoZr4cBlh-2QZuPzTdehPUCe9lP1dmRtauD7qTjUaA,158
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/logger.py,sha256=zgogTwAY-ujDLrdryAKhdtoaNe1nOFajmEN0V8aMR34,3155
@@ -6,8 +6,8 @@ versionhq/_utils/process_config.py,sha256=jbPGXK2Kb4iyCugJ3FwRJuU0wL5Trq2x4xFQz2
 versionhq/_utils/usage_metrics.py,sha256=NXF18dn5NNvGK7EsQ4AAghpR8ppYOjMx6ABenLLHnmM,1066
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent/inhouse_agents.py,sha256=
-versionhq/agent/model.py,sha256=
+versionhq/agent/inhouse_agents.py,sha256=vupO1viYqVb7sKohIE1zThu6JArhh5JLo5LBeSnh0kM,2534
+versionhq/agent/model.py,sha256=ReuQLuLQZ87TyzMKdQaqfDX496waLascBt2Ebw69QAY,25593
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -47,22 +47,22 @@ versionhq/task/evaluate.py,sha256=WdUgjbZL62XrxyWe5MTz29scfzwmuAHGxJ7GvAB8Fmk,39
 versionhq/task/formation.py,sha256=WH604q9bRmWH7KQCrk2qKJwisCopYX5CjJvsj4TgFjI,6894
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
 versionhq/task/log_handler.py,sha256=LT7YnO7gcPR9IZS7eRvMjnHh8crMBFtqduxd8dxIbkk,1680
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=i0MDTS9-htIygL7Q8CTOrG8Zt0gPcqQiS4wacmecqAc,28540
 versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
 versionhq/task/TEMPLATES/Description.py,sha256=V-4kh8xpQTKOcDMi2xnuP-fcNk6kuoz1_5tYBlDLQWQ,420
 versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
-versionhq/task_graph/draft.py,sha256=
+versionhq/task_graph/draft.py,sha256=AuQ2X-T5xuQ2ipMiAqeh9Pjm6I2fIf952pBQRYqdaog,5131
 versionhq/task_graph/model.py,sha256=njyHQyHrVTZP46iVkC6YvuMnGcS40vOy1wszRtf7DHY,23971
 versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
-versionhq/tool/composio_tool.py,sha256=
+versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
 versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtgpqOzKZQ,1843
 versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
 versionhq/tool/model.py,sha256=PO4zNWBZcJhYVur381YL1dy6zqurio2jWjtbxOxZMGI,12194
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.2.1.
-versionhq-1.2.1.
-versionhq-1.2.1.
-versionhq-1.2.1.
-versionhq-1.2.1.
+versionhq-1.2.1.16.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.1.16.dist-info/METADATA,sha256=hAoTlAANrgYU-5ybybIvcQsKn_q7XAQmljyWh6bnw4Y,22033
+versionhq-1.2.1.16.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+versionhq-1.2.1.16.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.1.16.dist-info/RECORD,,
{versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/LICENSE
File without changes
{versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/WHEEL
File without changes
{versionhq-1.2.1.15.dist-info → versionhq-1.2.1.16.dist-info}/top_level.txt
File without changes