versionhq 1.1.9.14__py3-none-any.whl → 1.1.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/task/model.py CHANGED
@@ -1,5 +1,6 @@
  import json
  import threading
+ import datetime
  import uuid
  from concurrent.futures import Future
  from hashlib import md5
@@ -18,22 +19,106 @@ from versionhq._utils.logger import Logger

  class ResponseField(BaseModel):
      """
-     Field class to use in the response schema for the JSON response.
+     A class to store the response format and schema that will cascade to the LLM.
+     The `config` field can store additional params:
+     https://community.openai.com/t/official-documentation-for-supported-schemas-for-response-format-parameter-in-calls-to-client-beta-chats-completions-parse/932422/3
      """

-     title: str = Field(default=None)
-     type: Type = Field(default=str)
+     title: str = Field(default=None, description="title of the field")
+     data_type: Type = Field(default=None)
+     items: Optional[Type] = Field(default=None, description="store data type of the array items")
+     properties: Optional[List[BaseModel]] = Field(default=None, description="store dict items in ResponseField format")
      required: bool = Field(default=True)
+     nullable: bool = Field(default=False)
+     config: Optional[Dict[str, Any]] = Field(default=None, description="additional rules")


-     def _annotate(self, value: Any) -> Annotated:
+     @model_validator(mode="after")
+     def validate_instance(self) -> Self:
          """
-         Address `create_model`
+         Validate the model instance based on the given `data_type`. (An array must have `items`; a dict must have `properties`.)
          """
-         return Annotated[self.type, value] if isinstance(value, self.type) else Annotated[str, str(value)]
+
+         if self.data_type is list and self.items is None:
+             self.items = str
+
+         if self.data_type is dict or (self.data_type is list and self.items is dict):
+             if self.properties is None:
+                 raise PydanticCustomError("missing_properties", "The dict type requires `properties` to be set.", {})
+
+             else:
+                 for item in self.properties:
+                     if not isinstance(item, ResponseField):
+                         raise PydanticCustomError("invalid_properties", "Properties must be given in ResponseField format.", {})
+
+         return self
+
+
+     def _format_props(self) -> Dict[str, Any]:
+         """
+         Structure valid properties. We accept 2 nested objects.
+         """
+         from versionhq.llm.llm_vars import SchemaType
+
+         schema_type = SchemaType(type=self.data_type).convert()
+         props: Dict[str, Any] = {}
+
+         if self.data_type is list and self.items is not dict:
+             props = {
+                 "type": schema_type,
+                 "items": { "type": SchemaType(type=self.items).convert() },
+             }
+
+         elif self.data_type is list and self.items is dict:
+             nested_p, nested_r = dict(), list()
+
+             if self.properties:
+                 for item in self.properties:
+                     nested_p.update(**item._format_props())
+
+                     if item.required:
+                         nested_r.append(item.title)
+
+             props = {
+                 "type": schema_type,
+                 "items": {
+                     "type": SchemaType(type=self.items).convert(),
+                     "properties": nested_p,
+                     "required": nested_r,
+                     "additionalProperties": False
+                 }
+             }
+
+         elif self.data_type is dict:
+             p, r = dict(), list()
+
+             if self.properties:
+                 for item in self.properties:
+                     p.update(**item._format_props())
+
+                     if item.required:
+                         r.append(item.title)
+
+             props = {
+                 "type": schema_type,
+                 "properties": p,
+                 "required": r,
+                 "additionalProperties": False
+             }
+
+         else:
+             props = {
+                 "type": schema_type,
+                 "nullable": self.nullable,
+             }
+
+         return { self.title: { **props, **self.config }} if self.config else { self.title: props }


      def _convert(self, value: Any) -> Any:
+         """
+         Convert the given value to the ideal data type.
+         """
          try:
              if self.type is Any:
                  pass
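To make the new schema surface concrete, here is a minimal usage sketch of `_format_props()` (a sketch assuming `SchemaType` in versionhq.llm.llm_vars maps `str` to "string" and `int` to "integer"; field titles are illustrative):

    from versionhq.task.model import ResponseField

    # A scalar field renders to { title: { "type": ..., "nullable": ... } }
    headline = ResponseField(title="headline", data_type=str)
    print(headline._format_props())
    # assumed output: {"headline": {"type": "string", "nullable": False}}

    # A dict field must declare `properties`; validate_instance() raises otherwise
    metrics = ResponseField(
        title="metrics",
        data_type=dict,
        properties=[ResponseField(title="score", data_type=int)],
    )
    # assumed shape of metrics._format_props():
    # {"metrics": {"type": "object",
    #              "properties": {"score": {"type": "integer", "nullable": False}},
    #              "required": ["score"], "additionalProperties": False}}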
@@ -42,7 +127,9 @@ class ResponseField(BaseModel):
              elif self.type is float:
                  return float(value)
              elif self.type is list or self.type is dict:
-                 return json.loads(value)
+                 return json.loads(eval(str(value)))
+             elif self.type is str:
+                 return str(value)
              else:
                  return value
          except:
@@ -50,6 +137,9 @@ class ResponseField(BaseModel):


      def create_pydantic_model(self, result: Dict, base_model: InstanceOf[BaseModel] | Any) -> Any:
+         """
+         Create a Pydantic model from the given result
+         """
          for k, v in result.items():
              if k is not self.title:
                  pass
@@ -61,6 +151,13 @@ class ResponseField(BaseModel):
          return base_model


+     def _annotate(self, value: Any) -> Annotated:
+         """
+         Address Pydantic's `create_model`
+         """
+         return Annotated[self.type, value] if isinstance(value, self.type) else Annotated[str, str(value)]
+
+

  class TaskOutput(BaseModel):
      """
@@ -108,14 +205,15 @@ class TaskOutput(BaseModel):

  class Task(BaseModel):
      """
-     Task to be executed by the agent or the team.
-     Each task must have a description and at least one expected output format either Pydantic, Raw, or JSON, with necessary fields in ResponseField.
-     Then output will be stored in TaskOutput class.
+     Task to be executed by agents or teams.
+     Each task must have a description.
+     The default response is a JSON string that strictly follows `response_fields`, and it will be stored in TaskOutput.raw / json_dict.
+     When `pydantic_custom_output` is provided, we prioritize it and store raw (JSON string), json_dict, and pydantic in the TaskOutput class.
      """

      __hash__ = object.__hash__
+     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
      _original_description: str = PrivateAttr(default=None)
-     _logger: Logger = PrivateAttr()
      _task_output_handler = TaskOutputStorageHandler()
      config: Optional[Dict[str, Any]] = Field(default=None, description="values to set on Task class")

@@ -124,11 +222,13 @@ class Task(BaseModel):
      description: str = Field(description="Description of the actual task")

      # output
-     expected_output_json: bool = Field(default=True)
-     expected_output_pydantic: bool = Field(default=False)
-     output_field_list: List[ResponseField] = Field(
+     pydantic_custom_output: Optional[Any] = Field(
+         default=None,
+         description="store a custom Pydantic class that will be passed to the model as a response format."
+     )
+     response_fields: List[ResponseField] = Field(
          default_factory=list,
-         description="provide output key and data type. this will be cascaded to the agent via task.prompt()"
+         description="store the list of ResponseFields to create the response format"
      )
      output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")

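These two fields replace the removed `expected_output_json` / `expected_output_pydantic` flags. A minimal sketch of both configuration paths (the `Idea` class and task descriptions are illustrative):

    from pydantic import BaseModel
    from versionhq.task.model import Task, ResponseField

    # Path 1: hand-built schema via response_fields (default JSON-string output)
    task_a = Task(
        description="Suggest a campaign idea.",
        response_fields=[ResponseField(title="idea", data_type=str, required=True)],
    )

    # Path 2: a custom Pydantic class handed to the model as the response format
    class Idea(BaseModel):  # illustrative output schema
        idea: str

    task_b = Task(description="Suggest a campaign idea.", pydantic_custom_output=Idea)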
@@ -139,7 +239,7 @@
      # tool usage
      tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
      can_use_agent_tools: bool = Field(default=False, description="whether the agent can use their own tools when executing the task")
-     take_tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")
+     tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")

      # execution rules
      allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")
@@ -151,6 +251,7 @@
      processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
      tools_errors: int = 0
      delegations: int = 0
+     execution_span_in_sec: int = 0


      @model_validator(mode="before")
@@ -178,7 +279,7 @@
      @model_validator(mode="after")
      def set_attributes_based_on_config(self) -> Self:
          """
-         Set attributes based on the agent configuration.
+         Set attributes based on the task configuration.
          """

          if self.config:
@@ -211,85 +312,187 @@
          return self


-     def prompt(self, customer: str = None, product_overview: str = None) -> str:
+     def _draft_output_prompt(self, model_provider: str) -> str:
+         """
+         Draft prompts on the output format by converting `pydantic_custom_output` or `response_fields` into instructions.
+         """
+
+         output_prompt = ""
+
+         if self.pydantic_custom_output:
+             output_prompt = f"""
+ Your response MUST STRICTLY follow the given response format:
+ JSON schema: {str({k: v for k, v in self.pydantic_custom_output.__fields__.items()})}
+ """
+
+         elif self.response_fields:
+             output_prompt, output_formats_to_follow = "", dict()
+             response_format = str(self._structure_response_format(model_provider=model_provider))
+             for item in self.response_fields:
+                 if item:
+                     output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+
+             output_prompt = f"""
+ Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+ Response format: {response_format}
+ Ref. Output image: {output_formats_to_follow}
+ """
+
+         else:
+             output_prompt = "Return your response as a valid JSON string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
+
+         return output_prompt
+
+
+     def prompt(self, model_provider: str = None, customer: str = None, product_overview: str = None) -> str:
          """
          Format the task prompt and cascade it to the agent.
          When the task has context, add context prompting of all the tasks in the context.
          When we have customer/product info, add them to the prompt.
          """

-         task_slices = [self.description, f"{self.output_prompt}", f"Take the following context into consideration: "]
+         output_prompt = self._draft_output_prompt(model_provider=model_provider)
+         task_slices = [self.description, output_prompt,]

          if self.context:
              context_outputs = "\n".join([task.output.context_prompting() if hasattr(task, "output") else "" for task in self.context])
-             task_slices.insert(len(task_slices), context_outputs)
+             task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_outputs}")
+
+         if self.prompt_context:
+             task_slices.insert(len(task_slices), f"Consider the following context when responding: {self.prompt_context}")

          if customer:
-             task_slices.insert(len(task_slices), f"Customer overview: {customer}")
+             task_slices.insert(len(task_slices), f"Customer to address: {customer}")

          if product_overview:
-             task_slices.insert(len(task_slices), f"Product overview: {product_overview}")
-
-         if self.prompt_context:
-             task_slices.insert(len(task_slices), self.prompt_context)
+             task_slices.insert(len(task_slices), f"Product to promote: {product_overview}")

          return "\n".join(task_slices)


-     def _create_json_output(self, raw_result: str) -> Dict[str, Any]:
+     def _get_output_format(self) -> TaskOutputFormat:
+         if self.output_json == True:
+             return TaskOutputFormat.JSON
+         if self.output_pydantic == True:
+             return TaskOutputFormat.PYDANTIC
+         return TaskOutputFormat.RAW
+
+
+     def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
          """
-         Create json (dict) output from the raw result.
+         Create and return a valid response format using
+         - manual response schema from `self.response_fields`, or
+         - SDK objects from `pydantic_custom_output`.
+         OpenAI:
+         https://platform.openai.com/docs/guides/structured-outputs?context=ex1#function-calling-vs-response-format
+         https://platform.openai.com/docs/guides/structured-outputs?context=with_parse#some-type-specific-keywords-are-not-yet-supported
+         Gemini:
          """
-         import ast

-         output_json_dict: Dict[str, Any] = dict()
+         response_schema = None
+
+         if self.response_fields:
+             properties, required_fields = {}, []
+             for i, item in enumerate(self.response_fields):
+                 if item:
+                     if item.data_type is dict:
+                         properties.update(item._format_props())
+                     else:
+                         properties.update(item._format_props())
+
+                     required_fields.append(item.title)
+
+             response_schema = {
+                 "type": "object",
+                 "properties": properties,
+                 "required": required_fields,
+                 "additionalProperties": False,  # for openai
+             }
+
+
+         elif self.pydantic_custom_output:
+             response_schema = {
+                 **self.pydantic_custom_output.model_json_schema(),
+                 "additionalProperties": False,
+                 "required": [k for k, v in self.pydantic_custom_output.__fields__.items()],
+                 "strict": True,
+             }
+
+
+         if response_schema:
+             if model_provider == "gemini":
+                 return {
+                     "type": data_type,
+                     "response_schema": response_schema,
+                     "enforce_validation": True
+                 }
+
+             if model_provider == "openai":
+                 if self.pydantic_custom_output:
+                     return self.pydantic_custom_output
+                 else:
+                     return {
+                         "type": "json_schema",
+                         "json_schema": { "name": "outcome", "strict": True, "schema": response_schema },
+                     }
+         else:
+             return None
+
+
+     def _create_json_output(self, raw: str) -> Dict[str, Any]:
+         """
+         Create json (dict) output from the raw output and `response_fields` information.
+         """
+
+         if raw is None or raw == "":
+             self._logger.log(level="error", message="The model returned an empty response. Returning an empty dict.", color="yellow")
+             output = { "output": "n.a." }
+             return output

          try:
-             raw_result = raw_result.replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]').replace("{\n'", '{"').replace("{\'", '{"')
-             r = json.dumps(eval(str(raw_result)))
-             output_json_dict = json.loads(r)
+             r = str(raw).replace("true", "True").replace("false", "False")
+             j = json.dumps(eval(r))
+             output = json.loads(j)
+             if isinstance(output, dict):
+                 return output

-             if isinstance(output_json_dict, str):
-                 output_json_dict = ast.literal_eval(r)
+             else:
+                 r = str(raw).replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]').replace("{\n'", '{"').replace("{\'", '{"').replace("true", "True").replace("false", "False")
+                 j = json.dumps(eval(r))
+                 output = json.loads(j)
+
+                 if isinstance(output, dict):
+                     return output
+
+                 else:
+                     import ast
+                     output = ast.literal_eval(r)
+                     return output if isinstance(output, dict) else { "output": str(r) }

          except:
-             output_json_dict = { "output": raw_result }
+             output = { "output": str(raw) }
+             return output

-         return output_json_dict


-     def _create_pydantic_output(self, output_json_dict: Dict[str, Any], raw_result: Any = None) -> Optional[Any]:
+     def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
          """
          Create pydantic output from the `raw` result.
          """

          output_pydantic = None
-         if isinstance(raw_result, BaseModel):
-             output_pydantic = raw_result
+         json_dict = json_dict

-         elif hasattr(output_json_dict, "output"):
-             output_pydantic = create_model("PydanticTaskOutput", output=output_json_dict["output"], __base__=BaseModel)
-
-         else:
-             output_pydantic = create_model("PydanticTaskOutput", __base__=BaseModel)
-             try:
-                 for item in self.output_field_list:
-                     value = output_json_dict[item.title] if hasattr(output_json_dict, item.title) else None
-                     if value and type(value) is not item.type:
-                         value = item._convert(value)
-                     setattr(output_pydantic, item.title, value)
-             except:
-                 setattr(output_pydantic, "output", output_json_dict)
+         try:
+             if not json_dict:
+                 json_dict = self._create_json_output(raw=raw)

-         return output_pydantic
+             output_pydantic = self.pydantic_custom_output(**json_dict)

+         except:
+             pass

-     def _get_output_format(self) -> TaskOutputFormat:
-         if self.output_json == True:
-             return TaskOutputFormat.JSON
-         if self.output_pydantic == True:
-             return TaskOutputFormat.PYDANTIC
-         return TaskOutputFormat.RAW
+         return output_pydantic


      def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
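For reference, this is roughly the dict that `_structure_response_format(model_provider="openai")` builds from a single string field, following the branches above (a sketch; the `"string"` type name assumes the usual `SchemaType` conversion):

    # For Task(response_fields=[ResponseField(title="idea", data_type=str)]):
    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": "outcome",
            "strict": True,
            "schema": {
                "type": "object",
                "properties": {"idea": {"type": "string", "nullable": False}},
                "required": ["idea"],
                "additionalProperties": False,
            },
        },
    }

The fixed `"outcome"` wrapper name is also why `_execute_core` below re-parses `json_dict_output["outcome"]` when the model echoes the schema name back.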
@@ -336,15 +539,23 @@

      def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
          """
-         Run the core execution logic of the task.
-         To speed up the process, when the format is not expected to return, we will skip the conversion process.
-         When the task is allowed to delegate to another agent, we will select a responsible one in order of manager > peer_agent > anoymous agent.
+         Execute the given task with the given agent.
+         Handle 1. agent delegation, 2. tools, 3. context to consider, and 4. callbacks
          """
+
          from versionhq.agent.model import Agent
          from versionhq.team.model import Team

          self.prompt_context = context
          task_output: InstanceOf[TaskOutput] = None
+         tool_output: str | list = None
+         task_tools: List[InstanceOf[Tool] | InstanceOf[ToolSet] | Type[Tool]] = []
+         started_at = datetime.datetime.now()
+
+         if self.tools:
+             for item in self.tools:
+                 if isinstance(item, ToolSet) or isinstance(item, Tool) or type(item) == Tool:
+                     task_tools.append(item)

          if self.allow_delegation:
              agent_to_delegate = None
363
574
  agent = agent_to_delegate
364
575
  self.delegations += 1
365
576
 
366
- if self.take_tool_res_as_final == True:
367
- output = agent.execute_task(task=self, context=context)
368
- task_output = TaskOutput(task_id=self.id, tool_output=output)
369
577
 
370
- else:
371
- output_raw, output_json_dict, output_pydantic = agent.execute_task(task=self, context=context), None, None
578
+ if self.tool_res_as_final == True:
579
+ tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
580
+ task_output = TaskOutput(task_id=self.id, tool_output=tool_output)
372
581
 
373
- if self.expected_output_json == True:
374
- output_json_dict = self._create_json_output(raw_result=output_raw)
582
+ else:
583
+ raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
584
+ json_dict_output = self._create_json_output(raw=raw_output)
585
+ if "outcome" in json_dict_output:
586
+ json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))
375
587
 
376
- if self.expected_output_pydantic == True:
377
- output_pydantic = self._create_pydantic_output(output_json_dict=output_json_dict)
588
+ pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output) if self.pydantic_custom_output else None
378
589
 
379
590
  task_output = TaskOutput(
380
591
  task_id=self.id,
381
- raw=output_raw,
382
- pydantic=output_pydantic,
383
- json_dict=output_json_dict
592
+ raw=raw_output,
593
+ pydantic=pydantic_output,
594
+ json_dict=json_dict_output
384
595
  )
385
596
 
386
597
  self.output = task_output
387
598
  self.processed_by_agents.add(agent.role)
388
599
 
389
- # self._set_end_execution_time(start_time)
390
-
391
600
  if self.callback:
392
601
  self.callback({ **self.callback_kwargs, **self.output.__dict__ })
393
602
 
394
- # if self._execution_span:
395
- # # self._telemetry.task_ended(self._execution_span, self, agent.team)
396
- # self._execution_span = None
397
-
398
603
  # if self.output_file:
399
604
  # content = (
400
605
  # json_output
@@ -402,6 +607,8 @@
          #         else pydantic_output.model_dump_json() if pydantic_output else result
          #     )
          #     self._save_file(content)
+         ended_at = datetime.datetime.now()
+         self.execution_span_in_sec = (ended_at - started_at).total_seconds()

          return task_output

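The timing lines above close the loop opened by `started_at` at the top of `_execute_core`; the same method also lets a tool result short-circuit the JSON/Pydantic conversion. A sketch of that switch (`my_tool` and `some_agent` are hypothetical):

    from versionhq.task.model import Task

    task = Task(
        description="Fetch the page title.",
        tools=[my_tool],          # hypothetical versionhq Tool / ToolSet instance
        tool_res_as_final=True,   # tool output lands directly in TaskOutput.tool_output
    )
    # out = task.execute_sync(agent=some_agent)   # some_agent is hypothetical
    # out.tool_output holds the raw tool result; no JSON/Pydantic parsing is attempted.
    # task.execution_span_in_sec records wall-clock seconds in either branch.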
@@ -414,48 +621,9 @@
          self._task_output_handler.update(task=self, task_index=task_index, was_replayed=was_replayed, inputs=inputs)


-     @property
-     def output_prompt(self) -> str:
-         """
-         Draft prompts on the output format by converting `output_field_list` to dictionary.
-         """
-
-         output_prompt, output_formats_to_follow = "", dict()
-         for item in self.output_field_list:
-             output_formats_to_follow[item.title] = f"<Return your answer in {item.type.__name__}>"
-
-         output_prompt = f"""
- Output only valid JSON conforming to the specified format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or other non-standard JSON syntax.
- Specified format: {output_formats_to_follow}
- """
-         return output_prompt
-
-
-     @property
-     def expected_output_formats(self) -> List[str | TaskOutputFormat]:
-         """
-         Return output formats in list with the ENUM item.
-         `TaskOutputFormat.RAW` is set as default.
-         """
-         outputs = [TaskOutputFormat.RAW.value,]
-         if self.expected_output_json:
-             outputs.append(TaskOutputFormat.JSON.value)
-         if self.expected_output_pydantic:
-             outputs.append(TaskOutputFormat.PYDANTIC.value)
-         return outputs
-
-
      @property
      def key(self) -> str:
-         output_format = (
-             TaskOutputFormat.JSON
-             if self.expected_output_json == True
-             else (
-                 TaskOutputFormat.PYDANTIC
-                 if self.expected_output_pydantic == True
-                 else TaskOutputFormat.RAW
-             )
-         )
+         output_format = TaskOutputFormat.JSON if self.response_fields else TaskOutputFormat.PYDANTIC if self.pydantic_custom_output is not None else TaskOutputFormat.RAW
          source = [self.description, output_format]
          return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

@@ -463,15 +631,13 @@ Specified format: {output_formats_to_follow}
      @property
      def summary(self) -> str:
          return f"""
-                 Task ID: {str(self.id)}
-                 "Description": {self.description}
-                 "Prompt": {self.output_prompt}
-                 "Tools": {", ".join([tool.name for tool in self.tools])}
+         Task ID: {str(self.id)}
+         "Description": {self.description}
+         "Tools": {", ".join([tool.name for tool in self.tools])}
          """



-
  class ConditionalTask(Task):
      """
      A task that can be conditionally executed based on the output of another task.
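Taken together, the task/model.py changes line up as: draft the output prompt (`_draft_output_prompt`), enforce a schema (`_structure_response_format`), then parse (`_create_json_output` / `_create_pydantic_output`). A compact sketch of the resulting TaskOutput fields (the `Outcome` class and agent are hypothetical; `execute_sync` appears in the team planner code below):

    from pydantic import BaseModel
    from versionhq.task.model import Task

    class Outcome(BaseModel):  # illustrative custom schema
        answer: str

    task = Task(description="Answer the question.", pydantic_custom_output=Outcome)
    # res = task.execute_sync(agent=some_agent)   # some_agent is hypothetical
    # res.raw       -> the model's JSON string
    # res.json_dict -> dict parsed by _create_json_output()
    # res.pydantic  -> Outcome instance built by _create_pydantic_output()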
versionhq/team/model.py CHANGED
@@ -435,11 +435,8 @@ class Team(BaseModel):
              if not agent.function_calling_llm and self.function_calling_llm:
                  agent.function_calling_llm = self.function_calling_llm

-             # if agent.allow_code_execution:
-             #     agent.tools += agent.get_code_execution_tools()
-
-             if not agent.step_callback and self.step_callback:
-                 agent.step_callback = self.step_callback
+             if self.step_callback:
+                 agent.callbacks.append(self.step_callback)

          if self.process is None:
              self.process = TaskHandlingProcess.sequential
@@ -1,6 +1,6 @@
  import os
  from dotenv import load_dotenv
- from typing import Any, List, Optional
+ from typing import Any, List, Optional, Dict
  from pydantic import BaseModel, Field

  load_dotenv(override=True)
@@ -42,10 +42,9 @@ class TeamPlanner:
              Based on the following task summary, draft a AI agent's role and goal in concise manner.
              Task summary: {unassgined_task.summary}
              """,
-             expected_output_json=True,
-             output_field_list=[
-                 ResponseField(title="goal", type=str, required=True),
-                 ResponseField(title="role", type=str, required=True),
+             response_fields=[
+                 ResponseField(title="goal", data_type=str, required=True),
+                 ResponseField(title="role", data_type=str, required=True),
              ],
          )
          res = task.execute_sync(agent=agent_creator)
@@ -67,7 +66,7 @@
          """

          from versionhq.agent.model import Agent
-         from versionhq.task.model import Task, ResponseField
+         from versionhq.task.model import Task

          team_planner = Agent(
              role="team planner",
@@ -76,18 +75,18 @@
          )

          task_summary_list = [task.summary for task in self.tasks]
+
+         class TeamPlanIdea(BaseModel):
+             plan: str | Dict[str, Any] = Field(default=None, description="a descriptive plan to be executed by the team")
+
+
          task = Task(
              description=f"""
              Based on the following task summaries, create the most descriptive plan that the team can execute most efficiently. Take all the task summaries - task's description and tools available - into consideration. Your answer only contains a dictionary.

              Task summaries: {" ".join(task_summary_list)}
-             """,
-             expected_output_json=False,
-             expected_output_pydantic=True,
-             output_field_list=[
-                 ResponseField(title="task", type=str, required=True)
-                 for task in self.tasks
-             ],
+             """,
+             pydantic_custom_output=TeamPlanIdea
          )
          output = task.execute_sync(agent=team_planner, context=context, tools=tools)
          return output
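The two TeamPlanner call sites above illustrate the migration pattern for downstream code: the `expected_output_json` / `expected_output_pydantic` flags and `output_field_list` are gone, and `ResponseField.type` is renamed `data_type`. Condensed before/after:

    from versionhq.task.model import Task, ResponseField

    # 1.1.9.14
    # task = Task(
    #     description="Draft an AI agent's role and goal.",
    #     expected_output_json=True,
    #     output_field_list=[ResponseField(title="goal", type=str, required=True)],
    # )

    # 1.1.10.2
    task = Task(
        description="Draft an AI agent's role and goal.",
        response_fields=[ResponseField(title="goal", data_type=str, required=True)],
    )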