versionhq 1.1.9.14__py3-none-any.whl → 1.1.10.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
versionhq/task/model.py CHANGED
@@ -1,12 +1,13 @@
 import json
 import threading
+import datetime
 import uuid
 from concurrent.futures import Future
 from hashlib import md5
-from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type
+from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type, TypeVar
 from typing_extensions import Annotated, Self

-from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf
+from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, field_validator
 from pydantic_core import PydanticCustomError

 from versionhq._utils.process_config import process_config
@@ -18,22 +19,106 @@ from versionhq._utils.logger import Logger

 class ResponseField(BaseModel):
     """
-    Field class to use in the response schema for the JSON response.
+    A class to store the response format and schema that will cascade to the LLM.
+    The `config` field can store additional params:
+    https://community.openai.com/t/official-documentation-for-supported-schemas-for-response-format-parameter-in-calls-to-client-beta-chats-completions-parse/932422/3
     """

-    title: str = Field(default=None)
-    type: Type = Field(default=str)
+    title: str = Field(default=None, description="title of the field")
+    data_type: Type = Field(default=None)
+    items: Optional[Type] = Field(default=None, description="store data type of the array items")
+    properties: Optional[List[BaseModel]] = Field(default=None, description="store dict items in ResponseField format")
     required: bool = Field(default=True)
+    nullable: bool = Field(default=False)
+    config: Optional[Dict[str, Any]] = Field(default=None, description="additional rules")


-    def _annotate(self, value: Any) -> Annotated:
+    @model_validator(mode="after")
+    def validate_instance(self) -> Self:
         """
-        Address `create_model`
+        Validate the model instance based on the given `data_type`. (An array must have `items`, dict must have properties.)
         """
-        return Annotated[self.type, value] if isinstance(value, self.type) else Annotated[str, str(value)]
+
+        if self.data_type is list and self.items is None:
+            self.items = str
+
+        if self.data_type is dict or (self.data_type is list and self.items is dict):
+            if self.properties is None:
+                raise PydanticCustomError("missing_properties", "The dict type has to set the properties.", {})
+
+            else:
+                for item in self.properties:
+                    if not isinstance(item, ResponseField):
+                        raise PydanticCustomError("invalid_properties", "Properties field must input in ResponseField format.", {})
+
+        return self
+
+
+    def _format_props(self) -> Dict[str, Any]:
+        """
+        Structure valid properties. We accept 2 nested objects.
+        """
+        from versionhq.llm.llm_vars import SchemaType
+
+        schema_type = SchemaType(type=self.data_type).convert()
+        props: Dict[str, Any] = {}
+
+        if self.data_type is list and self.items is not dict:
+            props = {
+                "type": schema_type,
+                "items": { "type": SchemaType(type=self.items).convert() },
+            }
+
+        elif self.data_type is list and self.items is dict:
+            nested_p, nested_r = dict(), list()
+
+            if self.properties:
+                for item in self.properties:
+                    nested_p.update(**item._format_props())
+
+                    if item.required:
+                        nested_r.append(item.title)
+
+            props = {
+                "type": schema_type,
+                "items": {
+                    "type": SchemaType(type=self.items).convert(),
+                    "properties": nested_p,
+                    "required": nested_r,
+                    "additionalProperties": False
+                }
+            }
+
+        elif self.data_type is dict:
+            p, r = dict(), list()
+
+            if self.properties:
+                for item in self.properties:
+                    p.update(**item._format_props())
+
+                    # if item.required:
+                    r.append(item.title)
+
+            props = {
+                "type": schema_type,
+                "properties": p,
+                "required": r,
+                "additionalProperties": False
+            }
+
+        else:
+            props = {
+                "type": schema_type,
+                "nullable": self.nullable,
+            }
+
+        return { self.title: { **props, **self.config }} if self.config else { self.title: props }


     def _convert(self, value: Any) -> Any:
+        """
+        Convert the given value to the ideal data type.
+        """
         try:
             if self.type is Any:
                 pass
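
Note: the reworked ResponseField now validates nested structures and emits its own JSON-schema fragments. A minimal sketch of how the new fields compose, based only on the hunk above (the exact type strings come from SchemaType in versionhq/llm/llm_vars.py, which this diff does not show; the field names and values are illustrative):

    from versionhq.task.model import ResponseField

    # A flat string field and a list-of-dict field with one level of nesting.
    name = ResponseField(title="name", data_type=str)
    tags = ResponseField(
        title="tags",
        data_type=list,
        items=dict,
        properties=[
            ResponseField(title="label", data_type=str),
            ResponseField(title="score", data_type=float, nullable=True),
        ],
    )

    # validate_instance() raises PydanticCustomError when a dict-typed field
    # lacks `properties`; _format_props() returns a fragment keyed by title, e.g.
    # { "tags": { "type": ..., "items": { "type": ..., "properties": {...},
    #   "required": ["label", "score"], "additionalProperties": False } } }
    print(tags._format_props())
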
@@ -42,7 +127,9 @@ class ResponseField(BaseModel):
             elif self.type is float:
                 return float(value)
             elif self.type is list or self.type is dict:
-                return json.loads(value)
+                return json.loads(eval(str(value)))
+            elif self.type is str:
+                return str(value)
             else:
                 return value
         except:
@@ -50,6 +137,9 @@ class ResponseField(BaseModel):


     def create_pydantic_model(self, result: Dict, base_model: InstanceOf[BaseModel] | Any) -> Any:
+        """
+        Create a Pydantic model from the given result
+        """
         for k, v in result.items():
             if k is not self.title:
                 pass
@@ -61,17 +151,23 @@ class ResponseField(BaseModel):
         return base_model


+    def _annotate(self, value: Any) -> Annotated:
+        """
+        Address Pydantic's `create_model`
+        """
+        return Annotated[self.type, value] if isinstance(value, self.type) else Annotated[str, str(value)]
+
+

 class TaskOutput(BaseModel):
     """
-    Store the final output of the task in TaskOutput class.
-    Depending on the task output format, use `raw`, `pydantic`, `json_dict` accordingly.
+    A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
     """

     task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
     raw: str = Field(default="", description="Raw output of the task")
     json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
-    pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
+    pydantic: Optional[Any] = Field(default=None)
     tool_output: Optional[Any] = Field(default=None, description="store tool result when the task takes tool output as its final output")

     def __str__(self) -> str:
@@ -108,14 +204,15 @@ class TaskOutput(BaseModel):

 class Task(BaseModel):
     """
-    Task to be executed by the agent or the team.
-    Each task must have a description and at least one expected output format either Pydantic, Raw, or JSON, with necessary fields in ResponseField.
-    Then output will be stored in TaskOutput class.
+    Task to be executed by agents or teams.
+    Each task must have a description.
+    Default response is JSON string that strictly follows `response_fields` - and will be stored in TaskOuput.raw / json_dict.
+    When `pydantic_output` is provided, we prioritize them and store raw (json string), json_dict, pydantic in the TaskOutput class.
     """

     __hash__ = object.__hash__
+    _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
     _original_description: str = PrivateAttr(default=None)
-    _logger: Logger = PrivateAttr()
     _task_output_handler = TaskOutputStorageHandler()
     config: Optional[Dict[str, Any]] = Field(default=None, description="values to set on Task class")

@@ -124,11 +221,13 @@ class Task(BaseModel):
     description: str = Field(description="Description of the actual task")

     # output
-    expected_output_json: bool = Field(default=True)
-    expected_output_pydantic: bool = Field(default=False)
-    output_field_list: List[ResponseField] = Field(
+    pydantic_custom_output: Optional[Any] = Field(
+        default=None,
+        description="store a custom Pydantic class that will be passed to the model as a response format."
+    )
+    response_fields: List[ResponseField] = Field(
         default_factory=list,
-        description="provide output key and data type. this will be cascaded to the agent via task.prompt()"
+        description="store the list of ResponseFields to create the response format"
     )
     output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")

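
Note: under the renamed fields, a task declares its output contract in one of two ways. A sketch of both, assuming the public constructor mirrors these declarations (the description strings and field names are illustrative, not from this diff):

    from pydantic import BaseModel
    from versionhq.task.model import Task, ResponseField

    # Option 1: response_fields drive a JSON-schema response format.
    task = Task(
        description="Summarize the customer interview.",
        response_fields=[
            ResponseField(title="summary", data_type=str),
            ResponseField(title="sentiment_score", data_type=float),
        ],
    )

    # Option 2: a custom Pydantic class is passed through as the response format.
    class Summary(BaseModel):
        summary: str
        sentiment_score: float

    task = Task(description="Summarize the customer interview.", pydantic_custom_output=Summary)
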
@@ -139,7 +238,7 @@ class Task(BaseModel):
     # tool usage
     tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
     can_use_agent_tools: bool = Field(default=False, description="whether the agent can use their own tools when executing the task")
-    take_tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")
+    tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")

     # execution rules
     allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")
@@ -151,11 +250,12 @@ class Task(BaseModel):
     processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
     tools_errors: int = 0
     delegations: int = 0
+    execution_span_in_sec: int = 0


     @model_validator(mode="before")
     @classmethod
-    def process_model_config(cls, values: Dict[str, Any]) -> None:
+    def process_config(cls, values: Dict[str, Any]) -> None:
         return process_config(values_to_update=values, model_class=cls)


@@ -175,16 +275,16 @@ class Task(BaseModel):
         return self


-    @model_validator(mode="after")
-    def set_attributes_based_on_config(self) -> Self:
-        """
-        Set attributes based on the agent configuration.
-        """
+    # @model_validator(mode="after")
+    # def set_attributes_based_on_config(self) -> Self:
+    #     """
+    #     Set attributes based on the task configuration.
+    #     """

-        if self.config:
-            for key, value in self.config.items():
-                setattr(self, key, value)
-        return self
+    #     if self.config:
+    #         for key, value in self.config.items():
+    #             setattr(self, key, value)
+    #     return self


     @model_validator(mode="after")
@@ -211,85 +311,165 @@ class Task(BaseModel):
         return self


-    def prompt(self, customer: str = None, product_overview: str = None) -> str:
+    def _draft_output_prompt(self, model_provider: str) -> str:
+        """
+        Draft prompts on the output format by converting `
+        """
+
+        output_prompt = ""
+
+        if self.pydantic_custom_output:
+            output_prompt = f"""
+            Your response MUST STRICTLY follow the given repsonse format:
+            JSON schema: {str(self.pydantic_custom_output)}
+            """
+
+        elif self.response_fields:
+            output_prompt, output_formats_to_follow = "", dict()
+            response_format = str(self._structure_response_format(model_provider=model_provider))
+            for item in self.response_fields:
+                if item:
+                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+
+            output_prompt = f"""
+            Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+            Response format: {response_format}
+            Ref. Output image: {output_formats_to_follow}
+            """
+
+        else:
+            output_prompt = "Return your response as a valid JSON string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
+
+        return output_prompt
+
+
+    def prompt(self, model_provider: str = None, customer: str = None, product_overview: str = None) -> str:
         """
         Format the task prompt and cascade it to the agent.
         When the task has context, add context prompting of all the tasks in the context.
         When we have cusotmer/product info, add them to the prompt.
         """

-        task_slices = [self.description, f"{self.output_prompt}", f"Take the following context into consideration: "]
+        output_prompt = self._draft_output_prompt(model_provider=model_provider)
+        task_slices = [self.description, output_prompt,]

         if self.context:
             context_outputs = "\n".join([task.output.context_prompting() if hasattr(task, "output") else "" for task in self.context])
-            task_slices.insert(len(task_slices), context_outputs)
+            task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_outputs}")
+
+        if self.prompt_context:
+            task_slices.insert(len(task_slices), f"Consider the following context when responding: {self.prompt_context}")

         if customer:
-            task_slices.insert(len(task_slices), f"Customer overview: {customer}")
+            task_slices.insert(len(task_slices), f"Customer to address: {customer}")

         if product_overview:
-            task_slices.insert(len(task_slices), f"Product overview: {product_overview}")
-
-        if self.prompt_context:
-            task_slices.insert(len(task_slices), self.prompt_context)
+            task_slices.insert(len(task_slices), f"Product to promote: {product_overview}")

         return "\n".join(task_slices)


-    def _create_json_output(self, raw_result: str) -> Dict[str, Any]:
+    def _get_output_format(self) -> TaskOutputFormat:
+        if self.output_json == True:
+            return TaskOutputFormat.JSON
+        if self.output_pydantic == True:
+            return TaskOutputFormat.PYDANTIC
+        return TaskOutputFormat.RAW
+
+
+    def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
+        """
+        Structure a response format either from`response_fields` or `pydantic_custom_output`.
+        1 nested item is accepted.
+        """
+
+        from versionhq.task.structured_response import StructuredOutput
+
+        response_format: Dict[str, Any] = None
+
+        if self.response_fields:
+            properties, required_fields = {}, []
+            for i, item in enumerate(self.response_fields):
+                if item:
+                    if item.data_type is dict:
+                        properties.update(item._format_props())
+                    else:
+                        properties.update(item._format_props())
+
+                    required_fields.append(item.title)
+
+            response_schema = {
+                "type": "object",
+                "properties": properties,
+                "required": required_fields,
+                "additionalProperties": False,
+            }
+
+            response_format = {
+                "type": "json_schema",
+                "json_schema": { "name": "outcome", "schema": response_schema }
+            }
+
+
+        elif self.pydantic_custom_output:
+            response_format = StructuredOutput(response_format=self.pydantic_custom_output)._format()
+
+        return response_format
+
+
+    def _create_json_output(self, raw: str) -> Dict[str, Any]:
         """
-        Create json (dict) output from the raw result.
+        Create json (dict) output from the raw output and `response_fields` information.
         """
-        import ast

-        output_json_dict: Dict[str, Any] = dict()
+        if raw is None or raw == "":
+            self._logger.log(level="error", message="The model returned an empty response. Returning an empty dict.", color="yellow")
+            output = { "output": "n.a." }
+            return output

         try:
-            raw_result = raw_result.replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]').replace("{\n'", '{"').replace("{\'", '{"')
-            r = json.dumps(eval(str(raw_result)))
-            output_json_dict = json.loads(r)
+            r = str(raw).replace("true", "True").replace("false", "False")
+            j = json.dumps(eval(r))
+            output = json.loads(j)
+            if isinstance(output, dict):
+                return output

-            if isinstance(output_json_dict, str):
-                output_json_dict = ast.literal_eval(r)
+            else:
+                r = str(raw).replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]').replace("{\n'", '{"').replace("{\'", '{"').replace("true", "True").replace("false", "False")
+                j = json.dumps(eval(r))
+                output = json.loads(j)
+
+                if isinstance(output, dict):
+                    return output
+
+                else:
+                    import ast
+                    output = ast.literal_eval(r)
+                    return output if isinstance(output, dict) else { "output": str(r) }

         except:
-            output_json_dict = { "output": raw_result }
+            output = { "output": str(raw) }
+            return output

-        return output_json_dict


-    def _create_pydantic_output(self, output_json_dict: Dict[str, Any], raw_result: Any = None) -> Optional[Any]:
+    def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
         """
-        Create pydantic output from the `raw` result.
+        Create pydantic output from raw or json_dict output.
         """

-        output_pydantic = None
-        if isinstance(raw_result, BaseModel):
-            output_pydantic = raw_result
+        output_pydantic = self.pydantic_custom_output

-        elif hasattr(output_json_dict, "output"):
-            output_pydantic = create_model("PydanticTaskOutput", output=output_json_dict["output"], __base__=BaseModel)
-
-        else:
-            output_pydantic = create_model("PydanticTaskOutput", __base__=BaseModel)
-            try:
-                for item in self.output_field_list:
-                    value = output_json_dict[item.title] if hasattr(output_json_dict, item.title) else None
-                    if value and type(value) is not item.type:
-                        value = item._convert(value)
-                    setattr(output_pydantic, item.title, value)
-            except:
-                setattr(output_pydantic, "output", output_json_dict)
+        try:
+            json_dict = json_dict if json_dict else self._create_json_output(raw=raw)

-        return output_pydantic
+            for k, v in json_dict.items():
+                setattr(output_pydantic, k, v)

+        except:
+            pass

-    def _get_output_format(self) -> TaskOutputFormat:
-        if self.output_json == True:
-            return TaskOutputFormat.JSON
-        if self.output_pydantic == True:
-            return TaskOutputFormat.PYDANTIC
-        return TaskOutputFormat.RAW
+        return output_pydantic


     def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
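
Note: for the response_fields branch, _structure_response_format() wraps the assembled schema in an OpenAI-style json_schema envelope named "outcome", which is why _execute_core() further down re-parses json_dict_output["outcome"] when that key appears. A sketch of the returned dict for a single string field (the "type" values are whatever SchemaType emits; plain JSON-schema strings are assumed here for illustration):

    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": "outcome",
            "schema": {
                "type": "object",
                "properties": { "summary": { "type": "string", "nullable": False } },
                "required": ["summary"],
                "additionalProperties": False,
            },
        },
    }
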
@@ -336,15 +516,23 @@
 
     def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
         """
-        Run the core execution logic of the task.
-        To speed up the process, when the format is not expected to return, we will skip the conversion process.
-        When the task is allowed to delegate to another agent, we will select a responsible one in order of manager > peer_agent > anoymous agent.
+        Execute the given task with the given agent.
+        Handle 1. agent delegation, 2. tools, 3. context to consider, and 4. callbacks
         """
+
         from versionhq.agent.model import Agent
         from versionhq.team.model import Team

         self.prompt_context = context
         task_output: InstanceOf[TaskOutput] = None
+        tool_output: str | list = None
+        task_tools: List[InstanceOf[Tool] | InstanceOf[ToolSet] | Type[Tool]] = []
+        started_at = datetime.datetime.now()
+
+        if self.tools:
+            for item in self.tools:
+                if isinstance(item, ToolSet) or isinstance(item, Tool) or type(item) == Tool:
+                    task_tools.append(item)

         if self.allow_delegation:
             agent_to_delegate = None
@@ -363,39 +551,33 @@
                 agent = agent_to_delegate
                 self.delegations += 1

-        if self.take_tool_res_as_final == True:
-            output = agent.execute_task(task=self, context=context)
-            task_output = TaskOutput(task_id=self.id, tool_output=output)

-        else:
-            output_raw, output_json_dict, output_pydantic = agent.execute_task(task=self, context=context), None, None
+        if self.tool_res_as_final == True:
+            tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+            task_output = TaskOutput(task_id=self.id, tool_output=tool_output)

-            if self.expected_output_json == True:
-                output_json_dict = self._create_json_output(raw_result=output_raw)
+        else:
+            raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+            json_dict_output = self._create_json_output(raw=raw_output)
+            if "outcome" in json_dict_output:
+                json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))

-            if self.expected_output_pydantic == True:
-                output_pydantic = self._create_pydantic_output(output_json_dict=output_json_dict)
+            pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output) if self.pydantic_custom_output else None

             task_output = TaskOutput(
                 task_id=self.id,
-                raw=output_raw,
-                pydantic=output_pydantic,
-                json_dict=output_json_dict
+                raw=raw_output,
+                pydantic=pydantic_output,
+                json_dict=json_dict_output
             )

         self.output = task_output
         self.processed_by_agents.add(agent.role)

-        # self._set_end_execution_time(start_time)
-
         if self.callback:
             self.callback({ **self.callback_kwargs, **self.output.__dict__ })

-        # if self._execution_span:
-        #     # self._telemetry.task_ended(self._execution_span, self, agent.team)
-        #     self._execution_span = None
-
-        # if self.output_file:
+        # if self.output_file: ## disabled for now
         #     content = (
         #         json_output
         #         if json_output
@@ -403,6 +585,9 @@
         #     )
         #     self._save_file(content)

+        ended_at = datetime.datetime.now()
+        self.execution_span_in_sec = (ended_at - started_at).total_seconds()
+
         return task_output


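
Note: end to end, _execute_core() now collects task_tools, branches on tool_res_as_final, and records the wall-clock span around execution. A usage sketch; the Agent constructor arguments are assumptions for illustration, since this diff does not show the Agent API:

    from versionhq.agent.model import Agent
    from versionhq.task.model import Task, ResponseField

    agent = Agent(role="Classifier", goal="Classify the given text.")  # arguments assumed

    task = Task(
        description="Classify the given text.",
        response_fields=[ResponseField(title="label", data_type=str)],
    )

    output = task._execute_core(agent=agent, context=None)
    print(output.raw)                  # raw JSON string from the model
    print(output.json_dict)            # parsed via _create_json_output()
    print(task.execution_span_in_sec)  # (ended_at - started_at).total_seconds()
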
@@ -414,48 +599,9 @@
         self._task_output_handler.update(task=self, task_index=task_index, was_replayed=was_replayed, inputs=inputs)


-    @property
-    def output_prompt(self) -> str:
-        """
-        Draft prompts on the output format by converting `output_field_list` to dictionary.
-        """
-
-        output_prompt, output_formats_to_follow = "", dict()
-        for item in self.output_field_list:
-            output_formats_to_follow[item.title] = f"<Return your answer in {item.type.__name__}>"
-
-        output_prompt = f"""
-Output only valid JSON conforming to the specified format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or other non-standard JSON syntax.
-Specified format: {output_formats_to_follow}
-"""
-        return output_prompt
-
-
-    @property
-    def expected_output_formats(self) -> List[str | TaskOutputFormat]:
-        """
-        Return output formats in list with the ENUM item.
-        `TaskOutputFormat.RAW` is set as default.
-        """
-        outputs = [TaskOutputFormat.RAW.value,]
-        if self.expected_output_json:
-            outputs.append(TaskOutputFormat.JSON.value)
-        if self.expected_output_pydantic:
-            outputs.append(TaskOutputFormat.PYDANTIC.value)
-        return outputs
-
-
     @property
     def key(self) -> str:
-        output_format = (
-            TaskOutputFormat.JSON
-            if self.expected_output_json == True
-            else (
-                TaskOutputFormat.PYDANTIC
-                if self.expected_output_pydantic == True
-                else TaskOutputFormat.RAW
-            )
-        )
+        output_format = TaskOutputFormat.JSON if self.response_fields else TaskOutputFormat.PYDANTIC if self.pydantic_custom_output is not None else TaskOutputFormat.RAW
         source = [self.description, output_format]
         return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

@@ -463,15 +609,13 @@ Specified format: {output_formats_to_follow}
     @property
     def summary(self) -> str:
         return f"""
-                Task ID: {str(self.id)}
-                "Description": {self.description}
-                "Prompt": {self.output_prompt}
-                "Tools": {", ".join([tool.name for tool in self.tools])}
+        Task ID: {str(self.id)}
+        "Description": {self.description}
+        "Tools": {", ".join([tool.name for tool in self.tools])}
         """



-
 class ConditionalTask(Task):
     """
     A task that can be conditionally executed based on the output of another task.