agno 2.0.0rc1__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. agno/agent/agent.py +101 -140
  2. agno/db/mongo/mongo.py +8 -3
  3. agno/eval/accuracy.py +12 -5
  4. agno/knowledge/chunking/strategy.py +14 -14
  5. agno/knowledge/knowledge.py +156 -120
  6. agno/knowledge/reader/arxiv_reader.py +5 -5
  7. agno/knowledge/reader/csv_reader.py +6 -77
  8. agno/knowledge/reader/docx_reader.py +5 -5
  9. agno/knowledge/reader/firecrawl_reader.py +5 -5
  10. agno/knowledge/reader/json_reader.py +5 -5
  11. agno/knowledge/reader/markdown_reader.py +31 -9
  12. agno/knowledge/reader/pdf_reader.py +10 -123
  13. agno/knowledge/reader/reader_factory.py +65 -72
  14. agno/knowledge/reader/s3_reader.py +44 -114
  15. agno/knowledge/reader/text_reader.py +5 -5
  16. agno/knowledge/reader/url_reader.py +75 -31
  17. agno/knowledge/reader/web_search_reader.py +6 -29
  18. agno/knowledge/reader/website_reader.py +5 -5
  19. agno/knowledge/reader/wikipedia_reader.py +5 -5
  20. agno/knowledge/reader/youtube_reader.py +6 -6
  21. agno/knowledge/reranker/__init__.py +9 -0
  22. agno/knowledge/utils.py +10 -10
  23. agno/media.py +269 -268
  24. agno/models/aws/bedrock.py +3 -7
  25. agno/models/base.py +50 -54
  26. agno/models/google/gemini.py +11 -10
  27. agno/models/message.py +4 -4
  28. agno/models/ollama/chat.py +1 -1
  29. agno/models/openai/chat.py +33 -14
  30. agno/models/response.py +5 -5
  31. agno/os/app.py +40 -29
  32. agno/os/mcp.py +39 -59
  33. agno/os/router.py +547 -16
  34. agno/os/routers/evals/evals.py +197 -12
  35. agno/os/routers/knowledge/knowledge.py +428 -14
  36. agno/os/routers/memory/memory.py +250 -28
  37. agno/os/routers/metrics/metrics.py +125 -7
  38. agno/os/routers/session/session.py +393 -25
  39. agno/os/schema.py +55 -2
  40. agno/run/agent.py +37 -28
  41. agno/run/base.py +9 -19
  42. agno/run/team.py +110 -19
  43. agno/run/workflow.py +41 -28
  44. agno/team/team.py +808 -1080
  45. agno/tools/brightdata.py +3 -3
  46. agno/tools/cartesia.py +3 -5
  47. agno/tools/dalle.py +7 -4
  48. agno/tools/desi_vocal.py +2 -2
  49. agno/tools/e2b.py +6 -6
  50. agno/tools/eleven_labs.py +3 -3
  51. agno/tools/fal.py +4 -4
  52. agno/tools/function.py +7 -7
  53. agno/tools/giphy.py +2 -2
  54. agno/tools/lumalab.py +3 -3
  55. agno/tools/mcp.py +1 -2
  56. agno/tools/models/azure_openai.py +2 -2
  57. agno/tools/models/gemini.py +3 -3
  58. agno/tools/models/groq.py +3 -5
  59. agno/tools/models/nebius.py +2 -2
  60. agno/tools/models_labs.py +5 -5
  61. agno/tools/openai.py +4 -9
  62. agno/tools/opencv.py +3 -3
  63. agno/tools/replicate.py +7 -7
  64. agno/utils/events.py +5 -5
  65. agno/utils/gemini.py +1 -1
  66. agno/utils/log.py +52 -2
  67. agno/utils/mcp.py +57 -5
  68. agno/utils/models/aws_claude.py +1 -1
  69. agno/utils/models/claude.py +0 -8
  70. agno/utils/models/cohere.py +1 -1
  71. agno/utils/models/watsonx.py +1 -1
  72. agno/utils/openai.py +1 -1
  73. agno/utils/print_response/team.py +177 -73
  74. agno/utils/streamlit.py +27 -0
  75. agno/vectordb/lancedb/lance_db.py +82 -25
  76. agno/workflow/step.py +7 -7
  77. agno/workflow/types.py +13 -13
  78. agno/workflow/workflow.py +37 -28
  79. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/METADATA +140 -1
  80. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/RECORD +83 -84
  81. agno-2.0.1.dist-info/licenses/LICENSE +201 -0
  82. agno/knowledge/reader/gcs_reader.py +0 -67
  83. agno-2.0.0rc1.dist-info/licenses/LICENSE +0 -375
  84. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/WHEEL +0 -0
  85. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/top_level.txt +0 -0
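
The change that recurs across most of these files is the consolidation of the agno.media artifact classes: ImageArtifact, VideoArtifact, AudioArtifact, and AudioResponse are replaced by the unified Image, Video, and Audio types (AudioResponse collapses into Audio). A minimal sketch of calling code after the rename; only the class names and the RunOutput field types are confirmed by the diffs below, while the Agent setup and the Image constructor arguments are illustrative assumptions:

    # Hypothetical usage after the 2.0.1 rename (model configuration omitted).
    from agno.agent import Agent
    from agno.media import Image  # was: from agno.media import ImageArtifact

    agent = Agent()
    output = agent.run("Describe this image", images=[Image(url="https://example.com/cat.png")])

    # RunOutput media fields keep their names; only the types changed:
    # images: Optional[List[Image]], response_audio: Optional[Audio]
    for img in output.images or []:
        print(img.to_dict())
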
agno/run/agent.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional, Sequence, Union
 
 from pydantic import BaseModel
 
-from agno.media import AudioArtifact, AudioResponse, File, ImageArtifact, VideoArtifact
+from agno.media import Audio, File, Image, Video
 from agno.models.message import Citations, Message
 from agno.models.metrics import Metrics
 from agno.models.response import ToolExecution
@@ -43,6 +43,8 @@ class RunEvent(str, Enum):
     output_model_response_started = "OutputModelResponseStarted"
     output_model_response_completed = "OutputModelResponseCompleted"
 
+    custom_event = "CustomEvent"
+
 
 @dataclass
 class BaseAgentRunEvent(BaseRunOutputEvent):
@@ -95,8 +97,8 @@ class RunContentEvent(BaseAgentRunEvent):
     content_type: str = "str"
     reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
-    response_audio: Optional[AudioResponse] = None  # Model audio response
-    image: Optional[ImageArtifact] = None  # Image attached to the response
+    response_audio: Optional[Audio] = None  # Model audio response
+    image: Optional[Image] = None  # Image attached to the response
     references: Optional[List[MessageReferences]] = None
     additional_input: Optional[List[Message]] = None
     reasoning_steps: Optional[List[ReasoningStep]] = None
@@ -117,10 +119,10 @@ class RunCompletedEvent(BaseAgentRunEvent):
     content_type: str = "str"
     reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
-    images: Optional[List[ImageArtifact]] = None  # Images attached to the response
-    videos: Optional[List[VideoArtifact]] = None  # Videos attached to the response
-    audio: Optional[List[AudioArtifact]] = None  # Audio attached to the response
-    response_audio: Optional[AudioResponse] = None  # Model audio response
+    images: Optional[List[Image]] = None  # Images attached to the response
+    videos: Optional[List[Video]] = None  # Videos attached to the response
+    audio: Optional[List[Audio]] = None  # Audio attached to the response
+    response_audio: Optional[Audio] = None  # Model audio response
     references: Optional[List[MessageReferences]] = None
     additional_input: Optional[List[Message]] = None
     reasoning_steps: Optional[List[ReasoningStep]] = None
@@ -201,9 +203,9 @@ class ToolCallCompletedEvent(BaseAgentRunEvent):
     event: str = RunEvent.tool_call_completed.value
     tool: Optional[ToolExecution] = None
     content: Optional[Any] = None
-    images: Optional[List[ImageArtifact]] = None  # Images produced by the tool call
-    videos: Optional[List[VideoArtifact]] = None  # Videos produced by the tool call
-    audio: Optional[List[AudioArtifact]] = None  # Audio produced by the tool call
+    images: Optional[List[Image]] = None  # Images produced by the tool call
+    videos: Optional[List[Video]] = None  # Videos produced by the tool call
+    audio: Optional[List[Audio]] = None  # Audio produced by the tool call
 
 
 @dataclass
@@ -226,6 +228,11 @@ class OutputModelResponseCompletedEvent(BaseAgentRunEvent):
     event: str = RunEvent.output_model_response_completed.value
 
 
+@dataclass
+class CustomEvent(BaseAgentRunEvent):
+    event: str = RunEvent.custom_event.value
+
+
 RunOutputEvent = Union[
     RunStartedEvent,
     RunContentEvent,
@@ -246,6 +253,7 @@ RunOutputEvent = Union[
     ParserModelResponseCompletedEvent,
     OutputModelResponseStartedEvent,
     OutputModelResponseCompletedEvent,
+    CustomEvent,
 ]
 
 
@@ -270,6 +278,7 @@ RUN_EVENT_TYPE_REGISTRY = {
     RunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
     RunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
     RunEvent.output_model_response_completed.value: OutputModelResponseCompletedEvent,
+    RunEvent.custom_event.value: CustomEvent,
 }
 
 
@@ -297,9 +306,9 @@ class RunInput:
     """
 
     input_content: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None
-    images: Optional[Sequence[ImageArtifact]] = None
-    videos: Optional[Sequence[VideoArtifact]] = None
-    audios: Optional[Sequence[AudioArtifact]] = None
+    images: Optional[Sequence[Image]] = None
+    videos: Optional[Sequence[Video]] = None
+    audios: Optional[Sequence[Audio]] = None
     files: Optional[Sequence[File]] = None
 
     def to_dict(self) -> Dict[str, Any]:
@@ -336,15 +345,15 @@ class RunInput:
         """Create RunInput from dictionary"""
         images = None
         if data.get("images"):
-            images = [ImageArtifact.model_validate(img_data) for img_data in data["images"]]
+            images = [Image.model_validate(img_data) for img_data in data["images"]]
 
         videos = None
         if data.get("videos"):
-            videos = [VideoArtifact.model_validate(vid_data) for vid_data in data["videos"]]
+            videos = [Video.model_validate(vid_data) for vid_data in data["videos"]]
 
         audios = None
         if data.get("audios"):
-            audios = [AudioArtifact.model_validate(aud_data) for aud_data in data["audios"]]
+            audios = [Audio.model_validate(aud_data) for aud_data in data["audios"]]
 
         files = None
         if data.get("files"):
@@ -380,10 +389,10 @@ class RunOutput:
 
     tools: Optional[List[ToolExecution]] = None
 
-    images: Optional[List[ImageArtifact]] = None  # Images attached to the response
-    videos: Optional[List[VideoArtifact]] = None  # Videos attached to the response
-    audio: Optional[List[AudioArtifact]] = None  # Audio attached to the response
-    response_audio: Optional[AudioResponse] = None  # Model audio response
+    images: Optional[List[Image]] = None  # Images attached to the response
+    videos: Optional[List[Video]] = None  # Videos attached to the response
+    audio: Optional[List[Audio]] = None  # Audio attached to the response
+    response_audio: Optional[Audio] = None  # Model audio response
 
     # Input media and messages from user
     input: Optional[RunInput] = None
@@ -478,7 +487,7 @@ class RunOutput:
         if self.images is not None:
             _dict["images"] = []
             for img in self.images:
-                if isinstance(img, ImageArtifact):
+                if isinstance(img, Image):
                     _dict["images"].append(img.to_dict())
                 else:
                     _dict["images"].append(img)
@@ -486,7 +495,7 @@ class RunOutput:
         if self.videos is not None:
             _dict["videos"] = []
             for vid in self.videos:
-                if isinstance(vid, VideoArtifact):
+                if isinstance(vid, Video):
                     _dict["videos"].append(vid.to_dict())
                 else:
                     _dict["videos"].append(vid)
@@ -494,13 +503,13 @@ class RunOutput:
         if self.audio is not None:
             _dict["audio"] = []
             for aud in self.audio:
-                if isinstance(aud, AudioArtifact):
+                if isinstance(aud, Audio):
                     _dict["audio"].append(aud.to_dict())
                 else:
                     _dict["audio"].append(aud)
 
         if self.response_audio is not None:
-            if isinstance(self.response_audio, AudioResponse):
+            if isinstance(self.response_audio, Audio):
                 _dict["response_audio"] = self.response_audio.to_dict()
             else:
                 _dict["response_audio"] = self.response_audio
@@ -556,16 +565,16 @@ class RunOutput:
         tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
 
         images = data.pop("images", [])
-        images = [ImageArtifact.model_validate(image) for image in images] if images else None
+        images = [Image.model_validate(image) for image in images] if images else None
 
         videos = data.pop("videos", [])
-        videos = [VideoArtifact.model_validate(video) for video in videos] if videos else None
+        videos = [Video.model_validate(video) for video in videos] if videos else None
 
         audio = data.pop("audio", [])
-        audio = [AudioArtifact.model_validate(audio) for audio in audio] if audio else None
+        audio = [Audio.model_validate(audio) for audio in audio] if audio else None
 
         response_audio = data.pop("response_audio", None)
-        response_audio = AudioResponse.model_validate(response_audio) if response_audio else None
+        response_audio = Audio.model_validate(response_audio) if response_audio else None
 
         input_data = data.pop("input", None)
         input_obj = None
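
agno/run/agent.py also introduces a CustomEvent dataclass alongside the new RunEvent.custom_event member, wired into the RunOutputEvent union and RUN_EVENT_TYPE_REGISTRY. A small sketch of that plumbing, assuming only the names visible in the diff and that the inherited BaseAgentRunEvent fields all carry defaults:

    from agno.run.agent import RUN_EVENT_TYPE_REGISTRY, CustomEvent, RunEvent

    evt = CustomEvent()
    print(evt.event)  # "CustomEvent"

    # Deserialization dispatches on the event string through the registry:
    event_class = RUN_EVENT_TYPE_REGISTRY[RunEvent.custom_event.value]
    assert event_class is CustomEvent
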
agno/run/base.py CHANGED
@@ -4,7 +4,7 @@ from typing import Any, Dict
 
 from pydantic import BaseModel
 
-from agno.media import AudioArtifact, AudioResponse, ImageArtifact, VideoArtifact
+from agno.media import Audio, Image, Video
 from agno.models.message import Citations, Message, MessageReferences
 from agno.models.metrics import Metrics
 from agno.models.response import ToolExecution
@@ -60,21 +60,15 @@ class BaseRunOutputEvent:
         if hasattr(self, "images") and self.images is not None:
             _dict["images"] = []
             for img in self.images:
-                if isinstance(img, ImageArtifact):
+                if isinstance(img, Image):
                     _dict["images"].append(img.to_dict())
                 else:
                     _dict["images"].append(img)
 
-        if hasattr(self, "image") and self.image is not None:
-            if isinstance(self.image, ImageArtifact):
-                _dict["image"] = self.image.to_dict()
-            else:
-                _dict["image"] = self.image
-
         if hasattr(self, "videos") and self.videos is not None:
             _dict["videos"] = []
             for vid in self.videos:
-                if isinstance(vid, VideoArtifact):
+                if isinstance(vid, Video):
                     _dict["videos"].append(vid.to_dict())
                 else:
                     _dict["videos"].append(vid)
@@ -82,13 +76,13 @@ class BaseRunOutputEvent:
         if hasattr(self, "audio") and self.audio is not None:
             _dict["audio"] = []
             for aud in self.audio:
-                if isinstance(aud, AudioArtifact):
+                if isinstance(aud, Audio):
                     _dict["audio"].append(aud.to_dict())
                 else:
                     _dict["audio"].append(aud)
 
         if hasattr(self, "response_audio") and self.response_audio is not None:
-            if isinstance(self.response_audio, AudioResponse):
+            if isinstance(self.response_audio, Audio):
                 _dict["response_audio"] = self.response_audio.to_dict()
             else:
                 _dict["response_audio"] = self.response_audio
@@ -140,23 +134,19 @@ class BaseRunOutputEvent:
 
         images = data.pop("images", None)
         if images:
-            data["images"] = [ImageArtifact.model_validate(image) for image in images]
-
-        image = data.pop("image", None)
-        if image:
-            data["image"] = ImageArtifact.model_validate(image)
+            data["images"] = [Image.model_validate(image) for image in images]
 
         videos = data.pop("videos", None)
         if videos:
-            data["videos"] = [VideoArtifact.model_validate(video) for video in videos]
+            data["videos"] = [Video.model_validate(video) for video in videos]
 
         audio = data.pop("audio", None)
        if audio:
-            data["audio"] = [AudioArtifact.model_validate(audio) for audio in audio]
+            data["audio"] = [Audio.model_validate(audio) for audio in audio]
 
         response_audio = data.pop("response_audio", None)
         if response_audio:
-            data["response_audio"] = AudioResponse.model_validate(response_audio)
+            data["response_audio"] = Audio.model_validate(response_audio)
 
         additional_input = data.pop("additional_input", None)
         if additional_input is not None:
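
In agno/run/base.py the shared serialization helpers switch their isinstance checks to the unified media types, and the special-case handling for a singular image field is dropped from both to_dict and from_dict. A roundtrip sketch; RunContentEvent, to_dict, and from_dict come from the diff, while the content field and the Audio constructor arguments are assumptions about the surrounding API:

    from agno.media import Audio
    from agno.run.agent import RunContentEvent

    evt = RunContentEvent(content="hello", response_audio=Audio(id="aud-1"))
    d = evt.to_dict()  # response_audio is serialized via Audio.to_dict()
    evt2 = RunContentEvent.from_dict(d)  # and rebuilt via Audio.model_validate
    assert isinstance(evt2.response_audio, Audio)
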
agno/run/team.py CHANGED
@@ -1,11 +1,11 @@
 from dataclasses import asdict, dataclass, field
 from enum import Enum
 from time import time
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Sequence, Union
 
 from pydantic import BaseModel
 
-from agno.media import AudioArtifact, AudioResponse, ImageArtifact, VideoArtifact
+from agno.media import Audio, File, Image, Video
 from agno.models.message import Citations, Message
 from agno.models.metrics import Metrics
 from agno.models.response import ToolExecution
@@ -40,6 +40,8 @@ class TeamRunEvent(str, Enum):
     output_model_response_started = "TeamOutputModelResponseStarted"
     output_model_response_completed = "TeamOutputModelResponseCompleted"
 
+    custom_event = "CustomEvent"
+
 
 @dataclass
 class BaseTeamRunEvent(BaseRunOutputEvent):
@@ -96,8 +98,8 @@ class RunContentEvent(BaseTeamRunEvent):
     content_type: str = "str"
     reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
-    response_audio: Optional[AudioResponse] = None  # Model audio response
-    image: Optional[ImageArtifact] = None  # Image attached to the response
+    response_audio: Optional[Audio] = None  # Model audio response
+    image: Optional[Image] = None  # Image attached to the response
     references: Optional[List[MessageReferences]] = None
     additional_input: Optional[List[Message]] = None
     reasoning_steps: Optional[List[ReasoningStep]] = None
@@ -118,10 +120,10 @@ class RunCompletedEvent(BaseTeamRunEvent):
     content_type: str = "str"
     reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
-    images: Optional[List[ImageArtifact]] = None  # Images attached to the response
-    videos: Optional[List[VideoArtifact]] = None  # Videos attached to the response
-    audio: Optional[List[AudioArtifact]] = None  # Audio attached to the response
-    response_audio: Optional[AudioResponse] = None  # Model audio response
+    images: Optional[List[Image]] = None  # Images attached to the response
+    videos: Optional[List[Video]] = None  # Videos attached to the response
+    audio: Optional[List[Audio]] = None  # Audio attached to the response
+    response_audio: Optional[Audio] = None  # Model audio response
     references: Optional[List[MessageReferences]] = None
     additional_input: Optional[List[Message]] = None
     reasoning_steps: Optional[List[ReasoningStep]] = None
@@ -188,9 +190,9 @@ class ToolCallCompletedEvent(BaseTeamRunEvent):
     event: str = TeamRunEvent.tool_call_completed.value
     tool: Optional[ToolExecution] = None
     content: Optional[Any] = None
-    images: Optional[List[ImageArtifact]] = None  # Images produced by the tool call
-    videos: Optional[List[VideoArtifact]] = None  # Videos produced by the tool call
-    audio: Optional[List[AudioArtifact]] = None  # Audio produced by the tool call
+    images: Optional[List[Image]] = None  # Images produced by the tool call
+    videos: Optional[List[Video]] = None  # Videos produced by the tool call
+    audio: Optional[List[Audio]] = None  # Audio produced by the tool call
 
 
 @dataclass
@@ -213,6 +215,11 @@ class OutputModelResponseCompletedEvent(BaseTeamRunEvent):
     event: str = TeamRunEvent.output_model_response_completed.value
 
 
+@dataclass
+class CustomEvent(BaseTeamRunEvent):
+    event: str = TeamRunEvent.custom_event.value
+
+
 TeamRunOutputEvent = Union[
     RunStartedEvent,
     RunContentEvent,
@@ -231,6 +238,7 @@ TeamRunOutputEvent = Union[
     ParserModelResponseCompletedEvent,
     OutputModelResponseStartedEvent,
     OutputModelResponseCompletedEvent,
+    CustomEvent,
 ]
 
 # Map event string to dataclass for team events
@@ -252,6 +260,7 @@ TEAM_RUN_EVENT_TYPE_REGISTRY = {
     TeamRunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
     TeamRunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
    TeamRunEvent.output_model_response_completed.value: OutputModelResponseCompletedEvent,
+    TeamRunEvent.custom_event.value: CustomEvent,
 }
 
 
@@ -266,6 +275,76 @@ def team_run_output_event_from_dict(data: dict) -> BaseTeamRunEvent:
     return event_class.from_dict(data)  # type: ignore
 
 
+@dataclass
+class TeamRunInput:
+    """Container for the raw input data passed to Team.run().
+    This captures the original input exactly as provided by the user,
+    separate from the processed messages that go to the model.
+    Attributes:
+        input_content: The literal input message/content passed to run()
+        images: Images directly passed to run()
+        videos: Videos directly passed to run()
+        audios: Audio files directly passed to run()
+        files: Files directly passed to run()
+    """
+
+    input_content: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None
+    images: Optional[Sequence[Image]] = None
+    videos: Optional[Sequence[Video]] = None
+    audios: Optional[Sequence[Audio]] = None
+    files: Optional[Sequence[File]] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary representation"""
+        result: Dict[str, Any] = {}
+
+        if self.input_content is not None:
+            if isinstance(self.input_content, (str)):
+                result["input_content"] = self.input_content
+            elif isinstance(self.input_content, BaseModel):
+                result["input_content"] = self.input_content.model_dump(exclude_none=True)
+            elif isinstance(self.input_content, Message):
+                result["input_content"] = self.input_content.to_dict()
+            elif (
+                isinstance(self.input_content, list)
+                and self.input_content
+                and isinstance(self.input_content[0], Message)
+            ):
+                result["input_content"] = [m.to_dict() for m in self.input_content]
+            else:
+                result["input_content"] = self.input_content
+
+        if self.images:
+            result["images"] = [img.to_dict() for img in self.images]
+        if self.videos:
+            result["videos"] = [vid.to_dict() for vid in self.videos]
+        if self.audios:
+            result["audios"] = [aud.to_dict() for aud in self.audios]
+
+        return result
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "TeamRunInput":
+        """Create TeamRunInput from dictionary"""
+        images = None
+        if data.get("images"):
+            images = [Image.model_validate(img_data) for img_data in data["images"]]
+
+        videos = None
+        if data.get("videos"):
+            videos = [Video.model_validate(vid_data) for vid_data in data["videos"]]
+
+        audios = None
+        if data.get("audios"):
+            audios = [Audio.model_validate(aud_data) for aud_data in data["audios"]]
+
+        files = None
+        if data.get("files"):
+            files = [File.model_validate(file_data) for file_data in data["files"]]
+
+        return cls(input_content=data.get("input_content"), images=images, videos=videos, audios=audios, files=files)
+
+
 @dataclass
 class TeamRunOutput:
     """Response returned by Team.run() functions"""
@@ -287,11 +366,14 @@ class TeamRunOutput:
 
     tools: Optional[List[ToolExecution]] = None
 
-    images: Optional[List[ImageArtifact]] = None  # Images from member runs
-    videos: Optional[List[VideoArtifact]] = None  # Videos from member runs
-    audio: Optional[List[AudioArtifact]] = None  # Audio from member runs
+    images: Optional[List[Image]] = None  # Images from member runs
+    videos: Optional[List[Video]] = None  # Videos from member runs
+    audio: Optional[List[Audio]] = None  # Audio from member runs
+
+    response_audio: Optional[Audio] = None  # Model audio response
 
-    response_audio: Optional[AudioResponse] = None  # Model audio response
+    # Input media and messages from user
+    input: Optional[TeamRunInput] = None
 
     reasoning_content: Optional[str] = None
 
@@ -401,6 +483,9 @@ class TeamRunOutput:
             else:
                 _dict["tools"].append(tool)
 
+        if self.input is not None:
+            _dict["input"] = self.input.to_dict()
+
         return _dict
 
     def to_json(self) -> str:
@@ -454,19 +539,24 @@ class TeamRunOutput:
             references = [MessageReferences.model_validate(reference) for reference in references]
 
         images = data.pop("images", [])
-        images = [ImageArtifact.model_validate(image) for image in images] if images else None
+        images = [Image.model_validate(image) for image in images] if images else None
 
         videos = data.pop("videos", [])
-        videos = [VideoArtifact.model_validate(video) for video in videos] if videos else None
+        videos = [Video.model_validate(video) for video in videos] if videos else None
 
         audio = data.pop("audio", [])
-        audio = [AudioArtifact.model_validate(audio) for audio in audio] if audio else None
+        audio = [Audio.model_validate(audio) for audio in audio] if audio else None
 
         tools = data.pop("tools", [])
         tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
 
         response_audio = data.pop("response_audio", None)
-        response_audio = AudioResponse.model_validate(response_audio) if response_audio else None
+        response_audio = Audio.model_validate(response_audio) if response_audio else None
+
+        input_data = data.pop("input", None)
+        input_obj = None
+        if input_data:
+            input_obj = TeamRunInput.from_dict(input_data)
 
         metrics = data.pop("metrics", None)
         if metrics:
@@ -487,6 +577,7 @@ class TeamRunOutput:
             videos=videos,
             audio=audio,
             response_audio=response_audio,
+            input=input_obj,
             citations=citations,
             tools=tools,
             events=events,
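
The team module thus gains the same input-capture mechanism the agent module already had: TeamRunOutput carries an optional TeamRunInput, written out by to_dict and restored by from_dict. A roundtrip sketch; the class and method names come from the diff, while the Image constructor arguments are assumed. Note that, as written above, TeamRunInput.to_dict serializes images, videos, and audios but not files, even though from_dict reads files back:

    from agno.media import Image
    from agno.run.team import TeamRunInput

    run_input = TeamRunInput(
        input_content="Summarize the attached chart",
        images=[Image(url="https://example.com/chart.png")],
    )
    d = run_input.to_dict()
    restored = TeamRunInput.from_dict(d)
    assert restored.input_content == run_input.input_content
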