agno-2.3.8-py3-none-any.whl → agno-2.3.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. agno/agent/agent.py +134 -94
  2. agno/db/mysql/__init__.py +2 -1
  3. agno/db/mysql/async_mysql.py +2888 -0
  4. agno/db/mysql/mysql.py +17 -8
  5. agno/db/mysql/utils.py +139 -6
  6. agno/db/postgres/async_postgres.py +10 -5
  7. agno/db/postgres/postgres.py +7 -2
  8. agno/db/schemas/evals.py +1 -0
  9. agno/db/singlestore/singlestore.py +5 -1
  10. agno/db/sqlite/async_sqlite.py +3 -3
  11. agno/eval/__init__.py +10 -0
  12. agno/eval/accuracy.py +11 -8
  13. agno/eval/agent_as_judge.py +861 -0
  14. agno/eval/base.py +29 -0
  15. agno/eval/utils.py +2 -1
  16. agno/exceptions.py +7 -0
  17. agno/knowledge/embedder/openai.py +8 -8
  18. agno/knowledge/knowledge.py +1142 -176
  19. agno/media.py +22 -6
  20. agno/models/aws/claude.py +8 -7
  21. agno/models/base.py +61 -2
  22. agno/models/deepseek/deepseek.py +67 -0
  23. agno/models/google/gemini.py +134 -51
  24. agno/models/google/utils.py +22 -0
  25. agno/models/message.py +5 -0
  26. agno/models/openai/chat.py +4 -0
  27. agno/os/app.py +64 -74
  28. agno/os/interfaces/a2a/router.py +3 -4
  29. agno/os/interfaces/agui/router.py +2 -0
  30. agno/os/router.py +3 -1607
  31. agno/os/routers/agents/__init__.py +3 -0
  32. agno/os/routers/agents/router.py +581 -0
  33. agno/os/routers/agents/schema.py +261 -0
  34. agno/os/routers/evals/evals.py +26 -6
  35. agno/os/routers/evals/schemas.py +34 -2
  36. agno/os/routers/evals/utils.py +77 -18
  37. agno/os/routers/knowledge/knowledge.py +1 -1
  38. agno/os/routers/teams/__init__.py +3 -0
  39. agno/os/routers/teams/router.py +496 -0
  40. agno/os/routers/teams/schema.py +257 -0
  41. agno/os/routers/workflows/__init__.py +3 -0
  42. agno/os/routers/workflows/router.py +545 -0
  43. agno/os/routers/workflows/schema.py +75 -0
  44. agno/os/schema.py +1 -559
  45. agno/os/utils.py +139 -2
  46. agno/team/team.py +87 -24
  47. agno/tools/file_generation.py +12 -6
  48. agno/tools/firecrawl.py +15 -7
  49. agno/tools/function.py +37 -23
  50. agno/tools/shopify.py +1519 -0
  51. agno/tools/spotify.py +2 -5
  52. agno/utils/hooks.py +64 -5
  53. agno/utils/http.py +2 -2
  54. agno/utils/media.py +11 -1
  55. agno/utils/print_response/agent.py +8 -0
  56. agno/utils/print_response/team.py +8 -0
  57. agno/vectordb/pgvector/pgvector.py +88 -51
  58. agno/workflow/parallel.py +5 -3
  59. agno/workflow/step.py +14 -2
  60. agno/workflow/types.py +38 -2
  61. agno/workflow/workflow.py +12 -4
  62. {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/METADATA +7 -2
  63. {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/RECORD +66 -52
  64. {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/WHEEL +0 -0
  65. {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/licenses/LICENSE +0 -0
  66. {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/top_level.txt +0 -0
agno/os/schema.py CHANGED
@@ -1,13 +1,11 @@
 from datetime import datetime, timezone
 from enum import Enum
 from typing import Any, Dict, Generic, List, Optional, TypeVar, Union
-from uuid import uuid4

 from pydantic import BaseModel, ConfigDict, Field

 from agno.agent import Agent
 from agno.db.base import SessionType
-from agno.models.message import Message
 from agno.os.config import (
     ChatConfig,
     EvalsConfig,
@@ -19,21 +17,11 @@ from agno.os.config import (
 )
 from agno.os.utils import (
     extract_input_media,
-    format_team_tools,
-    format_tools,
-    get_agent_input_schema_dict,
     get_run_input,
     get_session_name,
-    get_team_input_schema_dict,
-    get_workflow_input_schema_dict,
 )
-from agno.run import RunContext
-from agno.run.agent import RunOutput
-from agno.run.team import TeamRunOutput
 from agno.session import AgentSession, TeamSession, WorkflowSession
 from agno.team.team import Team
-from agno.utils.agent import aexecute_instructions, aexecute_system_message
-from agno.workflow.agent import WorkflowAgent
 from agno.workflow.workflow import Workflow


@@ -179,553 +167,6 @@ class ModelResponse(BaseModel):
     provider: Optional[str] = Field(None, description="Model provider name")


-class AgentResponse(BaseModel):
-    id: Optional[str] = None
-    name: Optional[str] = None
-    db_id: Optional[str] = None
-    model: Optional[ModelResponse] = None
-    tools: Optional[Dict[str, Any]] = None
-    sessions: Optional[Dict[str, Any]] = None
-    knowledge: Optional[Dict[str, Any]] = None
-    memory: Optional[Dict[str, Any]] = None
-    reasoning: Optional[Dict[str, Any]] = None
-    default_tools: Optional[Dict[str, Any]] = None
-    system_message: Optional[Dict[str, Any]] = None
-    extra_messages: Optional[Dict[str, Any]] = None
-    response_settings: Optional[Dict[str, Any]] = None
-    streaming: Optional[Dict[str, Any]] = None
-    metadata: Optional[Dict[str, Any]] = None
-    input_schema: Optional[Dict[str, Any]] = None
-
-    @classmethod
-    async def from_agent(cls, agent: Agent) -> "AgentResponse":
-        def filter_meaningful_config(d: Dict[str, Any], defaults: Dict[str, Any]) -> Optional[Dict[str, Any]]:
-            """Filter out fields that match their default values, keeping only meaningful user configurations"""
-            filtered = {}
-            for key, value in d.items():
-                if value is None:
-                    continue
-                # Skip if value matches the default exactly
-                if key in defaults and value == defaults[key]:
-                    continue
-                # Keep non-default values
-                filtered[key] = value
-            return filtered if filtered else None
-
-        # Define default values for filtering
-        agent_defaults = {
-            # Sessions defaults
-            "add_history_to_context": False,
-            "num_history_runs": 3,
-            "enable_session_summaries": False,
-            "search_session_history": False,
-            "cache_session": False,
-            # Knowledge defaults
-            "add_references": False,
-            "references_format": "json",
-            "enable_agentic_knowledge_filters": False,
-            # Memory defaults
-            "enable_agentic_memory": False,
-            "enable_user_memories": False,
-            # Reasoning defaults
-            "reasoning": False,
-            "reasoning_min_steps": 1,
-            "reasoning_max_steps": 10,
-            # Default tools defaults
-            "read_chat_history": False,
-            "search_knowledge": True,
-            "update_knowledge": False,
-            "read_tool_call_history": False,
-            # System message defaults
-            "system_message_role": "system",
-            "build_context": True,
-            "markdown": False,
-            "add_name_to_context": False,
-            "add_datetime_to_context": False,
-            "add_location_to_context": False,
-            "resolve_in_context": True,
-            # Extra messages defaults
-            "user_message_role": "user",
-            "build_user_context": True,
-            # Response settings defaults
-            "retries": 0,
-            "delay_between_retries": 1,
-            "exponential_backoff": False,
-            "parse_response": True,
-            "use_json_mode": False,
-            # Streaming defaults
-            "stream_events": False,
-            "stream_intermediate_steps": False,
-        }
-
-        session_id = str(uuid4())
-        run_id = str(uuid4())
-        agent_tools = await agent.aget_tools(
-            session=AgentSession(session_id=session_id, session_data={}),
-            run_response=RunOutput(run_id=run_id, session_id=session_id),
-            run_context=RunContext(run_id=run_id, session_id=session_id, user_id=agent.user_id),
-            check_mcp_tools=False,
-        )
-        formatted_tools = format_tools(agent_tools) if agent_tools else None
-
-        additional_input = agent.additional_input
-        if additional_input and isinstance(additional_input[0], Message):
-            additional_input = [message.to_dict() for message in additional_input]  # type: ignore
-
-        # Build model only if it has at least one non-null field
-        model_name = agent.model.name if (agent.model and agent.model.name) else None
-        model_provider = agent.model.provider if (agent.model and agent.model.provider) else None
-        model_id = agent.model.id if (agent.model and agent.model.id) else None
-        _agent_model_data: Dict[str, Any] = {}
-        if model_name is not None:
-            _agent_model_data["name"] = model_name
-        if model_id is not None:
-            _agent_model_data["model"] = model_id
-        if model_provider is not None:
-            _agent_model_data["provider"] = model_provider
-
-        session_table = agent.db.session_table_name if agent.db else None
-        knowledge_table = agent.db.knowledge_table_name if agent.db and agent.knowledge else None
-
-        tools_info = {
-            "tools": formatted_tools,
-            "tool_call_limit": agent.tool_call_limit,
-            "tool_choice": agent.tool_choice,
-        }
-
-        sessions_info = {
-            "session_table": session_table,
-            "add_history_to_context": agent.add_history_to_context,
-            "enable_session_summaries": agent.enable_session_summaries,
-            "num_history_runs": agent.num_history_runs,
-            "search_session_history": agent.search_session_history,
-            "num_history_sessions": agent.num_history_sessions,
-            "cache_session": agent.cache_session,
-        }
-
-        knowledge_info = {
-            "knowledge_table": knowledge_table,
-            "enable_agentic_knowledge_filters": agent.enable_agentic_knowledge_filters,
-            "knowledge_filters": agent.knowledge_filters,
-            "references_format": agent.references_format,
-        }
-
-        memory_info: Optional[Dict[str, Any]] = None
-        if agent.memory_manager is not None:
-            memory_info = {
-                "enable_agentic_memory": agent.enable_agentic_memory,
-                "enable_user_memories": agent.enable_user_memories,
-                "metadata": agent.metadata,
-                "memory_table": agent.db.memory_table_name if agent.db and agent.enable_user_memories else None,
-            }
-
-            if agent.memory_manager.model is not None:
-                memory_info["model"] = ModelResponse(
-                    name=agent.memory_manager.model.name,
-                    model=agent.memory_manager.model.id,
-                    provider=agent.memory_manager.model.provider,
-                ).model_dump()
-
-        reasoning_info: Dict[str, Any] = {
-            "reasoning": agent.reasoning,
-            "reasoning_agent_id": agent.reasoning_agent.id if agent.reasoning_agent else None,
-            "reasoning_min_steps": agent.reasoning_min_steps,
-            "reasoning_max_steps": agent.reasoning_max_steps,
-        }
-
-        if agent.reasoning_model:
-            reasoning_info["reasoning_model"] = ModelResponse(
-                name=agent.reasoning_model.name,
-                model=agent.reasoning_model.id,
-                provider=agent.reasoning_model.provider,
-            ).model_dump()
-
-        default_tools_info = {
-            "read_chat_history": agent.read_chat_history,
-            "search_knowledge": agent.search_knowledge,
-            "update_knowledge": agent.update_knowledge,
-            "read_tool_call_history": agent.read_tool_call_history,
-        }
-
-        instructions = agent.instructions if agent.instructions else None
-        if instructions and callable(instructions):
-            instructions = await aexecute_instructions(instructions=instructions, agent=agent)
-
-        system_message = agent.system_message if agent.system_message else None
-        if system_message and callable(system_message):
-            system_message = await aexecute_system_message(system_message=system_message, agent=agent)
-
-        system_message_info = {
-            "system_message": str(system_message) if system_message else None,
-            "system_message_role": agent.system_message_role,
-            "build_context": agent.build_context,
-            "description": agent.description,
-            "instructions": instructions,
-            "expected_output": agent.expected_output,
-            "additional_context": agent.additional_context,
-            "markdown": agent.markdown,
-            "add_name_to_context": agent.add_name_to_context,
-            "add_datetime_to_context": agent.add_datetime_to_context,
-            "add_location_to_context": agent.add_location_to_context,
-            "timezone_identifier": agent.timezone_identifier,
-            "resolve_in_context": agent.resolve_in_context,
-        }
-
-        extra_messages_info = {
-            "additional_input": additional_input,  # type: ignore
-            "user_message_role": agent.user_message_role,
-            "build_user_context": agent.build_user_context,
-        }
-
-        response_settings_info: Dict[str, Any] = {
-            "retries": agent.retries,
-            "delay_between_retries": agent.delay_between_retries,
-            "exponential_backoff": agent.exponential_backoff,
-            "output_schema_name": agent.output_schema.__name__ if agent.output_schema else None,
-            "parser_model_prompt": agent.parser_model_prompt,
-            "parse_response": agent.parse_response,
-            "structured_outputs": agent.structured_outputs,
-            "use_json_mode": agent.use_json_mode,
-            "save_response_to_file": agent.save_response_to_file,
-        }
-
-        if agent.parser_model:
-            response_settings_info["parser_model"] = ModelResponse(
-                name=agent.parser_model.name,
-                model=agent.parser_model.id,
-                provider=agent.parser_model.provider,
-            ).model_dump()
-
-        streaming_info = {
-            "stream": agent.stream,
-            "stream_events": agent.stream_events,
-            "stream_intermediate_steps": agent.stream_intermediate_steps,
-        }
-
-        return AgentResponse(
-            id=agent.id,
-            name=agent.name,
-            db_id=agent.db.id if agent.db else None,
-            model=ModelResponse(**_agent_model_data) if _agent_model_data else None,
-            tools=filter_meaningful_config(tools_info, {}),
-            sessions=filter_meaningful_config(sessions_info, agent_defaults),
-            knowledge=filter_meaningful_config(knowledge_info, agent_defaults),
-            memory=filter_meaningful_config(memory_info, agent_defaults) if memory_info else None,
-            reasoning=filter_meaningful_config(reasoning_info, agent_defaults),
-            default_tools=filter_meaningful_config(default_tools_info, agent_defaults),
-            system_message=filter_meaningful_config(system_message_info, agent_defaults),
-            extra_messages=filter_meaningful_config(extra_messages_info, agent_defaults),
-            response_settings=filter_meaningful_config(response_settings_info, agent_defaults),
-            streaming=filter_meaningful_config(streaming_info, agent_defaults),
-            metadata=agent.metadata,
-            input_schema=get_agent_input_schema_dict(agent),
-        )
-
-
-class TeamResponse(BaseModel):
-    id: Optional[str] = None
-    name: Optional[str] = None
-    db_id: Optional[str] = None
-    description: Optional[str] = None
-    model: Optional[ModelResponse] = None
-    tools: Optional[Dict[str, Any]] = None
-    sessions: Optional[Dict[str, Any]] = None
-    knowledge: Optional[Dict[str, Any]] = None
-    memory: Optional[Dict[str, Any]] = None
-    reasoning: Optional[Dict[str, Any]] = None
-    default_tools: Optional[Dict[str, Any]] = None
-    system_message: Optional[Dict[str, Any]] = None
-    response_settings: Optional[Dict[str, Any]] = None
-    streaming: Optional[Dict[str, Any]] = None
-    members: Optional[List[Union[AgentResponse, "TeamResponse"]]] = None
-    metadata: Optional[Dict[str, Any]] = None
-    input_schema: Optional[Dict[str, Any]] = None
-
-    @classmethod
-    async def from_team(cls, team: Team) -> "TeamResponse":
-        def filter_meaningful_config(d: Dict[str, Any], defaults: Dict[str, Any]) -> Optional[Dict[str, Any]]:
-            """Filter out fields that match their default values, keeping only meaningful user configurations"""
-            filtered = {}
-            for key, value in d.items():
-                if value is None:
-                    continue
-                # Skip if value matches the default exactly
-                if key in defaults and value == defaults[key]:
-                    continue
-                # Keep non-default values
-                filtered[key] = value
-            return filtered if filtered else None
-
-        # Define default values for filtering (similar to agent defaults)
-        team_defaults = {
-            # Sessions defaults
-            "add_history_to_context": False,
-            "num_history_runs": 3,
-            "enable_session_summaries": False,
-            "cache_session": False,
-            # Knowledge defaults
-            "add_references": False,
-            "references_format": "json",
-            "enable_agentic_knowledge_filters": False,
-            # Memory defaults
-            "enable_agentic_memory": False,
-            "enable_user_memories": False,
-            # Reasoning defaults
-            "reasoning": False,
-            "reasoning_min_steps": 1,
-            "reasoning_max_steps": 10,
-            # Default tools defaults
-            "search_knowledge": True,
-            "read_chat_history": False,
-            "get_member_information_tool": False,
-            # System message defaults
-            "system_message_role": "system",
-            "markdown": False,
-            "add_datetime_to_context": False,
-            "add_location_to_context": False,
-            "resolve_in_context": True,
-            # Response settings defaults
-            "parse_response": True,
-            "use_json_mode": False,
-            # Streaming defaults
-            "stream_events": False,
-            "stream_intermediate_steps": False,
-            "stream_member_events": False,
-        }
-
-        run_id = str(uuid4())
-        session_id = str(uuid4())
-        _tools = team._determine_tools_for_model(
-            model=team.model,  # type: ignore
-            session=TeamSession(session_id=session_id, session_data={}),
-            run_response=TeamRunOutput(run_id=run_id),
-            run_context=RunContext(run_id=run_id, session_id=session_id, session_state={}),
-            async_mode=True,
-            team_run_context={},
-            check_mcp_tools=False,
-        )
-        team_tools = _tools
-        formatted_tools = format_team_tools(team_tools) if team_tools else None
-
-        model_name = team.model.name or team.model.__class__.__name__ if team.model else None
-        model_provider = team.model.provider or team.model.__class__.__name__ if team.model else ""
-        model_id = team.model.id if team.model else None
-
-        if model_provider and model_id:
-            model_provider = f"{model_provider} {model_id}"
-        elif model_name and model_id:
-            model_provider = f"{model_name} {model_id}"
-        elif model_id:
-            model_provider = model_id
-
-        session_table = team.db.session_table_name if team.db else None
-        knowledge_table = team.db.knowledge_table_name if team.db and team.knowledge else None
-
-        tools_info = {
-            "tools": formatted_tools,
-            "tool_call_limit": team.tool_call_limit,
-            "tool_choice": team.tool_choice,
-        }
-
-        sessions_info = {
-            "session_table": session_table,
-            "add_history_to_context": team.add_history_to_context,
-            "enable_session_summaries": team.enable_session_summaries,
-            "num_history_runs": team.num_history_runs,
-            "cache_session": team.cache_session,
-        }
-
-        knowledge_info = {
-            "knowledge_table": knowledge_table,
-            "enable_agentic_knowledge_filters": team.enable_agentic_knowledge_filters,
-            "knowledge_filters": team.knowledge_filters,
-            "references_format": team.references_format,
-        }
-
-        memory_info: Optional[Dict[str, Any]] = None
-        if team.memory_manager is not None:
-            memory_info = {
-                "enable_agentic_memory": team.enable_agentic_memory,
-                "enable_user_memories": team.enable_user_memories,
-                "metadata": team.metadata,
-                "memory_table": team.db.memory_table_name if team.db and team.enable_user_memories else None,
-            }
-
-            if team.memory_manager.model is not None:
-                memory_info["model"] = ModelResponse(
-                    name=team.memory_manager.model.name,
-                    model=team.memory_manager.model.id,
-                    provider=team.memory_manager.model.provider,
-                ).model_dump()
-
-        reasoning_info: Dict[str, Any] = {
-            "reasoning": team.reasoning,
-            "reasoning_agent_id": team.reasoning_agent.id if team.reasoning_agent else None,
-            "reasoning_min_steps": team.reasoning_min_steps,
-            "reasoning_max_steps": team.reasoning_max_steps,
-        }
-
-        if team.reasoning_model:
-            reasoning_info["reasoning_model"] = ModelResponse(
-                name=team.reasoning_model.name,
-                model=team.reasoning_model.id,
-                provider=team.reasoning_model.provider,
-            ).model_dump()
-
-        default_tools_info = {
-            "search_knowledge": team.search_knowledge,
-            "read_chat_history": team.read_chat_history,
-            "get_member_information_tool": team.get_member_information_tool,
-        }
-
-        team_instructions = team.instructions if team.instructions else None
-        if team_instructions and callable(team_instructions):
-            team_instructions = await aexecute_instructions(instructions=team_instructions, agent=team, team=team)
-
-        team_system_message = team.system_message if team.system_message else None
-        if team_system_message and callable(team_system_message):
-            team_system_message = await aexecute_system_message(
-                system_message=team_system_message, agent=team, team=team
-            )
-
-        system_message_info = {
-            "system_message": team_system_message,
-            "system_message_role": team.system_message_role,
-            "description": team.description,
-            "instructions": team_instructions,
-            "expected_output": team.expected_output,
-            "additional_context": team.additional_context,
-            "markdown": team.markdown,
-            "add_datetime_to_context": team.add_datetime_to_context,
-            "add_location_to_context": team.add_location_to_context,
-            "resolve_in_context": team.resolve_in_context,
-        }
-
-        response_settings_info: Dict[str, Any] = {
-            "output_schema_name": team.output_schema.__name__ if team.output_schema else None,
-            "parser_model_prompt": team.parser_model_prompt,
-            "parse_response": team.parse_response,
-            "use_json_mode": team.use_json_mode,
-        }
-
-        if team.parser_model:
-            response_settings_info["parser_model"] = ModelResponse(
-                name=team.parser_model.name,
-                model=team.parser_model.id,
-                provider=team.parser_model.provider,
-            ).model_dump()
-
-        streaming_info = {
-            "stream": team.stream,
-            "stream_events": team.stream_events,
-            "stream_intermediate_steps": team.stream_intermediate_steps,
-            "stream_member_events": team.stream_member_events,
-        }
-
-        # Build team model only if it has at least one non-null field
-        _team_model_data: Dict[str, Any] = {}
-        if team.model and team.model.name is not None:
-            _team_model_data["name"] = team.model.name
-        if team.model and team.model.id is not None:
-            _team_model_data["model"] = team.model.id
-        if team.model and team.model.provider is not None:
-            _team_model_data["provider"] = team.model.provider
-
-        members: List[Union[AgentResponse, TeamResponse]] = []
-        for member in team.members:
-            if isinstance(member, Agent):
-                agent_response = await AgentResponse.from_agent(member)
-                members.append(agent_response)
-            if isinstance(member, Team):
-                team_response = await TeamResponse.from_team(member)
-                members.append(team_response)
-
-        return TeamResponse(
-            id=team.id,
-            name=team.name,
-            db_id=team.db.id if team.db else None,
-            model=ModelResponse(**_team_model_data) if _team_model_data else None,
-            tools=filter_meaningful_config(tools_info, {}),
-            sessions=filter_meaningful_config(sessions_info, team_defaults),
-            knowledge=filter_meaningful_config(knowledge_info, team_defaults),
-            memory=filter_meaningful_config(memory_info, team_defaults) if memory_info else None,
-            reasoning=filter_meaningful_config(reasoning_info, team_defaults),
-            default_tools=filter_meaningful_config(default_tools_info, team_defaults),
-            system_message=filter_meaningful_config(system_message_info, team_defaults),
-            response_settings=filter_meaningful_config(response_settings_info, team_defaults),
-            streaming=filter_meaningful_config(streaming_info, team_defaults),
-            members=members if members else None,
-            metadata=team.metadata,
-            input_schema=get_team_input_schema_dict(team),
-        )
-
-
-class WorkflowResponse(BaseModel):
-    id: Optional[str] = Field(None, description="Unique identifier for the workflow")
-    name: Optional[str] = Field(None, description="Name of the workflow")
-    db_id: Optional[str] = Field(None, description="Database identifier")
-    description: Optional[str] = Field(None, description="Description of the workflow")
-    input_schema: Optional[Dict[str, Any]] = Field(None, description="Input schema for the workflow")
-    steps: Optional[List[Dict[str, Any]]] = Field(None, description="List of workflow steps")
-    agent: Optional[AgentResponse] = Field(None, description="Agent configuration if used")
-    team: Optional[TeamResponse] = Field(None, description="Team configuration if used")
-    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")
-    workflow_agent: bool = Field(False, description="Whether this workflow uses a WorkflowAgent")
-
-    @classmethod
-    async def _resolve_agents_and_teams_recursively(cls, steps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-        """Parse Agents and Teams into AgentResponse and TeamResponse objects.
-
-        If the given steps have nested steps, recursively work on those."""
-        if not steps:
-            return steps
-
-        def _prune_none(value: Any) -> Any:
-            # Recursively remove None values from dicts and lists
-            if isinstance(value, dict):
-                return {k: _prune_none(v) for k, v in value.items() if v is not None}
-            if isinstance(value, list):
-                return [_prune_none(v) for v in value]
-            return value
-
-        for idx, step in enumerate(steps):
-            if step.get("agent"):
-                # Convert to dict and exclude fields that are None
-                agent_response = await AgentResponse.from_agent(step.get("agent"))  # type: ignore
-                step["agent"] = agent_response.model_dump(exclude_none=True)
-
-            if step.get("team"):
-                team_response = await TeamResponse.from_team(step.get("team"))  # type: ignore
-                step["team"] = team_response.model_dump(exclude_none=True)
-
-            if step.get("steps"):
-                step["steps"] = await cls._resolve_agents_and_teams_recursively(step["steps"])
-
-            # Prune None values in the entire step
-            steps[idx] = _prune_none(step)
-
-        return steps
-
-    @classmethod
-    async def from_workflow(cls, workflow: Workflow) -> "WorkflowResponse":
-        workflow_dict = workflow.to_dict()
-        steps = workflow_dict.get("steps")
-
-        if steps:
-            steps = await cls._resolve_agents_and_teams_recursively(steps)
-
-        return cls(
-            id=workflow.id,
-            name=workflow.name,
-            db_id=workflow.db.id if workflow.db else None,
-            description=workflow.description,
-            steps=steps,
-            input_schema=get_workflow_input_schema_dict(workflow),
-            metadata=workflow.metadata,
-            workflow_agent=isinstance(workflow.agent, WorkflowAgent) if workflow.agent else False,
-        )
-
-
 class WorkflowRunRequest(BaseModel):
     input: Dict[str, Any] = Field(..., description="Input parameters for the workflow run")
     user_id: Optional[str] = Field(None, description="User identifier for the workflow run")
@@ -924,6 +365,7 @@ class RunSchema(BaseModel):
     def from_dict(cls, run_dict: Dict[str, Any]) -> "RunSchema":
         run_input = get_run_input(run_dict)
         run_response_format = "text" if run_dict.get("content_type", "str") == "str" else "json"
+
         return cls(
             run_id=run_dict.get("run_id", ""),
            parent_run_id=run_dict.get("parent_run_id", ""),