agno 2.3.12__py3-none-any.whl → 2.3.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. agno/agent/agent.py +1125 -1401
  2. agno/eval/__init__.py +21 -8
  3. agno/knowledge/embedder/azure_openai.py +0 -1
  4. agno/knowledge/embedder/google.py +1 -1
  5. agno/models/anthropic/claude.py +4 -1
  6. agno/models/azure/openai_chat.py +11 -5
  7. agno/models/base.py +8 -4
  8. agno/models/openai/chat.py +0 -2
  9. agno/models/openai/responses.py +2 -2
  10. agno/os/app.py +112 -5
  11. agno/os/auth.py +190 -3
  12. agno/os/config.py +9 -0
  13. agno/os/interfaces/a2a/router.py +619 -9
  14. agno/os/interfaces/a2a/utils.py +31 -32
  15. agno/os/middleware/__init__.py +2 -0
  16. agno/os/middleware/jwt.py +670 -108
  17. agno/os/router.py +0 -1
  18. agno/os/routers/agents/router.py +22 -4
  19. agno/os/routers/agents/schema.py +14 -1
  20. agno/os/routers/teams/router.py +20 -4
  21. agno/os/routers/teams/schema.py +14 -1
  22. agno/os/routers/workflows/router.py +88 -9
  23. agno/os/scopes.py +469 -0
  24. agno/os/utils.py +86 -53
  25. agno/reasoning/anthropic.py +85 -1
  26. agno/reasoning/azure_ai_foundry.py +93 -1
  27. agno/reasoning/deepseek.py +91 -1
  28. agno/reasoning/gemini.py +81 -1
  29. agno/reasoning/groq.py +103 -1
  30. agno/reasoning/manager.py +1244 -0
  31. agno/reasoning/ollama.py +93 -1
  32. agno/reasoning/openai.py +113 -1
  33. agno/reasoning/vertexai.py +85 -1
  34. agno/run/agent.py +11 -0
  35. agno/run/base.py +1 -1
  36. agno/run/team.py +11 -0
  37. agno/session/team.py +0 -3
  38. agno/team/team.py +1204 -1452
  39. agno/tools/postgres.py +1 -1
  40. agno/utils/cryptography.py +22 -0
  41. agno/utils/events.py +69 -2
  42. agno/utils/hooks.py +4 -10
  43. agno/utils/print_response/agent.py +52 -2
  44. agno/utils/print_response/team.py +141 -10
  45. agno/utils/prompts.py +8 -6
  46. agno/utils/string.py +46 -0
  47. agno/utils/team.py +1 -1
  48. agno/vectordb/chroma/chromadb.py +1 -0
  49. agno/vectordb/milvus/milvus.py +32 -3
  50. agno/vectordb/redis/redisdb.py +16 -2
  51. {agno-2.3.12.dist-info → agno-2.3.14.dist-info}/METADATA +3 -2
  52. {agno-2.3.12.dist-info → agno-2.3.14.dist-info}/RECORD +55 -52
  53. {agno-2.3.12.dist-info → agno-2.3.14.dist-info}/WHEEL +0 -0
  54. {agno-2.3.12.dist-info → agno-2.3.14.dist-info}/licenses/LICENSE +0 -0
  55. {agno-2.3.12.dist-info → agno-2.3.14.dist-info}/top_level.txt +0 -0
agno/tools/postgres.py CHANGED
@@ -110,7 +110,7 @@ class PostgresTools(Toolkit):
110
110
  """
111
111
  if not self.is_connected:
112
112
  return self.connect()
113
- return self._connection
113
+ return self._connection # type: ignore
114
114
 
115
115
  def __enter__(self):
116
116
  return self.connect()
agno/utils/cryptography.py ADDED
@@ -0,0 +1,22 @@
1
+ from cryptography.hazmat.primitives import serialization
2
+ from cryptography.hazmat.primitives.asymmetric import rsa
3
+
4
+
5
+ def generate_rsa_keys():
6
+ """Generate RSA key pair for RS256 JWT signing/verification."""
7
+ private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
8
+
9
+ # Private key PEM (used by auth server to sign tokens)
10
+ private_pem = private_key.private_bytes(
11
+ encoding=serialization.Encoding.PEM,
12
+ format=serialization.PrivateFormat.PKCS8,
13
+ encryption_algorithm=serialization.NoEncryption(),
14
+ )
15
+
16
+ # Public key PEM (used by AgentOS to verify tokens)
17
+ public_pem = private_key.public_key().public_bytes(
18
+ encoding=serialization.Encoding.PEM,
19
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
20
+ )
21
+
22
+ return private_pem.decode("utf-8"), public_pem.decode("utf-8")
agno/utils/events.py CHANGED
@@ -16,6 +16,7 @@ from agno.run.agent import (
16
16
  PreHookCompletedEvent,
17
17
  PreHookStartedEvent,
18
18
  ReasoningCompletedEvent,
19
+ ReasoningContentDeltaEvent,
19
20
  ReasoningStartedEvent,
20
21
  ReasoningStepEvent,
21
22
  RunCancelledEvent,
@@ -47,6 +48,7 @@ from agno.run.team import PostHookStartedEvent as TeamPostHookStartedEvent
47
48
  from agno.run.team import PreHookCompletedEvent as TeamPreHookCompletedEvent
48
49
  from agno.run.team import PreHookStartedEvent as TeamPreHookStartedEvent
49
50
  from agno.run.team import ReasoningCompletedEvent as TeamReasoningCompletedEvent
51
+ from agno.run.team import ReasoningContentDeltaEvent as TeamReasoningContentDeltaEvent
50
52
  from agno.run.team import ReasoningStartedEvent as TeamReasoningStartedEvent
51
53
  from agno.run.team import ReasoningStepEvent as TeamReasoningStepEvent
52
54
  from agno.run.team import RunCancelledEvent as TeamRunCancelledEvent
@@ -161,23 +163,41 @@ def create_run_continued_event(from_run_response: RunOutput) -> RunContinuedEven
161
163
  )
162
164
 
163
165
 
164
- def create_team_run_error_event(from_run_response: TeamRunOutput, error: str) -> TeamRunErrorEvent:
166
+ def create_team_run_error_event(
167
+ from_run_response: TeamRunOutput,
168
+ error: str,
169
+ error_type: Optional[str] = None,
170
+ error_id: Optional[str] = None,
171
+ additional_data: Optional[Dict[str, Any]] = None,
172
+ ) -> TeamRunErrorEvent:
165
173
  return TeamRunErrorEvent(
166
174
  session_id=from_run_response.session_id,
167
175
  team_id=from_run_response.team_id, # type: ignore
168
176
  team_name=from_run_response.team_name, # type: ignore
169
177
  run_id=from_run_response.run_id,
170
178
  content=error,
179
+ error_type=error_type,
180
+ error_id=error_id,
181
+ additional_data=additional_data,
171
182
  )
172
183
 
173
184
 
174
- def create_run_error_event(from_run_response: RunOutput, error: str) -> RunErrorEvent:
185
+ def create_run_error_event(
186
+ from_run_response: RunOutput,
187
+ error: str,
188
+ error_type: Optional[str] = None,
189
+ error_id: Optional[str] = None,
190
+ additional_data: Optional[Dict[str, Any]] = None,
191
+ ) -> RunErrorEvent:
175
192
  return RunErrorEvent(
176
193
  session_id=from_run_response.session_id,
177
194
  agent_id=from_run_response.agent_id, # type: ignore
178
195
  agent_name=from_run_response.agent_name, # type: ignore
179
196
  run_id=from_run_response.run_id,
180
197
  content=error,
198
+ error_type=error_type,
199
+ error_id=error_id,
200
+ additional_data=additional_data,
181
201
  )
182
202
 
183
203
 
@@ -421,6 +441,19 @@ def create_reasoning_step_event(
421
441
  )
422
442
 
423
443
 
444
+ def create_reasoning_content_delta_event(
445
+ from_run_response: RunOutput, reasoning_content: str
446
+ ) -> ReasoningContentDeltaEvent:
447
+ """Create an event for streaming reasoning content chunks."""
448
+ return ReasoningContentDeltaEvent(
449
+ session_id=from_run_response.session_id,
450
+ agent_id=from_run_response.agent_id, # type: ignore
451
+ agent_name=from_run_response.agent_name, # type: ignore
452
+ run_id=from_run_response.run_id,
453
+ reasoning_content=reasoning_content,
454
+ )
455
+
456
+
424
457
  def create_team_reasoning_step_event(
425
458
  from_run_response: TeamRunOutput, reasoning_step: ReasoningStep, reasoning_content: str
426
459
  ) -> TeamReasoningStepEvent:
@@ -435,6 +468,19 @@ def create_team_reasoning_step_event(
435
468
  )
436
469
 
437
470
 
471
+ def create_team_reasoning_content_delta_event(
472
+ from_run_response: TeamRunOutput, reasoning_content: str
473
+ ) -> TeamReasoningContentDeltaEvent:
474
+ """Create an event for streaming reasoning content chunks for Team."""
475
+ return TeamReasoningContentDeltaEvent(
476
+ session_id=from_run_response.session_id,
477
+ team_id=from_run_response.team_id, # type: ignore
478
+ team_name=from_run_response.team_name, # type: ignore
479
+ run_id=from_run_response.run_id,
480
+ reasoning_content=reasoning_content,
481
+ )
482
+
483
+
438
484
  def create_reasoning_completed_event(
439
485
  from_run_response: RunOutput, content: Optional[Any] = None, content_type: Optional[str] = None
440
486
  ) -> ReasoningCompletedEvent:
@@ -698,3 +744,24 @@ def handle_event(
698
744
  run_response.events = []
699
745
  run_response.events.append(event) # type: ignore
700
746
  return event
747
+
748
+
749
+ def add_error_event(
750
+ error: RunErrorEvent,
751
+ events: Optional[List[RunOutputEvent]],
752
+ ):
753
+ if events is None:
754
+ events = []
755
+ events.append(error)
756
+
757
+ return events
758
+
759
+
760
+ def add_team_error_event(
761
+ error: TeamRunErrorEvent,
762
+ events: Optional[List[Union[RunOutputEvent, TeamRunOutputEvent]]],
763
+ ):
764
+ if events is None:
765
+ events = []
766
+ events.append(error)
767
+ return events
agno/utils/hooks.py CHANGED
@@ -1,9 +1,7 @@
1
1
  from copy import deepcopy
2
- from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
3
-
4
- if TYPE_CHECKING:
5
- from agno.eval.base import BaseEval
2
+ from typing import Any, Callable, Dict, List, Optional, Union
6
3
 
4
+ from agno.eval.base import BaseEval
7
5
  from agno.guardrails.base import BaseGuardrail
8
6
  from agno.hooks.decorator import HOOK_RUN_IN_BACKGROUND_ATTR
9
7
  from agno.utils.log import log_warning
@@ -57,7 +55,7 @@ def should_run_hook_in_background(hook: Callable[..., Any]) -> bool:
57
55
 
58
56
 
59
57
  def normalize_pre_hooks(
60
- hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, "BaseEval"]]],
58
+ hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, BaseEval]]],
61
59
  async_mode: bool = False,
62
60
  ) -> Optional[List[Callable[..., Any]]]:
63
61
  """Normalize pre-hooks to a list format.
@@ -66,8 +64,6 @@ def normalize_pre_hooks(
66
64
  hooks: List of hook functions, guardrails, or eval instances
67
65
  async_mode: Whether to use async versions of methods
68
66
  """
69
- from agno.eval.base import BaseEval
70
-
71
67
  result_hooks: List[Callable[..., Any]] = []
72
68
 
73
69
  if hooks is not None:
@@ -102,7 +98,7 @@ def normalize_pre_hooks(
102
98
 
103
99
 
104
100
  def normalize_post_hooks(
105
- hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, "BaseEval"]]],
101
+ hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, BaseEval]]],
106
102
  async_mode: bool = False,
107
103
  ) -> Optional[List[Callable[..., Any]]]:
108
104
  """Normalize post-hooks to a list format.
@@ -111,8 +107,6 @@ def normalize_post_hooks(
111
107
  hooks: List of hook functions, guardrails, or eval instances
112
108
  async_mode: Whether to use async versions of methods
113
109
  """
114
- from agno.eval.base import BaseEval
115
-
116
110
  result_hooks: List[Callable[..., Any]] = []
117
111
 
118
112
  if hooks is not None:
agno/utils/print_response/agent.py CHANGED
@@ -134,6 +134,11 @@ def print_response_stream(
134
134
  )
135
135
  except Exception as e:
136
136
  log_warning(f"Failed to convert response to JSON: {e}")
137
+ elif agent.output_schema is not None and isinstance(response_event.content, dict):
138
+ try:
139
+ response_content_batch = JSON(json.dumps(response_event.content), indent=2) # type: ignore
140
+ except Exception as e:
141
+ log_warning(f"Failed to convert response to JSON: {e}")
137
142
  else:
138
143
  try:
139
144
  response_content_batch = JSON(json.dumps(response_event.content), indent=4)
@@ -141,6 +146,12 @@ def print_response_stream(
141
146
  log_warning(f"Failed to convert response to JSON: {e}")
142
147
  if hasattr(response_event, "reasoning_content") and response_event.reasoning_content is not None: # type: ignore
143
148
  _response_reasoning_content += response_event.reasoning_content # type: ignore
149
+
150
+ # Handle streaming reasoning content delta events
151
+ if response_event.event == RunEvent.reasoning_content_delta: # type: ignore
152
+ if hasattr(response_event, "reasoning_content") and response_event.reasoning_content is not None: # type: ignore
153
+ _response_reasoning_content += response_event.reasoning_content # type: ignore
154
+
144
155
  if hasattr(response_event, "reasoning_steps") and response_event.reasoning_steps is not None: # type: ignore
145
156
  reasoning_steps = response_event.reasoning_steps # type: ignore
146
157
 
@@ -325,6 +336,11 @@ async def aprint_response_stream(
325
336
  response_content_batch = JSON(resp.content.model_dump_json(exclude_none=True), indent=2) # type: ignore
326
337
  except Exception as e:
327
338
  log_warning(f"Failed to convert response to JSON: {e}")
339
+ elif agent.output_schema is not None and isinstance(resp.content, dict):
340
+ try:
341
+ response_content_batch = JSON(json.dumps(resp.content), indent=2) # type: ignore
342
+ except Exception as e:
343
+ log_warning(f"Failed to convert response to JSON: {e}")
328
344
  else:
329
345
  try:
330
346
  response_content_batch = JSON(json.dumps(resp.content), indent=4)
@@ -333,6 +349,11 @@ async def aprint_response_stream(
333
349
  if resp.reasoning_content is not None: # type: ignore
334
350
  _response_reasoning_content += resp.reasoning_content # type: ignore
335
351
 
352
+ # Handle streaming reasoning content delta events
353
+ if resp.event == RunEvent.reasoning_content_delta: # type: ignore
354
+ if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None: # type: ignore
355
+ _response_reasoning_content += resp.reasoning_content # type: ignore
356
+
336
357
  if hasattr(resp, "reasoning_steps") and resp.reasoning_steps is not None: # type: ignore
337
358
  reasoning_steps = resp.reasoning_steps # type: ignore
338
359
 
@@ -494,11 +515,23 @@ def build_panels_stream(
494
515
  and response_event.citations is not None
495
516
  and response_event.citations.urls is not None
496
517
  ):
497
- md_content = "\n".join(
518
+ md_lines = []
519
+
520
+ # Add search queries if present
521
+ if response_event.citations.search_queries:
522
+ md_lines.append("**Search Queries:**")
523
+ for query in response_event.citations.search_queries:
524
+ md_lines.append(f"- {query}")
525
+ md_lines.append("") # Empty line before URLs
526
+
527
+ # Add URL citations
528
+ md_lines.extend(
498
529
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
499
530
  for i, citation in enumerate(response_event.citations.urls)
500
531
  if citation.url # Only include citations with valid URLs
501
532
  )
533
+
534
+ md_content = "\n".join(md_lines)
502
535
  if md_content: # Only create panel if there are citations
503
536
  citations_panel = create_panel(
504
537
  content=Markdown(md_content),
@@ -871,6 +904,11 @@ def build_panels(
871
904
  response_content_batch = JSON(run_response.content.model_dump_json(exclude_none=True), indent=2)
872
905
  except Exception as e:
873
906
  log_warning(f"Failed to convert response to JSON: {e}")
907
+ elif output_schema is not None and isinstance(run_response.content, dict):
908
+ try:
909
+ response_content_batch = JSON(json.dumps(run_response.content), indent=2)
910
+ except Exception as e:
911
+ log_warning(f"Failed to convert response to JSON: {e}")
874
912
  else:
875
913
  try:
876
914
  response_content_batch = JSON(json.dumps(run_response.content), indent=4)
@@ -890,11 +928,23 @@ def build_panels(
890
928
  and run_response.citations is not None
891
929
  and run_response.citations.urls is not None
892
930
  ):
893
- md_content = "\n".join(
931
+ md_lines = []
932
+
933
+ # Add search queries if present
934
+ if run_response.citations.search_queries:
935
+ md_lines.append("**Search Queries:**")
936
+ for query in run_response.citations.search_queries:
937
+ md_lines.append(f"- {query}")
938
+ md_lines.append("") # Empty line before URLs
939
+
940
+ # Add URL citations
941
+ md_lines.extend(
894
942
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
895
943
  for i, citation in enumerate(run_response.citations.urls)
896
944
  if citation.url # Only include citations with valid URLs
897
945
  )
946
+
947
+ md_content = "\n".join(md_lines)
898
948
  if md_content: # Only create panel if there are citations
899
949
  citations_panel = create_panel(
900
950
  content=Markdown(md_content),
agno/utils/print_response/team.py CHANGED
@@ -1,3 +1,4 @@
1
+ import json
1
2
  from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Set, Union, get_args
2
3
 
3
4
  from pydantic import BaseModel
@@ -229,11 +230,23 @@ def print_response(
229
230
  panels.append(member_response_panel)
230
231
 
231
232
  if member_response.citations is not None and member_response.citations.urls is not None:
232
- md_content = "\n".join(
233
+ md_lines = []
234
+
235
+ # Add search queries if present
236
+ if member_response.citations.search_queries:
237
+ md_lines.append("**Search Queries:**")
238
+ for query in member_response.citations.search_queries:
239
+ md_lines.append(f"- {query}")
240
+ md_lines.append("") # Empty line before URLs
241
+
242
+ # Add URL citations
243
+ md_lines.extend(
233
244
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
234
245
  for i, citation in enumerate(member_response.citations.urls)
235
246
  if citation.url # Only include citations with valid URLs
236
247
  )
248
+
249
+ md_content = "\n".join(md_lines)
237
250
  if md_content: # Only create panel if there are citations
238
251
  citations_panel = create_panel(
239
252
  content=Markdown(md_content),
@@ -293,11 +306,23 @@ def print_response(
293
306
 
294
307
  # Add citations
295
308
  if run_response.citations is not None and run_response.citations.urls is not None:
296
- md_content = "\n".join(
309
+ md_lines = []
310
+
311
+ # Add search queries if present
312
+ if run_response.citations.search_queries:
313
+ md_lines.append("**Search Queries:**")
314
+ for query in run_response.citations.search_queries:
315
+ md_lines.append(f"- {query}")
316
+ md_lines.append("") # Empty line before URLs
317
+
318
+ # Add URL citations
319
+ md_lines.extend(
297
320
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
298
321
  for i, citation in enumerate(run_response.citations.urls)
299
322
  if citation.url # Only include citations with valid URLs
300
323
  )
324
+
325
+ md_content = "\n".join(md_lines)
301
326
  if md_content: # Only create panel if there are citations
302
327
  citations_panel = create_panel(
303
328
  content=Markdown(md_content),
@@ -464,6 +489,11 @@ def print_response_stream(
464
489
  _response_content = JSON(resp.content.model_dump_json(exclude_none=True), indent=2) # type: ignore
465
490
  except Exception as e:
466
491
  log_warning(f"Failed to convert response to JSON: {e}")
492
+ elif team.output_schema is not None and isinstance(resp.content, dict):
493
+ try:
494
+ _response_content = JSON(json.dumps(resp.content), indent=2) # type: ignore
495
+ except Exception as e:
496
+ log_warning(f"Failed to convert response to JSON: {e}")
467
497
  if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None: # type: ignore
468
498
  _response_reasoning_content += resp.reasoning_content # type: ignore
469
499
  if hasattr(resp, "reasoning_steps") and resp.reasoning_steps is not None: # type: ignore
@@ -660,11 +690,23 @@ def print_response_stream(
660
690
 
661
691
  # Add citations
662
692
  if hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
663
- md_content = "\n".join(
693
+ md_lines = []
694
+
695
+ # Add search queries if present
696
+ if resp.citations.search_queries:
697
+ md_lines.append("**Search Queries:**")
698
+ for query in resp.citations.search_queries:
699
+ md_lines.append(f"- {query}")
700
+ md_lines.append("") # Empty line before URLs
701
+
702
+ # Add URL citations
703
+ md_lines.extend(
664
704
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
665
705
  for i, citation in enumerate(resp.citations.urls)
666
706
  if citation.url # Only include citations with valid URLs
667
707
  )
708
+
709
+ md_content = "\n".join(md_lines)
668
710
  if md_content: # Only create panel if there are citations
669
711
  citations_panel = create_panel(
670
712
  content=Markdown(md_content),
@@ -804,11 +846,23 @@ def print_response_stream(
804
846
 
805
847
  # Add citations if any
806
848
  if member_response.citations is not None and member_response.citations.urls is not None:
807
- md_content = "\n".join(
849
+ md_lines = []
850
+
851
+ # Add search queries if present
852
+ if member_response.citations.search_queries:
853
+ md_lines.append("**Search Queries:**")
854
+ for query in member_response.citations.search_queries:
855
+ md_lines.append(f"- {query}")
856
+ md_lines.append("") # Empty line before URLs
857
+
858
+ # Add URL citations
859
+ md_lines.extend(
808
860
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
809
861
  for i, citation in enumerate(member_response.citations.urls)
810
862
  if citation.url # Only include citations with valid URLs
811
863
  )
864
+
865
+ md_content = "\n".join(md_lines)
812
866
  if md_content: # Only create panel if there are citations
813
867
  citations_panel = create_panel(
814
868
  content=Markdown(md_content),
@@ -868,11 +922,23 @@ def print_response_stream(
868
922
 
869
923
  # Add team citations
870
924
  if hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
871
- md_content = "\n".join(
925
+ md_lines = []
926
+
927
+ # Add search queries if present
928
+ if resp.citations.search_queries:
929
+ md_lines.append("**Search Queries:**")
930
+ for query in resp.citations.search_queries:
931
+ md_lines.append(f"- {query}")
932
+ md_lines.append("") # Empty line before URLs
933
+
934
+ # Add URL citations
935
+ md_lines.extend(
872
936
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
873
937
  for i, citation in enumerate(resp.citations.urls)
874
938
  if citation.url # Only include citations with valid URLs
875
939
  )
940
+
941
+ md_content = "\n".join(md_lines)
876
942
  if md_content: # Only create panel if there are citations
877
943
  citations_panel = create_panel(
878
944
  content=Markdown(md_content),
@@ -1096,11 +1162,23 @@ async def aprint_response(
1096
1162
  panels.append(member_response_panel)
1097
1163
 
1098
1164
  if member_response.citations is not None and member_response.citations.urls is not None:
1099
- md_content = "\n".join(
1165
+ md_lines = []
1166
+
1167
+ # Add search queries if present
1168
+ if member_response.citations.search_queries:
1169
+ md_lines.append("**Search Queries:**")
1170
+ for query in member_response.citations.search_queries:
1171
+ md_lines.append(f"- {query}")
1172
+ md_lines.append("") # Empty line before URLs
1173
+
1174
+ # Add URL citations
1175
+ md_lines.extend(
1100
1176
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
1101
1177
  for i, citation in enumerate(member_response.citations.urls)
1102
1178
  if citation.url # Only include citations with valid URLs
1103
1179
  )
1180
+
1181
+ md_content = "\n".join(md_lines)
1104
1182
  if md_content:
1105
1183
  citations_panel = create_panel(
1106
1184
  content=Markdown(md_content),
@@ -1158,11 +1236,23 @@ async def aprint_response(
1158
1236
 
1159
1237
  # Add citations
1160
1238
  if run_response.citations is not None and run_response.citations.urls is not None:
1161
- md_content = "\n".join(
1239
+ md_lines = []
1240
+
1241
+ # Add search queries if present
1242
+ if run_response.citations.search_queries:
1243
+ md_lines.append("**Search Queries:**")
1244
+ for query in run_response.citations.search_queries:
1245
+ md_lines.append(f"- {query}")
1246
+ md_lines.append("") # Empty line before URLs
1247
+
1248
+ # Add URL citations
1249
+ md_lines.extend(
1162
1250
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
1163
1251
  for i, citation in enumerate(run_response.citations.urls)
1164
1252
  if citation.url # Only include citations with valid URLs
1165
1253
  )
1254
+
1255
+ md_content = "\n".join(md_lines)
1166
1256
  if md_content: # Only create panel if there are citations
1167
1257
  citations_panel = create_panel(
1168
1258
  content=Markdown(md_content),
@@ -1328,6 +1418,11 @@ async def aprint_response_stream(
1328
1418
  _response_content = JSON(resp.content.model_dump_json(exclude_none=True), indent=2) # type: ignore
1329
1419
  except Exception as e:
1330
1420
  log_warning(f"Failed to convert response to JSON: {e}")
1421
+ elif team.output_schema is not None and isinstance(resp.content, dict):
1422
+ try:
1423
+ _response_content = JSON(json.dumps(resp.content), indent=2) # type: ignore
1424
+ except Exception as e:
1425
+ log_warning(f"Failed to convert response to JSON: {e}")
1331
1426
  if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None: # type: ignore
1332
1427
  _response_reasoning_content += resp.reasoning_content # type: ignore
1333
1428
  if hasattr(resp, "reasoning_steps") and resp.reasoning_steps is not None: # type: ignore
@@ -1524,11 +1619,23 @@ async def aprint_response_stream(
1524
1619
 
1525
1620
  # Add citations
1526
1621
  if hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
1527
- md_content = "\n".join(
1622
+ md_lines = []
1623
+
1624
+ # Add search queries if present
1625
+ if resp.citations.search_queries:
1626
+ md_lines.append("**Search Queries:**")
1627
+ for query in resp.citations.search_queries:
1628
+ md_lines.append(f"- {query}")
1629
+ md_lines.append("") # Empty line before URLs
1630
+
1631
+ # Add URL citations
1632
+ md_lines.extend(
1528
1633
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
1529
1634
  for i, citation in enumerate(resp.citations.urls)
1530
1635
  if citation.url # Only include citations with valid URLs
1531
1636
  )
1637
+
1638
+ md_content = "\n".join(md_lines)
1532
1639
  if md_content: # Only create panel if there are citations
1533
1640
  citations_panel = create_panel(
1534
1641
  content=Markdown(md_content),
@@ -1685,11 +1792,23 @@ async def aprint_response_stream(
1685
1792
 
1686
1793
  # Add citations if any
1687
1794
  if member_response.citations is not None and member_response.citations.urls is not None:
1688
- md_content = "\n".join(
1795
+ md_lines = []
1796
+
1797
+ # Add search queries if present
1798
+ if member_response.citations.search_queries:
1799
+ md_lines.append("**Search Queries:**")
1800
+ for query in member_response.citations.search_queries:
1801
+ md_lines.append(f"- {query}")
1802
+ md_lines.append("") # Empty line before URLs
1803
+
1804
+ # Add URL citations
1805
+ md_lines.extend(
1689
1806
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
1690
1807
  for i, citation in enumerate(member_response.citations.urls)
1691
1808
  if citation.url # Only include citations with valid URLs
1692
1809
  )
1810
+
1811
+ md_content = "\n".join(md_lines)
1693
1812
  if md_content: # Only create panel if there are citations
1694
1813
  citations_panel = create_panel(
1695
1814
  content=Markdown(md_content),
@@ -1749,11 +1868,23 @@ async def aprint_response_stream(
1749
1868
 
1750
1869
  # Add team citations
1751
1870
  if hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
1752
- md_content = "\n".join(
1871
+ md_lines = []
1872
+
1873
+ # Add search queries if present
1874
+ if resp.citations.search_queries:
1875
+ md_lines.append("**Search Queries:**")
1876
+ for query in resp.citations.search_queries:
1877
+ md_lines.append(f"- {query}")
1878
+ md_lines.append("") # Empty line before URLs
1879
+
1880
+ # Add URL citations
1881
+ md_lines.extend(
1753
1882
  f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
1754
1883
  for i, citation in enumerate(resp.citations.urls)
1755
1884
  if citation.url # Only include citations with valid URLs
1756
1885
  )
1886
+
1887
+ md_content = "\n".join(md_lines)
1757
1888
  if md_content: # Only create panel if there are citations
1758
1889
  citations_panel = create_panel(
1759
1890
  content=Markdown(md_content),
agno/utils/prompts.py CHANGED
@@ -6,7 +6,7 @@ from pydantic import BaseModel
6
6
  from agno.utils.log import log_warning
7
7
 
8
8
 
9
- def get_json_output_prompt(output_schema: Union[str, list, BaseModel]) -> str:
9
+ def get_json_output_prompt(output_schema: Union[str, list, dict, BaseModel]) -> str:
10
10
  """Return the JSON output prompt for the Agent.
11
11
 
12
12
  This is added to the system prompt when the output_schema is set and structured_outputs is False.
@@ -22,11 +22,13 @@ def get_json_output_prompt(output_schema: Union[str, list, BaseModel]) -> str:
22
22
  json_output_prompt += "\n<json_fields>"
23
23
  json_output_prompt += f"\n{json.dumps(output_schema)}"
24
24
  json_output_prompt += "\n</json_fields>"
25
- elif (
26
- issubclass(type(output_schema), BaseModel)
27
- or issubclass(output_schema, BaseModel) # type: ignore
28
- or isinstance(output_schema, BaseModel)
29
- ): # type: ignore
25
+ elif isinstance(output_schema, dict):
26
+ json_output_prompt += "\n<json_fields>"
27
+ json_output_prompt += f"\n{json.dumps(output_schema)}"
28
+ json_output_prompt += "\n</json_fields>"
29
+ elif (isinstance(output_schema, type) and issubclass(output_schema, BaseModel)) or isinstance(
30
+ output_schema, BaseModel
31
+ ):
30
32
  json_schema = output_schema.model_json_schema()
31
33
  if json_schema is not None:
32
34
  response_model_properties = {}
agno/utils/string.py CHANGED
@@ -201,6 +201,52 @@ def parse_response_model_str(content: str, output_schema: Type[BaseModel]) -> Op
201
201
  return structured_output
202
202
 
203
203
 
204
+ def parse_response_dict_str(content: str) -> Optional[dict]:
205
+ """Parse dict from string content, extracting JSON if needed"""
206
+ from agno.utils.reasoning import extract_thinking_content
207
+
208
+ # Handle thinking content b/w <think> tags
209
+ if "</think>" in content:
210
+ reasoning_content, output_content = extract_thinking_content(content)
211
+ if reasoning_content:
212
+ content = output_content
213
+
214
+ # Clean content first to simplify all parsing attempts
215
+ cleaned_content = _clean_json_content(content)
216
+
217
+ try:
218
+ # First attempt: direct JSON parsing on cleaned content
219
+ return json.loads(cleaned_content)
220
+ except json.JSONDecodeError as e:
221
+ logger.warning(f"Failed to parse cleaned JSON: {e}")
222
+
223
+ # Second attempt: Extract individual JSON objects
224
+ candidate_jsons = _extract_json_objects(cleaned_content)
225
+
226
+ if len(candidate_jsons) == 1:
227
+ # Single JSON object - try to parse it directly
228
+ try:
229
+ return json.loads(candidate_jsons[0])
230
+ except json.JSONDecodeError:
231
+ pass
232
+
233
+ if len(candidate_jsons) > 1:
234
+ # Final attempt: Merge multiple JSON objects
235
+ merged_data: dict = {}
236
+ for candidate in candidate_jsons:
237
+ try:
238
+ obj = json.loads(candidate)
239
+ if isinstance(obj, dict):
240
+ merged_data.update(obj)
241
+ except json.JSONDecodeError:
242
+ continue
243
+ if merged_data:
244
+ return merged_data
245
+
246
+ logger.warning("All parsing attempts failed.")
247
+ return None
248
+
249
+
204
250
  def generate_id(seed: Optional[str] = None) -> str:
205
251
  """
206
252
  Generate a deterministic UUID5 based on a seed string.