agno 2.3.0__py3-none-any.whl → 2.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/models/anthropic/claude.py CHANGED
@@ -5,7 +5,7 @@ from os import getenv
  from typing import Any, Dict, List, Optional, Type, Union
 
  import httpx
- from pydantic import BaseModel
+ from pydantic import BaseModel, ValidationError
 
  from agno.exceptions import ModelProviderError, ModelRateLimitError
  from agno.models.base import Model
@@ -80,6 +80,30 @@ class Claude(Model):
          "claude-3-5-haiku-latest",
      }
 
+     # Models that DO NOT support native structured outputs
+     # All future models are assumed to support structured outputs
+     NON_STRUCTURED_OUTPUT_MODELS = {
+         # Claude 3.x family (all versions)
+         "claude-3-opus-20240229",
+         "claude-3-sonnet-20240229",
+         "claude-3-haiku-20240307",
+         "claude-3-opus",
+         "claude-3-sonnet",
+         "claude-3-haiku",
+         # Claude 3.5 family (all versions except Sonnet 4.5)
+         "claude-3-5-sonnet-20240620",
+         "claude-3-5-sonnet-20241022",
+         "claude-3-5-sonnet",
+         "claude-3-5-haiku-20241022",
+         "claude-3-5-haiku-latest",
+         "claude-3-5-haiku",
+         # Claude Sonnet 4.x family (versions before 4.5)
+         "claude-sonnet-4-20250514",
+         "claude-sonnet-4",
+         # Claude Opus 4.x family (versions before 4.1)
+         # (Add any Opus 4.x models released before 4.1 if they exist)
+     }
+
      id: str = "claude-sonnet-4-5-20250929"
      name: str = "Claude"
      provider: str = "Anthropic"
@@ -118,6 +142,9 @@ class Claude(Model):
          # Validate thinking support immediately at model creation
          if self.thinking:
              self._validate_thinking_support()
+         # Set structured outputs capability flag for supported models
+         if self._supports_structured_outputs():
+             self.supports_native_structured_outputs = True
          # Set up skills configuration if skills are enabled
          if self.skills:
              self._setup_skills_configuration()
@@ -141,13 +168,72 @@
          client_params["default_headers"] = self.default_headers
          return client_params
 
-     def _has_beta_features(self) -> bool:
+     def _supports_structured_outputs(self) -> bool:
+         """
+         Check if the current model supports native structured outputs.
+
+         Returns:
+             bool: True if model supports structured outputs
+         """
+         # If model is in blacklist, it doesn't support structured outputs
+         if self.id in self.NON_STRUCTURED_OUTPUT_MODELS:
+             log_warning(
+                 f"Model '{self.id}' does not support structured outputs. "
+                 "Structured output features will not be available for this model."
+             )
+             return False
+
+         # Check for legacy model patterns that don't support structured outputs
+         if self.id.startswith("claude-3-"):
+             return False
+         if self.id.startswith("claude-sonnet-4-") and not self.id.startswith("claude-sonnet-4-5"):
+             return False
+         if self.id.startswith("claude-opus-4-") and not self.id.startswith("claude-opus-4-1"):
+             return False
+
+         return True
+
+     def _using_structured_outputs(
+         self,
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> bool:
+         """
+         Check if structured outputs are being used in this request.
+
+         Args:
+             response_format: Response format parameter
+             tools: Tools list to check for strict mode
+
+         Returns:
+             bool: True if structured outputs are in use
+         """
+         # Check for output_format usage
+         if response_format is not None and self._supports_structured_outputs():
+             return True
+
+         # Check for strict tools
+         if tools:
+             for tool in tools:
+                 if tool.get("type") == "function":
+                     func_def = tool.get("function", {})
+                     if func_def.get("strict") is True:
+                         return True
+
+         return False
+
+     def _has_beta_features(
+         self,
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> bool:
          """Check if the model has any Anthropic beta features enabled."""
          return (
              self.mcp_servers is not None
              or self.context_management is not None
              or self.skills is not None
              or self.betas is not None
+             or self._using_structured_outputs(response_format, tools)
          )
 
      def get_client(self) -> AnthropicClient:
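The capability gate above reduces to a pure function of the model id. A minimal standalone sketch of the same rules (the blacklist here is abbreviated for illustration; only the prefix checks are reproduced in full):

```python
# Sketch of the model-id gating used by _supports_structured_outputs (abbreviated blacklist).
NON_STRUCTURED_OUTPUT_MODELS = {"claude-3-opus-20240229", "claude-sonnet-4-20250514"}


def supports_structured_outputs(model_id: str) -> bool:
    """Return True unless the id belongs to a pre-structured-outputs family."""
    if model_id in NON_STRUCTURED_OUTPUT_MODELS:
        return False
    if model_id.startswith("claude-3-"):
        return False
    if model_id.startswith("claude-sonnet-4-") and not model_id.startswith("claude-sonnet-4-5"):
        return False
    if model_id.startswith("claude-opus-4-") and not model_id.startswith("claude-opus-4-1"):
        return False
    return True


assert supports_structured_outputs("claude-sonnet-4-5-20250929")
assert not supports_structured_outputs("claude-3-5-haiku-latest")
assert not supports_structured_outputs("claude-sonnet-4-20250514")
```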
@@ -230,7 +316,70 @@ class Claude(Model):
              if beta not in self.betas:
                  self.betas.append(beta)
 
-     def get_request_params(self) -> Dict[str, Any]:
+     def _ensure_additional_properties_false(self, schema: Dict[str, Any]) -> None:
+         """
+         Recursively ensure all object types have additionalProperties: false.
+         """
+         if isinstance(schema, dict):
+             if schema.get("type") == "object":
+                 schema["additionalProperties"] = False
+
+             # Recursively process nested schemas
+             for key, value in schema.items():
+                 if key in ["properties", "items", "allOf", "anyOf", "oneOf"]:
+                     if isinstance(value, dict):
+                         self._ensure_additional_properties_false(value)
+                     elif isinstance(value, list):
+                         for item in value:
+                             if isinstance(item, dict):
+                                 self._ensure_additional_properties_false(item)
+
+     def _build_output_format(self, response_format: Optional[Union[Dict, Type[BaseModel]]]) -> Optional[Dict[str, Any]]:
+         """
+         Build Anthropic output_format parameter from response_format.
+
+         Args:
+             response_format: Pydantic model or dict format
+
+         Returns:
+             Dict with output_format structure or None
+         """
+         if response_format is None:
+             return None
+
+         if not self._supports_structured_outputs():
+             return None
+
+         # Handle Pydantic BaseModel
+         if isinstance(response_format, type) and issubclass(response_format, BaseModel):
+             try:
+                 # Try to use Anthropic SDK's transform_schema helper if available
+                 from anthropic import transform_schema
+
+                 schema = transform_schema(response_format.model_json_schema())
+             except (ImportError, AttributeError):
+                 # Fallback to direct schema conversion
+                 schema = response_format.model_json_schema()
+                 # Ensure additionalProperties is False
+                 if isinstance(schema, dict):
+                     if "additionalProperties" not in schema:
+                         schema["additionalProperties"] = False
+                     # Recursively ensure all object types have additionalProperties: false
+                     self._ensure_additional_properties_false(schema)
+
+             return {"type": "json_schema", "schema": schema}
+
+         # Handle dict format (already in correct structure)
+         elif isinstance(response_format, dict):
+             return response_format
+
+         return None
+
+     def get_request_params(
+         self,
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
          """
          Generate keyword arguments for API requests.
          """
@@ -251,8 +400,20 @@
              _request_params["top_p"] = self.top_p
          if self.top_k:
              _request_params["top_k"] = self.top_k
-         if self.betas:
-             _request_params["betas"] = self.betas
+
+         # Build betas list - include existing betas and add new one if needed
+         betas_list = list(self.betas) if self.betas else []
+
+         # Add structured outputs beta header if using structured outputs
+         if self._using_structured_outputs(response_format, tools):
+             beta_header = "structured-outputs-2025-11-13"
+             if beta_header not in betas_list:
+                 betas_list.append(beta_header)
+
+         # Include betas if any are present
+         if betas_list:
+             _request_params["betas"] = betas_list
+
          if self.context_management:
              _request_params["context_management"] = self.context_management
          if self.mcp_servers:
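The beta list assembly copies any user-supplied betas and appends the structured-outputs header at most once; a tiny sketch of that merge (the pre-existing header in the example is a placeholder, not a real Anthropic beta name):

```python
from typing import List, Optional

STRUCTURED_OUTPUTS_BETA = "structured-outputs-2025-11-13"  # header string taken from the diff above


def build_betas(existing: Optional[List[str]], using_structured_outputs: bool) -> List[str]:
    betas = list(existing) if existing else []  # copy so the model's own list is not mutated per request
    if using_structured_outputs and STRUCTURED_OUTPUTS_BETA not in betas:
        betas.append(STRUCTURED_OUTPUTS_BETA)
    return betas


print(build_betas(["some-existing-beta"], using_structured_outputs=True))
# ['some-existing-beta', 'structured-outputs-2025-11-13']
```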
@@ -260,26 +421,51 @@
              {k: v for k, v in asdict(server).items() if v is not None} for server in self.mcp_servers
          ]
          if self.skills:
-             _request_params["betas"] = self.betas
              _request_params["container"] = {"skills": self.skills}
          if self.request_params:
              _request_params.update(self.request_params)
 
          return _request_params
 
+     def _validate_structured_outputs_usage(
+         self,
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> None:
+         """
+         Validate that structured outputs are only used with supported models.
+
+         Raises:
+             ValueError: If structured outputs are used with unsupported model
+         """
+         if not self._using_structured_outputs(response_format, tools):
+             return
+
+         if not self._supports_structured_outputs():
+             raise ValueError(f"Model '{self.id}' does not support structured outputs.\n\n")
+
      def _prepare_request_kwargs(
-         self, system_message: str, tools: Optional[List[Dict[str, Any]]] = None
+         self,
+         system_message: str,
+         tools: Optional[List[Dict[str, Any]]] = None,
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
      ) -> Dict[str, Any]:
          """
          Prepare the request keyword arguments for the API call.
 
          Args:
              system_message (str): The concatenated system messages.
+             tools: Optional list of tools
+             response_format: Optional response format (Pydantic model or dict)
 
          Returns:
              Dict[str, Any]: The request keyword arguments.
          """
-         request_kwargs = self.get_request_params().copy()
+         # Validate structured outputs usage
+         self._validate_structured_outputs_usage(response_format, tools)
+
+         # Pass response_format and tools to get_request_params for beta header handling
+         request_kwargs = self.get_request_params(response_format=response_format, tools=tools).copy()
          if system_message:
              if self.cache_system_prompt:
                  cache_control = (
@@ -300,9 +486,15 @@
              else:
                  tools = [code_execution_tool]
 
+         # Format tools (this will handle strict mode)
          if tools:
              request_kwargs["tools"] = format_tools_for_model(tools)
 
+         # Build output_format if response_format is provided
+         output_format = self._build_output_format(response_format)
+         if output_format:
+             request_kwargs["output_format"] = output_format
+
          if request_kwargs:
              log_debug(f"Calling {self.provider} with request parameters: {request_kwargs}", log_level=2)
          return request_kwargs
@@ -324,9 +516,9 @@
              run_response.metrics.set_time_to_first_token()
 
          chat_messages, system_message = format_messages(messages)
-         request_kwargs = self._prepare_request_kwargs(system_message, tools)
+         request_kwargs = self._prepare_request_kwargs(system_message, tools=tools, response_format=response_format)
 
-         if self._has_beta_features():
+         if self._has_beta_features(response_format=response_format, tools=tools):
              assistant_message.metrics.start_timer()
              provider_response = self.get_client().beta.messages.create(
                  model=self.id,
@@ -387,14 +579,14 @@
              APIStatusError: For other API-related errors
          """
          chat_messages, system_message = format_messages(messages)
-         request_kwargs = self._prepare_request_kwargs(system_message, tools)
+         request_kwargs = self._prepare_request_kwargs(system_message, tools=tools, response_format=response_format)
 
          try:
              if run_response and run_response.metrics:
                  run_response.metrics.set_time_to_first_token()
 
              # Beta features
-             if self._has_beta_features():
+             if self._has_beta_features(response_format=response_format, tools=tools):
                  assistant_message.metrics.start_timer()
                  with self.get_client().beta.messages.stream(
                      model=self.id,
@@ -402,7 +594,7 @@
                      **request_kwargs,
                  ) as stream:
                      for chunk in stream:
-                         yield self._parse_provider_response_delta(chunk) # type: ignore
+                         yield self._parse_provider_response_delta(chunk, response_format=response_format) # type: ignore
              else:
                  assistant_message.metrics.start_timer()
                  with self.get_client().messages.stream(
@@ -411,7 +603,7 @@
                      **request_kwargs,
                  ) as stream:
                      for chunk in stream: # type: ignore
-                         yield self._parse_provider_response_delta(chunk) # type: ignore
+                         yield self._parse_provider_response_delta(chunk, response_format=response_format) # type: ignore
 
              assistant_message.metrics.stop_timer()
 
@@ -447,10 +639,10 @@
              run_response.metrics.set_time_to_first_token()
 
          chat_messages, system_message = format_messages(messages)
-         request_kwargs = self._prepare_request_kwargs(system_message, tools)
+         request_kwargs = self._prepare_request_kwargs(system_message, tools=tools, response_format=response_format)
 
          # Beta features
-         if self._has_beta_features():
+         if self._has_beta_features(response_format=response_format, tools=tools):
              assistant_message.metrics.start_timer()
              provider_response = await self.get_async_client().beta.messages.create(
                  model=self.id,
@@ -512,9 +704,9 @@
              run_response.metrics.set_time_to_first_token()
 
          chat_messages, system_message = format_messages(messages)
-         request_kwargs = self._prepare_request_kwargs(system_message, tools)
+         request_kwargs = self._prepare_request_kwargs(system_message, tools=tools, response_format=response_format)
 
-         if self._has_beta_features():
+         if self._has_beta_features(response_format=response_format, tools=tools):
              assistant_message.metrics.start_timer()
              async with self.get_async_client().beta.messages.stream(
                  model=self.id,
@@ -522,7 +714,7 @@
                  **request_kwargs,
              ) as stream:
                  async for chunk in stream:
-                     yield self._parse_provider_response_delta(chunk) # type: ignore
+                     yield self._parse_provider_response_delta(chunk, response_format=response_format) # type: ignore
          else:
              assistant_message.metrics.start_timer()
              async with self.get_async_client().messages.stream(
@@ -531,7 +723,7 @@
                  **request_kwargs,
              ) as stream:
                  async for chunk in stream: # type: ignore
-                     yield self._parse_provider_response_delta(chunk) # type: ignore
+                     yield self._parse_provider_response_delta(chunk, response_format=response_format) # type: ignore
 
          assistant_message.metrics.stop_timer()
 
@@ -556,12 +748,18 @@
              return tool_call_prompt
          return None
 
-     def _parse_provider_response(self, response: Union[AnthropicMessage, BetaMessage], **kwargs) -> ModelResponse:
+     def _parse_provider_response(
+         self,
+         response: Union[AnthropicMessage, BetaMessage],
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+         **kwargs,
+     ) -> ModelResponse:
          """
          Parse the Claude response into a ModelResponse.
 
          Args:
              response: Raw response from Anthropic
+             response_format: Optional response format for structured output parsing
 
          Returns:
              ModelResponse: Parsed response data
@@ -574,10 +772,32 @@
          if response.content:
              for block in response.content:
                  if block.type == "text":
+                     text_content = block.text
+
                      if model_response.content is None:
-                         model_response.content = block.text
+                         model_response.content = text_content
                      else:
-                         model_response.content += block.text
+                         model_response.content += text_content
+
+                     # Handle structured outputs (JSON outputs)
+                     if (
+                         response_format is not None
+                         and isinstance(response_format, type)
+                         and issubclass(response_format, BaseModel)
+                     ):
+                         if text_content:
+                             try:
+                                 # Parse JSON from text content
+                                 parsed_data = json.loads(text_content)
+                                 # Validate against Pydantic model
+                                 model_response.parsed = response_format.model_validate(parsed_data)
+                                 log_debug(f"Successfully parsed structured output: {model_response.parsed}")
+                             except json.JSONDecodeError as e:
+                                 log_warning(f"Failed to parse JSON from structured output: {e}")
+                             except ValidationError as e:
+                                 log_warning(f"Failed to validate structured output against schema: {e}")
+                             except Exception as e:
+                                 log_warning(f"Unexpected error parsing structured output: {e}")
 
                  # Capture citations from the response
                  if block.citations is not None:
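The parsing step itself is plain json plus Pydantic validation. A standalone sketch of the same try/except ladder, with print standing in for agno's log helpers and a toy schema:

```python
import json
from typing import Optional

from pydantic import BaseModel, ValidationError


class Weather(BaseModel):
    # Toy schema for illustration.
    city: str
    temperature_c: float


def parse_structured_output(text: str, response_format: type[BaseModel]) -> Optional[BaseModel]:
    """Mirror of the error handling above: JSON errors and schema errors are reported separately."""
    try:
        return response_format.model_validate(json.loads(text))
    except json.JSONDecodeError as e:
        print(f"Failed to parse JSON from structured output: {e}")
    except ValidationError as e:
        print(f"Failed to validate structured output against schema: {e}")
    return None


print(parse_structured_output('{"city": "Berlin", "temperature_c": 18.5}', Weather))
print(parse_structured_output('{"city": "Berlin"}', Weather))  # missing field -> validation error
print(parse_structured_output("not json", Weather))  # JSON error
```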
@@ -669,12 +889,14 @@
              ParsedBetaContentBlockStopEvent,
              ParsedBetaMessageStopEvent,
          ],
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
      ) -> ModelResponse:
          """
          Parse the Claude streaming response into ModelProviderResponse objects.
 
          Args:
              response: Raw response chunk from Anthropic
+             response_format: Optional response format for structured output parsing
 
          Returns:
              ModelResponse: Iterator of parsed response data
@@ -717,11 +939,24 @@
                  }
              ]
 
-         # Capture citations from the final response
+         # Capture citations from the final response and handle structured outputs
          elif isinstance(response, (MessageStopEvent, ParsedBetaMessageStopEvent)):
+             # In streaming mode, content has already been emitted via ContentBlockDeltaEvent chunks
+             # Setting content here would cause duplication since _populate_stream_data accumulates with +=
+             # Keep content empty to avoid duplication
              model_response.content = ""
              model_response.citations = Citations(raw=[], urls=[], documents=[])
+
+             # Accumulate text content for structured output parsing (but don't set model_response.content)
+             # The text was already streamed via ContentBlockDeltaEvent chunks
+             accumulated_text = ""
+
              for block in response.message.content: # type: ignore
+                 # Handle text blocks for structured output parsing
+                 if block.type == "text":
+                     accumulated_text += block.text
+
+                 # Handle citations
                  citations = getattr(block, "citations", None)
                  if not citations:
                      continue
@@ -736,6 +971,28 @@
                          DocumentCitation(document_title=citation.document_title, cited_text=citation.cited_text)
                      )
 
+             # Handle structured outputs (JSON outputs) from accumulated text
+             # Note: We parse from accumulated_text but don't set model_response.content to avoid duplication
+             # The content was already streamed via ContentBlockDeltaEvent chunks
+             if (
+                 response_format is not None
+                 and isinstance(response_format, type)
+                 and issubclass(response_format, BaseModel)
+             ):
+                 if accumulated_text:
+                     try:
+                         # Parse JSON from accumulated text content
+                         parsed_data = json.loads(accumulated_text)
+                         # Validate against Pydantic model
+                         model_response.parsed = response_format.model_validate(parsed_data)
+                         log_debug(f"Successfully parsed structured output from stream: {model_response.parsed}")
+                     except json.JSONDecodeError as e:
+                         log_warning(f"Failed to parse JSON from structured output in stream: {e}")
+                     except ValidationError as e:
+                         log_warning(f"Failed to validate structured output against schema in stream: {e}")
+                     except Exception as e:
+                         log_warning(f"Unexpected error parsing structured output in stream: {e}")
+
              # Capture context management information if present
              if self.context_management is not None and hasattr(response.message, "context_management"): # type: ignore
                  context_mgmt = response.message.context_management # type: ignore
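In the streaming path the same validation runs once, at the stop event, over text re-assembled from the final message's blocks (the deltas were already emitted as content, so nothing is duplicated). A compact sketch of that accumulate-then-parse pattern, with plain strings standing in for the Anthropic event objects:

```python
import json

from pydantic import BaseModel, ValidationError


class Summary(BaseModel):
    # Toy schema for illustration.
    title: str
    points: list[str]


# Stand-ins for the text blocks of the final message seen at MessageStopEvent time.
final_text_blocks = ['{"title": "Release no', 'tes", "points": ["structured outputs", "strict tools"]}']

accumulated_text = ""
for block_text in final_text_blocks:
    accumulated_text += block_text  # parsed here only; the content itself was already streamed as deltas

parsed = None
if accumulated_text:
    try:
        parsed = Summary.model_validate(json.loads(accumulated_text))
    except (json.JSONDecodeError, ValidationError) as e:
        print(f"Could not parse structured output from stream: {e}")

print(parsed)
```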
agno/os/app.py CHANGED
@@ -316,16 +316,16 @@ class AgentOS:
          """Initialize and configure all agents for AgentOS usage."""
          if not self.agents:
              return
-
          for agent in self.agents:
              # Track all MCP tools to later handle their connection
              if agent.tools:
                  for tool in agent.tools:
-                     # Checking if the tool is a MCPTools or MultiMCPTools instance
-                     type_name = type(tool).__name__
-                     if type_name in ("MCPTools", "MultiMCPTools"):
-                         if tool not in self.mcp_tools:
-                             self.mcp_tools.append(tool)
+                     # Checking if the tool is an instance of MCPTools, MultiMCPTools, or a subclass of those
+                     if hasattr(type(tool), "__mro__"):
+                         mro_names = {cls.__name__ for cls in type(tool).__mro__}
+                         if mro_names & {"MCPTools", "MultiMCPTools"}:
+                             if tool not in self.mcp_tools:
+                                 self.mcp_tools.append(tool)
 
              agent.initialize_agent()
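The reworked check matches on any class name in the tool's MRO, so subclasses of MCPTools or MultiMCPTools are now tracked as well, without AgentOS having to import the MCP module. A quick illustration with dummy classes standing in for the real ones:

```python
class MCPTools:  # stand-in for agno's real class, for illustration only
    pass


class MyCustomMCPTools(MCPTools):
    pass


tool = MyCustomMCPTools()

# Old check: exact type name only -> misses subclasses.
print(type(tool).__name__ in ("MCPTools", "MultiMCPTools"))  # False

# New check: any class name in the MRO -> catches subclasses too.
mro_names = {cls.__name__ for cls in type(tool).__mro__}
print(bool(mro_names & {"MCPTools", "MultiMCPTools"}))  # True
```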
agno/session/workflow.py CHANGED
@@ -2,10 +2,9 @@ from __future__ import annotations
 
  import time
  from dataclasses import dataclass
- from typing import Any, Dict, List, Mapping, Optional, Tuple
+ from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
 
  from pydantic import BaseModel
- from pymongo.cursor import Union
 
  from agno.models.message import Message
  from agno.run.agent import RunOutput
agno/tools/nano_banana.py ADDED
@@ -0,0 +1,151 @@
+ from __future__ import annotations
+
+ import os
+ from io import BytesIO
+ from typing import Any, List, Optional
+ from uuid import uuid4
+
+ from agno.media import Image
+ from agno.tools import Toolkit
+ from agno.tools.function import ToolResult
+ from agno.utils.log import log_debug, logger
+
+ try:
+     from google import genai
+     from google.genai import types
+     from PIL import Image as PILImage
+
+ except ImportError as exc:
+     missing = []
+     try:
+         from google.genai import types
+     except ImportError:
+         missing.append("google-genai")
+
+     try:
+         from PIL import Image as PILImage
+     except ImportError:
+         missing.append("Pillow")
+
+     raise ImportError(
+         f"Missing required package(s): {', '.join(missing)}. Install using: pip install {' '.join(missing)}"
+     ) from exc
+
+
+ # Note: Expand this list as new models become supported by the Google Content Generation API.
+ ALLOWED_MODELS = ["gemini-2.5-flash-image"]
+ ALLOWED_RATIOS = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]
+
+
+ class NanoBananaTools(Toolkit):
+     def __init__(
+         self,
+         model: str = "gemini-2.5-flash-image",
+         aspect_ratio: str = "1:1",
+         api_key: Optional[str] = None,
+         enable_create_image: bool = True,
+         **kwargs,
+     ):
+         self.model = model
+         self.aspect_ratio = aspect_ratio
+         self.api_key = api_key or os.getenv("GOOGLE_API_KEY")
+
+         # Validate model
+         if model not in ALLOWED_MODELS:
+             raise ValueError(f"Invalid model '{model}'. Supported: {', '.join(ALLOWED_MODELS)}")
+
+         if self.aspect_ratio not in ALLOWED_RATIOS:
+             raise ValueError(f"Invalid aspect_ratio '{self.aspect_ratio}'. Supported: {', '.join(ALLOWED_RATIOS)}")
+
+         if not self.api_key:
+             raise ValueError("GOOGLE_API_KEY not set. Export it: `export GOOGLE_API_KEY=<your-key>`")
+
+         tools: List[Any] = []
+         if enable_create_image:
+             tools.append(self.create_image)
+
+         super().__init__(name="nano_banana", tools=tools, **kwargs)
+
+     def create_image(self, prompt: str) -> ToolResult:
+         """Generate an image from a text prompt."""
+         try:
+             client = genai.Client(api_key=self.api_key)
+             log_debug(f"NanoBanana generating image with prompt: {prompt}")
+
+             cfg = types.GenerateContentConfig(
+                 response_modalities=["IMAGE"],
+                 image_config=types.ImageConfig(aspect_ratio=self.aspect_ratio),
+             )
+
+             response = client.models.generate_content(
+                 model=self.model,
+                 contents=[prompt],
+                 config=cfg,
+             )
+
+             generated_images: List[Image] = []
+             response_str = ""
+
+             if not hasattr(response, "candidates") or not response.candidates:
+                 logger.warning("No candidates in response")
+                 return ToolResult(content="No images were generated in the response")
+
+             # Process each candidate
+             for candidate in response.candidates:
+                 if not hasattr(candidate, "content") or not candidate.content or not candidate.content.parts:
+                     continue
+
+                 for part in candidate.content.parts:
+                     if hasattr(part, "text") and part.text:
+                         response_str += part.text + "\n"
+
+                     if hasattr(part, "inline_data") and part.inline_data:
+                         try:
+                             # Extract image data from the blob
+                             image_data = part.inline_data.data
+                             mime_type = getattr(part.inline_data, "mime_type", "image/png")
+
+                             if image_data:
+                                 pil_img = PILImage.open(BytesIO(image_data))
+
+                                 # Save to buffer with proper format
+                                 buffer = BytesIO()
+                                 image_format = "PNG" if "png" in mime_type.lower() else "JPEG"
+                                 pil_img.save(buffer, format=image_format)
+                                 buffer.seek(0)
+
+                                 agno_img = Image(
+                                     id=str(uuid4()),
+                                     content=buffer.getvalue(),
+                                     original_prompt=prompt,
+                                 )
+                                 generated_images.append(agno_img)
+
+                                 log_debug(f"Successfully processed image with ID: {agno_img.id}")
+                                 response_str += f"Image generated successfully (ID: {agno_img.id}).\n"
+
+                         except Exception as img_exc:
+                             logger.error(f"Failed to process image data: {img_exc}")
+                             response_str += f"Failed to process image: {img_exc}\n"
+
+             if hasattr(response, "usage_metadata") and response.usage_metadata:
+                 log_debug(
+                     f"Token usage - Prompt: {response.usage_metadata.prompt_token_count}, "
+                     f"Response: {response.usage_metadata.candidates_token_count}, "
+                     f"Total: {response.usage_metadata.total_token_count}"
+                 )
+
+             if generated_images:
+                 return ToolResult(
+                     content=response_str.strip() or "Image(s) generated successfully",
+                     images=generated_images,
+                 )
+             else:
+                 return ToolResult(
+                     content=response_str.strip() or "No images were generated",
+                     images=None,
+                 )
+
+         except Exception as exc:
+             logger.error(f"NanoBanana image generation failed: {exc}")
+             return ToolResult(content=f"Error generating image: {str(exc)}")
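A usage sketch for the new toolkit, based only on the constructor and create_image shown above. It assumes GOOGLE_API_KEY is exported and that google-genai and Pillow are installed; the toolkit can equally be passed to an agent through its tools list.

```python
from agno.tools.nano_banana import NanoBananaTools

# Export GOOGLE_API_KEY before running; both arguments below are valid per the checks above.
toolkit = NanoBananaTools(model="gemini-2.5-flash-image", aspect_ratio="16:9")

# Calling the tool directly keeps the sketch self-contained.
result = toolkit.create_image("A banana-shaped spaceship over a neon city")
print(result.content)
print(len(result.images or []), "image(s) returned")
```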
agno/utils/models/claude.py CHANGED
@@ -320,6 +320,7 @@ def format_messages(messages: List[Message]) -> Tuple[List[Dict[str, str]], str]
  def format_tools_for_model(tools: Optional[List[Dict[str, Any]]] = None) -> Optional[List[Dict[str, Any]]]:
      """
      Transforms function definitions into a format accepted by the Anthropic API.
+     Now supports strict mode for structured outputs.
      """
      if not tools:
          return None
@@ -352,7 +353,14 @@ def format_tools_for_model(tools: Optional[List[Dict[str, Any]]] = None) -> Opti
                  "type": parameters.get("type", "object"),
                  "properties": input_properties,
                  "required": required_params,
+                 "additionalProperties": False,
              },
          }
+
+         # Add strict mode if specified (check both function dict and tool_def top level)
+         strict_mode = func_def.get("strict") or tool_def.get("strict")
+         if strict_mode is True:
+             tool["strict"] = True
+
          parsed_tools.append(tool)
      return parsed_tools
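With this change a strict function definition is carried through to the Anthropic tool as a top-level "strict": True next to the now-closed input schema. A before/after sketch with a hypothetical weather tool (dict shapes only, not the function's verbatim output):

```python
# Hypothetical OpenAI-style function definition as it might arrive from the framework.
tool_def = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "strict": True,
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

# Approximate shape after conversion by format_tools_for_model with this change applied.
anthropic_tool = {
    "name": "get_weather",
    "description": "Look up the current weather for a city.",
    "input_schema": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
        "additionalProperties": False,
    },
    "strict": True,  # added because the function definition sets strict=True
}

print(anthropic_tool["strict"], anthropic_tool["input_schema"]["additionalProperties"])
```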
agno/workflow/workflow.py CHANGED
@@ -237,7 +237,7 @@ class Workflow:
          self.num_history_runs = num_history_runs
          self._workflow_session: Optional[WorkflowSession] = None
 
-         if stream_intermediate_steps is not None:
+         if stream_intermediate_steps:
              warnings.warn(
                  "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
                  DeprecationWarning,
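Switching from an "is not None" test to a truthiness test means the deprecation warning fires only when the old flag is actually enabled; an explicit False (or the default None) now stays silent. A sketch of the difference:

```python
def old_should_warn(stream_intermediate_steps=None) -> bool:
    return stream_intermediate_steps is not None  # warned even for an explicit False


def new_should_warn(stream_intermediate_steps=None) -> bool:
    return bool(stream_intermediate_steps)  # warns only when the flag is truthy


for value in (None, False, True):
    print(value, "-> old:", old_should_warn(value), "| new:", new_should_warn(value))
```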
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agno
- Version: 2.3.0
+ Version: 2.3.1
  Summary: Agno: a lightweight library for building Multi-Agent Systems
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
  Project-URL: homepage, https://agno.com
@@ -181,7 +181,7 @@ agno/models/utils.py,sha256=jxAIIG2y7KBypwFlc87GzFnvogRpGLfd-wwr6KXZIj8,7269
  agno/models/aimlapi/__init__.py,sha256=XQcFRvt4qJ8ol9nCC0XKEkVEDivdNf3nZNoJZMZ5m8M,78
  agno/models/aimlapi/aimlapi.py,sha256=9Qh-b8HvFSvmPP3VBNGT00qy9izHLMWgR-KDQCE5CM0,1493
  agno/models/anthropic/__init__.py,sha256=nbReX3p17JCwfrMDR9hR7-OaEFZm80I7dng93dl-Fhw,77
- agno/models/anthropic/claude.py,sha256=GseMjJOk0jGhrDXah2C204iI0U2fhgBdIE9gM05msCQ,34663
+ agno/models/anthropic/claude.py,sha256=B71JeZlncEj2VPTlvc17vAP344r5GH4Yid_iedb5AHU,46481
  agno/models/aws/__init__.py,sha256=TbcwQwv9A7KjqBM5RQBR8x46GvyyCxbBCjwkpjfVGKE,352
  agno/models/aws/bedrock.py,sha256=ScZcGwOMh-N0DfArXtDVzKy467QPAN0OS8llBNAc8cQ,28880
  agno/models/aws/claude.py,sha256=JPwHHn9ixzgG2DV1gH9T-bCLQpZ9BoxP3yZRbvKAnj0,16256
@@ -262,7 +262,7 @@ agno/models/vllm/vllm.py,sha256=UtiiSvUR4pG_1CzuhY5MWduRgzM2hGVTakKJ6ZBdQmo,2730
  agno/models/xai/__init__.py,sha256=ukcCxnCHxTtkJNA2bAMTX4MhCv1wJcbiq8ZIfYczIxs,55
  agno/models/xai/xai.py,sha256=jA6_39tfapkjkHKdzbKaNq1t9qIvO1IaZY1hQqEmFVs,4181
  agno/os/__init__.py,sha256=h8oQu7vhD5RZf09jkyM_Kt1Kdq_d5kFB9gJju8QPwcY,55
- agno/os/app.py,sha256=3Lizp7xfYscsaITr8aeyK7zEhWdzyshdQFjG2QMj-S4,34108
+ agno/os/app.py,sha256=sAwBkRFAuJqix_KtHwrYEaViD7wlQTZ80fbzzh9WUro,34228
  agno/os/auth.py,sha256=FyBtAKWtg-qSunCas5m5pK1dVEmikOSZvcCp5r25tTA,1844
  agno/os/config.py,sha256=QPGxENF2yezEOp0yV9OXU-FBs4_vYSXkxbbSol51wPE,2932
  agno/os/mcp.py,sha256=7lAiELFmwcF-eN_pOIJVjun9r5dFcQfPTHD_rP1Zu-s,10318
@@ -331,7 +331,7 @@ agno/session/__init__.py,sha256=p6eqzWcLSHiMex2yZvkwv2yrFUNdGs21TGMS49xrEC4,376
  agno/session/agent.py,sha256=8vVtwwUC5moGWdRcG99Ik6Ay7gbFRrPPnT1ncOUFQIg,10365
  agno/session/summary.py,sha256=9JnDyQyggckd3zx6L8Q5f-lglZvrFQxvPjGU8gLCgR4,10292
  agno/session/team.py,sha256=-MkB6qQCrnXLKko8L5s9fJOWPsjeK5Gx0SXEPoOwSFQ,13437
- agno/session/workflow.py,sha256=o0uVbuz5wSaA1oCTRsMTg_9xj5w0PurSMW9q-n9loj0,19716
+ agno/session/workflow.py,sha256=nPHnh1N0SJby5JRjysCUI-kTDCelQMFfqosEnnLzPIg,19690
  agno/team/__init__.py,sha256=toHidBOo5M3n_TIVtIKHgcDbLL9HR-_U-YQYuIt_XtE,847
  agno/team/team.py,sha256=Xoq4PiUSfORn5K-FAlrQOXcGIL-asASB5HH7JrMTQtk,405486
  agno/tools/__init__.py,sha256=jNll2sELhPPbqm5nPeT4_uyzRO2_KRTW-8Or60kioS0,210
@@ -397,6 +397,7 @@ agno/tools/memory.py,sha256=vpMoKtCqs3m6vkuqmZ4fW9IRf1OhXHQGGaq3exJK0Xo,18449
  agno/tools/mlx_transcribe.py,sha256=kuiYZAM5ZAdkiOfFdbGJsCb0gacnJRtSTFzuX8eWGLw,6379
  agno/tools/models_labs.py,sha256=E91DZDJOU2ldgI3U_ueFiZcXLLWfbh0MsZ2pYP9c0eQ,7933
  agno/tools/moviepy_video.py,sha256=ssSIOO16HcdCqZEYy38TWM5c6_ZOnCw-3ZU3kzuM15c,12888
+ agno/tools/nano_banana.py,sha256=4fVvqe98vp0mjotEs95eIhK0DEU9RlhrgPZKabmwKpU,5859
  agno/tools/neo4j.py,sha256=JIgZM0kIWAiyhiS3T_nzgkKeocZwwVpfTawGIpjn4K8,5180
  agno/tools/newspaper.py,sha256=GAmJ7vBeO3uGS-xHDJ1FxySuecOpiYCJ9y9R5hw3GJk,1281
  agno/tools/newspaper4k.py,sha256=7uYUg1yCK2j7OW4K1QqmLasMDR7K5_A3CJaa6bmYzaI,3048
@@ -503,7 +504,7 @@ agno/utils/whatsapp.py,sha256=242VwGOdbgkxVeIj4D899mpT3GnG_IpcaKnd5qebhTA,9936
  agno/utils/yaml_io.py,sha256=cwTqCE-eBGoi87KLDcwB6iyWe0NcvEmadQjWL1cQ7jE,860
  agno/utils/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/utils/models/ai_foundry.py,sha256=PmhETWhdqZCq8NbDe-MdZVuRXx6DbVOePCyPFiPLceo,1511
- agno/utils/models/claude.py,sha256=-XChDFdqYHMhvVyrbQhAKuuhp4q8QhVky1Q1FokRUGU,12008
+ agno/utils/models/claude.py,sha256=pkHo2UY7eESM3KrmwCG5TxYHDC9Sl9UStHQrY9V1tXQ,12336
  agno/utils/models/cohere.py,sha256=wir2K9u4RmOwq7T7n_2UPZFHfwmnrt_u91Psd_DFqdE,3266
  agno/utils/models/llama.py,sha256=Z5fdOFUFnov1JgUDcP6ICK3M7o64UB1fkcwAs2XaZkM,2515
  agno/utils/models/mistral.py,sha256=SVcJ8Q8SFeadNwCr8BARbET0gvGiylmaDKzcSJ9kWq0,4189
@@ -569,9 +570,9 @@ agno/workflow/router.py,sha256=w7fPxQI2CDZp15zSZXgmvnYM5Td2FFFBrYbcXPwVb-Y,31604
  agno/workflow/step.py,sha256=_sge_L8WBWSYJRNtgzrfCWIPjrWyani1rCRTkQZu3EM,73296
  agno/workflow/steps.py,sha256=NXAOgQ8bssgl-6K1Fxd9zLm1m3ranPnMFJp-SM-GmA8,26706
  agno/workflow/types.py,sha256=T8O0CuKe48MRuPtgdDlECJQL8mgJ4TKClaw9hHN3Ebw,19149
- agno/workflow/workflow.py,sha256=9kfdX-jAtWYvhw-UA45x9iPJu9LHCRl8kkxtwAigCzc,189550
- agno-2.3.0.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- agno-2.3.0.dist-info/METADATA,sha256=UMAIDFuLVaH-f9uHZsDkp-Vz8TM2lqYCyNnXTC09qwo,28850
- agno-2.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- agno-2.3.0.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
- agno-2.3.0.dist-info/RECORD,,
+ agno/workflow/workflow.py,sha256=XjwOxrJrWUvsWJH7qYJs-0BX7z6xyzX9mbw8dMNevUg,189538
+ agno-2.3.1.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ agno-2.3.1.dist-info/METADATA,sha256=c7m0u0MGZBkZnPgVpeN7KureVZKSEJDgfKefA3tvEUo,28850
+ agno-2.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ agno-2.3.1.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
+ agno-2.3.1.dist-info/RECORD,,