agno 1.7.10__py3-none-any.whl → 1.7.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. agno/agent/agent.py +13 -0
  2. agno/app/fastapi/app.py +3 -1
  3. agno/app/fastapi/async_router.py +1 -1
  4. agno/app/playground/app.py +1 -0
  5. agno/document/chunking/semantic.py +1 -3
  6. agno/document/reader/markdown_reader.py +2 -7
  7. agno/document/reader/text_reader.py +2 -2
  8. agno/embedder/google.py +17 -5
  9. agno/knowledge/agent.py +4 -5
  10. agno/knowledge/gcs/pdf.py +105 -1
  11. agno/knowledge/markdown.py +15 -2
  12. agno/knowledge/website.py +4 -1
  13. agno/media.py +2 -0
  14. agno/models/aws/bedrock.py +51 -21
  15. agno/models/dashscope/__init__.py +5 -0
  16. agno/models/dashscope/dashscope.py +81 -0
  17. agno/models/google/gemini.py +56 -19
  18. agno/models/openai/chat.py +8 -1
  19. agno/models/openai/responses.py +57 -23
  20. agno/models/qwen/__init__.py +5 -0
  21. agno/reasoning/default.py +7 -1
  22. agno/reasoning/helpers.py +7 -1
  23. agno/run/response.py +4 -0
  24. agno/run/team.py +4 -0
  25. agno/storage/dynamodb.py +18 -22
  26. agno/storage/in_memory.py +234 -0
  27. agno/team/team.py +175 -67
  28. agno/tools/brandfetch.py +210 -0
  29. agno/tools/bravesearch.py +7 -7
  30. agno/tools/calculator.py +8 -8
  31. agno/tools/discord.py +11 -11
  32. agno/tools/github.py +10 -18
  33. agno/tools/trafilatura.py +372 -0
  34. agno/tools/youtube.py +12 -11
  35. agno/vectordb/clickhouse/clickhousedb.py +1 -1
  36. agno/vectordb/milvus/milvus.py +89 -1
  37. agno/workflow/workflow.py +3 -0
  38. {agno-1.7.10.dist-info → agno-1.7.12.dist-info}/METADATA +4 -1
  39. {agno-1.7.10.dist-info → agno-1.7.12.dist-info}/RECORD +43 -37
  40. {agno-1.7.10.dist-info → agno-1.7.12.dist-info}/WHEEL +0 -0
  41. {agno-1.7.10.dist-info → agno-1.7.12.dist-info}/entry_points.txt +0 -0
  42. {agno-1.7.10.dist-info → agno-1.7.12.dist-info}/licenses/LICENSE +0 -0
  43. {agno-1.7.10.dist-info → agno-1.7.12.dist-info}/top_level.txt +0 -0
@@ -32,6 +32,7 @@ try:
32
32
  Part,
33
33
  ThinkingConfig,
34
34
  Tool,
35
+ UrlContext,
35
36
  )
36
37
  from google.genai.types import (
37
38
  File as GeminiFile,
@@ -68,6 +69,7 @@ class Gemini(Model):
68
69
  search: bool = False
69
70
  grounding: bool = False
70
71
  grounding_dynamic_threshold: Optional[float] = None
72
+ url_context: bool = False
71
73
 
72
74
  temperature: Optional[float] = None
73
75
  top_p: Optional[float] = None
@@ -115,7 +117,6 @@ class Gemini(Model):
115
117
  """
116
118
  if self.client:
117
119
  return self.client
118
-
119
120
  client_params: Dict[str, Any] = {}
120
121
  vertexai = self.vertexai or getenv("GOOGLE_GENAI_USE_VERTEXAI", "false").lower() == "true"
121
122
 
@@ -199,26 +200,34 @@ class Gemini(Model):
199
200
  if thinking_config_params:
200
201
  config["thinking_config"] = ThinkingConfig(**thinking_config_params)
201
202
 
202
- if self.grounding and self.search:
203
- log_info("Both grounding and search are enabled. Grounding will take precedence.")
204
- self.search = False
203
+ # Build tools array based on enabled built-in tools
204
+ builtin_tools = []
205
205
 
206
206
  if self.grounding:
207
- log_info("Grounding enabled. External tools will be disabled.")
208
- config["tools"] = [
207
+ log_info("Grounding enabled. This is a legacy tool. For Gemini 2.0+ Please use enable `search` flag instead.")
208
+ builtin_tools.append(
209
209
  Tool(
210
210
  google_search=GoogleSearchRetrieval(
211
211
  dynamic_retrieval_config=DynamicRetrievalConfig(
212
212
  dynamic_threshold=self.grounding_dynamic_threshold
213
213
  )
214
214
  )
215
- ),
216
- ]
215
+ )
216
+ )
217
217
 
218
- elif self.search:
219
- log_info("Search enabled. External tools will be disabled.")
220
- config["tools"] = [Tool(google_search=GoogleSearch())]
218
+ if self.search:
219
+ log_info("Google Search enabled.")
220
+ builtin_tools.append(Tool(google_search=GoogleSearch()))
221
221
 
222
+ if self.url_context:
223
+ log_info("URL context enabled.")
224
+ builtin_tools.append(Tool(url_context=UrlContext()))
225
+
226
+ # Set tools in config
227
+ if builtin_tools:
228
+ if tools:
229
+ log_info("Built-in tools enabled. External tools will be disabled.")
230
+ config["tools"] = builtin_tools
222
231
  elif tools:
223
232
  config["tools"] = [format_function_definitions(tools)]
224
233
 
@@ -388,7 +397,10 @@ class Gemini(Model):
388
397
  message_parts: List[Any] = []
389
398
 
390
399
  # Function calls
391
- if (not content or role == "model") and message.tool_calls is not None and len(message.tool_calls) > 0:
400
+ if role == "model" and message.tool_calls is not None and len(message.tool_calls) > 0:
401
+ if content is not None:
402
+ content_str = content if isinstance(content, str) else str(content)
403
+ message_parts.append(Part.from_text(text=content_str))
392
404
  for tool_call in message.tool_calls:
393
405
  message_parts.append(
394
406
  Part.from_function_call(
@@ -396,7 +408,7 @@ class Gemini(Model):
396
408
  args=json.loads(tool_call["function"]["arguments"]),
397
409
  )
398
410
  )
399
- # Function results
411
+ # Function call results
400
412
  elif message.tool_calls is not None and len(message.tool_calls) > 0:
401
413
  for tool_call in message.tool_calls:
402
414
  message_parts.append(
@@ -758,13 +770,15 @@ class Gemini(Model):
758
770
 
759
771
  model_response.tool_calls.append(tool_call)
760
772
 
773
+ citations = Citations()
774
+ citations_raw = {}
775
+ citations_urls = []
776
+
761
777
  if response.candidates and response.candidates[0].grounding_metadata is not None:
762
- citations = Citations()
763
778
  grounding_metadata = response.candidates[0].grounding_metadata.model_dump()
764
- citations.raw = grounding_metadata
779
+ citations_raw["grounding_metadata"] = grounding_metadata
765
780
 
766
- # Extract url and title
767
- chunks = grounding_metadata.pop("grounding_chunks", None) or []
781
+ chunks = grounding_metadata.get("grounding_chunks", [])
768
782
  citation_pairs = [
769
783
  (chunk.get("web", {}).get("uri"), chunk.get("web", {}).get("title"))
770
784
  for chunk in chunks
@@ -772,8 +786,31 @@ class Gemini(Model):
772
786
  ]
773
787
 
774
788
  # Create citation objects from filtered pairs
775
- citations.urls = [UrlCitation(url=url, title=title) for url, title in citation_pairs]
776
-
789
+ grounding_urls = [UrlCitation(url=url, title=title) for url, title in citation_pairs]
790
+ citations_urls.extend(grounding_urls)
791
+
792
+ # Handle URLs from URL context tool
793
+ if (
794
+ response.candidates
795
+ and hasattr(response.candidates[0], "url_context_metadata")
796
+ and response.candidates[0].url_context_metadata is not None
797
+ ):
798
+ url_context_metadata = response.candidates[0].url_context_metadata.model_dump()
799
+ citations_raw["url_context_metadata"] = url_context_metadata
800
+
801
+ url_metadata_list = url_context_metadata.get("url_metadata", [])
802
+ for url_meta in url_metadata_list:
803
+ retrieved_url = url_meta.get("retrieved_url")
804
+ status = url_meta.get("url_retrieval_status", "UNKNOWN")
805
+ if retrieved_url and status == "URL_RETRIEVAL_STATUS_SUCCESS":
806
+ # Avoid duplicate URLs
807
+ existing_urls = [citation.url for citation in citations_urls]
808
+ if retrieved_url not in existing_urls:
809
+ citations_urls.append(UrlCitation(url=retrieved_url, title=retrieved_url))
810
+
811
+ if citations_raw or citations_urls:
812
+ citations.raw = citations_raw if citations_raw else None
813
+ citations.urls = citations_urls if citations_urls else None
777
814
  model_response.citations = citations
778
815
 
779
816
  # Extract usage metadata if present
@@ -1,7 +1,7 @@
1
1
  from collections.abc import AsyncIterator
2
2
  from dataclasses import dataclass
3
3
  from os import getenv
4
- from typing import Any, Dict, Iterator, List, Optional, Type, Union
4
+ from typing import Any, Dict, Iterator, List, Literal, Optional, Type, Union
5
5
 
6
6
  import httpx
7
7
  from pydantic import BaseModel
@@ -45,6 +45,7 @@ class OpenAIChat(Model):
45
45
  # Request parameters
46
46
  store: Optional[bool] = None
47
47
  reasoning_effort: Optional[str] = None
48
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
48
49
  metadata: Optional[Dict[str, Any]] = None
49
50
  frequency_penalty: Optional[float] = None
50
51
  logit_bias: Optional[Any] = None
@@ -159,6 +160,7 @@ class OpenAIChat(Model):
159
160
  base_params = {
160
161
  "store": self.store,
161
162
  "reasoning_effort": self.reasoning_effort,
163
+ "verbosity": self.verbosity,
162
164
  "frequency_penalty": self.frequency_penalty,
163
165
  "logit_bias": self.logit_bias,
164
166
  "logprobs": self.logprobs,
@@ -227,6 +229,8 @@ class OpenAIChat(Model):
227
229
  model_dict.update(
228
230
  {
229
231
  "store": self.store,
232
+ "reasoning_effort": self.reasoning_effort,
233
+ "verbosity": self.verbosity,
230
234
  "frequency_penalty": self.frequency_penalty,
231
235
  "logit_bias": self.logit_bias,
232
236
  "logprobs": self.logprobs,
@@ -694,6 +698,9 @@ class OpenAIChat(Model):
694
698
  if choice_delta.tool_calls is not None:
695
699
  model_response.tool_calls = choice_delta.tool_calls # type: ignore
696
700
 
701
+ if hasattr(choice_delta, "reasoning_content") and choice_delta.reasoning_content is not None:
702
+ model_response.reasoning_content = choice_delta.reasoning_content
703
+
697
704
  # Add audio if present
698
705
  if hasattr(choice_delta, "audio") and choice_delta.audio is not None:
699
706
  try:
@@ -42,6 +42,8 @@ class OpenAIResponses(Model):
42
42
  metadata: Optional[Dict[str, Any]] = None
43
43
  parallel_tool_calls: Optional[bool] = None
44
44
  reasoning: Optional[Dict[str, Any]] = None
45
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
46
+ reasoning_effort: Optional[Literal["minimal", "medium", "high"]] = None
45
47
  store: Optional[bool] = None
46
48
  temperature: Optional[float] = None
47
49
  top_p: Optional[float] = None
@@ -176,7 +178,6 @@ class OpenAIResponses(Model):
176
178
  "max_tool_calls": self.max_tool_calls,
177
179
  "metadata": self.metadata,
178
180
  "parallel_tool_calls": self.parallel_tool_calls,
179
- "reasoning": self.reasoning,
180
181
  "store": self.store,
181
182
  "temperature": self.temperature,
182
183
  "top_p": self.top_p,
@@ -184,21 +185,37 @@ class OpenAIResponses(Model):
184
185
  "user": self.user,
185
186
  "service_tier": self.service_tier,
186
187
  }
188
+
189
+ # Handle reasoning parameter - convert reasoning_effort to reasoning format
190
+ if self.reasoning is not None:
191
+ base_params["reasoning"] = self.reasoning
192
+ elif self.reasoning_effort is not None:
193
+ base_params["reasoning"] = {"effort": self.reasoning_effort}
194
+
195
+ # Build text parameter
196
+ text_params: Dict[str, Any] = {}
197
+
198
+ # Add verbosity if specified
199
+ if self.verbosity is not None:
200
+ text_params["verbosity"] = self.verbosity
201
+
187
202
  # Set the response format
188
203
  if response_format is not None:
189
204
  if isinstance(response_format, type) and issubclass(response_format, BaseModel):
190
205
  schema = get_response_schema_for_provider(response_format, "openai")
191
- base_params["text"] = {
192
- "format": {
193
- "type": "json_schema",
194
- "name": response_format.__name__,
195
- "schema": schema,
196
- "strict": True,
197
- }
206
+ text_params["format"] = {
207
+ "type": "json_schema",
208
+ "name": response_format.__name__,
209
+ "schema": schema,
210
+ "strict": True,
198
211
  }
199
212
  else:
200
213
  # JSON mode
201
- base_params["text"] = {"format": {"type": "json_object"}}
214
+ text_params["format"] = {"type": "json_object"}
215
+
216
+ # Add text parameter if there are any text-level params
217
+ if text_params:
218
+ base_params["text"] = text_params
202
219
 
203
220
  # Filter out None values
204
221
  request_params: Dict[str, Any] = {k: v for k, v in base_params.items() if v is not None}
@@ -372,6 +389,17 @@ class OpenAIResponses(Model):
372
389
  previous_response_id = msg.provider_data["response_id"]
373
390
  break
374
391
 
392
+ # Build a mapping from function_call id (fc_*) → call_id (call_*) from prior assistant tool_calls
393
+ fc_id_to_call_id: Dict[str, str] = {}
394
+ for msg in messages:
395
+ tool_calls = getattr(msg, "tool_calls", None)
396
+ if tool_calls:
397
+ for tc in tool_calls:
398
+ fc_id = tc.get("id")
399
+ call_id = tc.get("call_id") or fc_id
400
+ if isinstance(fc_id, str) and isinstance(call_id, str):
401
+ fc_id_to_call_id[fc_id] = call_id
402
+
375
403
  for message in messages:
376
404
  if message.role in ["user", "system"]:
377
405
  message_dict: Dict[str, Any] = {
@@ -398,27 +426,32 @@ class OpenAIResponses(Model):
398
426
 
399
427
  formatted_messages.append(message_dict)
400
428
 
429
+ # Tool call result
401
430
  elif message.role == "tool":
402
431
  if message.tool_call_id and message.content is not None:
432
+ function_call_id = message.tool_call_id
433
+ # Normalize: if a fc_* id was provided, translate to its corresponding call_* id
434
+ if isinstance(function_call_id, str) and function_call_id in fc_id_to_call_id:
435
+ call_id_value = fc_id_to_call_id[function_call_id]
436
+ else:
437
+ call_id_value = function_call_id
403
438
  formatted_messages.append(
404
- {"type": "function_call_output", "call_id": message.tool_call_id, "output": message.content}
439
+ {"type": "function_call_output", "call_id": call_id_value, "output": message.content}
405
440
  )
441
+ # Tool Calls
406
442
  elif message.tool_calls is not None and len(message.tool_calls) > 0:
407
- if self._using_reasoning_model():
408
- # Only include prior function_call items when we are NOT using
409
- # previous_response_id. When previous_response_id is present, the
410
- # Responses API already knows about earlier output items (including
411
- # reasoning/function_call), and re-sending them can trigger validation
412
- # errors (e.g., missing required reasoning item).
413
- if previous_response_id is not None:
414
- continue
443
+ # Only skip re-sending prior function_call items when we have a previous_response_id
444
+ # (reasoning models). For non-reasoning models, we must include the prior function_call
445
+ # so the API can associate the subsequent function_call_output by call_id.
446
+ if self._using_reasoning_model() and previous_response_id is not None:
447
+ continue
415
448
 
416
449
  for tool_call in message.tool_calls:
417
450
  formatted_messages.append(
418
451
  {
419
452
  "type": "function_call",
420
- "id": tool_call["id"],
421
- "call_id": tool_call["call_id"],
453
+ "id": tool_call.get("id"),
454
+ "call_id": tool_call.get("call_id", tool_call.get("id")),
422
455
  "name": tool_call["function"]["name"],
423
456
  "arguments": tool_call["function"]["arguments"],
424
457
  "status": "completed",
@@ -719,7 +752,8 @@ class OpenAIResponses(Model):
719
752
  model_response.tool_calls.append(
720
753
  {
721
754
  "id": output.id,
722
- "call_id": output.call_id,
755
+ # Store additional call_id from OpenAI responses
756
+ "call_id": output.call_id or output.id,
723
757
  "type": "function",
724
758
  "function": {
725
759
  "name": output.name,
@@ -809,8 +843,8 @@ class OpenAIResponses(Model):
809
843
  item = stream_event.item
810
844
  if item.type == "function_call":
811
845
  tool_use = {
812
- "id": item.id,
813
- "call_id": item.call_id,
846
+ "id": getattr(item, "id", None),
847
+ "call_id": getattr(item, "call_id", None) or getattr(item, "id", None),
814
848
  "type": "function",
815
849
  "function": {
816
850
  "name": item.name,
@@ -0,0 +1,5 @@
1
+ from agno.models.dashscope.dashscope import DashScope as Qwen
2
+
3
+ __all__ = [
4
+ "Qwen",
5
+ ]
agno/reasoning/default.py CHANGED
@@ -1,7 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from textwrap import dedent
4
- from typing import Callable, Dict, List, Literal, Optional, Union
4
+ from typing import Any, Callable, Dict, List, Literal, Optional, Union
5
5
 
6
6
  from agno.models.base import Model
7
7
  from agno.reasoning.step import ReasoningSteps
@@ -19,6 +19,9 @@ def get_default_reasoning_agent(
19
19
  telemetry: bool = True,
20
20
  debug_mode: bool = False,
21
21
  debug_level: Literal[1, 2] = 1,
22
+ session_state: Optional[Dict[str, Any]] = None,
23
+ context: Optional[Dict[str, Any]] = None,
24
+ extra_data: Optional[Dict[str, Any]] = None,
22
25
  ) -> Optional["Agent"]: # type: ignore # noqa: F821
23
26
  from agno.agent import Agent
24
27
 
@@ -87,6 +90,9 @@ def get_default_reasoning_agent(
87
90
  telemetry=telemetry,
88
91
  debug_mode=debug_mode,
89
92
  debug_level=debug_level,
93
+ session_state=session_state,
94
+ context=context,
95
+ extra_data=extra_data,
90
96
  )
91
97
 
92
98
  agent.model.show_tool_calls = False # type: ignore
agno/reasoning/helpers.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import List, Literal
1
+ from typing import Any, Dict, List, Literal, Optional
2
2
 
3
3
  from agno.models.base import Model
4
4
  from agno.models.message import Message
@@ -13,6 +13,9 @@ def get_reasoning_agent(
13
13
  telemetry: bool = False,
14
14
  debug_mode: bool = False,
15
15
  debug_level: Literal[1, 2] = 1,
16
+ session_state: Optional[Dict[str, Any]] = None,
17
+ context: Optional[Dict[str, Any]] = None,
18
+ extra_data: Optional[Dict[str, Any]] = None,
16
19
  ) -> "Agent": # type: ignore # noqa: F821
17
20
  from agno.agent import Agent
18
21
 
@@ -22,6 +25,9 @@ def get_reasoning_agent(
22
25
  telemetry=telemetry,
23
26
  debug_mode=debug_mode,
24
27
  debug_level=debug_level,
28
+ session_state=session_state,
29
+ context=context,
30
+ extra_data=extra_data,
25
31
  )
26
32
 
27
33
 
agno/run/response.py CHANGED
@@ -420,6 +420,9 @@ class RunResponse:
420
420
  messages = data.pop("messages", None)
421
421
  messages = [Message.model_validate(message) for message in messages] if messages else None
422
422
 
423
+ citations = data.pop("citations", None)
424
+ citations = Citations.model_validate(citations) if citations else None
425
+
423
426
  tools = data.pop("tools", [])
424
427
  tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
425
428
 
@@ -441,6 +444,7 @@ class RunResponse:
441
444
 
442
445
  return cls(
443
446
  messages=messages,
447
+ citations=citations,
444
448
  tools=tools,
445
449
  images=images,
446
450
  audio=audio,
agno/run/team.py CHANGED
@@ -416,6 +416,9 @@ class TeamRunResponse:
416
416
  response_audio = data.pop("response_audio", None)
417
417
  response_audio = AudioResponse.model_validate(response_audio) if response_audio else None
418
418
 
419
+ citations = data.pop("citations", None)
420
+ citations = Citations.model_validate(citations) if citations else None
421
+
419
422
  # To make it backwards compatible
420
423
  if "event" in data:
421
424
  data.pop("event")
@@ -428,6 +431,7 @@ class TeamRunResponse:
428
431
  videos=videos,
429
432
  audio=audio,
430
433
  response_audio=response_audio,
434
+ citations=citations,
431
435
  tools=tools,
432
436
  events=events,
433
437
  **data,
agno/storage/dynamodb.py CHANGED
@@ -30,19 +30,23 @@ class DynamoDbStorage(Storage):
30
30
  endpoint_url: Optional[str] = None,
31
31
  create_table_if_not_exists: bool = True,
32
32
  mode: Optional[Literal["agent", "team", "workflow", "workflow_v2"]] = "agent",
33
+ create_table_read_capacity_units: int = 5,
34
+ create_table_write_capacity_units: int = 5,
33
35
  ):
34
36
  """
35
37
  Initialize the DynamoDbStorage.
36
38
 
37
39
  Args:
38
40
  table_name (str): The name of the DynamoDB table.
39
- region_name (Optional[str]): AWS region name.
40
41
  profile_name (Optional[str]): AWS profile name to use for credentials.
42
+ region_name (Optional[str]): AWS region name.
41
43
  aws_access_key_id (Optional[str]): AWS access key ID.
42
44
  aws_secret_access_key (Optional[str]): AWS secret access key.
43
45
  endpoint_url (Optional[str]): The complete URL to use for the constructed client.
44
46
  create_table_if_not_exists (bool): Whether to create the table if it does not exist.
45
47
  mode (Optional[Literal["agent", "team", "workflow", "workflow_v2"]]): The mode of the storage.
48
+ create_table_read_capacity_units Optional[int]: Read capacity units for created table (default: 5).
49
+ create_table_write_capacity_units Optional[int]: Write capacity units for created table (default: 5).
46
50
  """
47
51
  super().__init__(mode)
48
52
  self.table_name = table_name
@@ -52,6 +56,8 @@ class DynamoDbStorage(Storage):
52
56
  self.aws_access_key_id = aws_access_key_id
53
57
  self.aws_secret_access_key = aws_secret_access_key
54
58
  self.create_table_if_not_exists = create_table_if_not_exists
59
+ self.create_table_read_capacity_units = create_table_read_capacity_units
60
+ self.create_table_write_capacity_units = create_table_write_capacity_units
55
61
 
56
62
  # Create session using profile name if provided
57
63
  if self.profile_name:
@@ -96,6 +102,11 @@ class DynamoDbStorage(Storage):
96
102
  """
97
103
  Create the DynamoDB table if it does not exist.
98
104
  """
105
+ provisioned_throughput = {
106
+ "ReadCapacityUnits": self.create_table_read_capacity_units,
107
+ "WriteCapacityUnits": self.create_table_write_capacity_units,
108
+ }
109
+
99
110
  try:
100
111
  # Check if table exists
101
112
  self.dynamodb.meta.client.describe_table(TableName=self.table_name)
@@ -141,10 +152,7 @@ class DynamoDbStorage(Storage):
141
152
  {"AttributeName": "created_at", "KeyType": "RANGE"},
142
153
  ],
143
154
  "Projection": {"ProjectionType": "ALL"},
144
- "ProvisionedThroughput": {
145
- "ReadCapacityUnits": 5,
146
- "WriteCapacityUnits": 5,
147
- },
155
+ "ProvisionedThroughput": provisioned_throughput,
148
156
  }
149
157
  ]
150
158
  if self.mode == "agent":
@@ -156,10 +164,7 @@ class DynamoDbStorage(Storage):
156
164
  {"AttributeName": "created_at", "KeyType": "RANGE"},
157
165
  ],
158
166
  "Projection": {"ProjectionType": "ALL"},
159
- "ProvisionedThroughput": {
160
- "ReadCapacityUnits": 5,
161
- "WriteCapacityUnits": 5,
162
- },
167
+ "ProvisionedThroughput": provisioned_throughput,
163
168
  }
164
169
  )
165
170
  elif self.mode == "team":
@@ -171,10 +176,7 @@ class DynamoDbStorage(Storage):
171
176
  {"AttributeName": "created_at", "KeyType": "RANGE"},
172
177
  ],
173
178
  "Projection": {"ProjectionType": "ALL"},
174
- "ProvisionedThroughput": {
175
- "ReadCapacityUnits": 5,
176
- "WriteCapacityUnits": 5,
177
- },
179
+ "ProvisionedThroughput": provisioned_throughput,
178
180
  }
179
181
  )
180
182
  elif self.mode == "workflow":
@@ -186,10 +188,7 @@ class DynamoDbStorage(Storage):
186
188
  {"AttributeName": "created_at", "KeyType": "RANGE"},
187
189
  ],
188
190
  "Projection": {"ProjectionType": "ALL"},
189
- "ProvisionedThroughput": {
190
- "ReadCapacityUnits": 5,
191
- "WriteCapacityUnits": 5,
192
- },
191
+ "ProvisionedThroughput": provisioned_throughput,
193
192
  }
194
193
  )
195
194
  elif self.mode == "workflow_v2":
@@ -201,10 +200,7 @@ class DynamoDbStorage(Storage):
201
200
  {"AttributeName": "created_at", "KeyType": "RANGE"},
202
201
  ],
203
202
  "Projection": {"ProjectionType": "ALL"},
204
- "ProvisionedThroughput": {
205
- "ReadCapacityUnits": 5,
206
- "WriteCapacityUnits": 5,
207
- },
203
+ "ProvisionedThroughput": provisioned_throughput,
208
204
  }
209
205
  )
210
206
  # Create the table
@@ -213,7 +209,7 @@ class DynamoDbStorage(Storage):
213
209
  KeySchema=[{"AttributeName": "session_id", "KeyType": "HASH"}],
214
210
  AttributeDefinitions=attribute_definitions,
215
211
  GlobalSecondaryIndexes=secondary_indexes,
216
- ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
212
+ ProvisionedThroughput=provisioned_throughput,
217
213
  )
218
214
  # Wait until the table exists.
219
215
  self.table.wait_until_exists()