agno 2.0.2__py3-none-any.whl → 2.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. agno/agent/agent.py +164 -87
  2. agno/db/dynamo/dynamo.py +8 -0
  3. agno/db/firestore/firestore.py +8 -1
  4. agno/db/gcs_json/gcs_json_db.py +9 -0
  5. agno/db/json/json_db.py +8 -0
  6. agno/db/mongo/mongo.py +10 -1
  7. agno/db/mysql/mysql.py +10 -0
  8. agno/db/postgres/postgres.py +16 -8
  9. agno/db/redis/redis.py +6 -0
  10. agno/db/singlestore/schemas.py +1 -1
  11. agno/db/singlestore/singlestore.py +8 -1
  12. agno/db/sqlite/sqlite.py +9 -1
  13. agno/db/utils.py +14 -0
  14. agno/knowledge/chunking/fixed.py +1 -1
  15. agno/knowledge/knowledge.py +91 -65
  16. agno/knowledge/reader/base.py +3 -0
  17. agno/knowledge/reader/csv_reader.py +1 -1
  18. agno/knowledge/reader/json_reader.py +1 -1
  19. agno/knowledge/reader/markdown_reader.py +5 -5
  20. agno/knowledge/reader/s3_reader.py +0 -12
  21. agno/knowledge/reader/text_reader.py +5 -5
  22. agno/models/base.py +2 -2
  23. agno/models/cerebras/cerebras.py +5 -3
  24. agno/models/cerebras/cerebras_openai.py +5 -3
  25. agno/models/google/gemini.py +33 -11
  26. agno/models/litellm/chat.py +1 -1
  27. agno/models/openai/chat.py +3 -0
  28. agno/models/openai/responses.py +81 -40
  29. agno/models/response.py +5 -0
  30. agno/models/siliconflow/__init__.py +5 -0
  31. agno/models/siliconflow/siliconflow.py +25 -0
  32. agno/os/app.py +4 -1
  33. agno/os/auth.py +24 -14
  34. agno/os/interfaces/slack/router.py +1 -1
  35. agno/os/interfaces/whatsapp/router.py +2 -0
  36. agno/os/router.py +187 -76
  37. agno/os/routers/evals/utils.py +9 -9
  38. agno/os/routers/health.py +26 -0
  39. agno/os/routers/knowledge/knowledge.py +11 -11
  40. agno/os/routers/session/session.py +24 -8
  41. agno/os/schema.py +8 -2
  42. agno/run/agent.py +5 -2
  43. agno/run/base.py +6 -3
  44. agno/run/team.py +11 -3
  45. agno/run/workflow.py +69 -12
  46. agno/session/team.py +1 -0
  47. agno/team/team.py +196 -93
  48. agno/tools/mcp.py +1 -0
  49. agno/tools/mem0.py +11 -17
  50. agno/tools/memory.py +419 -0
  51. agno/tools/workflow.py +279 -0
  52. agno/utils/audio.py +27 -0
  53. agno/utils/common.py +90 -1
  54. agno/utils/print_response/agent.py +6 -2
  55. agno/utils/streamlit.py +14 -8
  56. agno/vectordb/chroma/chromadb.py +8 -2
  57. agno/workflow/step.py +111 -13
  58. agno/workflow/workflow.py +16 -13
  59. {agno-2.0.2.dist-info → agno-2.0.4.dist-info}/METADATA +1 -1
  60. {agno-2.0.2.dist-info → agno-2.0.4.dist-info}/RECORD +63 -58
  61. {agno-2.0.2.dist-info → agno-2.0.4.dist-info}/WHEEL +0 -0
  62. {agno-2.0.2.dist-info → agno-2.0.4.dist-info}/licenses/LICENSE +0 -0
  63. {agno-2.0.2.dist-info → agno-2.0.4.dist-info}/top_level.txt +0 -0
agno/tools/workflow.py ADDED
@@ -0,0 +1,279 @@
+ import json
+ from textwrap import dedent
+ from typing import Any, Dict, Optional
+
+ from pydantic import BaseModel
+
+ from agno.tools import Toolkit
+ from agno.utils.log import log_debug, log_error
+ from agno.workflow.workflow import Workflow, WorkflowRunOutput
+
+
+ class RunWorkflowInput(BaseModel):
+     input_data: str
+     additional_data: Optional[Dict[str, Any]] = None
+
+
+ class WorkflowTools(Toolkit):
+     def __init__(
+         self,
+         workflow: Workflow,
+         enable_run_workflow: bool = True,
+         enable_think: bool = False,
+         enable_analyze: bool = False,
+         all: bool = False,
+         instructions: Optional[str] = None,
+         add_instructions: bool = True,
+         add_few_shot: bool = False,
+         few_shot_examples: Optional[str] = None,
+         async_mode: bool = False,
+         **kwargs,
+     ):
+         # Add instructions for using this toolkit
+         if instructions is None:
+             self.instructions = self.DEFAULT_INSTRUCTIONS
+             if add_few_shot:
+                 if few_shot_examples is not None:
+                     self.instructions += "\n" + few_shot_examples
+         else:
+             self.instructions = instructions
+
+         # The workflow to execute
+         self.workflow: Workflow = workflow
+
+         super().__init__(
+             name="workflow_tools",
+             instructions=self.instructions,
+             add_instructions=add_instructions,
+             auto_register=False,
+             **kwargs,
+         )
+
+         if enable_think or all:
+             if async_mode:
+                 self.register(self.async_think, name="think")
+             else:
+                 self.register(self.think, name="think")
+         if enable_run_workflow or all:
+             if async_mode:
+                 self.register(self.async_run_workflow, name="run_workflow")
+             else:
+                 self.register(self.run_workflow, name="run_workflow")
+         if enable_analyze or all:
+             if async_mode:
+                 self.register(self.async_analyze, name="analyze")
+             else:
+                 self.register(self.analyze, name="analyze")
+
+     def think(self, session_state: Dict[str, Any], thought: str) -> str:
+         """Use this tool as a scratchpad to reason about the workflow execution, refine your approach, brainstorm workflow inputs, or revise your plan.
+         Call `Think` whenever you need to figure out what to do next, analyze the user's requirements, plan workflow inputs, or decide on execution strategy.
+         You should use this tool as frequently as needed.
+         Args:
+             thought: Your thought process and reasoning about workflow execution.
+         """
+         try:
+             log_debug(f"Workflow Thought: {thought}")
+
+             # Add the thought to the session state
+             if session_state is None:
+                 session_state = {}
+             if "workflow_thoughts" not in session_state:
+                 session_state["workflow_thoughts"] = []
+             session_state["workflow_thoughts"].append(thought)
+
+             # Return the full log of thoughts and the new thought
+             thoughts = "\n".join([f"- {t}" for t in session_state["workflow_thoughts"]])
+             formatted_thoughts = dedent(
+                 f"""Workflow Thoughts:
+                 {thoughts}
+                 """
+             ).strip()
+             return formatted_thoughts
+         except Exception as e:
+             log_error(f"Error recording workflow thought: {e}")
+             return f"Error recording workflow thought: {e}"
+
+     async def async_think(self, session_state: Dict[str, Any], thought: str) -> str:
+         """Use this tool as a scratchpad to reason about the workflow execution, refine your approach, brainstorm workflow inputs, or revise your plan.
+         Call `Think` whenever you need to figure out what to do next, analyze the user's requirements, plan workflow inputs, or decide on execution strategy.
+         You should use this tool as frequently as needed.
+         Args:
+             thought: Your thought process and reasoning about workflow execution.
+         """
+         try:
+             log_debug(f"Workflow Thought: {thought}")
+
+             # Add the thought to the session state
+             if session_state is None:
+                 session_state = {}
+             if "workflow_thoughts" not in session_state:
+                 session_state["workflow_thoughts"] = []
+             session_state["workflow_thoughts"].append(thought)
+
+             # Return the full log of thoughts and the new thought
+             thoughts = "\n".join([f"- {t}" for t in session_state["workflow_thoughts"]])
+             formatted_thoughts = dedent(
+                 f"""Workflow Thoughts:
+                 {thoughts}
+                 """
+             ).strip()
+             return formatted_thoughts
+         except Exception as e:
+             log_error(f"Error recording workflow thought: {e}")
+             return f"Error recording workflow thought: {e}"
+
+     def run_workflow(
+         self,
+         session_state: Dict[str, Any],
+         input: RunWorkflowInput,
+     ) -> str:
+         """Use this tool to execute the workflow with the specified inputs and parameters.
+         After thinking through the requirements, use this tool to run the workflow with appropriate inputs.
+         Args:
+             input_data: The input data for the workflow (use a `str` for a simple input)
+             additional_data: The additional data for the workflow. This is a dictionary of key-value pairs that will be passed to the workflow. E.g. {"topic": "food", "style": "Humour"}
+         """
+         try:
+             log_debug(f"Running workflow with input: {input.input_data}")
+
+             user_id = session_state.get("current_user_id")
+             session_id = session_state.get("current_session_id")
+
+             # Execute the workflow
+             result: WorkflowRunOutput = self.workflow.run(
+                 input=input.input_data,
+                 user_id=user_id,
+                 session_id=session_id,
+                 session_state=session_state,
+                 additional_data=input.additional_data,
+             )
+
+             if "workflow_results" not in session_state:
+                 session_state["workflow_results"] = []
+
+             session_state["workflow_results"].append(result.to_dict())
+
+             return json.dumps(result.to_dict(), indent=2)
+
+         except Exception as e:
+             log_error(f"Error running workflow: {e}")
+             return f"Error running workflow: {e}"
+
+     async def async_run_workflow(
+         self,
+         session_state: Dict[str, Any],
+         input: RunWorkflowInput,
+     ) -> str:
+         """Use this tool to execute the workflow with the specified inputs and parameters.
+         After thinking through the requirements, use this tool to run the workflow with appropriate inputs.
+         Args:
+             input_data: The input data for the workflow (use a `str` for a simple input)
+             additional_data: The additional data for the workflow. This is a dictionary of key-value pairs that will be passed to the workflow. E.g. {"topic": "food", "style": "Humour"}
+         """
+         try:
+             log_debug(f"Running workflow with input: {input.input_data}")
+
+             user_id = session_state.get("current_user_id")
+             session_id = session_state.get("current_session_id")
+
+             # Execute the workflow
+             result: WorkflowRunOutput = await self.workflow.arun(
+                 input=input.input_data,
+                 user_id=user_id,
+                 session_id=session_id,
+                 session_state=session_state,
+                 additional_data=input.additional_data,
+             )
+
+             if "workflow_results" not in session_state:
+                 session_state["workflow_results"] = []
+
+             session_state["workflow_results"].append(result.to_dict())
+
+             return json.dumps(result.to_dict(), indent=2)
+
+         except Exception as e:
+             log_error(f"Error running workflow: {e}")
+             return f"Error running workflow: {e}"
+
+     def analyze(self, session_state: Dict[str, Any], analysis: str) -> str:
+         """Use this tool to evaluate whether the workflow execution results are correct and sufficient.
+         If not, go back to "Think" or "Run" with refined inputs or parameters.
+         Args:
+             analysis: Your analysis of the workflow execution results.
+         """
+         try:
+             log_debug(f"Workflow Analysis: {analysis}")
+
+             # Add the analysis to the session state
+             if session_state is None:
+                 session_state = {}
+             if "workflow_analysis" not in session_state:
+                 session_state["workflow_analysis"] = []
+             session_state["workflow_analysis"].append(analysis)
+
+             # Return the full log of analysis and the new analysis
+             analysis_log = "\n".join([f"- {a}" for a in session_state["workflow_analysis"]])
+             formatted_analysis = dedent(
+                 f"""Workflow Analysis:
+                 {analysis_log}
+                 """
+             ).strip()
+             return formatted_analysis
+         except Exception as e:
+             log_error(f"Error recording workflow analysis: {e}")
+             return f"Error recording workflow analysis: {e}"
+
+     async def async_analyze(self, session_state: Dict[str, Any], analysis: str) -> str:
+         """Use this tool to evaluate whether the workflow execution results are correct and sufficient.
+         If not, go back to "Think" or "Run" with refined inputs or parameters.
+         Args:
+             analysis: Your analysis of the workflow execution results.
+         """
+         try:
+             log_debug(f"Workflow Analysis: {analysis}")
+
+             # Add the analysis to the session state
+             if session_state is None:
+                 session_state = {}
+             if "workflow_analysis" not in session_state:
+                 session_state["workflow_analysis"] = []
+             session_state["workflow_analysis"].append(analysis)
+
+             # Return the full log of analysis and the new analysis
+             analysis_log = "\n".join([f"- {a}" for a in session_state["workflow_analysis"]])
+             formatted_analysis = dedent(
+                 f"""Workflow Analysis:
+                 {analysis_log}
+                 """
+             ).strip()
+             return formatted_analysis
+         except Exception as e:
+             log_error(f"Error recording workflow analysis: {e}")
+             return f"Error recording workflow analysis: {e}"
+
+     DEFAULT_INSTRUCTIONS = dedent("""\
+         You have access to the Think, Run Workflow, and Analyze tools that will help you execute workflows and analyze their results. Use these tools as frequently as needed to successfully complete workflow-based tasks.
+         ## How to use the Think, Run Workflow, and Analyze tools:
+
+         1. **Think**
+         - Purpose: A scratchpad for planning workflow execution, brainstorming inputs, and refining your approach. You never reveal your "Think" content to the user.
+         - Usage: Call `think` whenever you need to figure out what workflow inputs to use, analyze requirements, or decide on execution strategy before (or after) you run the workflow.
+         2. **Run Workflow**
+         - Purpose: Executes the workflow with specified inputs and parameters.
+         - Usage: Call `run_workflow` with appropriate input data whenever you want to execute the workflow.
+         - For all workflows, start with simple inputs and gradually increase complexity
+         3. **Analyze**
+         - Purpose: Evaluate whether the workflow execution results are correct and sufficient. If not, go back to "Think" or "Run Workflow" with refined inputs.
+         - Usage: Call `analyze` after getting workflow results to verify the quality and correctness of the execution. Consider:
+             - Completeness: Did the workflow complete all expected steps?
+             - Quality: Are the results accurate and meet the requirements?
+             - Errors: Were there any failures or unexpected behaviors?
+         **Important Guidelines**:
+         - Do not include your internal chain-of-thought in direct user responses.
+         - Use "Think" to reason internally. These notes are never exposed to the user.
+         - When you provide a final answer to the user, be clear, concise, and based on the workflow results.
+         - If workflow execution fails or produces unexpected results, acknowledge limitations and explain what went wrong.
+         - Synthesize information from multiple workflow runs if you execute the workflow several times with different inputs.\
+         """)
agno/utils/audio.py CHANGED
@@ -1,5 +1,6 @@
  import base64
  import os
+ import wave

  from agno.utils.log import log_info

@@ -20,3 +21,29 @@ def write_audio_to_file(audio, filename: str):
      with open(filename, "wb") as f:
          f.write(wav_bytes)
      log_info(f"Audio file saved to {filename}")
+
+
+ def write_wav_audio_to_file(
+     filename: str, pcm_data: bytes, channels: int = 1, rate: int = 24000, sample_width: int = 2
+ ):
+     """
+     Create a WAV file from raw PCM audio data.
+
+     Args:
+         filename: The filepath to save the WAV file to
+         pcm_data: Raw PCM audio data as bytes
+         channels: Number of audio channels (1 for mono, 2 for stereo)
+         rate: Sample rate in Hz (e.g., 24000, 44100, 48000)
+         sample_width: Sample width in bytes (1, 2, or 4)
+     """
+     # Create directory if it doesn't exist
+     if os.path.dirname(filename):
+         os.makedirs(os.path.dirname(filename), exist_ok=True)
+
+     with wave.open(filename, "wb") as wf:
+         wf.setnchannels(channels)
+         wf.setsampwidth(sample_width)
+         wf.setframerate(rate)
+         wf.writeframes(pcm_data)
+
+     log_info(f"WAV file saved to {filename}")
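
A quick sketch of the new helper in use; the one second of 16-bit mono silence is synthetic, purely for illustration:

from agno.utils.audio import write_wav_audio_to_file

# One second of 16-bit mono silence at 24 kHz (2 bytes per sample)
pcm_data = b"\x00\x00" * 24000

# Writes out/test.wav, creating the out/ directory if needed
write_wav_audio_to_file("out/test.wav", pcm_data, channels=1, rate=24000, sample_width=2)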
agno/utils/common.py CHANGED
@@ -1,5 +1,5 @@
  from dataclasses import asdict
- from typing import Any, List, Optional, Type
+ from typing import Any, List, Optional, Set, Type, Union, get_type_hints


  def isinstanceany(obj: Any, class_list: List[Type]) -> bool:
@@ -41,3 +41,92 @@ def nested_model_dump(value):
      elif isinstance(value, list):
          return [nested_model_dump(item) for item in value]
      return value
+
+
+ def is_typed_dict(cls: Type[Any]) -> bool:
+     """Check if a class is a TypedDict"""
+     return (
+         hasattr(cls, "__annotations__")
+         and hasattr(cls, "__total__")
+         and hasattr(cls, "__required_keys__")
+         and hasattr(cls, "__optional_keys__")
+     )
+
+
+ def check_type_compatibility(value: Any, expected_type: Type) -> bool:
+     """Basic type compatibility checking."""
+     from typing import get_args, get_origin
+
+     # Handle None/Optional types
+     if value is None:
+         return (
+             type(None) in get_args(expected_type) if hasattr(expected_type, "__args__") else expected_type is type(None)
+         )
+
+     # Handle Union types (including Optional)
+     origin = get_origin(expected_type)
+     if origin is Union:
+         return any(check_type_compatibility(value, arg) for arg in get_args(expected_type))
+
+     # Handle List types
+     if origin is list or expected_type is list:
+         if not isinstance(value, list):
+             return False
+         if origin is list and get_args(expected_type):
+             element_type = get_args(expected_type)[0]
+             return all(check_type_compatibility(item, element_type) for item in value)
+         return True
+
+     if expected_type in (str, int, float, bool):
+         return isinstance(value, expected_type)
+
+     if expected_type is Any:
+         return True
+
+     try:
+         return isinstance(value, expected_type)
+     except TypeError:
+         return True
+
+
+ def validate_typed_dict(data: dict, schema_cls) -> dict:
+     """Validate input data against a TypedDict schema."""
+     if not isinstance(data, dict):
+         raise ValueError(f"Expected dict for TypedDict {schema_cls.__name__}, got {type(data)}")
+
+     # Get type hints from the TypedDict
+     try:
+         type_hints = get_type_hints(schema_cls)
+     except Exception as e:
+         raise ValueError(f"Could not get type hints for TypedDict {schema_cls.__name__}: {e}")
+
+     # Get required and optional keys
+     required_keys: Set[str] = getattr(schema_cls, "__required_keys__", set())
+     optional_keys: Set[str] = getattr(schema_cls, "__optional_keys__", set())
+     all_keys = required_keys | optional_keys
+
+     # Check for missing required fields
+     missing_required = required_keys - set(data.keys())
+     if missing_required:
+         raise ValueError(f"Missing required fields in TypedDict {schema_cls.__name__}: {missing_required}")
+
+     # Check for unexpected fields
+     unexpected_fields = set(data.keys()) - all_keys
+     if unexpected_fields:
+         raise ValueError(f"Unexpected fields in TypedDict {schema_cls.__name__}: {unexpected_fields}")
+
+     # Basic type checking for provided fields
+     validated_data = {}
+     for field_name, value in data.items():
+         if field_name in type_hints:
+             expected_type = type_hints[field_name]
+
+             # Handle simple type checking
+             if not check_type_compatibility(value, expected_type):
+                 raise ValueError(
+                     f"Field '{field_name}' expected type {expected_type}, got {type(value)} with value {value}"
+                 )
+
+         validated_data[field_name] = value
+
+     return validated_data
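
A small sketch of what the new `validate_typed_dict` helper enforces (the `MovieScript` TypedDict is a made-up example). Note that `Optional` only loosens the value type; on a plain TypedDict every declared key is still required:

from typing import Optional, TypedDict

from agno.utils.common import validate_typed_dict

class MovieScript(TypedDict):
    title: str
    genre: Optional[str]

validate_typed_dict({"title": "Dune", "genre": None}, MovieScript)
# -> {'title': 'Dune', 'genre': None}; None satisfies Optional[str]

try:
    validate_typed_dict({"title": "Dune"}, MovieScript)
except ValueError as e:
    print(e)  # Missing required fields in TypedDict MovieScript: {'genre'}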
agno/utils/print_response/agent.py CHANGED
@@ -112,7 +112,9 @@ def print_response_stream(
      if response_event.event == RunEvent.run_content:  # type: ignore
          if hasattr(response_event, "content"):
              if isinstance(response_event.content, str):
-                 _response_content += response_event.content
+                 # Don't accumulate text content, parser_model will replace it
+                 if not (agent.parser_model is not None and agent.output_schema is not None):
+                     _response_content += response_event.content
              elif agent.output_schema is not None and isinstance(response_event.content, BaseModel):
                  try:
                      response_content_batch = JSON(  # type: ignore
@@ -289,7 +291,9 @@ async def aprint_response_stream(

      if resp.event == RunEvent.run_content:  # type: ignore
          if isinstance(resp.content, str):
-             _response_content += resp.content
+             # Don't accumulate text content, parser_model will replace it
+             if not (agent.parser_model is not None and agent.output_schema is not None):
+                 _response_content += resp.content
          elif agent.output_schema is not None and isinstance(resp.content, BaseModel):
              try:
                  response_content_batch = JSON(resp.content.model_dump_json(exclude_none=True), indent=2)  # type: ignore
agno/utils/streamlit.py CHANGED
@@ -1,14 +1,20 @@
  from datetime import datetime
  from typing import Any, Callable, Dict, List, Optional

- import streamlit as st
-
- from agno.agent import Agent
- from agno.db.base import SessionType
- from agno.models.anthropic import Claude
- from agno.models.google import Gemini
- from agno.models.openai import OpenAIChat
- from agno.utils.log import logger
+ try:
+     from agno.agent import Agent
+     from agno.db.base import SessionType
+     from agno.models.anthropic import Claude
+     from agno.models.google import Gemini
+     from agno.models.openai import OpenAIChat
+     from agno.utils.log import logger
+ except ImportError:
+     raise ImportError("`agno` not installed. Please install using `pip install agno`")
+
+ try:
+     import streamlit as st
+ except ImportError:
+     raise ImportError("`streamlit` not installed. Please install using `pip install streamlit`")


  def add_message(role: str, content: str, tool_calls: Optional[List[Dict[str, Any]]] = None) -> None:
agno/vectordb/chroma/chromadb.py CHANGED
@@ -766,9 +766,15 @@ class ChromaDb(VectorDb):
              updated_metadatas.append(updated_meta)

          # Update the documents
+         # Filter out None values from metadata as ChromaDB doesn't accept them
+         cleaned_metadatas = []
+         for meta in updated_metadatas:
+             cleaned_meta = {k: v for k, v in meta.items() if v is not None}
+             cleaned_metadatas.append(cleaned_meta)
+
          # Convert to the expected type for ChromaDB
-         chroma_metadatas = cast(List[Mapping[str, Union[str, int, float, bool, None]]], updated_metadatas)
-         collection.update(ids=ids, metadatas=chroma_metadatas)
+         chroma_metadatas = cast(List[Mapping[str, Union[str, int, float, bool]]], cleaned_metadatas)
+         collection.update(ids=ids, metadatas=chroma_metadatas)  # type: ignore
          logger.debug(f"Updated metadata for {len(ids)} documents with content_id: {content_id}")

      except TypeError as te:
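
The rationale for the new cleaning loop: per the diff's own comment, ChromaDB does not accept `None` metadata values on update, so they are dropped before the cast. The filter is a plain dict comprehension, shown standalone here with made-up metadata:

meta = {"source": "docs.pdf", "page": 3, "author": None}
cleaned = {k: v for k, v in meta.items() if v is not None}
assert cleaned == {"source": "docs.pdf", "page": 3}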