agno 2.2.9__py3-none-any.whl → 2.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. agno/agent/agent.py +27 -5
  2. agno/db/dynamo/utils.py +1 -1
  3. agno/db/firestore/utils.py +1 -1
  4. agno/db/gcs_json/utils.py +1 -1
  5. agno/db/in_memory/utils.py +1 -1
  6. agno/db/json/utils.py +1 -1
  7. agno/db/mongo/utils.py +3 -3
  8. agno/db/mysql/utils.py +1 -1
  9. agno/db/postgres/utils.py +1 -1
  10. agno/db/redis/utils.py +1 -1
  11. agno/db/singlestore/utils.py +1 -1
  12. agno/db/sqlite/utils.py +1 -1
  13. agno/knowledge/chunking/agentic.py +8 -9
  14. agno/knowledge/chunking/strategy.py +59 -15
  15. agno/knowledge/embedder/sentence_transformer.py +6 -2
  16. agno/knowledge/reader/base.py +6 -2
  17. agno/knowledge/utils.py +20 -0
  18. agno/models/anthropic/claude.py +45 -9
  19. agno/models/base.py +4 -0
  20. agno/os/app.py +35 -19
  21. agno/os/routers/health.py +5 -3
  22. agno/os/routers/knowledge/knowledge.py +43 -17
  23. agno/os/routers/knowledge/schemas.py +4 -3
  24. agno/run/agent.py +11 -1
  25. agno/team/team.py +20 -3
  26. agno/tools/file_generation.py +4 -4
  27. agno/tools/gmail.py +179 -0
  28. agno/tools/parallel.py +314 -0
  29. agno/utils/models/claude.py +2 -1
  30. agno/workflow/agent.py +2 -2
  31. agno/workflow/condition.py +26 -4
  32. agno/workflow/loop.py +9 -0
  33. agno/workflow/parallel.py +39 -16
  34. agno/workflow/router.py +25 -4
  35. agno/workflow/step.py +163 -91
  36. agno/workflow/steps.py +9 -0
  37. agno/workflow/types.py +20 -1
  38. agno/workflow/workflow.py +117 -30
  39. {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/METADATA +4 -1
  40. {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/RECORD +43 -42
  41. {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/WHEEL +0 -0
  42. {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/licenses/LICENSE +0 -0
  43. {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/top_level.txt +0 -0
agno/tools/parallel.py ADDED
@@ -0,0 +1,314 @@
+ import json
+ from os import getenv
+ from typing import Any, Dict, List, Optional
+
+ from agno.tools import Toolkit
+ from agno.utils.log import log_error
+
+ try:
+     from parallel import Parallel as ParallelClient
+ except ImportError:
+     raise ImportError("`parallel-web` not installed. Please install using `pip install parallel-web`")
+
+
+ class CustomJSONEncoder(json.JSONEncoder):
+     """Custom JSON encoder that handles non-serializable types by converting them to strings."""
+
+     def default(self, obj):
+         try:
+             return super().default(obj)
+         except TypeError:
+             return str(obj)
+
+
+ class ParallelTools(Toolkit):
+     """
+     ParallelTools provides access to Parallel's web search and extraction APIs.
+
+     Parallel offers powerful APIs optimized for AI agents:
+     - Search API: AI-optimized web search that returns relevant excerpts tailored for LLMs
+     - Extract API: Extract content from specific URLs in clean markdown format, handling JavaScript-heavy pages and PDFs
+
+     Args:
+         api_key (Optional[str]): Parallel API key. If not provided, will use PARALLEL_API_KEY environment variable.
+         enable_search (bool): Enable Search API functionality. Default is True.
+         enable_extract (bool): Enable Extract API functionality. Default is True.
+         all (bool): Enable all tools. Overrides individual flags when True. Default is False.
+         max_results (int): Default maximum number of results for search operations. Default is 10.
+         max_chars_per_result (int): Default maximum characters per result for search operations. Default is 10000.
+         beta_version (str): Beta API version header. Default is "search-extract-2025-10-10".
+         mode (Optional[str]): Default search mode. Options: "one-shot" or "agentic". Default is None.
+         include_domains (Optional[List[str]]): Default domains to restrict results to. Default is None.
+         exclude_domains (Optional[List[str]]): Default domains to exclude from results. Default is None.
+         max_age_seconds (Optional[int]): Default cache age threshold (minimum 600). Default is None.
+         timeout_seconds (Optional[float]): Default timeout for content retrieval. Default is None.
+         disable_cache_fallback (Optional[bool]): Default cache fallback behavior. Default is None.
+     """
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,
+         enable_search: bool = True,
+         enable_extract: bool = True,
+         all: bool = False,
+         max_results: int = 10,
+         max_chars_per_result: int = 10000,
+         beta_version: str = "search-extract-2025-10-10",
+         mode: Optional[str] = None,
+         include_domains: Optional[List[str]] = None,
+         exclude_domains: Optional[List[str]] = None,
+         max_age_seconds: Optional[int] = None,
+         timeout_seconds: Optional[float] = None,
+         disable_cache_fallback: Optional[bool] = None,
+         **kwargs,
+     ):
+         self.api_key: Optional[str] = api_key or getenv("PARALLEL_API_KEY")
+         if not self.api_key:
+             log_error("PARALLEL_API_KEY not set. Please set the PARALLEL_API_KEY environment variable.")
+
+         self.max_results = max_results
+         self.max_chars_per_result = max_chars_per_result
+         self.beta_version = beta_version
+         self.mode = mode
+         self.include_domains = include_domains
+         self.exclude_domains = exclude_domains
+         self.max_age_seconds = max_age_seconds
+         self.timeout_seconds = timeout_seconds
+         self.disable_cache_fallback = disable_cache_fallback
+
+         self.parallel_client = ParallelClient(
+             api_key=self.api_key, default_headers={"parallel-beta": self.beta_version}
+         )
+
+         tools: List[Any] = []
+         if all or enable_search:
+             tools.append(self.parallel_search)
+         if all or enable_extract:
+             tools.append(self.parallel_extract)
+
+         super().__init__(name="parallel_tools", tools=tools, **kwargs)
+
+     def parallel_search(
+         self,
+         objective: Optional[str] = None,
+         search_queries: Optional[List[str]] = None,
+         max_results: Optional[int] = None,
+         max_chars_per_result: Optional[int] = None,
+     ) -> str:
+         """Use this function to search the web using Parallel's Search API with a natural language objective.
+         You must provide at least one of objective or search_queries.
+
+         Args:
+             objective (Optional[str]): Natural-language description of what the web search is trying to find.
+             search_queries (Optional[List[str]]): Traditional keyword queries with optional search operators.
+             max_results (Optional[int]): Upper bound on results returned. Overrides constructor default.
+             max_chars_per_result (Optional[int]): Upper bound on total characters per url for excerpts.
+
+         Returns:
+             str: A JSON formatted string containing the search results with URLs, titles, publish dates, and relevant excerpts.
+         """
+         try:
+             if not objective and not search_queries:
+                 return json.dumps({"error": "Please provide at least one of: objective or search_queries"}, indent=2)
+
+             # Use instance defaults if not provided
+             final_max_results = max_results if max_results is not None else self.max_results
+
+             search_params: Dict[str, Any] = {
+                 "max_results": final_max_results,
+             }
+
+             # Add objective if provided
+             if objective:
+                 search_params["objective"] = objective
+
+             # Add search_queries if provided
+             if search_queries:
+                 search_params["search_queries"] = search_queries
+
+             # Add mode from constructor default
+             if self.mode:
+                 search_params["mode"] = self.mode
+
+             # Add excerpts configuration
+             excerpts_config: Dict[str, Any] = {}
+             final_max_chars = max_chars_per_result if max_chars_per_result is not None else self.max_chars_per_result
+             if final_max_chars is not None:
+                 excerpts_config["max_chars_per_result"] = final_max_chars
+
+             if excerpts_config:
+                 search_params["excerpts"] = excerpts_config
+
+             # Add source_policy from constructor defaults
+             source_policy: Dict[str, Any] = {}
+             if self.include_domains:
+                 source_policy["include_domains"] = self.include_domains
+             if self.exclude_domains:
+                 source_policy["exclude_domains"] = self.exclude_domains
+
+             if source_policy:
+                 search_params["source_policy"] = source_policy
+
+             # Add fetch_policy from constructor defaults
+             fetch_policy: Dict[str, Any] = {}
+             if self.max_age_seconds is not None:
+                 fetch_policy["max_age_seconds"] = self.max_age_seconds
+             if self.timeout_seconds is not None:
+                 fetch_policy["timeout_seconds"] = self.timeout_seconds
+             if self.disable_cache_fallback is not None:
+                 fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
+
+             if fetch_policy:
+                 search_params["fetch_policy"] = fetch_policy
+
+             search_result = self.parallel_client.beta.search(**search_params)
+
+             # Use model_dump() if available, otherwise convert to dict
+             try:
+                 if hasattr(search_result, "model_dump"):
+                     return json.dumps(search_result.model_dump(), cls=CustomJSONEncoder)
+             except Exception:
+                 pass
+
+             # Manually format the results
+             formatted_results: Dict[str, Any] = {
+                 "search_id": getattr(search_result, "search_id", ""),
+                 "results": [],
+             }
+
+             if hasattr(search_result, "results") and search_result.results:
+                 results_list: List[Dict[str, Any]] = []
+                 for result in search_result.results:
+                     formatted_result: Dict[str, Any] = {
+                         "title": getattr(result, "title", ""),
+                         "url": getattr(result, "url", ""),
+                         "publish_date": getattr(result, "publish_date", ""),
+                         "excerpt": getattr(result, "excerpt", ""),
+                     }
+                     results_list.append(formatted_result)
+                 formatted_results["results"] = results_list
+
+             if hasattr(search_result, "warnings"):
+                 formatted_results["warnings"] = search_result.warnings
+
+             if hasattr(search_result, "usage"):
+                 formatted_results["usage"] = search_result.usage
+
+             return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
+
+         except Exception as e:
+             log_error(f"Error searching Parallel for objective '{objective}': {e}")
+             return json.dumps({"error": f"Search failed: {str(e)}"}, indent=2)
+
+     def parallel_extract(
+         self,
+         urls: List[str],
+         objective: Optional[str] = None,
+         search_queries: Optional[List[str]] = None,
+         excerpts: bool = True,
+         max_chars_per_excerpt: Optional[int] = None,
+         full_content: bool = False,
+         max_chars_for_full_content: Optional[int] = None,
+     ) -> str:
+         """Use this function to extract content from specific URLs using Parallel's Extract API.
+
+         Args:
+             urls (List[str]): List of public URLs to extract content from.
+             objective (Optional[str]): Search focus to guide content extraction.
+             search_queries (Optional[List[str]]): Keywords for targeting relevant content.
+             excerpts (bool): Include relevant text snippets.
+             max_chars_per_excerpt (Optional[int]): Upper bound on total characters per url. Only used when excerpts is True.
+             full_content (bool): Include complete page text.
+             max_chars_for_full_content (Optional[int]): Limit on characters per url. Only used when full_content is True.
+
+         Returns:
+             str: A JSON formatted string containing extracted content with titles, publish dates, excerpts and/or full content.
+         """
+         try:
+             if not urls:
+                 return json.dumps({"error": "Please provide at least one URL to extract"}, indent=2)
+
+             extract_params: Dict[str, Any] = {
+                 "urls": urls,
+             }
+
+             # Add objective if provided
+             if objective:
+                 extract_params["objective"] = objective
+
+             # Add search_queries if provided
+             if search_queries:
+                 extract_params["search_queries"] = search_queries
+
+             # Add excerpts configuration
+             if excerpts and max_chars_per_excerpt is not None:
+                 extract_params["excerpts"] = {"max_chars_per_result": max_chars_per_excerpt}
+             else:
+                 extract_params["excerpts"] = excerpts
+
+             # Add full_content configuration
+             if full_content and max_chars_for_full_content is not None:
+                 extract_params["full_content"] = {"max_chars_per_result": max_chars_for_full_content}
+             else:
+                 extract_params["full_content"] = full_content
+
+             # Add fetch_policy from constructor defaults
+             fetch_policy: Dict[str, Any] = {}
+             if self.max_age_seconds is not None:
+                 fetch_policy["max_age_seconds"] = self.max_age_seconds
+             if self.timeout_seconds is not None:
+                 fetch_policy["timeout_seconds"] = self.timeout_seconds
+             if self.disable_cache_fallback is not None:
+                 fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
+
+             if fetch_policy:
+                 extract_params["fetch_policy"] = fetch_policy
+
+             extract_result = self.parallel_client.beta.extract(**extract_params)
+
+             # Use model_dump() if available, otherwise convert to dict
+             try:
+                 if hasattr(extract_result, "model_dump"):
+                     return json.dumps(extract_result.model_dump(), cls=CustomJSONEncoder)
+             except Exception:
+                 pass
+
+             # Manually format the results
+             formatted_results: Dict[str, Any] = {
+                 "extract_id": getattr(extract_result, "extract_id", ""),
+                 "results": [],
+                 "errors": [],
+             }
+
+             if hasattr(extract_result, "results") and extract_result.results:
+                 results_list: List[Dict[str, Any]] = []
+                 for result in extract_result.results:
+                     formatted_result: Dict[str, Any] = {
+                         "url": getattr(result, "url", ""),
+                         "title": getattr(result, "title", ""),
+                         "publish_date": getattr(result, "publish_date", ""),
+                     }
+
+                     if excerpts and hasattr(result, "excerpts"):
+                         formatted_result["excerpts"] = result.excerpts
+
+                     if full_content and hasattr(result, "full_content"):
+                         formatted_result["full_content"] = result.full_content
+
+                     results_list.append(formatted_result)
+                 formatted_results["results"] = results_list
+
+             if hasattr(extract_result, "errors") and extract_result.errors:
+                 formatted_results["errors"] = extract_result.errors
+
+             if hasattr(extract_result, "warnings"):
+                 formatted_results["warnings"] = extract_result.warnings
+
+             if hasattr(extract_result, "usage"):
+                 formatted_results["usage"] = extract_result.usage
+
+             return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
+
+         except Exception as e:
+             log_error(f"Error extracting from Parallel: {e}")
+             return json.dumps({"error": f"Extract failed: {str(e)}"}, indent=2)
agno/utils/models/claude.py CHANGED
@@ -68,6 +68,8 @@ def _format_image_for_message(image: Image) -> Optional[Dict[str, Any]]:
  }

  try:
+ img_type = None
+
  # Case 0: Image is an Anthropic uploaded file
  if image.content is not None and hasattr(image.content, "id"):
  content_bytes = image.content
@@ -80,7 +82,6 @@ def _format_image_for_message(image: Image) -> Optional[Dict[str, Any]]:
  import os
  from urllib.parse import urlparse

- img_type = None
  if image.url:
  parsed_url = urlparse(image.url)
  _, ext = os.path.splitext(parsed_url.path)
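The two hunks above hoist the img_type = None initialization from the URL-handling block to the top of the try block, so the variable is bound no matter which case handles the image content. A simplified illustration, with hypothetical names, of why initializing before branching avoids an unbound-local error:

from typing import Optional

def _image_extension(source: str, url: Optional[str] = None) -> Optional[str]:
    img_type = None  # initialized before any branch, mirroring the hunk above
    if source == "url" and url:
        img_type = url.rsplit(".", 1)[-1]
    # Branches that never assign img_type (bytes, uploaded file id, ...) can still read it safely here.
    return img_type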
agno/workflow/agent.py CHANGED
@@ -268,7 +268,7 @@ Guidelines:
  user_id=session_from_db.user_id,
  execution_input=workflow_execution_input,
  workflow_run_response=workflow_run_response,
- session_state=run_context.session_state,
+ run_context=run_context,
  stream_events=True,
  websocket_handler=websocket_handler,
  ):
@@ -286,7 +286,7 @@ Guidelines:
  user_id=session_from_db.user_id,
  execution_input=workflow_execution_input,
  workflow_run_response=workflow_run_response,
- session_state=run_context.session_state,
+ run_context=run_context,
  )

  if isinstance(result.content, str):
agno/workflow/condition.py CHANGED
@@ -4,6 +4,7 @@ from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List
  from uuid import uuid4

  from agno.run.agent import RunOutputEvent
+ from agno.run.base import RunContext
  from agno.run.team import TeamRunOutputEvent
  from agno.run.workflow import (
  ConditionExecutionCompletedEvent,
@@ -175,6 +176,7 @@ class Condition:
  user_id: Optional[str] = None,
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  workflow_session: Optional[WorkflowSession] = None,
  add_workflow_history_to_steps: Optional[bool] = False,
@@ -188,7 +190,11 @@ class Condition:
  self._prepare_steps()

  # Evaluate the condition
- condition_result = self._evaluate_condition(step_input, session_state)
+ if run_context is not None and run_context.session_state is not None:
+     condition_result = self._evaluate_condition(step_input, session_state=run_context.session_state)
+ else:
+     condition_result = self._evaluate_condition(step_input, session_state=session_state)
+
  log_debug(f"Condition {self.name} evaluated to: {condition_result}")

  if not condition_result:
@@ -214,6 +220,7 @@ class Condition:
  user_id=user_id,
  workflow_run_response=workflow_run_response,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  workflow_session=workflow_session,
  add_workflow_history_to_steps=add_workflow_history_to_steps,
@@ -284,6 +291,7 @@ class Condition:
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  step_index: Optional[Union[int, tuple]] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  parent_step_id: Optional[str] = None,
  workflow_session: Optional[WorkflowSession] = None,
@@ -298,7 +306,10 @@ class Condition:
  self._prepare_steps()

  # Evaluate the condition
- condition_result = self._evaluate_condition(step_input, session_state)
+ if run_context is not None and run_context.session_state is not None:
+     condition_result = self._evaluate_condition(step_input, session_state=run_context.session_state)
+ else:
+     condition_result = self._evaluate_condition(step_input, session_state=session_state)
  log_debug(f"Condition {self.name} evaluated to: {condition_result}")

  # Considering both stream_events and stream_intermediate_steps (deprecated)
@@ -363,6 +374,7 @@ class Condition:
  workflow_run_response=workflow_run_response,
  step_index=child_step_index,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  parent_step_id=conditional_step_id,
  workflow_session=workflow_session,
@@ -447,6 +459,7 @@ class Condition:
  user_id: Optional[str] = None,
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  workflow_session: Optional[WorkflowSession] = None,
  add_workflow_history_to_steps: Optional[bool] = False,
@@ -460,7 +473,10 @@ class Condition:
  self._prepare_steps()

  # Evaluate the condition
- condition_result = await self._aevaluate_condition(step_input, session_state)
+ if run_context is not None and run_context.session_state is not None:
+     condition_result = await self._aevaluate_condition(step_input, session_state=run_context.session_state)
+ else:
+     condition_result = await self._aevaluate_condition(step_input, session_state=session_state)
  log_debug(f"Condition {self.name} evaluated to: {condition_result}")

  if not condition_result:
@@ -488,6 +504,7 @@ class Condition:
  user_id=user_id,
  workflow_run_response=workflow_run_response,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  workflow_session=workflow_session,
  add_workflow_history_to_steps=add_workflow_history_to_steps,
@@ -556,6 +573,7 @@ class Condition:
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  step_index: Optional[Union[int, tuple]] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  parent_step_id: Optional[str] = None,
  workflow_session: Optional[WorkflowSession] = None,
@@ -570,7 +588,10 @@ class Condition:
  self._prepare_steps()

  # Evaluate the condition
- condition_result = await self._aevaluate_condition(step_input, session_state)
+ if run_context is not None and run_context.session_state is not None:
+     condition_result = await self._aevaluate_condition(step_input, session_state=run_context.session_state)
+ else:
+     condition_result = await self._aevaluate_condition(step_input, session_state=session_state)
  log_debug(f"Condition {self.name} evaluated to: {condition_result}")

  # Considering both stream_events and stream_intermediate_steps (deprecated)
@@ -637,6 +658,7 @@ class Condition:
  workflow_run_response=workflow_run_response,
  step_index=child_step_index,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  parent_step_id=conditional_step_id,
  workflow_session=workflow_session,
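Across these hunks the Condition executors gain an optional run_context parameter that, when it carries a session_state, takes precedence over the legacy session_state argument. A standalone sketch of that precedence rule (the helper name is hypothetical; Condition inlines this logic at each call site):

from typing import Any, Dict, Optional

from agno.run.base import RunContext

def resolve_session_state(
    run_context: Optional[RunContext], session_state: Optional[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
    # Prefer the state carried on the RunContext; fall back to the session_state
    # argument kept for backward compatibility.
    if run_context is not None and run_context.session_state is not None:
        return run_context.session_state
    return session_state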
agno/workflow/loop.py CHANGED
@@ -4,6 +4,7 @@ from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List
  from uuid import uuid4

  from agno.run.agent import RunOutputEvent
+ from agno.run.base import RunContext
  from agno.run.team import TeamRunOutputEvent
  from agno.run.workflow import (
  LoopExecutionCompletedEvent,
@@ -132,6 +133,7 @@ class Loop:
  user_id: Optional[str] = None,
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  workflow_session: Optional[WorkflowSession] = None,
  add_workflow_history_to_steps: Optional[bool] = False,
@@ -160,6 +162,7 @@ class Loop:
  user_id=user_id,
  workflow_run_response=workflow_run_response,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  workflow_session=workflow_session,
  add_workflow_history_to_steps=add_workflow_history_to_steps,
@@ -232,6 +235,7 @@ class Loop:
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  step_index: Optional[Union[int, tuple]] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  parent_step_id: Optional[str] = None,
  workflow_session: Optional[WorkflowSession] = None,
@@ -311,6 +315,7 @@ class Loop:
  workflow_run_response=workflow_run_response,
  step_index=composite_step_index,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  parent_step_id=loop_step_id,
  add_workflow_history_to_steps=add_workflow_history_to_steps,
@@ -428,6 +433,7 @@ class Loop:
  user_id: Optional[str] = None,
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  workflow_session: Optional[WorkflowSession] = None,
  add_workflow_history_to_steps: Optional[bool] = False,
@@ -458,6 +464,7 @@ class Loop:
  user_id=user_id,
  workflow_run_response=workflow_run_response,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  workflow_session=workflow_session,
  add_workflow_history_to_steps=add_workflow_history_to_steps,
@@ -533,6 +540,7 @@ class Loop:
  workflow_run_response: Optional[WorkflowRunOutput] = None,
  step_index: Optional[Union[int, tuple]] = None,
  store_executor_outputs: bool = True,
+ run_context: Optional[RunContext] = None,
  session_state: Optional[Dict[str, Any]] = None,
  parent_step_id: Optional[str] = None,
  workflow_session: Optional[WorkflowSession] = None,
@@ -612,6 +620,7 @@ class Loop:
  workflow_run_response=workflow_run_response,
  step_index=composite_step_index,
  store_executor_outputs=store_executor_outputs,
+ run_context=run_context,
  session_state=session_state,
  parent_step_id=loop_step_id,
  workflow_session=workflow_session,