agentex-sdk 0.4.12__py3-none-any.whl → 0.4.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agentex/_constants.py CHANGED
@@ -6,9 +6,9 @@ RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
  OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to"

  # default timeout is 1 minute
- DEFAULT_TIMEOUT = httpx.Timeout(timeout=60, connect=5.0)
- DEFAULT_MAX_RETRIES = 2
- DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20)
+ DEFAULT_TIMEOUT = httpx.Timeout(timeout=300, connect=5.0)
+ DEFAULT_MAX_RETRIES = 0
+ DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=1000)

  INITIAL_RETRY_DELAY = 0.5
  MAX_RETRY_DELAY = 8.0
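The new defaults raise the overall request timeout from 60 s to 300 s, disable automatic retries, and enlarge the connection pool. Callers who depended on the previous behavior can override these when constructing the client; a minimal sketch, assuming the Stainless-generated `AsyncAgentex` client accepts the usual `timeout` and `max_retries` options:

import httpx
from agentex import AsyncAgentex

# Illustrative only: restore the pre-0.4.14 defaults for one client instance.
client = AsyncAgentex(
    timeout=httpx.Timeout(timeout=60, connect=5.0),  # old default was 60 s total
    max_retries=2,                                   # new default is 0 (no retries)
)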
agentex/_version.py CHANGED
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  __title__ = "agentex"
- __version__ = "0.4.12" # x-release-please-version
+ __version__ = "0.4.14" # x-release-please-version
@@ -59,6 +59,7 @@ class ACPModule:
  start_to_close_timeout: timedelta = timedelta(seconds=5),
  heartbeat_timeout: timedelta = timedelta(seconds=5),
  retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
+ request: dict[str, Any] | None = None,
  ) -> Task:
  """
  Create a new task.
@@ -71,6 +72,7 @@ class ACPModule:
  start_to_close_timeout: The start to close timeout for the task.
  heartbeat_timeout: The heartbeat timeout for the task.
  retry_policy: The retry policy for the task.
+ request: Additional request context including headers to forward to the agent.

  Returns:
  The task entry.
@@ -85,6 +87,7 @@ class ACPModule:
  params=params,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  ),
  response_type=Task,
  start_to_close_timeout=start_to_close_timeout,
@@ -99,6 +102,7 @@ class ACPModule:
  params=params,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  )

  async def send_event(
@@ -112,15 +116,22 @@ class ACPModule:
  start_to_close_timeout: timedelta = timedelta(seconds=5),
  heartbeat_timeout: timedelta = timedelta(seconds=5),
  retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
+ request: dict[str, Any] | None = None,
  ) -> Event:
  """
  Send an event to a task.

  Args:
  task_id: The ID of the task to send the event to.
- data: The data to send to the event.
+ content: The content to send to the event.
  agent_id: The ID of the agent to send the event to.
  agent_name: The name of the agent to send the event to.
+ trace_id: The trace ID for the event.
+ parent_span_id: The parent span ID for the event.
+ start_to_close_timeout: The start to close timeout for the event.
+ heartbeat_timeout: The heartbeat timeout for the event.
+ retry_policy: The retry policy for the event.
+ request: Additional request context including headers to forward to the agent.

  Returns:
  The event entry.
@@ -135,6 +146,7 @@ class ACPModule:
  content=content,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  ),
  response_type=None,
  start_to_close_timeout=start_to_close_timeout,
@@ -149,6 +161,7 @@ class ACPModule:
  content=content,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  )

  async def send_message(
@@ -162,15 +175,22 @@ class ACPModule:
  start_to_close_timeout: timedelta = timedelta(seconds=5),
  heartbeat_timeout: timedelta = timedelta(seconds=5),
  retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
+ request: dict[str, Any] | None = None,
  ) -> List[TaskMessage]:
  """
  Send a message to a task.

  Args:
- task_id: The ID of the task to send the message to.
  content: The task message content to send to the task.
+ task_id: The ID of the task to send the message to.
  agent_id: The ID of the agent to send the message to.
  agent_name: The name of the agent to send the message to.
+ trace_id: The trace ID for the message.
+ parent_span_id: The parent span ID for the message.
+ start_to_close_timeout: The start to close timeout for the message.
+ heartbeat_timeout: The heartbeat timeout for the message.
+ retry_policy: The retry policy for the message.
+ request: Additional request context including headers to forward to the agent.

  Returns:
  The message entry.
@@ -185,6 +205,7 @@ class ACPModule:
  content=content,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  ),
  response_type=TaskMessage,
  start_to_close_timeout=start_to_close_timeout,
@@ -199,6 +220,7 @@ class ACPModule:
  content=content,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  )

  async def cancel_task(
@@ -212,6 +234,7 @@ class ACPModule:
  start_to_close_timeout: timedelta = timedelta(seconds=5),
  heartbeat_timeout: timedelta = timedelta(seconds=5),
  retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
+ request: dict[str, Any] | None = None,
  ) -> Task:
  """
  Cancel a task by sending cancel request to the agent that owns the task.
@@ -226,6 +249,7 @@ class ACPModule:
  start_to_close_timeout: The start to close timeout for the task.
  heartbeat_timeout: The heartbeat timeout for the task.
  retry_policy: The retry policy for the task.
+ request: Additional request context including headers to forward to the agent.

  Returns:
  The task entry.
@@ -244,6 +268,7 @@ class ACPModule:
  agent_name=agent_name,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  ),
  response_type=None,
  start_to_close_timeout=start_to_close_timeout,
@@ -258,4 +283,5 @@ class ACPModule:
  agent_name=agent_name,
  trace_id=trace_id,
  parent_span_id=parent_span_id,
+ request=request,
  )
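Each ACPModule method now accepts an optional `request` dict whose `headers` entry is forwarded to the agent as extra HTTP headers. A hypothetical call site, where `acp` stands in for an ACPModule instance and the header name and values are illustrative only:

forwarded = {"headers": {"x-request-id": "req-abc-123"}}

# Send an event to an existing task, passing request context through to the agent.
event = await acp.send_event(
    agent_name="my-agent",
    task_id="task_123",
    content=content,        # a TaskMessageContent built elsewhere
    request=forwarded,      # new in 0.4.14: headers are forwarded to the agent
)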
@@ -81,14 +81,13 @@ class OpenAIModule:
  tools: list[Tool] | None = None,
  output_type: type[Any] | AgentOutputSchemaBase | None = None,
  tool_use_behavior: (
- Literal["run_llm_again", "stop_on_first_tool"]
- | StopAtTools
- | ToolsToFinalOutputFunction
+ Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
  ) = "run_llm_again",
  mcp_timeout_seconds: int | None = None,
  input_guardrails: list[InputGuardrail] | None = None,
  output_guardrails: list[OutputGuardrail] | None = None,
  max_turns: int | None = None,
+ previous_response_id: str | None = None,
  ) -> SerializableRunResult | RunResult:
  """
  Run an agent without streaming or TaskMessage creation.
@@ -116,6 +115,7 @@ class OpenAIModule:
  input_guardrails: Optional list of input guardrails to run on initial user input.
  output_guardrails: Optional list of output guardrails to run on final agent output.
  max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
+ previous_response_id: Optional previous response ID for conversation continuity.

  Returns:
  Union[SerializableRunResult, RunResult]: SerializableRunResult when in Temporal, RunResult otherwise.
@@ -139,6 +139,7 @@ class OpenAIModule:
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
  max_turns=max_turns,
+ previous_response_id=previous_response_id,
  )
  return await ActivityHelpers.execute_activity(
  activity_name=OpenAIActivityName.RUN_AGENT,
@@ -167,6 +168,7 @@ class OpenAIModule:
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
  max_turns=max_turns,
+ previous_response_id=previous_response_id,
  )

  async def run_agent_auto_send(
@@ -188,14 +190,13 @@ class OpenAIModule:
  tools: list[Tool] | None = None,
  output_type: type[Any] | AgentOutputSchemaBase | None = None,
  tool_use_behavior: (
- Literal["run_llm_again", "stop_on_first_tool"]
- | StopAtTools
- | ToolsToFinalOutputFunction
+ Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
  ) = "run_llm_again",
  mcp_timeout_seconds: int | None = None,
  input_guardrails: list[InputGuardrail] | None = None,
  output_guardrails: list[OutputGuardrail] | None = None,
  max_turns: int | None = None,
+ previous_response_id: str | None = None,
  ) -> SerializableRunResult | RunResult:
  """
  Run an agent with automatic TaskMessage creation.
@@ -222,6 +223,7 @@ class OpenAIModule:
  input_guardrails: Optional list of input guardrails to run on initial user input.
  output_guardrails: Optional list of output guardrails to run on final agent output.
  max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
+ previous_response_id: Optional previous response ID for conversation continuity.

  Returns:
  Union[SerializableRunResult, RunResult]: SerializableRunResult when in Temporal, RunResult otherwise.
@@ -246,6 +248,7 @@ class OpenAIModule:
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
  max_turns=max_turns,
+ previous_response_id=previous_response_id,
  )
  return await ActivityHelpers.execute_activity(
  activity_name=OpenAIActivityName.RUN_AGENT_AUTO_SEND,
@@ -275,6 +278,7 @@ class OpenAIModule:
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
  max_turns=max_turns,
+ previous_response_id=previous_response_id,
  )

  async def run_agent_streamed(
@@ -292,14 +296,13 @@ class OpenAIModule:
  tools: list[Tool] | None = None,
  output_type: type[Any] | AgentOutputSchemaBase | None = None,
  tool_use_behavior: (
- Literal["run_llm_again", "stop_on_first_tool"]
- | StopAtTools
- | ToolsToFinalOutputFunction
+ Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
  ) = "run_llm_again",
  mcp_timeout_seconds: int | None = None,
  input_guardrails: list[InputGuardrail] | None = None,
  output_guardrails: list[OutputGuardrail] | None = None,
  max_turns: int | None = None,
+ previous_response_id: str | None = None,
  ) -> RunResultStreaming:
  """
  Run an agent with streaming enabled but no TaskMessage creation.
@@ -330,6 +333,7 @@ class OpenAIModule:
  input_guardrails: Optional list of input guardrails to run on initial user input.
  output_guardrails: Optional list of output guardrails to run on final agent output.
  max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
+ previous_response_id: Optional previous response ID for conversation continuity.

  Returns:
  RunResultStreaming: The result of the agent run with streaming.
@@ -363,6 +367,7 @@ class OpenAIModule:
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
  max_turns=max_turns,
+ previous_response_id=previous_response_id,
  )

  async def run_agent_streamed_auto_send(
@@ -384,14 +389,13 @@ class OpenAIModule:
  tools: list[Tool] | None = None,
  output_type: type[Any] | AgentOutputSchemaBase | None = None,
  tool_use_behavior: (
- Literal["run_llm_again", "stop_on_first_tool"]
- | StopAtTools
- | ToolsToFinalOutputFunction
+ Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
  ) = "run_llm_again",
  mcp_timeout_seconds: int | None = None,
  input_guardrails: list[InputGuardrail] | None = None,
  output_guardrails: list[OutputGuardrail] | None = None,
  max_turns: int | None = None,
+ previous_response_id: str | None = None,
  ) -> SerializableRunResultStreaming | RunResultStreaming:
  """
  Run an agent with streaming enabled and automatic TaskMessage creation.
@@ -418,6 +422,7 @@ class OpenAIModule:
  tool_use_behavior: Optional tool use behavior.
  mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
  max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
+ previous_response_id: Optional previous response ID for conversation continuity.

  Returns:
  Union[SerializableRunResultStreaming, RunResultStreaming]: SerializableRunResultStreaming when in Temporal, RunResultStreaming otherwise.
@@ -441,7 +446,7 @@ class OpenAIModule:
  mcp_timeout_seconds=mcp_timeout_seconds,
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
- max_turns=max_turns
+ max_turns=max_turns,
  )
  return await ActivityHelpers.execute_activity(
  activity_name=OpenAIActivityName.RUN_AGENT_STREAMED_AUTO_SEND,
@@ -471,4 +476,5 @@ class OpenAIModule:
  input_guardrails=input_guardrails,
  output_guardrails=output_guardrails,
  max_turns=max_turns,
+ previous_response_id=previous_response_id,
  )
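All four run_agent variants gain a `previous_response_id` parameter that is threaded through to the underlying run, letting a turn continue from an earlier Responses API response. A hedged sketch, where `openai_module` stands in for an OpenAIModule instance, `prev_id` is a response ID saved from an earlier turn, and the remaining keyword arguments (model, input, and so on) are elided because they are not part of this diff:

result = await openai_module.run_agent_auto_send(
    **turn_kwargs,                  # model / instructions / input, unchanged by this release
    max_turns=5,
    previous_response_id=prev_id,   # new in 0.4.14: continue the prior conversation turn
)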
@@ -22,7 +22,7 @@ logger = make_logger(__name__)
  console = Console()

  TEMPORAL_WORKER_KEY = "temporal-worker"
- AGENTEX_AGENTS_HELM_CHART_VERSION = "0.1.6-v1-beta"
+ AGENTEX_AGENTS_HELM_CHART_VERSION = "0.1.7-v2-beta"


  class InputDeployOverrides(BaseModel):
@@ -9,6 +9,10 @@ from agentex.types.task import Task
  from agentex.types.task_message import TaskMessage
  from agentex.types.task_message_content import TaskMessageContent
  from agentex.types.task_message_content_param import TaskMessageContentParam
+ from agentex.types.agent_rpc_params import (
+ ParamsCancelTaskRequest as RpcParamsCancelTaskRequest,
+ ParamsSendEventRequest as RpcParamsSendEventRequest,
+ )

  logger = make_logger(__name__)

@@ -30,6 +34,7 @@ class ACPService:
  params: dict[str, Any] | None = None,
  trace_id: str | None = None,
  parent_span_id: str | None = None,
+ request: dict[str, Any] | None = None,
  ) -> Task:
  trace = self._tracer.trace(trace_id=trace_id)
  async with trace.span(
@@ -43,6 +48,10 @@ class ACPService:
  },
  ) as span:
  heartbeat_if_in_workflow("task create")
+
+ # Extract headers from request; pass-through to agent
+ extra_headers = request.get("headers") if request else None
+
  if agent_name:
  json_rpc_response = await self._agentex_client.agents.rpc_by_name(
  agent_name=agent_name,
@@ -51,6 +60,7 @@ class ACPService:
  "name": name,
  "params": params,
  },
+ extra_headers=extra_headers,
  )
  elif agent_id:
  json_rpc_response = await self._agentex_client.agents.rpc(
@@ -60,6 +70,7 @@ class ACPService:
  "name": name,
  "params": params,
  },
+ extra_headers=extra_headers,
  )
  else:
  raise ValueError("Either agent_name or agent_id must be provided")
@@ -78,6 +89,7 @@ class ACPService:
  task_name: str | None = None,
  trace_id: str | None = None,
  parent_span_id: str | None = None,
+ request: dict[str, Any] | None = None,
  ) -> List[TaskMessage]:
  trace = self._tracer.trace(trace_id=trace_id)
  async with trace.span(
@@ -92,6 +104,10 @@ class ACPService:
  },
  ) as span:
  heartbeat_if_in_workflow("message send")
+
+ # Extract headers from request; pass-through to agent
+ extra_headers = request.get("headers") if request else None
+
  if agent_name:
  json_rpc_response = await self._agentex_client.agents.rpc_by_name(
  agent_name=agent_name,
@@ -101,6 +117,7 @@ class ACPService:
  "content": cast(TaskMessageContentParam, content.model_dump()),
  "stream": False,
  },
+ extra_headers=extra_headers,
  )
  elif agent_id:
  json_rpc_response = await self._agentex_client.agents.rpc(
@@ -111,12 +128,13 @@ class ACPService:
  "content": cast(TaskMessageContentParam, content.model_dump()),
  "stream": False,
  },
+ extra_headers=extra_headers,
  )
  else:
  raise ValueError("Either agent_name or agent_id must be provided")

  task_messages: List[TaskMessage] = []
- logger.info(f"json_rpc_response: {json_rpc_response}")
+ logger.info("json_rpc_response: %s", json_rpc_response)
  if isinstance(json_rpc_response.result, list):
  for message in json_rpc_response.result:
  task_message = TaskMessage.model_validate(message)
@@ -137,6 +155,7 @@ class ACPService:
  task_name: str | None = None,
  trace_id: str | None = None,
  parent_span_id: str | None = None,
+ request: dict[str, Any] | None = None,
  ) -> Event:
  trace = self._tracer.trace(trace_id=trace_id)
  async with trace.span(
@@ -146,27 +165,33 @@ class ACPService:
  "agent_id": agent_id,
  "agent_name": agent_name,
  "task_id": task_id,
+ "task_name": task_name,
  "content": content,
  },
  ) as span:
  heartbeat_if_in_workflow("event send")
+
+ # Extract headers from request; pass-through to agent
+ extra_headers = request.get("headers") if request else None
+
+ rpc_event_params: RpcParamsSendEventRequest = {
+ "task_id": task_id,
+ "task_name": task_name,
+ "content": cast(TaskMessageContentParam, content.model_dump()),
+ }
  if agent_name:
  json_rpc_response = await self._agentex_client.agents.rpc_by_name(
  agent_name=agent_name,
  method="event/send",
- params={
- "task_id": task_id,
- "content": cast(TaskMessageContentParam, content.model_dump()),
- },
+ params=rpc_event_params,
+ extra_headers=extra_headers,
  )
  elif agent_id:
  json_rpc_response = await self._agentex_client.agents.rpc(
  agent_id=agent_id,
  method="event/send",
- params={
- "task_id": task_id,
- "content": cast(TaskMessageContentParam, content.model_dump()),
- },
+ params=rpc_event_params,
+ extra_headers=extra_headers,
  )
  else:
  raise ValueError("Either agent_name or agent_id must be provided")
@@ -184,7 +209,27 @@ class ACPService:
  agent_name: str | None = None,
  trace_id: str | None = None,
  parent_span_id: str | None = None,
- ) -> Task:
+ request: dict[str, Any] | None = None,
+ ) -> Task:
+ """
+ Cancel a task by sending cancel request to the agent that owns the task.
+
+ Args:
+ task_id: ID of the task to cancel (passed to agent in params)
+ task_name: Name of the task to cancel (passed to agent in params)
+ agent_id: ID of the agent that owns the task
+ agent_name: Name of the agent that owns the task
+ trace_id: Trace ID for tracing
+ parent_span_id: Parent span ID for tracing
+ request: Additional request context including headers to forward to the agent
+
+ Returns:
+ Task entry representing the cancelled task
+
+ Raises:
+ ValueError: If neither agent_name nor agent_id is provided,
+ or if neither task_name nor task_id is provided
+ """
  # Require agent identification
  if not agent_name and not agent_id:
  raise ValueError("Either agent_name or agent_id must be provided to identify the agent that owns the task")
@@ -192,7 +237,6 @@ class ACPService:
  # Require task identification
  if not task_name and not task_id:
  raise ValueError("Either task_name or task_id must be provided to identify the task to cancel")
-
  trace = self._tracer.trace(trace_id=trace_id)
  async with trace.span(
  parent_id=parent_span_id,
@@ -206,8 +250,11 @@ class ACPService:
  ) as span:
  heartbeat_if_in_workflow("task cancel")

+ # Extract headers from request; pass-through to agent
+ extra_headers = request.get("headers") if request else None
+
  # Build params for the agent (task identification)
- params = {}
+ params: RpcParamsCancelTaskRequest = {}
  if task_id:
  params["task_id"] = task_id
  if task_name:
@@ -219,12 +266,15 @@ class ACPService:
  agent_name=agent_name,
  method="task/cancel",
  params=params,
+ extra_headers=extra_headers,
  )
  else: # agent_id is provided (validated above)
+ assert agent_id is not None
  json_rpc_response = await self._agentex_client.agents.rpc(
  agent_id=agent_id,
  method="task/cancel",
  params=params,
+ extra_headers=extra_headers,
  )

  task_entry = Task.model_validate(json_rpc_response.result)
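The service-layer pattern added throughout this file is the same in every method: pull `headers` out of the optional `request` dict and hand it to the RPC call as `extra_headers`. A condensed illustration using only names that appear in the diff; the `agentex_client` instance and the header values are assumed:

request = {"headers": {"x-request-id": "abc-123"}}
extra_headers = request.get("headers") if request else None

json_rpc_response = await agentex_client.agents.rpc_by_name(
    agent_name="my-agent",
    method="task/cancel",
    params={"task_id": "task_123"},
    extra_headers=extra_headers,   # forwarded to the agent's HTTP endpoint
)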