amazon-bedrock-haystack 4.0.0__tar.gz → 4.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/CHANGELOG.md +18 -0
  2. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/PKG-INFO +1 -1
  3. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py +29 -8
  4. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/utils.py +53 -15
  5. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_chat_generator.py +46 -6
  6. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_chat_generator_utils.py +174 -0
  7. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_document_image_embedder.py +1 -1
  8. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/.gitignore +0 -0
  9. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/LICENSE.txt +0 -0
  10. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/README.md +0 -0
  11. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/examples/bedrock_ranker_example.py +0 -0
  12. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/examples/chatgenerator_example.py +0 -0
  13. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/examples/embedders_generator_with_rag_example.py +0 -0
  14. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/pydoc/config.yml +0 -0
  15. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/pyproject.toml +0 -0
  16. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/common/amazon_bedrock/__init__.py +0 -0
  17. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/common/amazon_bedrock/errors.py +0 -0
  18. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/common/amazon_bedrock/utils.py +0 -0
  19. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/common/py.typed +0 -0
  20. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/embedders/amazon_bedrock/__init__.py +0 -0
  21. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py +0 -0
  22. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/embedders/amazon_bedrock/document_image_embedder.py +0 -0
  23. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py +0 -0
  24. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/embedders/py.typed +0 -0
  25. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/amazon_bedrock/__init__.py +0 -0
  26. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py +0 -0
  27. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/__init__.py +0 -0
  28. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/amazon_bedrock/generator.py +0 -0
  29. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/generators/py.typed +0 -0
  30. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/rankers/amazon_bedrock/__init__.py +0 -0
  31. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/rankers/amazon_bedrock/ranker.py +0 -0
  32. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/src/haystack_integrations/components/rankers/py.typed +0 -0
  33. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/__init__.py +0 -0
  34. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/conftest.py +0 -0
  35. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_document_embedder.py +0 -0
  36. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_files/apple.jpg +0 -0
  37. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_files/haystack-logo.png +0 -0
  38. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_files/sample_pdf_1.pdf +0 -0
  39. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_generator.py +0 -0
  40. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_ranker.py +0 -0
  41. {amazon_bedrock_haystack-4.0.0 → amazon_bedrock_haystack-4.1.0}/tests/test_text_embedder.py +0 -0
@@ -1,11 +1,29 @@
1
1
  # Changelog
2
2
 
3
+ ## [integrations/amazon_bedrock-v4.0.0] - 2025-08-29
4
+
5
+ ### 🚀 Features
6
+
7
+ - [**breaking**] Update AmazonBedrockChatGenerator to use the new fields in `StreamingChunk` (#2216)
8
+ - [**breaking**] Use `ReasoningContent` to store reasoning content in `ChatMessage` instead of `ChatMessage.meta` (#2226)
9
+
10
+
11
+ ### 🧹 Chores
12
+
13
+ - Standardize readmes - part 2 (#2205)
14
+
3
15
  ## [integrations/amazon_bedrock-v3.11.0] - 2025-08-21
4
16
 
5
17
  ### 🚀 Features
6
18
 
7
19
  - Add `AmazonBedrockDocumentImageEmbedder` component (#2185)
8
20
 
21
+ ### 🧹 Chores
22
+
23
+ - Add framework name into UserAgent header for bedrock integration (#2168)
24
+ - Standardize readmes - part 1 (#2202)
25
+
26
+
9
27
  ## [integrations/amazon_bedrock-v3.10.0] - 2025-08-06
10
28
 
11
29
  ### 🚀 Features
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: amazon-bedrock-haystack
3
- Version: 4.0.0
3
+ Version: 4.1.0
4
4
  Summary: An integration of Amazon Bedrock as an AmazonBedrockGenerator component.
5
5
  Project-URL: Documentation, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock#readme
6
6
  Project-URL: Issues, https://github.com/deepset-ai/haystack-core-integrations/issues
@@ -27,6 +27,7 @@ from haystack_integrations.components.generators.amazon_bedrock.chat.utils impor
27
27
  _parse_completion_response,
28
28
  _parse_streaming_response,
29
29
  _parse_streaming_response_async,
30
+ _validate_guardrail_config,
30
31
  )
31
32
 
32
33
  logger = logging.getLogger(__name__)
@@ -154,10 +155,11 @@ class AmazonBedrockChatGenerator:
154
155
  aws_region_name: Optional[Secret] = Secret.from_env_var(["AWS_DEFAULT_REGION"], strict=False), # noqa: B008
155
156
  aws_profile_name: Optional[Secret] = Secret.from_env_var(["AWS_PROFILE"], strict=False), # noqa: B008
156
157
  generation_kwargs: Optional[Dict[str, Any]] = None,
157
- stop_words: Optional[List[str]] = None,
158
158
  streaming_callback: Optional[StreamingCallbackT] = None,
159
159
  boto3_config: Optional[Dict[str, Any]] = None,
160
160
  tools: Optional[Union[List[Tool], Toolset]] = None,
161
+ *,
162
+ guardrail_config: Optional[Dict[str, str]] = None,
161
163
  ) -> None:
162
164
  """
163
165
  Initializes the `AmazonBedrockChatGenerator` with the provided parameters. The parameters are passed to the
@@ -179,10 +181,6 @@ class AmazonBedrockChatGenerator:
179
181
  :param generation_kwargs: Keyword arguments sent to the model. These parameters are specific to a model.
180
182
  You can find the model specific arguments in the AWS Bedrock API
181
183
  [documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html).
182
- :param stop_words: A list of stop words that stop the model from generating more text
183
- when encountered. You can provide them using this parameter or using the model's `generation_kwargs`
184
- under a model's specific key for stop words.
185
- For example, you can provide stop words for Anthropic Claude in the `stop_sequences` key.
186
184
  :param streaming_callback: A callback function called when a new token is received from the stream.
187
185
  By default, the model is not set up for streaming. To enable streaming, set this parameter to a callback
188
186
  function that handles the streaming chunks. The callback function receives a
@@ -190,6 +188,19 @@ class AmazonBedrockChatGenerator:
190
188
  the streaming mode on.
191
189
  :param boto3_config: The configuration for the boto3 client.
192
190
  :param tools: A list of Tool objects or a Toolset that the model can use. Each tool should have a unique name.
191
+ :param guardrail_config: Optional configuration for a guardrail that has been created in Amazon Bedrock.
192
+ This must be provided as a dictionary matching either
193
+ [GuardrailConfiguration](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_GuardrailConfiguration.html)
194
+ or, in streaming mode (when `streaming_callback` is set),
195
+ [GuardrailStreamConfiguration](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_GuardrailStreamConfiguration.html).
196
+ If `trace` is set to `enabled`, the guardrail trace will be included under the `trace` key in the `meta`
197
+ attribute of the resulting `ChatMessage`.
198
+ Note: Enabling guardrails in streaming mode may introduce additional latency.
199
+ To manage this, you can adjust the `streamProcessingMode` parameter.
200
+ See the
201
+ [Guardrails Streaming documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-streaming.html)
202
+ for more information.
203
+
193
204
 
194
205
  :raises ValueError: If the model name is empty or None.
195
206
  :raises AmazonBedrockConfigurationError: If the AWS environment is not configured correctly or the model is
@@ -204,12 +215,15 @@ class AmazonBedrockChatGenerator:
204
215
  self.aws_session_token = aws_session_token
205
216
  self.aws_region_name = aws_region_name
206
217
  self.aws_profile_name = aws_profile_name
207
- self.stop_words = stop_words or []
208
218
  self.streaming_callback = streaming_callback
209
219
  self.boto3_config = boto3_config
220
+
210
221
  _check_duplicate_tool_names(list(tools or [])) # handles Toolset as well
211
222
  self.tools = tools
212
223
 
224
+ _validate_guardrail_config(guardrail_config=guardrail_config, streaming=streaming_callback is not None)
225
+ self.guardrail_config = guardrail_config
226
+
213
227
  def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
214
228
  return secret.resolve_value() if secret else None
215
229
 
@@ -237,7 +251,6 @@ class AmazonBedrockChatGenerator:
237
251
  raise AmazonBedrockConfigurationError(msg) from exception
238
252
 
239
253
  self.generation_kwargs = generation_kwargs or {}
240
- self.stop_words = stop_words or []
241
254
  self.async_session: Optional[aioboto3.Session] = None
242
255
 
243
256
  def _get_async_session(self) -> aioboto3.Session:
@@ -291,11 +304,11 @@ class AmazonBedrockChatGenerator:
291
304
  aws_region_name=self.aws_region_name.to_dict() if self.aws_region_name else None,
292
305
  aws_profile_name=self.aws_profile_name.to_dict() if self.aws_profile_name else None,
293
306
  model=self.model,
294
- stop_words=self.stop_words,
295
307
  generation_kwargs=self.generation_kwargs,
296
308
  streaming_callback=callback_name,
297
309
  boto3_config=self.boto3_config,
298
310
  tools=serialize_tools_or_toolset(self.tools),
311
+ guardrail_config=self.guardrail_config,
299
312
  )
300
313
 
301
314
  @classmethod
@@ -308,6 +321,12 @@ class AmazonBedrockChatGenerator:
308
321
  Instance of `AmazonBedrockChatGenerator`.
309
322
  """
310
323
  init_params = data.get("init_parameters", {})
324
+
325
+ stop_words = init_params.pop("stop_words", None)
326
+ msg = "stop_words parameter will be ignored. Use the `stopSequences` key in `generation_kwargs` instead."
327
+ if stop_words:
328
+ logger.warning(msg)
329
+
311
330
  serialized_callback_handler = init_params.get("streaming_callback")
312
331
  if serialized_callback_handler:
313
332
  data["init_parameters"]["streaming_callback"] = deserialize_callable(serialized_callback_handler)
@@ -387,6 +406,8 @@ class AmazonBedrockChatGenerator:
387
406
  params["toolConfig"] = tool_config
388
407
  if additional_fields:
389
408
  params["additionalModelRequestFields"] = additional_fields
409
+ if self.guardrail_config:
410
+ params["guardrailConfig"] = self.guardrail_config
390
411
 
391
412
  # overloads that exhaust finite Literals(bool) not treated as exhaustive
392
413
  # see https://github.com/python/mypy/issues/14764
@@ -273,6 +273,7 @@ def _parse_completion_response(response_body: Dict[str, Any], model: str) -> Lis
273
273
  :param model: The model ID used for generation, included in message metadata.
274
274
  :returns: List of ChatMessage objects containing the assistant's response(s) with appropriate metadata.
275
275
  """
276
+
276
277
  replies = []
277
278
  if "output" in response_body and "message" in response_body["output"]:
278
279
  message = response_body["output"]["message"]
@@ -280,7 +281,7 @@ def _parse_completion_response(response_body: Dict[str, Any], model: str) -> Lis
280
281
  content_blocks = message["content"]
281
282
 
282
283
  # Common meta information
283
- base_meta = {
284
+ meta = {
284
285
  "model": model,
285
286
  "index": 0,
286
287
  "finish_reason": FINISH_REASON_MAPPING.get(response_body.get("stopReason", "")),
@@ -291,6 +292,9 @@ def _parse_completion_response(response_body: Dict[str, Any], model: str) -> Lis
291
292
  "total_tokens": response_body.get("usage", {}).get("totalTokens", 0),
292
293
  },
293
294
  }
295
+ # guardrail trace
296
+ if "trace" in response_body:
297
+ meta["trace"] = response_body["trace"]
294
298
 
295
299
  # Process all content blocks and combine them into a single message
296
300
  text_content = []
@@ -329,7 +333,7 @@ def _parse_completion_response(response_body: Dict[str, Any], model: str) -> Lis
329
333
  ChatMessage.from_assistant(
330
334
  " ".join(text_content),
331
335
  tool_calls=tool_calls,
332
- meta=base_meta,
336
+ meta=meta,
333
337
  reasoning=ReasoningContent(
334
338
  reasoning_text=reasoning_text, extra={"reasoning_contents": reasoning_contents}
335
339
  )
@@ -355,6 +359,7 @@ def _convert_event_to_streaming_chunk(
355
359
  :param component_info: ComponentInfo object
356
360
  :returns: StreamingChunk object containing the content and metadata extracted from the event.
357
361
  """
362
+
358
363
  # Initialize an empty StreamingChunk to return if no relevant event is found
359
364
  # (e.g. for messageStart and contentBlockStop)
360
365
  base_meta = {"model": model, "received_at": datetime.now(timezone.utc).isoformat()}
@@ -426,19 +431,23 @@ def _convert_event_to_streaming_chunk(
426
431
  meta=base_meta,
427
432
  )
428
433
 
429
- elif "metadata" in event and "usage" in event["metadata"]:
430
- metadata = event["metadata"]
431
- streaming_chunk = StreamingChunk(
432
- content="",
433
- meta={
434
- **base_meta,
435
- "usage": {
436
- "prompt_tokens": metadata["usage"].get("inputTokens", 0),
437
- "completion_tokens": metadata["usage"].get("outputTokens", 0),
438
- "total_tokens": metadata["usage"].get("totalTokens", 0),
439
- },
440
- },
441
- )
434
+ elif "metadata" in event:
435
+ event_meta = event["metadata"]
436
+ chunk_meta: Dict[str, Any] = {**base_meta}
437
+
438
+ if "usage" in event_meta:
439
+ usage = event_meta["usage"]
440
+ chunk_meta["usage"] = {
441
+ "prompt_tokens": usage.get("inputTokens", 0),
442
+ "completion_tokens": usage.get("outputTokens", 0),
443
+ "total_tokens": usage.get("totalTokens", 0),
444
+ }
445
+ if "trace" in event_meta:
446
+ chunk_meta["trace"] = event_meta["trace"]
447
+
448
+ # Only create chunk if we added usage or trace data
449
+ if len(chunk_meta) > len(base_meta):
450
+ streaming_chunk = StreamingChunk(content="", meta=chunk_meta)
442
451
 
443
452
  streaming_chunk.component_info = component_info
444
453
 
@@ -547,8 +556,15 @@ def _parse_streaming_response(
547
556
  content_block_idxs.add(content_block_idx)
548
557
  streaming_callback(streaming_chunk)
549
558
  chunks.append(streaming_chunk)
559
+
550
560
  reply = _convert_streaming_chunks_to_chat_message(chunks=chunks)
561
+
562
+ # both the reasoning content and the trace are ignored in _convert_streaming_chunks_to_chat_message
563
+ # so we need to process them separately
551
564
  reasoning_content = _process_reasoning_contents(chunks=chunks)
565
+ if chunks[-1].meta and "trace" in chunks[-1].meta:
566
+ reply.meta["trace"] = chunks[-1].meta["trace"]
567
+
552
568
  reply = ChatMessage.from_assistant(
553
569
  text=reply.text,
554
570
  meta=reply.meta,
@@ -556,6 +572,7 @@ def _parse_streaming_response(
556
572
  tool_calls=reply.tool_calls,
557
573
  reasoning=reasoning_content,
558
574
  )
575
+
559
576
  return [reply]
560
577
 
561
578
 
@@ -594,3 +611,24 @@ async def _parse_streaming_response_async(
594
611
  reasoning=reasoning_content,
595
612
  )
596
613
  return [reply]
614
+
615
+
616
+ def _validate_guardrail_config(guardrail_config: Optional[Dict[str, str]] = None, streaming: bool = False) -> None:
617
+ """
618
+ Validate the guardrail configuration.
619
+
620
+ :param guardrail_config: The guardrail configuration.
621
+ :param streaming: Whether the streaming is enabled.
622
+
623
+ :raises ValueError: If the guardrail configuration is invalid.
624
+ """
625
+ if guardrail_config is None:
626
+ return
627
+
628
+ required_fields = {"guardrailIdentifier", "guardrailVersion"}
629
+ if not required_fields.issubset(guardrail_config):
630
+ msg = "`guardrailIdentifier` and `guardrailVersion` fields are required in guardrail configuration."
631
+ raise ValueError(msg)
632
+ if not streaming and "streamProcessingMode" in guardrail_config:
633
+ msg = "`streamProcessingMode` field is only supported for streaming (when `streaming_callback` is not None)."
634
+ raise ValueError(msg)
@@ -1,3 +1,4 @@
1
+ import os
1
2
  from typing import Any, Dict, Optional
2
3
 
3
4
  import pytest
@@ -122,6 +123,7 @@ class TestAmazonBedrockChatGenerator:
122
123
  generation_kwargs={"temperature": 0.7},
123
124
  streaming_callback=print_streaming_chunk,
124
125
  boto3_config=boto3_config,
126
+ guardrail_config={"guardrailIdentifier": "test", "guardrailVersion": "test"},
125
127
  )
126
128
  expected_dict = {
127
129
  "type": CLASS_TYPE,
@@ -133,10 +135,10 @@ class TestAmazonBedrockChatGenerator:
133
135
  "aws_profile_name": {"type": "env_var", "env_vars": ["AWS_PROFILE"], "strict": False},
134
136
  "model": "cohere.command-r-plus-v1:0",
135
137
  "generation_kwargs": {"temperature": 0.7},
136
- "stop_words": [],
137
138
  "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
138
139
  "boto3_config": boto3_config,
139
140
  "tools": None,
141
+ "guardrail_config": {"guardrailIdentifier": "test", "guardrailVersion": "test"},
140
142
  },
141
143
  }
142
144
 
@@ -165,6 +167,8 @@ class TestAmazonBedrockChatGenerator:
165
167
  "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
166
168
  "boto3_config": boto3_config,
167
169
  "tools": None,
170
+ "stop_words": ["stop"], # this parameter will be ignored
171
+ "guardrail_config": None,
168
172
  },
169
173
  }
170
174
  )
@@ -224,8 +228,7 @@ class TestAmazonBedrockChatGenerator:
224
228
 
225
229
  generator = AmazonBedrockChatGenerator(
226
230
  model="anthropic.claude-3-5-sonnet-20240620-v1:0",
227
- generation_kwargs={"temperature": 0.7},
228
- stop_words=["eviscerate"],
231
+ generation_kwargs={"temperature": 0.7, "stopSequences": ["eviscerate"]},
229
232
  streaming_callback=print_streaming_chunk,
230
233
  tools=[tool],
231
234
  )
@@ -252,8 +255,7 @@ class TestAmazonBedrockChatGenerator:
252
255
  "aws_region_name": {"type": "env_var", "env_vars": ["AWS_DEFAULT_REGION"], "strict": False},
253
256
  "aws_profile_name": {"type": "env_var", "env_vars": ["AWS_PROFILE"], "strict": False},
254
257
  "model": "anthropic.claude-3-5-sonnet-20240620-v1:0",
255
- "generation_kwargs": {"temperature": 0.7},
256
- "stop_words": ["eviscerate"],
258
+ "generation_kwargs": {"temperature": 0.7, "stopSequences": ["eviscerate"]},
257
259
  "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
258
260
  "boto3_config": None,
259
261
  "tools": [
@@ -270,6 +272,7 @@ class TestAmazonBedrockChatGenerator:
270
272
  },
271
273
  }
272
274
  ],
275
+ "guardrail_config": None,
273
276
  },
274
277
  }
275
278
  },
@@ -283,7 +286,7 @@ class TestAmazonBedrockChatGenerator:
283
286
 
284
287
  def test_prepare_request_params_tool_config(self, top_song_tool_config, mock_boto3_session, set_env_variables):
285
288
  generator = AmazonBedrockChatGenerator(model="anthropic.claude-3-5-sonnet-20240620-v1:0")
286
- request_params, callback = generator._prepare_request_params(
289
+ request_params, _ = generator._prepare_request_params(
287
290
  messages=[ChatMessage.from_user("What's the capital of France?")],
288
291
  generation_kwargs={"toolConfig": top_song_tool_config},
289
292
  tools=None,
@@ -291,6 +294,17 @@ class TestAmazonBedrockChatGenerator:
291
294
  assert request_params["messages"] == [{"content": [{"text": "What's the capital of France?"}], "role": "user"}]
292
295
  assert request_params["toolConfig"] == top_song_tool_config
293
296
 
297
+ def test_prepare_request_params_guardrail_config(self, mock_boto3_session, set_env_variables):
298
+ generator = AmazonBedrockChatGenerator(
299
+ model="anthropic.claude-3-5-sonnet-20240620-v1:0",
300
+ guardrail_config={"guardrailIdentifier": "test", "guardrailVersion": "test"},
301
+ )
302
+ request_params, _ = generator._prepare_request_params(
303
+ messages=[ChatMessage.from_user("What's the capital of France?")],
304
+ )
305
+ assert request_params["messages"] == [{"content": [{"text": "What's the capital of France?"}], "role": "user"}]
306
+ assert request_params["guardrailConfig"] == {"guardrailIdentifier": "test", "guardrailVersion": "test"}
307
+
294
308
 
295
309
  # In the CI, those tests are skipped if AWS Authentication fails
296
310
  @pytest.mark.integration
@@ -653,6 +667,32 @@ class TestAmazonBedrockChatGeneratorInference:
653
667
  assert len(final_message.text) > 0
654
668
  assert "hello" in final_message.text.lower()
655
669
 
670
+ @pytest.mark.skipif(
671
+ not os.getenv("AWS_BEDROCK_GUARDRAIL_ID") or not os.getenv("AWS_BEDROCK_GUARDRAIL_VERSION"),
672
+ reason=(
673
+ "Export AWS_BEDROCK_GUARDRAIL_ID and AWS_BEDROCK_GUARDRAIL_VERSION environment variables corresponding "
674
+ "to a Bedrock Guardrail to run this test."
675
+ ),
676
+ )
677
+ @pytest.mark.parametrize("streaming_callback", [None, print_streaming_chunk])
678
+ def test_live_run_with_guardrail(self, streaming_callback):
679
+ messages = [ChatMessage.from_user("Should I invest in Tesla or Apple?")]
680
+ component = AmazonBedrockChatGenerator(
681
+ model="anthropic.claude-3-5-sonnet-20240620-v1:0",
682
+ guardrail_config={
683
+ "guardrailIdentifier": os.getenv("AWS_BEDROCK_GUARDRAIL_ID"),
684
+ "guardrailVersion": os.getenv("AWS_BEDROCK_GUARDRAIL_VERSION"),
685
+ "trace": "enabled",
686
+ },
687
+ streaming_callback=streaming_callback,
688
+ )
689
+ results = component.run(messages=messages)
690
+
691
+ assert results["replies"][0].meta["finish_reason"] == "content_filter"
692
+ assert results["replies"][0].text == "Sorry, the model cannot answer this question."
693
+ assert "trace" in results["replies"][0].meta
694
+ assert "guardrail" in results["replies"][0].meta["trace"]
695
+
656
696
  @pytest.mark.parametrize("model_name", [MODELS_TO_TEST_WITH_TOOLS[0]]) # just one model is enough
657
697
  def test_pipeline_with_amazon_bedrock_chat_generator(self, model_name, tools):
658
698
  """
@@ -21,6 +21,7 @@ from haystack_integrations.components.generators.amazon_bedrock.chat.utils impor
21
21
  _format_tools,
22
22
  _parse_completion_response,
23
23
  _parse_streaming_response,
24
+ _validate_guardrail_config,
24
25
  )
25
26
 
26
27
 
@@ -603,6 +604,71 @@ class TestAmazonBedrockChatGeneratorUtils:
603
604
  )
604
605
  assert replies[0] == expected_message
605
606
 
607
+ def test_extract_replies_with_guardrail(self, mock_boto3_session):
608
+ model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
609
+
610
+ trace = {
611
+ "guardrail": {
612
+ "inputAssessment": {
613
+ "test_guardrail_id": {
614
+ "topicPolicy": {
615
+ "topics": [
616
+ {"name": "Investments topic", "type": "DENY", "action": "BLOCKED", "detected": True}
617
+ ]
618
+ },
619
+ "invocationMetrics": {
620
+ "guardrailProcessingLatency": 273,
621
+ "usage": {
622
+ "topicPolicyUnits": 1,
623
+ "contentPolicyUnits": 0,
624
+ "wordPolicyUnits": 0,
625
+ "sensitiveInformationPolicyUnits": 0,
626
+ "sensitiveInformationPolicyFreeUnits": 0,
627
+ "contextualGroundingPolicyUnits": 0,
628
+ "contentPolicyImageUnits": 0,
629
+ },
630
+ "guardrailCoverage": {"textCharacters": {"guarded": 48, "total": 48}},
631
+ },
632
+ }
633
+ },
634
+ "actionReason": "Guardrail blocked.",
635
+ }
636
+ }
637
+
638
+ response_body = {
639
+ "ResponseMetadata": {
640
+ "RequestId": "7f2b43ef-fb52-40e4-ab14-8cc1edaf5013",
641
+ "HTTPStatusCode": 200,
642
+ "HTTPHeaders": {
643
+ "date": "Thu, 18 Sep 2025 09:14:48 GMT",
644
+ "content-type": "application/json",
645
+ "content-length": "835",
646
+ "connection": "keep-alive",
647
+ "x-amzn-requestid": "7f2b43ef-fb52-40e4-ab14-8cc1edaf5013",
648
+ },
649
+ "RetryAttempts": 0,
650
+ },
651
+ "output": {
652
+ "message": {"role": "assistant", "content": [{"text": "Sorry, the model cannot answer this question."}]}
653
+ },
654
+ "stopReason": "guardrail_intervened",
655
+ "usage": {"inputTokens": 0, "outputTokens": 0, "totalTokens": 0},
656
+ "metrics": {"latencyMs": 316},
657
+ "trace": trace,
658
+ }
659
+
660
+ replies = _parse_completion_response(response_body, model)
661
+ assert len(replies) == 1
662
+ assert replies[0].text == "Sorry, the model cannot answer this question."
663
+ assert replies[0].role == ChatRole.ASSISTANT
664
+ assert replies[0].meta == {
665
+ "model": model,
666
+ "finish_reason": "content_filter",
667
+ "index": 0,
668
+ "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
669
+ "trace": trace,
670
+ }
671
+
606
672
  def test_process_streaming_response_one_tool_call(self, mock_boto3_session):
607
673
  """
608
674
  Test that process_streaming_response correctly handles streaming events and accumulates responses
@@ -1034,6 +1100,82 @@ class TestAmazonBedrockChatGeneratorUtils:
1034
1100
  ]
1035
1101
  assert replies == expected_messages
1036
1102
 
1103
+ def test_parse_streaming_response_with_guardrail(self, mock_boto3_session):
1104
+ model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
1105
+ type_ = (
1106
+ "haystack_integrations.components.generators.amazon_bedrock.chat.chat_generator.AmazonBedrockChatGenerator"
1107
+ )
1108
+ streaming_chunks = []
1109
+
1110
+ trace = {
1111
+ "guardrail": {
1112
+ "inputAssessment": {
1113
+ "vodp82dpe5xv": {
1114
+ "test_guardrail_id": {
1115
+ "topicPolicy": {
1116
+ "topics": [
1117
+ {"name": "Investments topic", "type": "DENY", "action": "BLOCKED", "detected": True}
1118
+ ]
1119
+ },
1120
+ "invocationMetrics": {
1121
+ "guardrailProcessingLatency": 299,
1122
+ "usage": {
1123
+ "topicPolicyUnits": 1,
1124
+ "contentPolicyUnits": 0,
1125
+ "wordPolicyUnits": 0,
1126
+ "sensitiveInformationPolicyUnits": 0,
1127
+ "sensitiveInformationPolicyFreeUnits": 0,
1128
+ "contextualGroundingPolicyUnits": 0,
1129
+ "contentPolicyImageUnits": 0,
1130
+ },
1131
+ "guardrailCoverage": {"textCharacters": {"guarded": 48, "total": 48}},
1132
+ },
1133
+ }
1134
+ },
1135
+ "actionReason": "Guardrail blocked.",
1136
+ }
1137
+ }
1138
+ }
1139
+
1140
+ events = [
1141
+ {"messageStart": {"role": "assistant"}},
1142
+ {
1143
+ "contentBlockDelta": {
1144
+ "delta": {"text": "Sorry, the model cannot answer this question."},
1145
+ "contentBlockIndex": 0,
1146
+ }
1147
+ },
1148
+ {"contentBlockStop": {"contentBlockIndex": 0}},
1149
+ {"messageStop": {"stopReason": "guardrail_intervened"}},
1150
+ {
1151
+ "metadata": {
1152
+ "usage": {"inputTokens": 0, "outputTokens": 0, "totalTokens": 0},
1153
+ "metrics": {"latencyMs": 334},
1154
+ "trace": trace,
1155
+ }
1156
+ },
1157
+ ]
1158
+
1159
+ def test_callback(chunk: StreamingChunk):
1160
+ streaming_chunks.append(chunk)
1161
+
1162
+ replies = _parse_streaming_response(events, test_callback, model, ComponentInfo(type=type_))
1163
+
1164
+ expected_messages = [
1165
+ ChatMessage.from_assistant(
1166
+ text="Sorry, the model cannot answer this question.",
1167
+ meta={
1168
+ "completion_start_time": ANY,
1169
+ "model": model,
1170
+ "index": 0,
1171
+ "finish_reason": "content_filter",
1172
+ "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
1173
+ "trace": trace,
1174
+ },
1175
+ )
1176
+ ]
1177
+ assert replies == expected_messages
1178
+
1037
1179
  def test_convert_streaming_chunks_to_chat_message_tool_call_with_empty_arguments(self):
1038
1180
  chunks = [
1039
1181
  StreamingChunk(
@@ -1229,3 +1371,35 @@ class TestAmazonBedrockChatGeneratorUtils:
1229
1371
  assert message._meta["index"] == 0
1230
1372
  assert message._meta["finish_reason"] == "tool_calls"
1231
1373
  assert message._meta["usage"] == {"completion_tokens": 84, "prompt_tokens": 349, "total_tokens": 433}
1374
+
1375
+ def test_validate_guardrail_config_with_valid_configs(self):
1376
+ _validate_guardrail_config(guardrail_config=None, streaming=False)
1377
+ _validate_guardrail_config(
1378
+ guardrail_config={"guardrailIdentifier": "test", "guardrailVersion": "test"}, streaming=False
1379
+ )
1380
+ _validate_guardrail_config(
1381
+ guardrail_config={"guardrailIdentifier": "test", "guardrailVersion": "test"}, streaming=True
1382
+ )
1383
+ _validate_guardrail_config(
1384
+ guardrail_config={
1385
+ "guardrailIdentifier": "test",
1386
+ "guardrailVersion": "test",
1387
+ "streamProcessingMode": "enabled",
1388
+ },
1389
+ streaming=True,
1390
+ )
1391
+
1392
+ def test_validate_guardrail_config_with_invalid_configs(self):
1393
+ with pytest.raises(ValueError, match="`guardrailIdentifier` and `guardrailVersion` fields are required"):
1394
+ _validate_guardrail_config(guardrail_config={"guardrailIdentifier": "test"}, streaming=False)
1395
+ with pytest.raises(ValueError, match="`guardrailIdentifier` and `guardrailVersion` fields are required"):
1396
+ _validate_guardrail_config(guardrail_config={"guardrailVersion": "test"}, streaming=False)
1397
+ with pytest.raises(ValueError, match="`streamProcessingMode` field is only supported for streaming"):
1398
+ _validate_guardrail_config(
1399
+ guardrail_config={
1400
+ "guardrailIdentifier": "test",
1401
+ "guardrailVersion": "test",
1402
+ "streamProcessingMode": "test",
1403
+ },
1404
+ streaming=False,
1405
+ )
@@ -195,7 +195,7 @@ class TestAmazonBedrockDocumentImageEmbedder:
195
195
  # Process images directly
196
196
  for doc in docs:
197
197
  image_byte_stream = ByteStream.from_file_path(filepath=doc.meta["file_path"], mime_type="image/jpeg")
198
- mime_type, base64_image = _encode_image_to_base64(image_byte_stream)
198
+ _, base64_image = _encode_image_to_base64(image_byte_stream)
199
199
  base64_images.append(base64_image)
200
200
 
201
201
  result = embedder._embed_titan(images=base64_images)