amazon-bedrock-haystack 3.7.0__tar.gz → 3.9.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/CHANGELOG.md +30 -0
  2. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/PKG-INFO +2 -2
  3. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/pyproject.toml +2 -9
  4. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py +36 -4
  5. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/utils.py +54 -5
  6. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/conftest.py +6 -0
  7. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/test_chat_generator.py +23 -1
  8. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/test_chat_generator_utils.py +65 -3
  9. amazon_bedrock_haystack-3.9.0/tests/test_files/apple.jpg +0 -0
  10. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/.gitignore +0 -0
  11. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/LICENSE.txt +0 -0
  12. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/README.md +0 -0
  13. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/examples/bedrock_ranker_example.py +0 -0
  14. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/examples/chatgenerator_example.py +0 -0
  15. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/examples/embedders_generator_with_rag_example.py +0 -0
  16. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/pydoc/config.yml +0 -0
  17. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/common/amazon_bedrock/__init__.py +0 -0
  18. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/common/amazon_bedrock/errors.py +0 -0
  19. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/common/amazon_bedrock/utils.py +0 -0
  20. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/common/py.typed +0 -0
  21. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/embedders/amazon_bedrock/__init__.py +0 -0
  22. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py +0 -0
  23. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py +0 -0
  24. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/embedders/py.typed +0 -0
  25. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/amazon_bedrock/__init__.py +0 -0
  26. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py +0 -0
  27. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/__init__.py +0 -0
  28. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/amazon_bedrock/generator.py +0 -0
  29. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/generators/py.typed +0 -0
  30. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/rankers/amazon_bedrock/__init__.py +0 -0
  31. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/rankers/amazon_bedrock/ranker.py +0 -0
  32. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/src/haystack_integrations/components/rankers/py.typed +0 -0
  33. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/__init__.py +0 -0
  34. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/test_document_embedder.py +0 -0
  35. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/test_generator.py +0 -0
  36. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/test_ranker.py +0 -0
  37. {amazon_bedrock_haystack-3.7.0 → amazon_bedrock_haystack-3.9.0}/tests/test_text_embedder.py +0 -0
@@ -1,5 +1,35 @@
1
1
  # Changelog
2
2
 
3
+ ## [integrations/amazon_bedrock-v3.8.0] - 2025-07-04
4
+
5
+ ### 🚀 Features
6
+
7
+ - Pass component_info to StreamingChunk in AmazonBedrockChatGenerator (#2042)
8
+
9
+ ### 🧹 Chores
10
+
11
+ - Remove black (#1985)
12
+ - Improve typing for select_streaming_callback (#2008)
13
+
14
+
15
+ ## [integrations/amazon_bedrock-v3.7.0] - 2025-06-11
16
+
17
+ ### 🐛 Bug Fixes
18
+
19
+ - Fix Bedrock types + add py.typed (#1912)
20
+ - Bedrock - do not assume connection issues in case of ClientError (#1921)
21
+
22
+ ### ⚙️ CI
23
+
24
+ - Bedrock - improve workflow; skip tests from CI (#1773)
25
+
26
+ ### 🧹 Chores
27
+
28
+ - Update bedrock_ranker_example.py (#1740)
29
+ - Align core-integrations Hatch scripts (#1898)
30
+ - Update md files for new hatch scripts (#1911)
31
+
32
+
3
33
  ## [integrations/amazon_bedrock-v3.6.2] - 2025-05-13
4
34
 
5
35
  ### 🧹 Chores
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: amazon-bedrock-haystack
3
- Version: 3.7.0
3
+ Version: 3.9.0
4
4
  Summary: An integration of Amazon Bedrock as an AmazonBedrockGenerator component.
5
5
  Project-URL: Documentation, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock#readme
6
6
  Project-URL: Issues, https://github.com/deepset-ai/haystack-core-integrations/issues
@@ -21,7 +21,7 @@ Classifier: Programming Language :: Python :: Implementation :: PyPy
21
21
  Requires-Python: >=3.9
22
22
  Requires-Dist: aioboto3>=14.0.0
23
23
  Requires-Dist: boto3>=1.28.57
24
- Requires-Dist: haystack-ai>=2.13.1
24
+ Requires-Dist: haystack-ai>=2.16.0
25
25
  Description-Content-Type: text/markdown
26
26
 
27
27
  # amazon-bedrock-haystack
@@ -23,7 +23,7 @@ classifiers = [
23
23
  "Programming Language :: Python :: Implementation :: CPython",
24
24
  "Programming Language :: Python :: Implementation :: PyPy",
25
25
  ]
26
- dependencies = ["haystack-ai>=2.13.1", "boto3>=1.28.57", "aioboto3>=14.0.0"]
26
+ dependencies = ["haystack-ai>=2.16.0", "boto3>=1.28.57", "aioboto3>=14.0.0"]
27
27
 
28
28
  [project.urls]
29
29
  Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock#readme"
@@ -58,6 +58,7 @@ dependencies = [
58
58
  "pytest-rerunfailures",
59
59
  "mypy",
60
60
  "pip",
61
+ "pillow", # image resizing
61
62
  ]
62
63
 
63
64
  [tool.hatch.envs.test.scripts]
@@ -86,11 +87,6 @@ module = [
86
87
  ]
87
88
  ignore_missing_imports = true
88
89
 
89
- [tool.black]
90
- target-version = ["py38"]
91
- line-length = 120
92
- skip-string-normalization = true
93
-
94
90
  [tool.ruff]
95
91
  target-version = "py38"
96
92
  line-length = 120
@@ -168,9 +164,6 @@ exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]
168
164
  [tool.pytest.ini_options]
169
165
  addopts = "--strict-markers"
170
166
  markers = [
171
- "unit: unit tests",
172
167
  "integration: integration tests",
173
- "embedders: embedders tests",
174
- "generators: generators tests",
175
168
  ]
176
169
  log_cli = true
@@ -5,7 +5,7 @@ from botocore.config import Config
5
5
  from botocore.eventstream import EventStream
6
6
  from botocore.exceptions import ClientError
7
7
  from haystack import component, default_from_dict, default_to_dict, logging
8
- from haystack.dataclasses import ChatMessage, StreamingCallbackT, select_streaming_callback
8
+ from haystack.dataclasses import ChatMessage, ComponentInfo, StreamingCallbackT, select_streaming_callback
9
9
  from haystack.tools import (
10
10
  Tool,
11
11
  Toolset,
@@ -56,6 +56,22 @@ class AmazonBedrockChatGenerator:
56
56
  client.run(messages, generation_kwargs={"max_tokens": 512})
57
57
  ```
58
58
 
59
+ ### Multimodal example
60
+ ```python
61
+ from haystack.dataclasses import ChatMessage, ImageContent
62
+ from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockChatGenerator
63
+
64
+ generator = AmazonBedrockChatGenerator(model="anthropic.claude-3-5-sonnet-20240620-v1:0")
65
+
66
+ image_content = ImageContent.from_file_path(file_path="apple.jpg")
67
+
68
+ message = ChatMessage.from_user(content_parts=["Describe the image using 10 words at most.", image_content])
69
+
70
+ response = generator.run(messages=[message])["replies"][0].text
71
+
72
+ print(response)
73
+ > The image shows a red apple.
74
+ ```
59
75
  ### Tool usage example
60
76
  # AmazonBedrockChatGenerator supports Haystack's unified tool architecture, allowing tools to be used
61
77
  # across different chat generators. The same tool definitions and usage patterns work consistently
@@ -371,7 +387,9 @@ class AmazonBedrockChatGenerator:
371
387
  if additional_fields:
372
388
  params["additionalModelRequestFields"] = additional_fields
373
389
 
374
- callback = select_streaming_callback(
390
+ # overloads that exhaust finite Literals(bool) not treated as exhaustive
391
+ # see https://github.com/python/mypy/issues/14764
392
+ callback = select_streaming_callback( # type: ignore[call-overload]
375
393
  init_callback=self.streaming_callback,
376
394
  runtime_callback=streaming_callback,
377
395
  requires_async=requires_async,
@@ -406,6 +424,8 @@ class AmazonBedrockChatGenerator:
406
424
  :raises AmazonBedrockInferenceError:
407
425
  If the Bedrock inference API call fails.
408
426
  """
427
+ component_info = ComponentInfo.from_component(self)
428
+
409
429
  params, callback = self._prepare_request_params(
410
430
  messages=messages,
411
431
  streaming_callback=streaming_callback,
@@ -422,7 +442,12 @@ class AmazonBedrockChatGenerator:
422
442
  msg = "No stream found in the response."
423
443
  raise AmazonBedrockInferenceError(msg)
424
444
  # the type of streaming callback is checked in _prepare_request_params, but mypy doesn't know
425
- replies = _parse_streaming_response(response_stream, callback, self.model) # type: ignore[arg-type]
445
+ replies = _parse_streaming_response(
446
+ response_stream=response_stream,
447
+ streaming_callback=callback, # type: ignore[arg-type]
448
+ model=self.model,
449
+ component_info=component_info,
450
+ )
426
451
  else:
427
452
  response = self.client.converse(**params)
428
453
  replies = _parse_completion_response(response, self.model)
@@ -459,6 +484,8 @@ class AmazonBedrockChatGenerator:
459
484
  :raises AmazonBedrockInferenceError:
460
485
  If the Bedrock inference API call fails.
461
486
  """
487
+ component_info = ComponentInfo.from_component(self)
488
+
462
489
  params, callback = self._prepare_request_params(
463
490
  messages=messages,
464
491
  streaming_callback=streaming_callback,
@@ -479,7 +506,12 @@ class AmazonBedrockChatGenerator:
479
506
  msg = "No stream found in the response."
480
507
  raise AmazonBedrockInferenceError(msg)
481
508
  # the type of streaming callback is checked in _prepare_request_params, but mypy doesn't know
482
- replies = await _parse_streaming_response_async(response_stream, callback, self.model) # type: ignore[arg-type]
509
+ replies = await _parse_streaming_response_async(
510
+ response_stream=response_stream,
511
+ streaming_callback=callback, # type: ignore[arg-type]
512
+ model=self.model,
513
+ component_info=component_info,
514
+ )
483
515
  else:
484
516
  response = await async_client.converse(**params)
485
517
  replies = _parse_completion_response(response, self.model)
@@ -1,3 +1,4 @@
1
+ import base64
1
2
  import json
2
3
  from datetime import datetime, timezone
3
4
  from typing import Any, Dict, List, Optional, Tuple
@@ -8,8 +9,11 @@ from haystack.dataclasses import (
8
9
  AsyncStreamingCallbackT,
9
10
  ChatMessage,
10
11
  ChatRole,
12
+ ComponentInfo,
13
+ ImageContent,
11
14
  StreamingChunk,
12
15
  SyncStreamingCallbackT,
16
+ TextContent,
13
17
  ToolCall,
14
18
  )
15
19
  from haystack.tools import Tool
@@ -17,6 +21,10 @@ from haystack.tools import Tool
17
21
  logger = logging.getLogger(__name__)
18
22
 
19
23
 
24
+ # see https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ImageBlock.html for supported formats
25
+ IMAGE_SUPPORTED_FORMATS = ["png", "jpeg", "gif", "webp"]
26
+
27
+
20
28
  # Haystack to Bedrock util methods
21
29
  def _format_tools(tools: Optional[List[Tool]] = None) -> Optional[Dict[str, Any]]:
22
30
  """
@@ -149,6 +157,39 @@ def _repair_tool_result_messages(bedrock_formatted_messages: List[Dict[str, Any]
149
157
  return [msg for _, msg in repaired_bedrock_formatted_messages]
150
158
 
151
159
 
160
+ def _format_text_image_message(message: ChatMessage) -> Dict[str, Any]:
161
+ """
162
+ Format a Haystack ChatMessage containing text and optional image content into Bedrock format.
163
+
164
+ :param message: Haystack ChatMessage.
165
+ :returns: Dictionary representing the message in Bedrock's expected format.
166
+ :raises ValueError: If image content is found in an assistant message or an unsupported image format is used.
167
+ """
168
+ content_parts = message._content
169
+
170
+ bedrock_content_blocks: List[Dict[str, Any]] = []
171
+ for part in content_parts:
172
+ if isinstance(part, TextContent):
173
+ bedrock_content_blocks.append({"text": part.text})
174
+
175
+ elif isinstance(part, ImageContent):
176
+ if message.is_from(ChatRole.ASSISTANT):
177
+ err_msg = "Image content is not supported for assistant messages"
178
+ raise ValueError(err_msg)
179
+
180
+ image_format = part.mime_type.split("/")[-1] if part.mime_type else None
181
+ if image_format not in IMAGE_SUPPORTED_FORMATS:
182
+ err_msg = (
183
+ f"Unsupported image format: {image_format}. "
184
+ f"Bedrock supports the following image formats: {IMAGE_SUPPORTED_FORMATS}"
185
+ )
186
+ raise ValueError(err_msg)
187
+ source = {"bytes": base64.b64decode(part.base64_image)}
188
+ bedrock_content_blocks.append({"image": {"format": image_format, "source": source}})
189
+
190
+ return {"role": message.role.value, "content": bedrock_content_blocks}
191
+
192
+
152
193
  def _format_messages(messages: List[ChatMessage]) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
153
194
  """
154
195
  Format a list of Haystack ChatMessages to the format expected by Bedrock API.
@@ -174,8 +215,7 @@ def _format_messages(messages: List[ChatMessage]) -> Tuple[List[Dict[str, Any]],
174
215
  elif msg.tool_call_results:
175
216
  bedrock_formatted_messages.append(_format_tool_result_message(msg))
176
217
  else:
177
- # regular user or assistant messages with only text content
178
- bedrock_formatted_messages.append({"role": msg.role.value, "content": [{"text": msg.text}]})
218
+ bedrock_formatted_messages.append(_format_text_image_message(msg))
179
219
 
180
220
  repaired_bedrock_formatted_messages = _repair_tool_result_messages(bedrock_formatted_messages)
181
221
  return system_prompts, repaired_bedrock_formatted_messages
@@ -235,7 +275,9 @@ def _parse_completion_response(response_body: Dict[str, Any], model: str) -> Lis
235
275
 
236
276
 
237
277
  # Bedrock streaming to Haystack util methods
238
- def _convert_event_to_streaming_chunk(event: Dict[str, Any], model: str) -> StreamingChunk:
278
+ def _convert_event_to_streaming_chunk(
279
+ event: Dict[str, Any], model: str, component_info: ComponentInfo
280
+ ) -> StreamingChunk:
239
281
  """
240
282
  Convert a Bedrock streaming event to a Haystack StreamingChunk.
241
283
 
@@ -244,6 +286,7 @@ def _convert_event_to_streaming_chunk(event: Dict[str, Any], model: str) -> Stre
244
286
 
245
287
  :param event: Dictionary containing a Bedrock streaming event.
246
288
  :param model: The model ID used for generation, included in chunk metadata.
289
+ :param component_info: ComponentInfo object
247
290
  :returns: StreamingChunk object containing the content and metadata extracted from the event.
248
291
  """
249
292
  # Initialize an empty StreamingChunk to return if no relevant event is found
@@ -358,6 +401,8 @@ def _convert_event_to_streaming_chunk(event: Dict[str, Any], model: str) -> Stre
358
401
  },
359
402
  )
360
403
 
404
+ streaming_chunk.component_info = component_info
405
+
361
406
  return streaming_chunk
362
407
 
363
408
 
@@ -438,6 +483,7 @@ def _parse_streaming_response(
438
483
  response_stream: EventStream,
439
484
  streaming_callback: SyncStreamingCallbackT,
440
485
  model: str,
486
+ component_info: ComponentInfo,
441
487
  ) -> List[ChatMessage]:
442
488
  """
443
489
  Parse a streaming response from Bedrock.
@@ -445,11 +491,12 @@ def _parse_streaming_response(
445
491
  :param response_stream: EventStream from Bedrock API
446
492
  :param streaming_callback: Callback for streaming chunks
447
493
  :param model: The model ID used for generation
494
+ :param component_info: ComponentInfo object
448
495
  :return: List of ChatMessage objects
449
496
  """
450
497
  chunks: List[StreamingChunk] = []
451
498
  for event in response_stream:
452
- streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model)
499
+ streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model, component_info=component_info)
453
500
  streaming_callback(streaming_chunk)
454
501
  chunks.append(streaming_chunk)
455
502
  replies = [_convert_streaming_chunks_to_chat_message(chunks=chunks)]
@@ -460,6 +507,7 @@ async def _parse_streaming_response_async(
460
507
  response_stream: EventStream,
461
508
  streaming_callback: AsyncStreamingCallbackT,
462
509
  model: str,
510
+ component_info: ComponentInfo,
463
511
  ) -> List[ChatMessage]:
464
512
  """
465
513
  Parse a streaming response from Bedrock.
@@ -467,11 +515,12 @@ async def _parse_streaming_response_async(
467
515
  :param response_stream: EventStream from Bedrock API
468
516
  :param streaming_callback: Callback for streaming chunks
469
517
  :param model: The model ID used for generation
518
+ :param component_info: ComponentInfo object
470
519
  :return: List of ChatMessage objects
471
520
  """
472
521
  chunks: List[StreamingChunk] = []
473
522
  async for event in response_stream:
474
- streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model)
523
+ streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model, component_info=component_info)
475
524
  await streaming_callback(streaming_chunk)
476
525
  chunks.append(streaming_chunk)
477
526
  replies = [_convert_streaming_chunks_to_chat_message(chunks=chunks)]
@@ -1,3 +1,4 @@
1
+ from pathlib import Path
1
2
  from unittest.mock import patch
2
3
 
3
4
  import pytest
@@ -24,3 +25,8 @@ def mock_boto3_session():
24
25
  def mock_aioboto3_session():
25
26
  with patch("aioboto3.Session") as mock_client:
26
27
  yield mock_client
28
+
29
+
30
+ @pytest.fixture()
31
+ def test_files_path():
32
+ return Path(__file__).parent / "test_files"
@@ -4,7 +4,7 @@ import pytest
4
4
  from haystack import Pipeline
5
5
  from haystack.components.generators.utils import print_streaming_chunk
6
6
  from haystack.components.tools import ToolInvoker
7
- from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk
7
+ from haystack.dataclasses import ChatMessage, ChatRole, ImageContent, StreamingChunk
8
8
  from haystack.tools import Tool
9
9
 
10
10
  from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockChatGenerator
@@ -24,6 +24,10 @@ MODELS_TO_TEST_WITH_TOOLS = [
24
24
  # so far we've discovered these models support streaming and tool use
25
25
  STREAMING_TOOL_MODELS = ["anthropic.claude-3-5-sonnet-20240620-v1:0", "cohere.command-r-plus-v1:0"]
26
26
 
27
+ MODELS_TO_TEST_WITH_IMAGE_INPUT = [
28
+ "us.anthropic.claude-sonnet-4-20250514-v1:0",
29
+ ]
30
+
27
31
 
28
32
  def weather(city: str):
29
33
  """Get weather for a given city."""
@@ -288,6 +292,23 @@ class TestAmazonBedrockChatGeneratorInference:
288
292
  assert "prompt_tokens" in first_reply.meta["usage"]
289
293
  assert "completion_tokens" in first_reply.meta["usage"]
290
294
 
295
+ @pytest.mark.parametrize("model_name", MODELS_TO_TEST_WITH_IMAGE_INPUT)
296
+ def test_run_with_image_input(self, model_name, test_files_path):
297
+ client = AmazonBedrockChatGenerator(model=model_name)
298
+
299
+ image_path = test_files_path / "apple.jpg"
300
+ image_content = ImageContent.from_file_path(image_path, size=(100, 100))
301
+
302
+ chat_message = ChatMessage.from_user(content_parts=["What's in the image? Max 5 words.", image_content])
303
+
304
+ response = client.run([chat_message])
305
+
306
+ first_reply = response["replies"][0]
307
+ assert isinstance(first_reply, ChatMessage)
308
+ assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT)
309
+ assert first_reply.text
310
+ assert "apple" in first_reply.text.lower()
311
+
291
312
  @pytest.mark.parametrize("model_name", MODELS_TO_TEST)
292
313
  def test_default_inference_with_streaming(self, model_name, chat_messages):
293
314
  streaming_callback_called = False
@@ -298,6 +319,7 @@ class TestAmazonBedrockChatGeneratorInference:
298
319
  streaming_callback_called = True
299
320
  assert isinstance(chunk, StreamingChunk)
300
321
  assert chunk.content is not None
322
+ assert chunk.component_info is not None
301
323
  if not paris_found_in_response:
302
324
  paris_found_in_response = "paris" in chunk.content.lower()
303
325
 
@@ -1,9 +1,12 @@
1
+ import base64
2
+
1
3
  import pytest
2
- from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk, ToolCall
4
+ from haystack.dataclasses import ChatMessage, ChatRole, ComponentInfo, ImageContent, StreamingChunk, ToolCall
3
5
  from haystack.tools import Tool
4
6
 
5
7
  from haystack_integrations.components.generators.amazon_bedrock.chat.utils import (
6
8
  _format_messages,
9
+ _format_text_image_message,
7
10
  _format_tools,
8
11
  _parse_completion_response,
9
12
  _parse_streaming_response,
@@ -105,6 +108,48 @@ class TestAmazonBedrockChatGeneratorUtils:
105
108
  {"role": "assistant", "content": [{"text": "The weather in Paris is sunny and 25°C."}]},
106
109
  ]
107
110
 
111
+ def test_format_text_image_message(self):
112
+ plain_assistant_message = ChatMessage.from_assistant("This is a test message.")
113
+ formatted_message = _format_text_image_message(plain_assistant_message)
114
+ assert formatted_message == {
115
+ "role": "assistant",
116
+ "content": [{"text": "This is a test message."}],
117
+ }
118
+
119
+ plain_user_message = ChatMessage.from_user("This is a test message.")
120
+ formatted_message = _format_text_image_message(plain_user_message)
121
+ assert formatted_message == {
122
+ "role": "user",
123
+ "content": [{"text": "This is a test message."}],
124
+ }
125
+
126
+ base64_image = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+ip1sAAAAASUVORK5CYII="
127
+ image_content = ImageContent(base64_image)
128
+ image_message = ChatMessage.from_user(content_parts=["This is a test message.", image_content])
129
+ formatted_message = _format_text_image_message(image_message)
130
+ assert formatted_message == {
131
+ "role": "user",
132
+ "content": [
133
+ {"text": "This is a test message."},
134
+ {"image": {"format": "png", "source": {"bytes": base64.b64decode(base64_image)}}},
135
+ ],
136
+ }
137
+
138
+ def test_format_text_image_message_errors(self):
139
+ base64_image = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+ip1sAAAAASUVORK5CYII="
140
+ image_content = ImageContent(base64_image)
141
+ assistant_message_with_image = ChatMessage.from_user(content_parts=["This is a test message.", image_content])
142
+ assistant_message_with_image._role = ChatRole.ASSISTANT
143
+ with pytest.raises(ValueError):
144
+ _format_text_image_message(assistant_message_with_image)
145
+
146
+ image_content_unsupported_format = ImageContent(base64_image, mime_type="image/tiff")
147
+ image_message = ChatMessage.from_user(
148
+ content_parts=["This is a test message.", image_content_unsupported_format]
149
+ )
150
+ with pytest.raises(ValueError):
151
+ _format_text_image_message(image_message)
152
+
108
153
  def test_formate_messages_multi_tool(self):
109
154
  messages = [
110
155
  ChatMessage.from_user("What is the weather in Berlin and Paris?"),
@@ -339,6 +384,9 @@ class TestAmazonBedrockChatGeneratorUtils:
339
384
  Test that process_streaming_response correctly handles streaming events and accumulates responses
340
385
  """
341
386
  model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
387
+ type_ = (
388
+ "haystack_integrations.components.generators.amazon_bedrock.chat.chat_generator.AmazonBedrockChatGenerator"
389
+ )
342
390
  streaming_chunks = []
343
391
 
344
392
  def test_callback(chunk: StreamingChunk):
@@ -379,7 +427,11 @@ class TestAmazonBedrockChatGeneratorUtils:
379
427
  },
380
428
  ]
381
429
 
382
- replies = _parse_streaming_response(events, test_callback, model)
430
+ component_info = ComponentInfo(
431
+ type=type_,
432
+ )
433
+
434
+ replies = _parse_streaming_response(events, test_callback, model, component_info)
383
435
  # Pop completion_start_time since it will always change
384
436
  replies[0].meta.pop("completion_start_time")
385
437
  expected_messages = [
@@ -413,6 +465,9 @@ class TestAmazonBedrockChatGeneratorUtils:
413
465
  "type": "function",
414
466
  }
415
467
  ]
468
+ for chunk in streaming_chunks:
469
+ assert chunk.component_info.type == type_
470
+ assert chunk.component_info.name is None # not in a pipeline
416
471
 
417
472
  # Verify final replies
418
473
  assert len(replies) == 1
@@ -420,6 +475,9 @@ class TestAmazonBedrockChatGeneratorUtils:
420
475
 
421
476
  def test_parse_streaming_response_with_two_tool_calls(self, mock_boto3_session):
422
477
  model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
478
+ type_ = (
479
+ "haystack_integrations.components.generators.amazon_bedrock.chat.chat_generator.AmazonBedrockChatGenerator"
480
+ )
423
481
  streaming_chunks = []
424
482
 
425
483
  def test_callback(chunk: StreamingChunk):
@@ -468,7 +526,11 @@ class TestAmazonBedrockChatGeneratorUtils:
468
526
  },
469
527
  ]
470
528
 
471
- replies = _parse_streaming_response(events, test_callback, model)
529
+ component_info = ComponentInfo(
530
+ type=type_,
531
+ )
532
+
533
+ replies = _parse_streaming_response(events, test_callback, model, component_info)
472
534
  # Pop completion_start_time since it will always change
473
535
  replies[0].meta.pop("completion_start_time")
474
536
  expected_messages = [