amazon-bedrock-haystack 3.6.2__tar.gz → 3.8.0__tar.gz

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
Files changed (36)
  1. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/CHANGELOG.md +25 -0
  2. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/PKG-INFO +11 -6
  3. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/README.md +9 -4
  4. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/examples/bedrock_ranker_example.py +3 -3
  5. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/pyproject.toml +37 -42
  6. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/common/amazon_bedrock/utils.py +3 -3
  7. amazon_bedrock_haystack-3.8.0/src/haystack_integrations/common/py.typed +0 -0
  8. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py +3 -11
  9. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py +2 -6
  10. amazon_bedrock_haystack-3.8.0/src/haystack_integrations/components/embedders/py.typed +0 -0
  11. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py +11 -10
  12. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py +30 -18
  13. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/utils.py +14 -4
  14. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/generators/amazon_bedrock/generator.py +2 -6
  15. amazon_bedrock_haystack-3.8.0/src/haystack_integrations/components/generators/py.typed +0 -0
  16. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/rankers/amazon_bedrock/ranker.py +8 -8
  17. amazon_bedrock_haystack-3.8.0/src/haystack_integrations/components/rankers/py.typed +0 -0
  18. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/test_chat_generator.py +3 -15
  19. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/test_chat_generator_utils.py +20 -3
  20. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/test_ranker.py +1 -8
  21. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/.gitignore +0 -0
  22. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/LICENSE.txt +0 -0
  23. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/examples/chatgenerator_example.py +0 -0
  24. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/examples/embedders_generator_with_rag_example.py +0 -0
  25. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/pydoc/config.yml +0 -0
  26. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/common/amazon_bedrock/__init__.py +0 -0
  27. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/common/amazon_bedrock/errors.py +0 -0
  28. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/embedders/amazon_bedrock/__init__.py +0 -0
  29. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/generators/amazon_bedrock/__init__.py +0 -0
  30. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/generators/amazon_bedrock/chat/__init__.py +0 -0
  31. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/src/haystack_integrations/components/rankers/amazon_bedrock/__init__.py +0 -0
  32. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/__init__.py +0 -0
  33. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/conftest.py +0 -0
  34. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/test_document_embedder.py +0 -0
  35. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/test_generator.py +0 -0
  36. {amazon_bedrock_haystack-3.6.2 → amazon_bedrock_haystack-3.8.0}/tests/test_text_embedder.py +0 -0
CHANGELOG.md
@@ -1,5 +1,30 @@
  # Changelog

+ ## [integrations/amazon_bedrock-v3.7.0] - 2025-06-11
+
+ ### 🐛 Bug Fixes
+
+ - Fix Bedrock types + add py.typed (#1912)
+ - Bedrock - do not assume connection issues in case of ClientError (#1921)
+
+ ### ⚙️ CI
+
+ - Bedrock - improve worfklow; skip tests from CI (#1773)
+
+ ### 🧹 Chores
+
+ - Update bedrock_ranker_example.py (#1740)
+ - Align core-integrations Hatch scripts (#1898)
+ - Update md files for new hatch scripts (#1911)
+
+
+ ## [integrations/amazon_bedrock-v3.6.2] - 2025-05-13
+
+ ### 🧹 Chores
+
+ - Extend error message for unknown model family in AmazonBedrockGenerator (#1733)
+
+
  ## [integrations/amazon_bedrock-v3.6.1] - 2025-05-13

  ### 🚜 Refactor
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: amazon-bedrock-haystack
- Version: 3.6.2
+ Version: 3.8.0
  Summary: An integration of Amazon Bedrock as an AmazonBedrockGenerator component.
  Project-URL: Documentation, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock#readme
  Project-URL: Issues, https://github.com/deepset-ai/haystack-core-integrations/issues
@@ -21,7 +21,7 @@ Classifier: Programming Language :: Python :: Implementation :: PyPy
  Requires-Python: >=3.9
  Requires-Dist: aioboto3>=14.0.0
  Requires-Dist: boto3>=1.28.57
- Requires-Dist: haystack-ai>=2.13.1
+ Requires-Dist: haystack-ai>=2.15.1
  Description-Content-Type: text/markdown

  # amazon-bedrock-haystack
@@ -52,13 +52,18 @@ pip install hatch

  With `hatch` installed, to run all the tests:
  ```
- hatch run test
+ hatch run test:all
  ```
- > Note: there are no integration tests for this project.

- To run the linters `ruff` and `mypy`:
+ To format your code and perform linting using Ruff (with automatic fixes), run:
  ```
- hatch run lint:all
+ hatch run fmt
+ ```
+
+ To check for static type errors, run:
+
+ ```console
+ $ hatch run test:types
  ```

  ## License
README.md
@@ -26,13 +26,18 @@ pip install hatch

  With `hatch` installed, to run all the tests:
  ```
- hatch run test
+ hatch run test:all
  ```
- > Note: there are no integration tests for this project.

- To run the linters `ruff` and `mypy`:
+ To format your code and perform linting using Ruff (with automatic fixes), run:
  ```
- hatch run lint:all
+ hatch run fmt
+ ```
+
+ To check for static type errors, run:
+
+ ```console
+ $ hatch run test:types
  ```

  ## License
examples/bedrock_ranker_example.py
@@ -3,14 +3,14 @@ import os
  from haystack import Document
  from haystack.utils import Secret

- from haystack_integrations.components.rankers.amazon_bedrock import BedrockRanker
+ from haystack_integrations.components.rankers.amazon_bedrock import AmazonBedrockRanker

  # Set up AWS credentials
  # You can also set these as environment variables
  aws_profile_name = os.environ.get("AWS_PROFILE") or "default"
  aws_region_name = os.environ.get("AWS_DEFAULT_REGION") or "eu-central-1"
- # Initialize the BedrockRanker with AWS credentials
- ranker = BedrockRanker(
+ # Initialize the AmazonBedrockRanker with AWS credentials
+ ranker = AmazonBedrockRanker(
      model="cohere.rerank-v3-5:0",
      top_k=2,
      aws_profile_name=Secret.from_token(aws_profile_name),
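The rename only affects the class name: a later hunk in `ranker.py` keeps `BedrockRanker` as a subclass of `AmazonBedrockRanker`, so existing imports continue to work. A minimal sketch of the updated usage (the query and documents are illustrative, and an AWS profile with access to the Cohere Rerank model is assumed):

```python
import os

from haystack import Document
from haystack.utils import Secret

from haystack_integrations.components.rankers.amazon_bedrock import AmazonBedrockRanker

# Mirrors the updated example file; the profile fallback is an assumption.
ranker = AmazonBedrockRanker(
    model="cohere.rerank-v3-5:0",
    top_k=2,
    aws_profile_name=Secret.from_token(os.environ.get("AWS_PROFILE") or "default"),
)

docs = [
    Document(content="Paris is the capital of France."),
    Document(content="Berlin is the capital of Germany."),
]
result = ranker.run(query="What is the capital of France?", documents=docs)
print([doc.content for doc in result["documents"]])
```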
pyproject.toml
@@ -23,7 +23,7 @@ classifiers = [
      "Programming Language :: Python :: Implementation :: CPython",
      "Programming Language :: Python :: Implementation :: PyPy",
  ]
- dependencies = ["haystack-ai>=2.13.1", "boto3>=1.28.57", "aioboto3>=14.0.0"]
+ dependencies = ["haystack-ai>=2.15.1", "boto3>=1.28.57", "aioboto3>=14.0.0"]

  [project.urls]
  Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock#readme"
@@ -43,38 +43,48 @@ git_describe_command = 'git describe --tags --match="integrations/amazon_bedrock

  [tool.hatch.envs.default]
  installer = "uv"
- dependencies = [
-     "coverage[toml]>=6.5",
-     "haystack-pydoc-tools",
-     "pytest",
-     "pytest-asyncio",
-     "pytest-rerunfailures",
- ]
+ dependencies = ["haystack-pydoc-tools", "ruff"]

  [tool.hatch.envs.default.scripts]
- test = "pytest {args:tests}"
- test-cov = "coverage run -m pytest {args:tests}"
- test-cov-retry = "test-cov --reruns 3 --reruns-delay 30 -x"
- cov-report = ["- coverage combine", "coverage report"]
- cov = ["test-cov", "cov-report"]
- cov-retry = ["test-cov-retry", "cov-report"]
  docs = ["pydoc-markdown pydoc/config.yml"]
+ fmt = "ruff check --fix {args} && ruff format {args}"
+ fmt-check = "ruff check {args} && ruff format --check {args}"

- [tool.hatch.envs.lint]
- installer = "uv"
- detached = true
- dependencies = ["pip", "black>=23.1.0", "mypy>=1.0.0", "ruff>=0.0.243"]
+ [tool.hatch.envs.test]
+ dependencies = [
+     "pytest",
+     "pytest-asyncio",
+     "pytest-cov",
+     "pytest-rerunfailures",
+     "mypy",
+     "pip",
+ ]

- [tool.hatch.envs.lint.scripts]
- typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}"
- style = ["ruff check {args:.}", "black --check --diff {args:.}"]
- fmt = ["black {args:.}", "ruff check --fix {args:.}", "style"]
- all = ["style", "typing"]
+ [tool.hatch.envs.test.scripts]
+ unit = 'pytest -m "not integration" {args:tests}'
+ integration = 'pytest -m "integration" {args:tests}'
+ all = 'pytest {args:tests}'
+ cov-retry = 'all --cov=haystack_integrations --reruns 3 --reruns-delay 30 -x'

- [tool.black]
- target-version = ["py38"]
- line-length = 120
- skip-string-normalization = true
+ types = """mypy -p haystack_integrations.common.amazon_bedrock \
+     -p haystack_integrations.components.embedders.amazon_bedrock \
+     -p haystack_integrations.components.generators.amazon_bedrock \
+     -p haystack_integrations.components.rankers.amazon_bedrock {args}"""
+
+ [tool.mypy]
+ install_types = true
+ non_interactive = true
+ check_untyped_defs = true
+ disallow_incomplete_defs = true
+
+ [[tool.mypy.overrides]]
+ # unfortunately, unofficial type stubs do not play well with some features of Bedrock
+ module = [
+     "aioboto3.*",
+     "botocore.*",
+     "boto3.*",
+ ]
+ ignore_missing_imports = true

  [tool.ruff]
  target-version = "py38"
@@ -150,24 +160,9 @@ omit = ["*/tests/*", "*/__init__.py"]
  show_missing = true
  exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]

- [[tool.mypy.overrides]]
- module = [
-     "aioboto3.*",
-     "botocore.*",
-     "boto3.*",
-     "haystack.*",
-     "haystack_integrations.*",
-     "pytest.*",
-     "numpy.*",
- ]
- ignore_missing_imports = true
-
  [tool.pytest.ini_options]
  addopts = "--strict-markers"
  markers = [
-     "unit: unit tests",
      "integration: integration tests",
-     "embedders: embedders tests",
-     "generators: generators tests",
  ]
  log_cli = true
src/haystack_integrations/common/amazon_bedrock/utils.py
@@ -1,4 +1,4 @@
- from typing import Optional, Union
+ from typing import Any, Optional, Union

  import aioboto3
  import boto3
@@ -22,7 +22,7 @@ def get_aws_session(
      aws_region_name: Optional[str] = None,
      aws_profile_name: Optional[str] = None,
      async_mode: bool = False,
-     **kwargs,
+     **kwargs: Any,
  ) -> Union[boto3.Session, aioboto3.Session]:
      """
      Creates an AWS Session with the given parameters.
@@ -62,7 +62,7 @@ def get_aws_session(
          raise AWSConfigurationError(msg) from e


- def aws_configured(**kwargs) -> bool:
+ def aws_configured(**kwargs: Any) -> bool:
      """
      Checks whether AWS configuration is provided.
      :param kwargs: The kwargs passed down to the generator.
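For context, `get_aws_session` is the shared helper behind all of these components; the annotations make explicit that it returns a `boto3.Session` by default and an `aioboto3.Session` when called with `async_mode=True`. A minimal sketch (the region value is an assumption):

```python
from haystack_integrations.common.amazon_bedrock.utils import get_aws_session

# Default: a synchronous boto3.Session
session = get_aws_session(aws_region_name="eu-central-1")
client = session.client("bedrock-runtime")

# async_mode=True returns an aioboto3.Session instead, as used by the async chat paths
async_session = get_aws_session(aws_region_name="eu-central-1", async_mode=True)
```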
src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py
@@ -74,7 +74,7 @@ class AmazonBedrockDocumentEmbedder:
          meta_fields_to_embed: Optional[List[str]] = None,
          embedding_separator: str = "\n",
          boto3_config: Optional[Dict[str, Any]] = None,
-         **kwargs,
+         **kwargs: Any,
      ) -> None:
          """
          Initializes the AmazonBedrockDocumentEmbedder with the provided parameters. The parameters are passed to the
@@ -186,11 +186,7 @@ class AmazonBedrockDocumentEmbedder:
                  body=json.dumps(body), modelId=self.model, accept="*/*", contentType="application/json"
              )
          except ClientError as exception:
-             msg = (
-                 f"Could not connect to Amazon Bedrock model {self.model}. "
-                 f"Make sure your AWS environment is configured correctly, "
-                 f"the model is available in the configured AWS region, and you have access."
-             )
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
              raise AmazonBedrockInferenceError(msg) from exception

          response_body = json.loads(response.get("body").read())
@@ -217,11 +213,7 @@ class AmazonBedrockDocumentEmbedder:
                  body=json.dumps(body), modelId=self.model, accept="*/*", contentType="application/json"
              )
          except ClientError as exception:
-             msg = (
-                 f"Could not connect to Amazon Bedrock model {self.model}. "
-                 f"Make sure your AWS environment is configured correctly, "
-                 f"the model is available in the configured AWS region, and you have access."
-             )
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
              raise AmazonBedrockInferenceError(msg) from exception

          response_body = json.loads(response.get("body").read())
src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py
@@ -63,7 +63,7 @@ class AmazonBedrockTextEmbedder:
          aws_region_name: Optional[Secret] = Secret.from_env_var("AWS_DEFAULT_REGION", strict=False), # noqa: B008
          aws_profile_name: Optional[Secret] = Secret.from_env_var("AWS_PROFILE", strict=False), # noqa: B008
          boto3_config: Optional[Dict[str, Any]] = None,
-         **kwargs,
+         **kwargs: Any,
      ) -> None:
          """
          Initializes the AmazonBedrockTextEmbedder with the provided parameters. The parameters are passed to the
@@ -160,11 +160,7 @@ class AmazonBedrockTextEmbedder:
                  body=json.dumps(body), modelId=self.model, accept="*/*", contentType="application/json"
              )
          except ClientError as exception:
-             msg = (
-                 f"Could not connect to Amazon Bedrock model {self.model}. "
-                 f"Make sure your AWS environment is configured correctly, "
-                 f"the model is available in the configured AWS region, and you have access."
-             )
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
              raise AmazonBedrockInferenceError(msg) from exception

          response_body = json.loads(response.get("body").read())
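Both embedders now surface the underlying `ClientError` instead of assuming a connection problem, matching the v3.7.0 fix "do not assume connection issues in case of ClientError" (#1921). A hedged sketch of what a caller observes; the error-class import path follows this package's `errors.py` module, and the model ID is illustrative:

```python
from haystack_integrations.common.amazon_bedrock.errors import AmazonBedrockInferenceError
from haystack_integrations.components.embedders.amazon_bedrock import AmazonBedrockTextEmbedder

embedder = AmazonBedrockTextEmbedder(model="amazon.titan-embed-text-v1")
try:
    embedder.run(text="What is the capital of France?")
except AmazonBedrockInferenceError as err:
    # The message now ends with the underlying botocore error (for example an
    # AccessDeniedException or ValidationException) instead of a generic hint.
    print(err)
```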
src/haystack_integrations/components/generators/amazon_bedrock/adapters.py
@@ -1,8 +1,9 @@
  import json
  from abc import ABC, abstractmethod
- from typing import Any, Callable, Dict, List, Optional
+ from typing import Any, Dict, List, Optional

- from haystack.dataclasses import StreamingChunk
+ from botocore.eventstream import EventStream
+ from haystack.dataclasses import StreamingChunk, SyncStreamingCallbackT


  class BedrockModelAdapter(ABC):
@@ -23,7 +24,7 @@ class BedrockModelAdapter(ABC):
          self.max_length = max_length

      @abstractmethod
-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """
          Prepares the body for the Amazon Bedrock request.
          Each subclass should implement this method to prepare the request body for the specific model.
@@ -44,7 +45,7 @@ class BedrockModelAdapter(ABC):
          responses = [completion.lstrip() for completion in completions]
          return responses

-     def get_stream_responses(self, stream, streaming_callback: Callable[[StreamingChunk], None]) -> List[str]:
+     def get_stream_responses(self, stream: EventStream, streaming_callback: SyncStreamingCallbackT) -> List[str]:
          """
          Extracts the responses from the Amazon Bedrock streaming response.

@@ -122,7 +123,7 @@ class AnthropicClaudeAdapter(BedrockModelAdapter):
          self.thinking_tag_end = f"</{self.thinking_tag}>\n\n" if self.thinking_tag else "\n\n"
          super().__init__(model_kwargs, max_length)

-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """
          Prepares the body for the Claude model

@@ -210,7 +211,7 @@ class MistralAdapter(BedrockModelAdapter):
      Adapter for the Mistral models.
      """

-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """
          Prepares the body for the Mistral model

@@ -260,7 +261,7 @@ class CohereCommandAdapter(BedrockModelAdapter):
      Adapter for the Cohere Command model.
      """

-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """
          Prepares the body for the Command model

@@ -372,7 +373,7 @@ class AI21LabsJurassic2Adapter(BedrockModelAdapter):
      Model adapter for AI21 Labs' Jurassic 2 models.
      """

-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """Prepares the body for the Jurassic 2 model.

          :param prompt: The prompt to be sent to the model.
@@ -410,7 +411,7 @@ class AmazonTitanAdapter(BedrockModelAdapter):
      Adapter for Amazon's Titan models.
      """

-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """
          Prepares the body for the Titan model

@@ -456,7 +457,7 @@ class MetaLlamaAdapter(BedrockModelAdapter):
      Adapter for Meta's Llama2 models.
      """

-     def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+     def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
          """
          Prepares the body for the Llama2 model

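`SyncStreamingCallbackT` is Haystack's alias for exactly the `Callable[[StreamingChunk], None]` shape it replaces here, so existing callbacks keep working; the annotation just lets mypy check them. A minimal compatible callback:

```python
from haystack.dataclasses import StreamingChunk


def print_chunk(chunk: StreamingChunk) -> None:
    # Satisfies SyncStreamingCallbackT: accepts a StreamingChunk, returns None.
    print(chunk.content, end="", flush=True)
```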
src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py
@@ -5,7 +5,7 @@ from botocore.config import Config
  from botocore.eventstream import EventStream
  from botocore.exceptions import ClientError
  from haystack import component, default_from_dict, default_to_dict, logging
- from haystack.dataclasses import ChatMessage, StreamingCallbackT, select_streaming_callback
+ from haystack.dataclasses import ChatMessage, ComponentInfo, StreamingCallbackT, select_streaming_callback
  from haystack.tools import (
      Tool,
      Toolset,
@@ -197,12 +197,6 @@ class AmazonBedrockChatGenerator:
          def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
              return secret.resolve_value() if secret else None

-         aws_access_key_id = resolve_secret(aws_access_key_id)
-         aws_secret_access_key = resolve_secret(aws_secret_access_key)
-         aws_session_token = resolve_secret(aws_session_token)
-         aws_region_name = resolve_secret(aws_region_name)
-         aws_profile_name = resolve_secret(aws_profile_name)
-
          config: Optional[Config] = None
          if self.boto3_config:
              config = Config(**self.boto3_config)
@@ -210,11 +204,11 @@ class AmazonBedrockChatGenerator:
          try:
              # sync session
              session = get_aws_session(
-                 aws_access_key_id=aws_access_key_id,
-                 aws_secret_access_key=aws_secret_access_key,
-                 aws_session_token=aws_session_token,
-                 aws_region_name=aws_region_name,
-                 aws_profile_name=aws_profile_name,
+                 aws_access_key_id=resolve_secret(aws_access_key_id),
+                 aws_secret_access_key=resolve_secret(aws_secret_access_key),
+                 aws_session_token=resolve_secret(aws_session_token),
+                 aws_region_name=resolve_secret(aws_region_name),
+                 aws_profile_name=resolve_secret(aws_profile_name),
              )
              self.client = session.client("bedrock-runtime", config=config)

@@ -227,7 +221,7 @@ class AmazonBedrockChatGenerator:

          self.generation_kwargs = generation_kwargs or {}
          self.stop_words = stop_words or []
-         self.async_session = None
+         self.async_session: Optional[aioboto3.Session] = None

      def _get_async_session(self) -> aioboto3.Session:
          """
@@ -377,7 +371,9 @@ class AmazonBedrockChatGenerator:
          if additional_fields:
              params["additionalModelRequestFields"] = additional_fields

-         callback = select_streaming_callback(
+         # overloads that exhaust finite Literals(bool) not treated as exhaustive
+         # see https://github.com/python/mypy/issues/14764
+         callback = select_streaming_callback(  # type: ignore[call-overload]
              init_callback=self.streaming_callback,
              runtime_callback=streaming_callback,
              requires_async=requires_async,
@@ -412,6 +408,8 @@ class AmazonBedrockChatGenerator:
          :raises AmazonBedrockInferenceError:
              If the Bedrock inference API call fails.
          """
+         component_info = ComponentInfo.from_component(self)
+
          params, callback = self._prepare_request_params(
              messages=messages,
              streaming_callback=streaming_callback,
@@ -427,12 +425,18 @@ class AmazonBedrockChatGenerator:
                  if not response_stream:
                      msg = "No stream found in the response."
                      raise AmazonBedrockInferenceError(msg)
-                 replies = _parse_streaming_response(response_stream, callback, self.model)
+                 # the type of streaming callback is checked in _prepare_request_params, but mypy doesn't know
+                 replies = _parse_streaming_response(
+                     response_stream=response_stream,
+                     streaming_callback=callback,  # type: ignore[arg-type]
+                     model=self.model,
+                     component_info=component_info,
+                 )
              else:
                  response = self.client.converse(**params)
                  replies = _parse_completion_response(response, self.model)
          except ClientError as exception:
-             msg = f"Could not generate inference for Amazon Bedrock model {self.model} due: {exception}"
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
              raise AmazonBedrockInferenceError(msg) from exception

          return {"replies": replies}
@@ -464,6 +468,8 @@ class AmazonBedrockChatGenerator:
          :raises AmazonBedrockInferenceError:
              If the Bedrock inference API call fails.
          """
+         component_info = ComponentInfo.from_component(self)
+
          params, callback = self._prepare_request_params(
              messages=messages,
              streaming_callback=streaming_callback,
@@ -483,13 +489,19 @@ class AmazonBedrockChatGenerator:
                  if not response_stream:
                      msg = "No stream found in the response."
                      raise AmazonBedrockInferenceError(msg)
-                 replies = await _parse_streaming_response_async(response_stream, callback, self.model)
+                 # the type of streaming callback is checked in _prepare_request_params, but mypy doesn't know
+                 replies = await _parse_streaming_response_async(
+                     response_stream=response_stream,
+                     streaming_callback=callback,  # type: ignore[arg-type]
+                     model=self.model,
+                     component_info=component_info,
+                 )
              else:
                  response = await async_client.converse(**params)
                  replies = _parse_completion_response(response, self.model)

          except ClientError as exception:
-             msg = f"Could not generate inference for Amazon Bedrock model {self.model} due: {exception}"
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
              raise AmazonBedrockInferenceError(msg) from exception

          return {"replies": replies}
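Taken together, these hunks make every chunk emitted during streaming carry a `ComponentInfo` describing the component that produced it (the updated tests below assert this). A hedged sketch of a callback that uses it; the model ID and message are illustrative:

```python
from haystack.dataclasses import ChatMessage, StreamingChunk

from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockChatGenerator


def on_chunk(chunk: StreamingChunk) -> None:
    # As of 3.8.0, component_info identifies the emitting component;
    # component_info.name is only set when the component runs inside a pipeline.
    origin = chunk.component_info.type if chunk.component_info else "unknown"
    print(f"[{origin}] {chunk.content}", end="")


generator = AmazonBedrockChatGenerator(
    model="anthropic.claude-3-5-sonnet-20240620-v1:0",
    streaming_callback=on_chunk,
)
result = generator.run(messages=[ChatMessage.from_user("What is the capital of France?")])
```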
src/haystack_integrations/components/generators/amazon_bedrock/chat/utils.py
@@ -8,6 +8,7 @@ from haystack.dataclasses import (
      AsyncStreamingCallbackT,
      ChatMessage,
      ChatRole,
+     ComponentInfo,
      StreamingChunk,
      SyncStreamingCallbackT,
      ToolCall,
@@ -46,7 +47,7 @@ def _format_tool_call_message(tool_call_message: ChatMessage) -> Dict[str, Any]:
      :returns:
          Dictionary representing the tool call message in Bedrock's expected format
      """
-     content = []
+     content: List[Dict[str, Any]] = []
      # Tool call message can contain text
      if tool_call_message.text:
          content.append({"text": tool_call_message.text})
@@ -235,7 +236,9 @@ def _parse_completion_response(response_body: Dict[str, Any], model: str) -> Lis


  # Bedrock streaming to Haystack util methods
- def _convert_event_to_streaming_chunk(event: Dict[str, Any], model: str) -> StreamingChunk:
+ def _convert_event_to_streaming_chunk(
+     event: Dict[str, Any], model: str, component_info: ComponentInfo
+ ) -> StreamingChunk:
      """
      Convert a Bedrock streaming event to a Haystack StreamingChunk.

@@ -244,6 +247,7 @@ def _convert_event_to_streaming_chunk(event: Dict[str, Any], model: str) -> Stre

      :param event: Dictionary containing a Bedrock streaming event.
      :param model: The model ID used for generation, included in chunk metadata.
+     :param component_info: ComponentInfo object
      :returns: StreamingChunk object containing the content and metadata extracted from the event.
      """
      # Initialize an empty StreamingChunk to return if no relevant event is found
@@ -358,6 +362,8 @@ def _convert_event_to_streaming_chunk(event: Dict[str, Any], model: str) -> Stre
          },
      )

+     streaming_chunk.component_info = component_info
+
      return streaming_chunk


@@ -438,6 +444,7 @@ def _parse_streaming_response(
      response_stream: EventStream,
      streaming_callback: SyncStreamingCallbackT,
      model: str,
+     component_info: ComponentInfo,
  ) -> List[ChatMessage]:
      """
      Parse a streaming response from Bedrock.
@@ -445,11 +452,12 @@
      :param response_stream: EventStream from Bedrock API
      :param streaming_callback: Callback for streaming chunks
      :param model: The model ID used for generation
+     :param component_info: ComponentInfo object
      :return: List of ChatMessage objects
      """
      chunks: List[StreamingChunk] = []
      for event in response_stream:
-         streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model)
+         streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model, component_info=component_info)
          streaming_callback(streaming_chunk)
          chunks.append(streaming_chunk)
      replies = [_convert_streaming_chunks_to_chat_message(chunks=chunks)]
@@ -460,6 +468,7 @@ async def _parse_streaming_response_async(
      response_stream: EventStream,
      streaming_callback: AsyncStreamingCallbackT,
      model: str,
+     component_info: ComponentInfo,
  ) -> List[ChatMessage]:
      """
      Parse a streaming response from Bedrock.
@@ -467,11 +476,12 @@
      :param response_stream: EventStream from Bedrock API
      :param streaming_callback: Callback for streaming chunks
      :param model: The model ID used for generation
+     :param component_info: ComponentInfo object
      :return: List of ChatMessage objects
      """
      chunks: List[StreamingChunk] = []
      async for event in response_stream:
-         streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model)
+         streaming_chunk = _convert_event_to_streaming_chunk(event=event, model=model, component_info=component_info)
          await streaming_callback(streaming_chunk)
          chunks.append(streaming_chunk)
      replies = [_convert_streaming_chunks_to_chat_message(chunks=chunks)]
src/haystack_integrations/components/generators/amazon_bedrock/generator.py
@@ -107,7 +107,7 @@ class AmazonBedrockGenerator:
          streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
          boto3_config: Optional[Dict[str, Any]] = None,
          model_family: Optional[MODEL_FAMILIES] = None,
-         **kwargs,
+         **kwargs: Any,
      ) -> None:
          """
          Create a new `AmazonBedrockGenerator` instance.
@@ -234,11 +234,7 @@ class AmazonBedrockGenerator:
              metadata = response.get("ResponseMetadata", {})

          except ClientError as exception:
-             msg = (
-                 f"Could not connect to Amazon Bedrock model {self.model}. "
-                 f"Make sure your AWS environment is configured correctly, "
-                 f"the model is available in the configured AWS region, and you have access."
-             )
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
              raise AmazonBedrockInferenceError(msg) from exception

          return {"replies": replies, "meta": metadata}
src/haystack_integrations/components/rankers/amazon_bedrock/ranker.py
@@ -256,15 +256,15 @@ class AmazonBedrockRanker:
                  sorted_docs.append(doc)

              return {"documents": sorted_docs}
-         except ClientError as exception:
-             msg = f"Could not inference Amazon Bedrock model {self.model_name} due to: {exception}"
+         except ClientError as client_error:
+             msg = f"Could not perform inference for Amazon Bedrock model {self.model_name} due to:\n{client_error}"
+             raise AmazonBedrockInferenceError(msg) from client_error
+         except KeyError as key_error:
+             msg = f"Unexpected response format from Amazon Bedrock: {key_error}"
+             raise AmazonBedrockInferenceError(msg) from key_error
+         except Exception as exception:
+             msg = f"Error during Amazon Bedrock API call: {exception}"
              raise AmazonBedrockInferenceError(msg) from exception
-         except KeyError as e:
-             msg = f"Unexpected response format from Amazon Bedrock: {e!s}"
-             raise AmazonBedrockInferenceError(msg) from e
-         except Exception as e:
-             msg = f"Error during Amazon Bedrock API call: {e!s}"
-             raise AmazonBedrockInferenceError(msg) from e


  class BedrockRanker(AmazonBedrockRanker):
tests/test_chat_generator.py
@@ -1,4 +1,3 @@
- import os
  from typing import Any, Dict, Optional

  import pytest
@@ -265,14 +264,8 @@ class TestAmazonBedrockChatGenerator:
          assert request_params["toolConfig"] == top_song_tool_config


+ # In the CI, those tests are skipped if AWS Authentication fails
  @pytest.mark.integration
- @pytest.mark.skipif(
-     not os.environ.get("AWS_CI_ROLE_ARN", None) and not os.environ.get("AWS_REGION", None),
-     reason=(
-         "Skipping test because AWS_CI_ROLE_ARN and AWS_REGION environment variables are not set. "
-         "This test requires AWS credentials to run."
-     ),
- )
  class TestAmazonBedrockChatGeneratorInference:
      @pytest.mark.parametrize("model_name", MODELS_TO_TEST)
      def test_default_inference_params(self, model_name, chat_messages):
@@ -305,6 +298,7 @@
              streaming_callback_called = True
              assert isinstance(chunk, StreamingChunk)
              assert chunk.content is not None
+             assert chunk.component_info is not None
              if not paris_found_in_response:
                  paris_found_in_response = "paris" in chunk.content.lower()

@@ -437,14 +431,8 @@
      )


+ # In the CI, those tests are skipped if AWS Authentication fails
  @pytest.mark.integration
- @pytest.mark.skipif(
-     not os.environ.get("AWS_CI_ROLE_ARN", None) and not os.environ.get("AWS_REGION", None),
-     reason=(
-         "Skipping test because AWS_CI_ROLE_ARN and AWS_REGION environment variables are not set. "
-         "This test requires AWS credentials to run."
-     ),
- )
  class TestAmazonBedrockChatGeneratorAsyncInference:
      """
      Test class for async inference functionality of AmazonBedrockChatGenerator
tests/test_chat_generator_utils.py
@@ -1,5 +1,5 @@
  import pytest
- from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk, ToolCall
+ from haystack.dataclasses import ChatMessage, ChatRole, ComponentInfo, StreamingChunk, ToolCall
  from haystack.tools import Tool

  from haystack_integrations.components.generators.amazon_bedrock.chat.utils import (
@@ -339,6 +339,9 @@ class TestAmazonBedrockChatGeneratorUtils:
          Test that process_streaming_response correctly handles streaming events and accumulates responses
          """
          model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
+         type_ = (
+             "haystack_integrations.components.generators.amazon_bedrock.chat.chat_generator.AmazonBedrockChatGenerator"
+         )
          streaming_chunks = []

          def test_callback(chunk: StreamingChunk):
@@ -379,7 +382,11 @@
              },
          ]

-         replies = _parse_streaming_response(events, test_callback, model)
+         component_info = ComponentInfo(
+             type=type_,
+         )
+
+         replies = _parse_streaming_response(events, test_callback, model, component_info)
          # Pop completion_start_time since it will always change
          replies[0].meta.pop("completion_start_time")
          expected_messages = [
@@ -413,6 +420,9 @@
                  "type": "function",
              }
          ]
+         for chunk in streaming_chunks:
+             assert chunk.component_info.type == type_
+             assert chunk.component_info.name is None  # not in a pipeline

          # Verify final replies
          assert len(replies) == 1
@@ -420,6 +430,9 @@

      def test_parse_streaming_response_with_two_tool_calls(self, mock_boto3_session):
          model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
+         type_ = (
+             "haystack_integrations.components.generators.amazon_bedrock.chat.chat_generator.AmazonBedrockChatGenerator"
+         )
          streaming_chunks = []

          def test_callback(chunk: StreamingChunk):
@@ -468,7 +481,11 @@
              },
          ]

-         replies = _parse_streaming_response(events, test_callback, model)
+         component_info = ComponentInfo(
+             type=type_,
+         )
+
+         replies = _parse_streaming_response(events, test_callback, model, component_info)
          # Pop completion_start_time since it will always change
          replies[0].meta.pop("completion_start_time")
          expected_messages = [
tests/test_ranker.py
@@ -1,4 +1,3 @@
- import os
  from unittest.mock import MagicMock, patch

  import pytest
@@ -52,14 +51,8 @@ def test_bedrock_ranker_run(mock_aws_session):
      assert result["documents"][1].score == 0.7


+ # In the CI, those tests are skipped if AWS Authentication fails
  @pytest.mark.integration
- @pytest.mark.skipif(
-     not os.environ.get("AWS_CI_ROLE_ARN", None) and not os.environ.get("AWS_REGION", None),
-     reason=(
-         "Skipping test because AWS_CI_ROLE_ARN and AWS_REGION environment variables are not set. "
-         "This test requires AWS credentials to run."
-     ),
- )
  def test_amazon_bedrock_ranker_live_run():
      ranker = AmazonBedrockRanker(
          model="cohere.rerank-v3-5:0",