amazon-bedrock-haystack 3.6.1__py3-none-any.whl → 3.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

amazon_bedrock_haystack-3.6.1.dist-info/METADATA → amazon_bedrock_haystack-3.7.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: amazon-bedrock-haystack
-Version: 3.6.1
+Version: 3.7.0
 Summary: An integration of Amazon Bedrock as an AmazonBedrockGenerator component.
 Project-URL: Documentation, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock#readme
 Project-URL: Issues, https://github.com/deepset-ai/haystack-core-integrations/issues
@@ -52,13 +52,18 @@ pip install hatch
 
 With `hatch` installed, to run all the tests:
 ```
-hatch run test
+hatch run test:all
 ```
-> Note: there are no integration tests for this project.
 
-To run the linters `ruff` and `mypy`:
+To format your code and perform linting using Ruff (with automatic fixes), run:
 ```
-hatch run lint:all
+hatch run fmt
+```
+
+To check for static type errors, run:
+
+```console
+$ hatch run test:types
 ```
 
 ## License

amazon_bedrock_haystack-3.6.1.dist-info/RECORD → amazon_bedrock_haystack-3.7.0.dist-info/RECORD
@@ -1,18 +1,22 @@
+haystack_integrations/common/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 haystack_integrations/common/amazon_bedrock/__init__.py,sha256=6GZ8Y3Lw0rLOsOAqi6Tu5mZC977UzQvgDxKpOWr8IQw,110
 haystack_integrations/common/amazon_bedrock/errors.py,sha256=ReheDbY7L3EJkWcUoih6lWHjbPHg2TlUs9SnXIKK7Gg,744
-haystack_integrations/common/amazon_bedrock/utils.py,sha256=dHUWzHYT0A8_eLDpVkwDhmDpprYbFlWsGg0FOS0uF0I,2720
+haystack_integrations/common/amazon_bedrock/utils.py,sha256=ASAwEhInF9F6rhL4CbXFQUFU1pSdscWvG6jcrXkEUhc,2735
+haystack_integrations/components/embedders/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 haystack_integrations/components/embedders/amazon_bedrock/__init__.py,sha256=CFqYmAVq2aavlMkZHYScKHOTwwETdRzRZITMqGhJ9Kw,298
-haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py,sha256=ZdtM6HpHQwbKfjmfOK6gkIQPPbI0n8_pWRrR6lyXmr8,13321
-haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py,sha256=gpvu6IMoycUXrn4r1OH5yEIheiDxHf2T5fdJJO4DfW0,9202
+haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py,sha256=YBVlFIo9t2qzVkNWaFKc-FNRo7R_pKfHmqNRkoMZ9K0,12952
+haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py,sha256=KNvsUP-YZD17_zVBwMs42v0S2uuTE_ajMaj9bjt1XlE,9036
+haystack_integrations/components/generators/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 haystack_integrations/components/generators/amazon_bedrock/__init__.py,sha256=lv4NouIVm78YavUssWQrHHP_81u-7j21qW8v1kZMJPQ,284
-haystack_integrations/components/generators/amazon_bedrock/adapters.py,sha256=cnlfmie4HfEX4nipSXSDk_3koy7HYZ-ezimGN6BozQ0,19543
-haystack_integrations/components/generators/amazon_bedrock/generator.py,sha256=NgywyiKYazEbsLAcGcOPUT4blWhYYOJ9WjO-HWDvu7I,14576
+haystack_integrations/components/generators/amazon_bedrock/adapters.py,sha256=yBC-3YwV6qAwSXMtdZiLSYh2lUpPQIDy7Efl7w-Cu-k,19640
+haystack_integrations/components/generators/amazon_bedrock/generator.py,sha256=c_saV5zxFYQVJT0Hzo80lKty46itL0Dp31VuDueYa3M,14716
 haystack_integrations/components/generators/amazon_bedrock/chat/__init__.py,sha256=6GZ8Y3Lw0rLOsOAqi6Tu5mZC977UzQvgDxKpOWr8IQw,110
-haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py,sha256=M9I0sB8LFrXCgoyr5ik2ZPPHyB0b4nJFKX7GARKsk8Y,23384
-haystack_integrations/components/generators/amazon_bedrock/chat/utils.py,sha256=kBSaU_ZqzL-7a7nplezjb4XRBy51pt-4VULoX5lq21A,21148
+haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py,sha256=nhen_XcdypQT2K8312gjYdPYrNK_NtBKcR3gPfYzVwU,23461
+haystack_integrations/components/generators/amazon_bedrock/chat/utils.py,sha256=EGDcJZFzXKjIJnD8sw1M9YHIPPm05drw1KbY_Lx1kUI,21170
+haystack_integrations/components/rankers/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 haystack_integrations/components/rankers/amazon_bedrock/__init__.py,sha256=Zrc3BSVkEaXYpliEi6hKG9bqW4J7DNk93p50SuoyT1Q,107
-haystack_integrations/components/rankers/amazon_bedrock/ranker.py,sha256=x4QEVkbFM-jMFHx-xmk571wtrohnPLtkIWMhCyg4_II,12278
-amazon_bedrock_haystack-3.6.1.dist-info/METADATA,sha256=oRIb-2Nv642N0wmapQgykocVVjxzwM7rG1_TNOoF6Vs,2225
-amazon_bedrock_haystack-3.6.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-amazon_bedrock_haystack-3.6.1.dist-info/licenses/LICENSE.txt,sha256=B05uMshqTA74s-0ltyHKI6yoPfJ3zYgQbvcXfDVGFf8,10280
-amazon_bedrock_haystack-3.6.1.dist-info/RECORD,,
+haystack_integrations/components/rankers/amazon_bedrock/ranker.py,sha256=enAjf2QyDwfpidKkFCdLz954cx-Tjh9emrOS3vINJDg,12344
+amazon_bedrock_haystack-3.7.0.dist-info/METADATA,sha256=2n5u93lnpV3M8NL_7GaR7dHqRnoq8vzV6v73sTjS008,2287
+amazon_bedrock_haystack-3.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+amazon_bedrock_haystack-3.7.0.dist-info/licenses/LICENSE.txt,sha256=B05uMshqTA74s-0ltyHKI6yoPfJ3zYgQbvcXfDVGFf8,10280
+amazon_bedrock_haystack-3.7.0.dist-info/RECORD,,
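
The four new `py.typed` files are PEP 561 markers: their presence tells type checkers that this package ships inline type annotations, which is what makes the annotation fixes in the rest of this diff visible downstream. Because `haystack_integrations` is a namespace package shared across many integrations, each component sub-tree carries its own marker. A minimal downstream sketch (the model ID is illustrative and `reveal_type` is a mypy-only pseudo-function, so this file is meant for `mypy check_types.py`, not for execution):

```python
# check_types.py -- with the py.typed markers in place, mypy reads the
# package's own annotations instead of treating every import as Any.
from haystack_integrations.components.embedders.amazon_bedrock import (
    AmazonBedrockTextEmbedder,
)

embedder = AmazonBedrockTextEmbedder(model="amazon.titan-embed-text-v2:0")
reveal_type(embedder.run)  # mypy now reports the annotated signature, not Any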

haystack_integrations/common/amazon_bedrock/utils.py
@@ -1,4 +1,4 @@
-from typing import Optional, Union
+from typing import Any, Optional, Union
 
 import aioboto3
 import boto3
@@ -22,7 +22,7 @@ def get_aws_session(
     aws_region_name: Optional[str] = None,
     aws_profile_name: Optional[str] = None,
     async_mode: bool = False,
-    **kwargs,
+    **kwargs: Any,
 ) -> Union[boto3.Session, aioboto3.Session]:
     """
     Creates an AWS Session with the given parameters.
@@ -62,7 +62,7 @@ def get_aws_session(
         raise AWSConfigurationError(msg) from e
 
 
-def aws_configured(**kwargs) -> bool:
+def aws_configured(**kwargs: Any) -> bool:
     """
     Checks whether AWS configuration is provided.
     :param kwargs: The kwargs passed down to the generator.
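
With `**kwargs` now annotated as `Any`, both helpers type-check cleanly under strict mypy settings. A minimal usage sketch, assuming only the parameter names visible in this diff (the region and profile values are illustrative):

```python
from haystack_integrations.common.amazon_bedrock.utils import (
    aws_configured,
    get_aws_session,
)

aws_params = {"aws_region_name": "us-east-1", "aws_profile_name": "default"}
if aws_configured(**aws_params):
    # returns a boto3.Session; pass async_mode=True for an aioboto3.Session
    session = get_aws_session(**aws_params)
    client = session.client("bedrock-runtime")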

haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py
@@ -74,7 +74,7 @@ class AmazonBedrockDocumentEmbedder:
         meta_fields_to_embed: Optional[List[str]] = None,
         embedding_separator: str = "\n",
         boto3_config: Optional[Dict[str, Any]] = None,
-        **kwargs,
+        **kwargs: Any,
     ) -> None:
         """
         Initializes the AmazonBedrockDocumentEmbedder with the provided parameters. The parameters are passed to the
@@ -186,11 +186,7 @@ class AmazonBedrockDocumentEmbedder:
                 body=json.dumps(body), modelId=self.model, accept="*/*", contentType="application/json"
             )
         except ClientError as exception:
-            msg = (
-                f"Could not connect to Amazon Bedrock model {self.model}. "
-                f"Make sure your AWS environment is configured correctly, "
-                f"the model is available in the configured AWS region, and you have access."
-            )
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
         response_body = json.loads(response.get("body").read())
@@ -217,11 +213,7 @@ class AmazonBedrockDocumentEmbedder:
                 body=json.dumps(body), modelId=self.model, accept="*/*", contentType="application/json"
             )
         except ClientError as exception:
-            msg = (
-                f"Could not connect to Amazon Bedrock model {self.model}. "
-                f"Make sure your AWS environment is configured correctly, "
-                f"the model is available in the configured AWS region, and you have access."
-            )
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
         response_body = json.loads(response.get("body").read())
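
The reworded message now embeds the underlying `ClientError` instead of a generic connection hint, so callers can log the real cause (for example an `AccessDeniedException` or a missing model-access grant). A sketch of how a caller might handle it, assuming the public import paths from the RECORD above (the model ID is illustrative):

```python
from haystack import Document
from haystack_integrations.common.amazon_bedrock.errors import (
    AmazonBedrockInferenceError,
)
from haystack_integrations.components.embedders.amazon_bedrock import (
    AmazonBedrockDocumentEmbedder,
)

embedder = AmazonBedrockDocumentEmbedder(model="amazon.titan-embed-text-v2:0")
try:
    result = embedder.run(documents=[Document(content="Hello, Bedrock!")])
except AmazonBedrockInferenceError as err:
    # the message now ends with the original botocore error text
    print(f"Embedding failed: {err}")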

haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py
@@ -63,7 +63,7 @@ class AmazonBedrockTextEmbedder:
         aws_region_name: Optional[Secret] = Secret.from_env_var("AWS_DEFAULT_REGION", strict=False),  # noqa: B008
         aws_profile_name: Optional[Secret] = Secret.from_env_var("AWS_PROFILE", strict=False),  # noqa: B008
         boto3_config: Optional[Dict[str, Any]] = None,
-        **kwargs,
+        **kwargs: Any,
     ) -> None:
         """
         Initializes the AmazonBedrockTextEmbedder with the provided parameters. The parameters are passed to the
@@ -160,11 +160,7 @@ class AmazonBedrockTextEmbedder:
                 body=json.dumps(body), modelId=self.model, accept="*/*", contentType="application/json"
             )
         except ClientError as exception:
-            msg = (
-                f"Could not connect to Amazon Bedrock model {self.model}. "
-                f"Make sure your AWS environment is configured correctly, "
-                f"the model is available in the configured AWS region, and you have access."
-            )
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
         response_body = json.loads(response.get("body").read())

haystack_integrations/components/generators/amazon_bedrock/adapters.py
@@ -1,8 +1,9 @@
 import json
 from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any, Dict, List, Optional
 
-from haystack.dataclasses import StreamingChunk
+from botocore.eventstream import EventStream
+from haystack.dataclasses import StreamingChunk, SyncStreamingCallbackT
 
 
 class BedrockModelAdapter(ABC):
@@ -23,7 +24,7 @@ class BedrockModelAdapter(ABC):
         self.max_length = max_length
 
     @abstractmethod
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """
         Prepares the body for the Amazon Bedrock request.
         Each subclass should implement this method to prepare the request body for the specific model.
@@ -44,7 +45,7 @@ class BedrockModelAdapter(ABC):
         responses = [completion.lstrip() for completion in completions]
         return responses
 
-    def get_stream_responses(self, stream, streaming_callback: Callable[[StreamingChunk], None]) -> List[str]:
+    def get_stream_responses(self, stream: EventStream, streaming_callback: SyncStreamingCallbackT) -> List[str]:
        """
        Extracts the responses from the Amazon Bedrock streaming response.
 
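
`SyncStreamingCallbackT` is Haystack's alias for a synchronous `StreamingChunk` callback, so existing callbacks keep working; the change only tightens what was previously an untyped signature. A conforming callback is just a plain function:

```python
from haystack.dataclasses import StreamingChunk


def print_streaming_chunk(chunk: StreamingChunk) -> None:
    # called once per streamed event; prints tokens as they arrive
    print(chunk.content, end="", flush=True)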
@@ -122,7 +123,7 @@ class AnthropicClaudeAdapter(BedrockModelAdapter):
         self.thinking_tag_end = f"</{self.thinking_tag}>\n\n" if self.thinking_tag else "\n\n"
         super().__init__(model_kwargs, max_length)
 
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """
         Prepares the body for the Claude model
 
@@ -210,7 +211,7 @@ class MistralAdapter(BedrockModelAdapter):
     Adapter for the Mistral models.
     """
 
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """
         Prepares the body for the Mistral model
 
@@ -260,7 +261,7 @@ class CohereCommandAdapter(BedrockModelAdapter):
     Adapter for the Cohere Command model.
     """
 
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """
         Prepares the body for the Command model
 
@@ -372,7 +373,7 @@ class AI21LabsJurassic2Adapter(BedrockModelAdapter):
     Model adapter for AI21 Labs' Jurassic 2 models.
     """
 
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """Prepares the body for the Jurassic 2 model.
 
         :param prompt: The prompt to be sent to the model.
@@ -410,7 +411,7 @@ class AmazonTitanAdapter(BedrockModelAdapter):
     Adapter for Amazon's Titan models.
     """
 
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """
         Prepares the body for the Titan model
 
@@ -456,7 +457,7 @@ class MetaLlamaAdapter(BedrockModelAdapter):
     Adapter for Meta's Llama2 models.
     """
 
-    def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
+    def prepare_body(self, prompt: str, **inference_kwargs: Any) -> Dict[str, Any]:
         """
         Prepares the body for the Llama2 model
 
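
Every adapter now types its catch-all `**inference_kwargs` as `Any`, which keeps `prepare_body` overridable without mypy complaints while leaving the calling pattern uniform across providers. A sketch of that pattern (the constructor keyword names are assumed from the base class shown above, and the inference kwarg is illustrative; the exact body keys are provider-specific):

```python
from haystack_integrations.components.generators.amazon_bedrock.adapters import (
    MistralAdapter,
)

# signature assumed from BedrockModelAdapter.__init__ shown in this diff
adapter = MistralAdapter(model_kwargs={}, max_length=256)
body = adapter.prepare_body(prompt="What is Amazon Bedrock?", temperature=0.1)
print(body)  # a Dict[str, Any] ready to be json.dumps()-ed into invoke_model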

haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py
@@ -197,12 +197,6 @@ class AmazonBedrockChatGenerator:
         def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
             return secret.resolve_value() if secret else None
 
-        aws_access_key_id = resolve_secret(aws_access_key_id)
-        aws_secret_access_key = resolve_secret(aws_secret_access_key)
-        aws_session_token = resolve_secret(aws_session_token)
-        aws_region_name = resolve_secret(aws_region_name)
-        aws_profile_name = resolve_secret(aws_profile_name)
-
         config: Optional[Config] = None
         if self.boto3_config:
             config = Config(**self.boto3_config)
@@ -210,11 +204,11 @@ class AmazonBedrockChatGenerator:
         try:
             # sync session
             session = get_aws_session(
-                aws_access_key_id=aws_access_key_id,
-                aws_secret_access_key=aws_secret_access_key,
-                aws_session_token=aws_session_token,
-                aws_region_name=aws_region_name,
-                aws_profile_name=aws_profile_name,
+                aws_access_key_id=resolve_secret(aws_access_key_id),
+                aws_secret_access_key=resolve_secret(aws_secret_access_key),
+                aws_session_token=resolve_secret(aws_session_token),
+                aws_region_name=resolve_secret(aws_region_name),
+                aws_profile_name=resolve_secret(aws_profile_name),
             )
             self.client = session.client("bedrock-runtime", config=config)
 
@@ -227,7 +221,7 @@ class AmazonBedrockChatGenerator:
 
         self.generation_kwargs = generation_kwargs or {}
         self.stop_words = stop_words or []
-        self.async_session = None
+        self.async_session: Optional[aioboto3.Session] = None
 
     def _get_async_session(self) -> aioboto3.Session:
         """
@@ -427,12 +421,13 @@ class AmazonBedrockChatGenerator:
                 if not response_stream:
                     msg = "No stream found in the response."
                     raise AmazonBedrockInferenceError(msg)
-                replies = _parse_streaming_response(response_stream, callback, self.model)
+                # the type of streaming callback is checked in _prepare_request_params, but mypy doesn't know
+                replies = _parse_streaming_response(response_stream, callback, self.model)  # type: ignore[arg-type]
             else:
                 response = self.client.converse(**params)
                 replies = _parse_completion_response(response, self.model)
         except ClientError as exception:
-            msg = f"Could not generate inference for Amazon Bedrock model {self.model} due: {exception}"
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
         return {"replies": replies}
@@ -483,13 +478,14 @@ class AmazonBedrockChatGenerator:
                 if not response_stream:
                     msg = "No stream found in the response."
                     raise AmazonBedrockInferenceError(msg)
-                replies = await _parse_streaming_response_async(response_stream, callback, self.model)
+                # the type of streaming callback is checked in _prepare_request_params, but mypy doesn't know
+                replies = await _parse_streaming_response_async(response_stream, callback, self.model)  # type: ignore[arg-type]
             else:
                 response = await async_client.converse(**params)
                 replies = _parse_completion_response(response, self.model)
 
         except ClientError as exception:
-            msg = f"Could not generate inference for Amazon Bedrock model {self.model} due: {exception}"
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
         return {"replies": replies}

haystack_integrations/components/generators/amazon_bedrock/chat/utils.py
@@ -46,7 +46,7 @@ def _format_tool_call_message(tool_call_message: ChatMessage) -> Dict[str, Any]:
     :returns:
         Dictionary representing the tool call message in Bedrock's expected format
     """
-    content = []
+    content: List[Dict[str, Any]] = []
     # Tool call message can contain text
     if tool_call_message.text:
         content.append({"text": tool_call_message.text})
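
The explicit annotation is needed because mypy cannot infer an element type for a bare `[]`, and the list goes on to mix differently shaped blocks: a tool-call message can hold a text block alongside tool-use blocks. A sketch of the resulting shape; the nested `toolUse` layout follows AWS's Converse API, but the IDs and names here are invented:

```python
from typing import Any, Dict, List

# shape produced for a ChatMessage that contains text plus one tool call
content: List[Dict[str, Any]] = [
    {"text": "Let me look that up."},
    {
        "toolUse": {
            "toolUseId": "tooluse_example123",  # illustrative ID
            "name": "get_weather",              # hypothetical tool name
            "input": {"city": "Berlin"},
        }
    },
]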

haystack_integrations/components/generators/amazon_bedrock/generator.py
@@ -107,7 +107,7 @@ class AmazonBedrockGenerator:
         streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
         boto3_config: Optional[Dict[str, Any]] = None,
         model_family: Optional[MODEL_FAMILIES] = None,
-        **kwargs,
+        **kwargs: Any,
     ) -> None:
         """
         Create a new `AmazonBedrockGenerator` instance.
@@ -234,11 +234,7 @@ class AmazonBedrockGenerator:
             metadata = response.get("ResponseMetadata", {})
 
         except ClientError as exception:
-            msg = (
-                f"Could not connect to Amazon Bedrock model {self.model}. "
-                f"Make sure your AWS environment is configured correctly, "
-                f"the model is available in the configured AWS region, and you have access."
-            )
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model} due to:\n{exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
         return {"replies": replies, "meta": metadata}
@@ -269,7 +265,11 @@ class AmazonBedrockGenerator:
 
         msg = (
             f"Could not auto-detect model family of {model}. "
-            f"`model_family` parameter must be one of {get_args(cls.MODEL_FAMILIES)}."
+            f"`model_family` parameter must be one of {get_args(cls.MODEL_FAMILIES)}. "
+            f"We highly recommend using the `AmazonBedrockChatGenerator` instead. "
+            f"It has additional support for Amazon's Nova Canvas, Nova Lite, "
+            f"Nova Pro, DeepSeek's DeepSeek-R1, and more models. "
+            f"See https://haystack.deepset.ai/integrations/amazon-bedrock"
         )
         raise AmazonBedrockConfigurationError(msg)
 
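
Auto-detection matches the model ID against known family prefixes, so custom or provisioned-throughput ARNs typically need `model_family` set explicitly. A sketch, assuming `"anthropic.claude"` is one of the `MODEL_FAMILIES` literals (the diff itself doesn't list them, and the ARN is fabricated):

```python
from haystack_integrations.components.generators.amazon_bedrock import (
    AmazonBedrockGenerator,
)

generator = AmazonBedrockGenerator(
    # a provisioned-throughput ARN gives auto-detection nothing to match on
    model="arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abc123",
    model_family="anthropic.claude",  # assumed literal; the error message lists valid values
)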

haystack_integrations/components/rankers/amazon_bedrock/ranker.py
@@ -256,15 +256,15 @@ class AmazonBedrockRanker:
                 sorted_docs.append(doc)
 
             return {"documents": sorted_docs}
-        except ClientError as exception:
-            msg = f"Could not inference Amazon Bedrock model {self.model_name} due to: {exception}"
+        except ClientError as client_error:
+            msg = f"Could not perform inference for Amazon Bedrock model {self.model_name} due to:\n{client_error}"
+            raise AmazonBedrockInferenceError(msg) from client_error
+        except KeyError as key_error:
+            msg = f"Unexpected response format from Amazon Bedrock: {key_error}"
+            raise AmazonBedrockInferenceError(msg) from key_error
+        except Exception as exception:
+            msg = f"Error during Amazon Bedrock API call: {exception}"
             raise AmazonBedrockInferenceError(msg) from exception
-        except KeyError as e:
-            msg = f"Unexpected response format from Amazon Bedrock: {e!s}"
-            raise AmazonBedrockInferenceError(msg) from e
-        except Exception as e:
-            msg = f"Error during Amazon Bedrock API call: {e!s}"
-            raise AmazonBedrockInferenceError(msg) from e
 
 
 class BedrockRanker(AmazonBedrockRanker):
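
All three branches chain with `raise ... from ...`, so the original exception stays reachable via `__cause__`. A sketch of a caller inspecting it (the model ID is illustrative, and the constructor's `model` parameter name and the `run(query=..., documents=...)` signature are assumed from this package's other components):

```python
from botocore.exceptions import ClientError
from haystack import Document
from haystack_integrations.common.amazon_bedrock.errors import (
    AmazonBedrockInferenceError,
)
from haystack_integrations.components.rankers.amazon_bedrock import (
    AmazonBedrockRanker,
)

ranker = AmazonBedrockRanker(model="cohere.rerank-v3-5:0")
docs = [Document(content="Haystack is an LLM framework.")]
try:
    ranker.run(query="What is Haystack?", documents=docs)
except AmazonBedrockInferenceError as err:
    if isinstance(err.__cause__, ClientError):
        # the AWS error code survives the re-raise chain
        print(err.__cause__.response["Error"]["Code"])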