haystack-experimental 0.15.2__py3-none-any.whl → 0.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. haystack_experimental/chat_message_stores/in_memory.py +3 -3
  2. haystack_experimental/chat_message_stores/types.py +2 -2
  3. haystack_experimental/components/agents/agent.py +264 -124
  4. haystack_experimental/components/agents/human_in_the_loop/dataclasses.py +6 -6
  5. haystack_experimental/components/agents/human_in_the_loop/errors.py +1 -5
  6. haystack_experimental/components/agents/human_in_the_loop/strategies.py +10 -10
  7. haystack_experimental/components/agents/human_in_the_loop/types.py +5 -5
  8. haystack_experimental/components/agents/human_in_the_loop/user_interfaces.py +2 -2
  9. haystack_experimental/components/generators/chat/openai.py +11 -11
  10. haystack_experimental/components/preprocessors/__init__.py +1 -3
  11. haystack_experimental/components/retrievers/chat_message_retriever.py +4 -4
  12. haystack_experimental/components/retrievers/types/protocol.py +3 -3
  13. haystack_experimental/components/summarizers/llm_summarizer.py +7 -7
  14. haystack_experimental/core/pipeline/breakpoint.py +6 -6
  15. haystack_experimental/dataclasses/breakpoints.py +2 -2
  16. haystack_experimental/memory_stores/__init__.py +7 -0
  17. haystack_experimental/memory_stores/mem0/__init__.py +16 -0
  18. haystack_experimental/memory_stores/mem0/memory_store.py +323 -0
  19. haystack_experimental/memory_stores/types/__init__.py +7 -0
  20. haystack_experimental/memory_stores/types/protocol.py +94 -0
  21. haystack_experimental/utils/hallucination_risk_calculator/dataclasses.py +9 -9
  22. haystack_experimental/utils/hallucination_risk_calculator/openai_planner.py +4 -4
  23. haystack_experimental/utils/hallucination_risk_calculator/skeletonization.py +5 -5
  24. {haystack_experimental-0.15.2.dist-info → haystack_experimental-0.17.0.dist-info}/METADATA +8 -11
  25. {haystack_experimental-0.15.2.dist-info → haystack_experimental-0.17.0.dist-info}/RECORD +28 -24
  26. haystack_experimental/components/preprocessors/embedding_based_document_splitter.py +0 -430
  27. {haystack_experimental-0.15.2.dist-info → haystack_experimental-0.17.0.dist-info}/WHEEL +0 -0
  28. {haystack_experimental-0.15.2.dist-info → haystack_experimental-0.17.0.dist-info}/licenses/LICENSE +0 -0
  29. {haystack_experimental-0.15.2.dist-info → haystack_experimental-0.17.0.dist-info}/licenses/LICENSE-MIT.txt +0 -0
@@ -0,0 +1,323 @@
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from typing import Any
6
+
7
+ from haystack import default_from_dict, default_to_dict, logging
8
+ from haystack.dataclasses.chat_message import ChatMessage
9
+ from haystack.lazy_imports import LazyImport
10
+ from haystack.utils import Secret, deserialize_secrets_inplace
11
+
12
+ with LazyImport(message="Run 'pip install mem0ai'") as mem0_import:
13
+ from mem0 import MemoryClient # pylint: disable=import-error
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class Mem0MemoryStore:
19
+ """
20
+ A memory store implementation using Mem0 as the backend.
21
+ """
22
+
23
+ def __init__(
24
+ self,
25
+ *,
26
+ api_key: Secret = Secret.from_env_var("MEM0_API_KEY"),
27
+ ):
28
+ """
29
+ Initialize the Mem0 memory store.
30
+
31
+ :param api_key: The Mem0 API key. You can also set it using `MEM0_API_KEY` environment variable.
32
+ """
33
+
34
+ mem0_import.check()
35
+ self.api_key = api_key
36
+ self.client = MemoryClient(
37
+ api_key=self.api_key.resolve_value(),
38
+ )
39
+
40
+ def to_dict(self) -> dict[str, Any]:
41
+ """Serialize the store configuration to a dictionary."""
42
+ return default_to_dict(
43
+ self,
44
+ api_key=self.api_key.to_dict(),
45
+ )
46
+
47
+ @classmethod
48
+ def from_dict(cls, data: dict[str, Any]) -> "Mem0MemoryStore":
49
+ """Deserialize the store from a dictionary."""
50
+ deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
51
+
52
+ return default_from_dict(cls, data)
53
+
54
+ def add_memories(
55
+ self,
56
+ *,
57
+ messages: list[ChatMessage],
58
+ infer: bool = True,
59
+ user_id: str | None = None,
60
+ run_id: str | None = None,
61
+ agent_id: str | None = None,
62
+ async_mode: bool = False,
63
+ **kwargs: Any,
64
+ ) -> list[dict[str, Any]]:
65
+ """
66
+ Add ChatMessage memories to Mem0.
67
+
68
+ :param messages: List of ChatMessage objects with memory metadata
69
+ :param infer: Whether to infer facts from the messages. If False, the whole message will
70
+ be added as a memory.
71
+ :param user_id: The user ID to store and retrieve memories from the memory store.
72
+ :param run_id: The run ID to store and retrieve memories from the memory store.
73
+ :param agent_id: The agent ID to store and retrieve memories from the memory store.
74
+ If you want Mem0 to store chat messages from the assistant, you need to set the agent_id.
75
+ :param async_mode: Whether to add memories asynchronously.
76
+ If True, the method will return immediately and the memories will be added in the background.
77
+ :param kwargs: Additional keyword arguments to pass to the Mem0 client.add method.
78
+ Note: ChatMessage.meta in the list of messages will be ignored because Mem0 doesn't allow
79
+ passing metadata for each message in the list. You can pass metadata for the whole memory
80
+ by passing the `metadata` keyword argument to the method.
81
+ :returns: List of objects with the memory_id and the memory
82
+ """
83
+ added_ids = []
84
+ ids = self._get_ids(user_id, run_id, agent_id)
85
+ instructions = """
86
+ Store all memories from the user and suggestions from the assistant.
87
+ """
88
+
89
+ self.client.project.update(custom_instructions=instructions)
90
+ mem0_messages = []
91
+ for message in messages:
92
+ if not message.text:
93
+ continue
94
+ # we pass the role of the message along with its content in the Mem0 message payload
95
+ mem0_messages.append({"content": message.text, "role": message.role.value})
96
+ try:
97
+ status = self.client.add(messages=mem0_messages, infer=infer, **ids, async_mode=async_mode, **kwargs)
98
+ if status:
99
+ for result in status["results"]:
100
+ memory_id = {"memory_id": result.get("id"), "memory": result["memory"]}
101
+ added_ids.append(memory_id)
102
+ except Exception as e:
103
+ raise RuntimeError(f"Failed to add memory message: {e}") from e
104
+ return added_ids
105
+
106
+ def search_memories(
107
+ self,
108
+ *,
109
+ query: str | None = None,
110
+ filters: dict[str, Any] | None = None,
111
+ top_k: int = 5,
112
+ user_id: str | None = None,
113
+ run_id: str | None = None,
114
+ agent_id: str | None = None,
115
+ include_memory_metadata: bool = False,
116
+ **kwargs: Any,
117
+ ) -> list[ChatMessage]:
118
+ """
119
+ Search for memories in Mem0.
120
+
121
+ If filters are not provided, at least one of user_id, run_id, or agent_id must be set.
122
+ If filters are provided, the search will be scoped to the provided filters and the other ids will be ignored.
123
+ :param query: Text query to search for. If not provided, all memories will be returned.
124
+ :param filters: Haystack filters to apply on search. For more details on Haystack filters, see https://docs.haystack.deepset.ai/docs/metadata-filtering
125
+ :param top_k: Maximum number of results to return
126
+ :param user_id: The user ID to store and retrieve memories from the memory store.
127
+ :param run_id: The run ID to store and retrieve memories from the memory store.
128
+ :param agent_id: The agent ID to store and retrieve memories from the memory store.
129
+ If you want Mem0 to store chat messages from the assistant, you need to set the agent_id.
130
+ :param include_memory_metadata: Whether to include the mem0 related metadata for the
131
+ retrieved memory in the ChatMessage.
132
+ If True, the metadata will include the mem0 related metadata i.e. memory_id, score, etc.
133
+ in the `retrieved_memory_metadata` key.
134
+ If False, the `ChatMessage.meta` will only contain the user defined metadata.
135
+ :param kwargs: Additional keyword arguments to pass to the Mem0 client.
136
+ If query is passed, the kwargs will be passed to the Mem0 client.search method.
137
+ If query is not passed, the kwargs will be passed to the Mem0 client.get_all method.
138
+ :returns: List of ChatMessage memories matching the criteria
139
+ """
140
+ # Prepare filters for Mem0
141
+
142
+ if filters:
143
+ mem0_filters = self.normalize_filters(filters)
144
+ else:
145
+ ids = self._get_ids(user_id, run_id, agent_id)
146
+ if len(ids) == 1:
147
+ mem0_filters = dict(ids)
148
+ else:
149
+ mem0_filters = {"AND": [{key: value} for key, value in ids.items()]}
150
+ try:
151
+ if not query:
152
+ memories = self.client.get_all(filters=mem0_filters, **kwargs)
153
+ else:
154
+ memories = self.client.search(
155
+ query=query,
156
+ top_k=top_k,
157
+ filters=mem0_filters,
158
+ **kwargs,
159
+ )
160
+ messages = []
161
+ for memory in memories["results"]:
162
+ meta = memory["metadata"].copy() if memory["metadata"] else {}
163
+ # we also include the mem0 related metadata i.e. memory_id, score, etc.
164
+ if include_memory_metadata:
165
+ meta["retrieved_memory_metadata"] = memory.copy()
166
+ meta["retrieved_memory_metadata"].pop("memory")
167
+ messages.append(ChatMessage.from_system(text=memory["memory"], meta=meta))
168
+ return messages
169
+
170
+ except Exception as e:
171
+ raise RuntimeError(f"Failed to search memories: {e}") from e
172
+
173
+ def search_memories_as_single_message(
174
+ self,
175
+ *,
176
+ query: str | None = None,
177
+ filters: dict[str, Any] | None = None,
178
+ top_k: int = 5,
179
+ user_id: str | None = None,
180
+ run_id: str | None = None,
181
+ agent_id: str | None = None,
182
+ **kwargs: Any,
183
+ ) -> ChatMessage:
184
+ """
185
+ Search for memories in Mem0 and return a single ChatMessage object.
186
+
187
+ If filters are not provided, at least one of user_id, run_id, or agent_id must be set.
188
+ If filters are provided, the search will be scoped to the provided filters and the other ids will be ignored.
189
+ :param query: Text query to search for. If not provided, all memories will be returned.
190
+ :param filters: Haystack filters to apply on search. For more details on Haystack filters, see https://docs.haystack.deepset.ai/docs/metadata-filtering
191
+ :param top_k: Maximum number of results to return
192
+ :param user_id: The user ID to store and retrieve memories from the memory store.
193
+ :param run_id: The run ID to store and retrieve memories from the memory store.
194
+ :param agent_id: The agent ID to store and retrieve memories from the memory store.
195
+ If you want Mem0 to store chat messages from the assistant, you need to set the agent_id.
196
+ :param kwargs: Additional keyword arguments to pass to the Mem0 client.
197
+ If query is passed, the kwargs will be passed to the Mem0 client.search method.
198
+ If query is not passed, the kwargs will be passed to the Mem0 client.get_all method.
199
+ :returns: A single ChatMessage object with the memories matching the criteria
200
+ """
201
+ # Prepare filters for Mem0
202
+ if filters:
203
+ mem0_filters = self.normalize_filters(filters)
204
+ else:
205
+ ids = self._get_ids(user_id, run_id, agent_id)
206
+ if len(ids) == 1:
207
+ mem0_filters = dict(ids)
208
+ else:
209
+ mem0_filters = {"AND": [{key: value} for key, value in ids.items()]}
210
+
211
+ try:
212
+ if not query:
213
+ memories = self.client.get_all(filters=mem0_filters, **kwargs)
214
+ else:
215
+ memories = self.client.search(
216
+ query=query,
217
+ top_k=top_k,
218
+ filters=mem0_filters,
219
+ **kwargs,
220
+ )
221
+
222
+ # we combine the memories into a single string
223
+ combined_memory = "\n".join(
224
+ f"- MEMORY #{idx + 1}: {memory['memory']}" for idx, memory in enumerate(memories["results"])
225
+ )
226
+
227
+ return ChatMessage.from_system(text=combined_memory)
228
+
229
+ except Exception as e:
230
+ raise RuntimeError(f"Failed to search memories: {e}") from e
231
+
232
+ def delete_all_memories(
233
+ self,
234
+ *,
235
+ user_id: str | None = None,
236
+ run_id: str | None = None,
237
+ agent_id: str | None = None,
238
+ **kwargs: Any,
239
+ ) -> None:
240
+ """
241
+ Delete memory records from Mem0.
242
+
243
+ At least one of user_id, run_id, or agent_id must be set.
244
+ :param user_id: The user ID to delete memories from.
245
+ :param run_id: The run ID to delete memories from.
246
+ :param agent_id: The agent ID to delete memories from.
247
+ :param kwargs: Additional keyword arguments to pass to the Mem0 client.delete_all method.
248
+ """
249
+ ids = self._get_ids(user_id, run_id, agent_id)
250
+
251
+ try:
252
+ self.client.delete_all(**ids, **kwargs)
253
+ logger.info("All memories deleted successfully for scope {ids}", ids=ids)
254
+ except Exception as e:
255
+ raise RuntimeError(f"Failed to delete memories with scope {ids}: {e}") from e
256
+
257
+ def delete_memory(self, memory_id: str, **kwargs: Any) -> None:
258
+ """
259
+ Delete memory from Mem0.
260
+
261
+ :param memory_id: The ID of the memory to delete.
262
+ :param kwargs: Additional keyword arguments to pass to the Mem0 client.delete method.
263
+ """
264
+ try:
265
+ self.client.delete(memory_id=memory_id, **kwargs)
266
+ logger.info("Memory deleted successfully for memory_id {memory_id}", memory_id=memory_id)
267
+ except Exception as e:
268
+ raise RuntimeError(f"Failed to delete memory {memory_id}: {e}") from e
269
+
270
+ def _get_ids(
271
+ self, user_id: str | None = None, run_id: str | None = None, agent_id: str | None = None
272
+ ) -> dict[str, Any]:
273
+ """
274
+ Check that at least one of the ids is set.
275
+
276
+ Return the set ids as a dictionary.
277
+ """
278
+ if not user_id and not run_id and not agent_id:
279
+ raise ValueError("At least one of user_id, run_id, or agent_id must be set")
280
+ ids = {
281
+ "user_id": user_id,
282
+ "run_id": run_id,
283
+ "agent_id": agent_id,
284
+ }
285
+ return {key: value for key, value in ids.items() if value is not None}
286
+
287
+ @staticmethod
288
+ def normalize_filters(filters: dict[str, Any]) -> dict[str, Any]:
289
+ """
290
+ Convert Haystack filters to Mem0 filters.
291
+ """
292
+
293
+ def convert_comparison(condition: dict[str, Any]) -> dict[str, Any]:
294
+ operator_map = {
295
+ "==": lambda field, value: {field: value},
296
+ "!=": lambda field, value: {field: {"ne": value}},
297
+ ">": lambda field, value: {field: {"gt": value}},
298
+ ">=": lambda field, value: {field: {"gte": value}},
299
+ "<": lambda field, value: {field: {"lt": value}},
300
+ "<=": lambda field, value: {field: {"lte": value}},
301
+ "in": lambda field, value: {field: {"in": value if isinstance(value, list) else [value]}},
302
+ "not in": lambda field, value: {field: {"ne": value}},
303
+ }
304
+ field = condition["field"]
305
+ value = condition["value"]
306
+ operator = condition["operator"]
307
+ if operator not in operator_map:
308
+ raise ValueError(f"Unsupported operator {operator}")
309
+ return operator_map[operator](field, value)
310
+
311
+ def convert_logic(node: dict[str, Any]) -> dict[str, Any]:
312
+ operator = node["operator"].upper()
313
+ if operator not in ("AND", "OR", "NOT"):
314
+ raise ValueError(f"Unsupported logic operator {operator}")
315
+ mem0_conditions = [convert_node(cond) for cond in node["conditions"]]
316
+ return {operator: mem0_conditions}
317
+
318
+ def convert_node(node: dict[str, Any]) -> dict[str, Any]:
319
+ if "field" in node:
320
+ return convert_comparison(node)
321
+ return convert_logic(node)
322
+
323
+ return convert_node(filters)
@@ -0,0 +1,7 @@
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from .protocol import MemoryStore
6
+
7
+ __all__ = ["MemoryStore"]
@@ -0,0 +1,94 @@
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from typing import Any, Protocol
6
+
7
+ from haystack.dataclasses import ChatMessage
8
+
9
+ # Ellipsis are needed for the type checker, it's safe to disable module-wide
10
+ # pylint: disable=unnecessary-ellipsis
11
+
12
+
13
+ class MemoryStore(Protocol):
14
+ """
15
+ Stores ChatMessage-based memories to be used by agents and components.
16
+
17
+ Implementations typically persist user- and agent-specific memories and
18
+ support adding, searching, and deleting memories.
19
+ """
20
+
21
+ def to_dict(self) -> dict[str, Any]:
22
+ """
23
+ Serializes this memory store to a dictionary.
24
+ """
25
+ ...
26
+
27
+ @classmethod
28
+ def from_dict(cls, data: dict[str, Any]) -> "MemoryStore":
29
+ """
30
+ Deserializes the memory store from a dictionary.
31
+ """
32
+ ...
33
+
34
+ def add_memories(
35
+ self,
36
+ *,
37
+ messages: list[ChatMessage],
38
+ user_id: str | None = None,
39
+ **kwargs: Any,
40
+ ) -> None:
41
+ """
42
+ Add ChatMessage memories to the store.
43
+
44
+ :param messages: List of ChatMessage objects with memory metadata.
45
+ :param user_id: User ID to scope memories.
46
+ :param kwargs: Additional keyword arguments to pass to the add method.
47
+ """
48
+ ...
49
+
50
+ def search_memories(
51
+ self,
52
+ *,
53
+ query: str | None = None,
54
+ filters: dict[str, Any] | None = None,
55
+ top_k: int = 5,
56
+ user_id: str | None = None,
57
+ **kwargs: Any,
58
+ ) -> list[ChatMessage]:
59
+ """
60
+ Search for memories in the store.
61
+
62
+ :param query: Text query to search for. If not provided, all memories may be returned.
63
+ :param filters: Haystack filters to apply on search.
64
+ :param top_k: Maximum number of results to return.
65
+ :param user_id: User ID to scope memories.
66
+ :param kwargs: Additional keyword arguments to pass to the search method.
67
+
68
+ :returns: List of ChatMessage memories matching the criteria.
69
+ """
70
+ ...
71
+
72
+ def delete_all_memories(
73
+ self,
74
+ *,
75
+ user_id: str | None = None,
76
+ **kwargs: Any,
77
+ ) -> None:
78
+ """
79
+ Delete all memories in the given scope.
80
+
81
+ When an implementation accepts multiple optional ids, at least one of them must be set.
82
+ :param user_id: User ID to delete memories.
83
+ :param kwargs: Additional keyword arguments to pass to the delete method.
84
+ """
85
+ ...
86
+
87
+ def delete_memory(self, memory_id: str, **kwargs: Any) -> None:
88
+ """
89
+ Delete a single memory by its ID.
90
+
91
+ :param memory_id: The ID of the memory to delete.
92
+ :param kwargs: Additional keyword arguments to pass to the delete method.
93
+ """
94
+ ...
@@ -3,7 +3,7 @@
3
3
  # Modified by deepset, 2025.
4
4
  # Licensed under the Apache License, Version 2.0 (see LICENSE-APACHE).
5
5
  from dataclasses import dataclass
6
- from typing import Literal, Optional
6
+ from typing import Literal
7
7
 
8
8
 
9
9
  @dataclass
@@ -51,13 +51,13 @@ class OpenAIItem:
51
51
  prompt: str
52
52
  n_samples: int = 3
53
53
  m: int = 6
54
- seeds: Optional[list[int]] = None
55
- fields_to_erase: Optional[list[str]] = None # evidence-based mode
54
+ seeds: list[int] | None = None
55
+ fields_to_erase: list[str] | None = None # evidence-based mode
56
56
  mask_token: str = "[…]"
57
57
  skeleton_policy: Literal["auto", "evidence_erase", "closed_book"] = "auto"
58
- attempted: Optional[bool] = None
59
- answered_correctly: Optional[bool] = None
60
- meta: Optional[dict] = None
58
+ attempted: bool | None = None
59
+ answered_correctly: bool | None = None
60
+ meta: dict | None = None
61
61
 
62
62
 
63
63
  @dataclass
@@ -71,6 +71,6 @@ class ItemMetrics:
71
71
  roh_bound: float
72
72
  decision_answer: bool
73
73
  rationale: str
74
- attempted: Optional[bool] = None
75
- answered_correctly: Optional[bool] = None
76
- meta: Optional[dict] = None
74
+ attempted: bool | None = None
75
+ answered_correctly: bool | None = None
76
+ meta: dict | None = None
@@ -5,7 +5,7 @@
5
5
  import json
6
6
  import re
7
7
  import time
8
- from typing import Any, Optional, Sequence, Union
8
+ from typing import Any, Sequence
9
9
 
10
10
  from haystack.components.generators.chat.openai import OpenAIChatGenerator
11
11
  from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
@@ -24,7 +24,7 @@ _DECISION_ALLOWED = ("answer", "refuse")
24
24
 
25
25
  def _decision_messages_closed_book(
26
26
  user_prompt: str,
27
- ) -> list[Union[ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam]]:
27
+ ) -> list[ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam]:
28
28
  system = (
29
29
  "You are a safety-critical QA assistant operating **without external evidence**. "
30
30
  "Decide whether to answer based on your pretrained knowledge and the prompt alone. "
@@ -36,7 +36,7 @@ def _decision_messages_closed_book(
36
36
 
37
37
  def _decision_messages_evidence(
38
38
  user_prompt: str,
39
- ) -> list[Union[ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam]]:
39
+ ) -> list[ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam]:
40
40
  system = (
41
41
  "You are a safety-critical QA assistant. Decide whether to answer based on the "
42
42
  "provided prompt and its internal evidence/context. If evidence is insufficient or "
@@ -160,7 +160,7 @@ class OpenAIPlanner:
160
160
  backend: OpenAIChatGenerator,
161
161
  temperature: float = 0.5,
162
162
  max_tokens_decision: int = 8,
163
- q_floor: Optional[float] = None,
163
+ q_floor: float | None = None,
164
164
  ) -> None:
165
165
  """
166
166
  Initialize OpenAIPlanner with given parameters.
@@ -4,7 +4,7 @@
4
4
  # Licensed under the Apache License, Version 2.0 (see LICENSE-APACHE).
5
5
  import random
6
6
  import re
7
- from typing import Optional, Sequence
7
+ from typing import Sequence
8
8
 
9
9
  _ERASE_DEFAULT_FIELDS = ["Evidence", "Context", "Citations", "References", "Notes", "Passage", "Snippet"]
10
10
 
@@ -14,7 +14,7 @@ _NUMBER = re.compile(r"\b\d+(?:\.\d+)?\b")
14
14
  _QUOTED = re.compile(r"([“\"'])(.+?)\1")
15
15
 
16
16
 
17
- def _skeletonize_prompt(text: str, fields_to_erase: Optional[Sequence[str]] = None, mask_token: str = "[…]") -> str:
17
+ def _skeletonize_prompt(text: str, fields_to_erase: Sequence[str] | None = None, mask_token: str = "[…]") -> str:
18
18
  fields = list(fields_to_erase) if fields_to_erase else list(_ERASE_DEFAULT_FIELDS)
19
19
  out = text
20
20
  for field in fields:
@@ -105,7 +105,7 @@ def _make_skeletons_evidence_erase(
105
105
  text: str,
106
106
  m: int,
107
107
  seeds: Sequence[int],
108
- fields_to_erase: Optional[Sequence[str]] = None,
108
+ fields_to_erase: Sequence[str] | None = None,
109
109
  mask_token: str = "[…]",
110
110
  preserve_roles: bool = True,
111
111
  ) -> list[str]:
@@ -130,8 +130,8 @@ def _make_skeleton_ensemble_auto(
130
130
  *,
131
131
  text: str,
132
132
  m: int = 6,
133
- seeds: Optional[Sequence[int]] = None,
134
- fields_to_erase: Optional[Sequence[str]] = None,
133
+ seeds: Sequence[int] | None = None,
134
+ fields_to_erase: Sequence[str] | None = None,
135
135
  mask_token: str = "[…]",
136
136
  skeleton_policy: str = "auto",
137
137
  ) -> list[str]:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: haystack-experimental
3
- Version: 0.15.2
3
+ Version: 0.17.0
4
4
  Summary: Experimental components and features for the Haystack LLM framework.
5
5
  Project-URL: CI: GitHub, https://github.com/deepset-ai/haystack-experimental/actions
6
6
  Project-URL: GitHub: issues, https://github.com/deepset-ai/haystack-experimental/issues
@@ -17,15 +17,13 @@ Classifier: License :: OSI Approved :: Apache Software License
17
17
  Classifier: Operating System :: OS Independent
18
18
  Classifier: Programming Language :: Python
19
19
  Classifier: Programming Language :: Python :: 3
20
- Classifier: Programming Language :: Python :: 3.9
21
20
  Classifier: Programming Language :: Python :: 3.10
22
21
  Classifier: Programming Language :: Python :: 3.11
23
22
  Classifier: Programming Language :: Python :: 3.12
24
23
  Classifier: Programming Language :: Python :: 3.13
25
24
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
26
- Requires-Python: >=3.9
25
+ Requires-Python: >=3.10
27
26
  Requires-Dist: haystack-ai
28
- Requires-Dist: lazy-imports<1.2.0
29
27
  Requires-Dist: rich
30
28
  Description-Content-Type: text/markdown
31
29
 
@@ -74,19 +72,17 @@ that includes it. Once it reaches the end of its lifespan, the experiment will b
74
72
 
75
73
  | Name | Type | Expected End Date | Dependencies | Cookbook | Discussion |
76
74
  |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------|-------------------|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
77
- | [`EmbeddingBasedDocumentSplitter`][8] | EmbeddingBasedDocumentSplitter | August 2025 | None | None | [Discuss][7] |
78
75
  | [`OpenAIChatGenerator`][9] | Chat Generator Component | November 2025 | None | <a href="https://colab.research.google.com/github/deepset-ai/haystack-cookbook/blob/main/notebooks/hallucination_score_calculator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> | [Discuss][10] |
79
76
  | [`MarkdownHeaderLevelInferrer`][15] | Preprocessor | January 2025 | None | None | [Discuss][16] |
80
77
  | [`Agent`][17]; [Confirmation Policies][18]; [ConfirmationUIs][19]; [ConfirmationStrategies][20]; [`ConfirmationUIResult` and `ToolExecutionDecision`][21] [HITLBreakpointException][22] | Human in the Loop | December 2025 | rich | None | [Discuss][23] |
81
78
  | [`LLMSummarizer`][24] | Document Summarizer | January 2025 | None | None | [Discuss][25] |
82
79
  | [`InMemoryChatMessageStore`][1]; [`ChatMessageRetriever`][2]; [`ChatMessageWriter`][3] | Chat Message Store, Retriever, Writer | February 2025 | None | <a href="https://colab.research.google.com/github/deepset-ai/haystack-cookbook/blob/main/notebooks/conversational_rag_using_memory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> | [Discuss][4] |
80
+ | [`Mem0MemoryStore`][26] | MemoryStore | February 2025 | mem0ai | None | -- |
83
81
 
84
82
  [1]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/chat_message_stores/in_memory.py
85
83
  [2]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/components/retrievers/chat_message_retriever.py
86
84
  [3]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/components/writers/chat_message_writer.py
87
85
  [4]: https://github.com/deepset-ai/haystack-experimental/discussions/75
88
- [7]: https://github.com/deepset-ai/haystack-experimental/discussions/356
89
- [8]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/components/preprocessors/embedding_based_document_splitter.py
90
86
  [9]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/components/generators/chat/openai.py
91
87
  [10]: https://github.com/deepset-ai/haystack-experimental/discussions/361
92
88
  [15]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/components/preprocessors/md_header_level_inferrer.py
@@ -100,6 +96,7 @@ that includes it. Once it reaches the end of its lifespan, the experiment will b
100
96
  [23]: https://github.com/deepset-ai/haystack-experimental/discussions/381
101
97
  [24]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/components/summarizers/llm_summarizer.py
102
98
  [25]: https://github.com/deepset-ai/haystack-experimental/discussions/382
99
+ [26]: https://github.com/deepset-ai/haystack-experimental/blob/main/haystack_experimental/memory_stores/mem0/memory_store.py
103
100
 
104
101
  ### Adopted experiments
105
102
  | Name | Type | Final release |
@@ -112,10 +109,10 @@ that includes it. Once it reaches the end of its lifespan, the experiment will b
112
109
  | `SuperComponent` | Simplify Pipeline development | 0.8.0 |
113
110
  | `Pipeline` | Pipeline breakpoints for debugging | 0.12.0 |
114
111
  | `ImageContent`; Image Converters; multimodal support in `OpenAIChatGenerator` and `AmazonBedrockChatGenerator`; `ChatPromptBuilder` refactoring; `SentenceTransformersDocumentImageEmbedder`; `LLMDocumentContentExtractor`; new `Routers` | Multimodality | 0.12.0 |
115
- | `QueryExpander` | Query Expansion Component | 0.14.3 |
116
- | `MultiQueryEmbeddingRetriever` | MultiQueryEmbeddingRetriever | 0.14.3 |
117
- | `MultiQueryTextRetriever` | MultiQueryTextRetriever | 0.14.3 |
118
-
112
+ | `QueryExpander` | Query Expansion Component | 0.14.3 |
113
+ | `MultiQueryEmbeddingRetriever` | MultiQueryEmbeddingRetriever | 0.14.3 |
114
+ | `MultiQueryTextRetriever` | MultiQueryTextRetriever | 0.14.3 |
115
+ | `EmbeddingBasedDocumentSplitter` | Document Splitting | 0.15.2 |
119
116
 
120
117
  ### Discontinued experiments
121
118