rasa-pro 3.13.0.dev1__py3-none-any.whl → 3.13.0.dev2__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

This version of rasa-pro has been flagged as potentially problematic.

Files changed (58)
  1. rasa/core/actions/action.py +0 -6
  2. rasa/core/channels/voice_ready/audiocodes.py +52 -17
  3. rasa/core/channels/voice_stream/audiocodes.py +53 -9
  4. rasa/core/channels/voice_stream/genesys.py +146 -16
  5. rasa/core/information_retrieval/faiss.py +6 -1
  6. rasa/core/information_retrieval/information_retrieval.py +40 -2
  7. rasa/core/information_retrieval/milvus.py +7 -2
  8. rasa/core/information_retrieval/qdrant.py +7 -2
  9. rasa/core/policies/enterprise_search_policy.py +61 -301
  10. rasa/core/policies/flows/flow_executor.py +3 -38
  11. rasa/core/processor.py +27 -6
  12. rasa/core/utils.py +53 -0
  13. rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -59
  14. rasa/dialogue_understanding/commands/start_flow_command.py +0 -41
  15. rasa/dialogue_understanding/generator/command_generator.py +67 -0
  16. rasa/dialogue_understanding/generator/command_parser.py +1 -1
  17. rasa/dialogue_understanding/generator/llm_based_command_generator.py +4 -13
  18. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2 +1 -1
  19. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +20 -1
  20. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +7 -0
  21. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +0 -61
  22. rasa/dialogue_understanding/processor/command_processor.py +7 -65
  23. rasa/dialogue_understanding/stack/utils.py +0 -38
  24. rasa/dialogue_understanding_test/io.py +13 -8
  25. rasa/document_retrieval/__init__.py +0 -0
  26. rasa/document_retrieval/constants.py +32 -0
  27. rasa/document_retrieval/document_post_processor.py +351 -0
  28. rasa/document_retrieval/document_post_processor_prompt_template.jinja2 +0 -0
  29. rasa/document_retrieval/document_retriever.py +333 -0
  30. rasa/document_retrieval/knowledge_base_connectors/__init__.py +0 -0
  31. rasa/document_retrieval/knowledge_base_connectors/api_connector.py +39 -0
  32. rasa/document_retrieval/knowledge_base_connectors/knowledge_base_connector.py +34 -0
  33. rasa/document_retrieval/knowledge_base_connectors/vector_store_connector.py +226 -0
  34. rasa/document_retrieval/query_rewriter.py +234 -0
  35. rasa/document_retrieval/query_rewriter_prompt_template.jinja2 +8 -0
  36. rasa/engine/recipes/default_components.py +2 -0
  37. rasa/shared/core/constants.py +0 -8
  38. rasa/shared/core/domain.py +12 -3
  39. rasa/shared/core/flows/flow.py +0 -17
  40. rasa/shared/core/flows/flows_yaml_schema.json +3 -38
  41. rasa/shared/core/flows/steps/collect.py +5 -18
  42. rasa/shared/core/flows/utils.py +1 -16
  43. rasa/shared/core/slot_mappings.py +11 -5
  44. rasa/shared/nlu/constants.py +0 -1
  45. rasa/shared/utils/common.py +11 -1
  46. rasa/shared/utils/llm.py +1 -1
  47. rasa/tracing/instrumentation/attribute_extractors.py +10 -7
  48. rasa/tracing/instrumentation/instrumentation.py +12 -12
  49. rasa/validator.py +1 -123
  50. rasa/version.py +1 -1
  51. {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev2.dist-info}/METADATA +1 -1
  52. {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev2.dist-info}/RECORD +55 -47
  53. rasa/core/actions/action_handle_digressions.py +0 -164
  54. rasa/dialogue_understanding/commands/handle_digressions_command.py +0 -144
  55. rasa/dialogue_understanding/patterns/handle_digressions.py +0 -81
  56. {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev2.dist-info}/NOTICE +0 -0
  57. {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev2.dist-info}/WHEEL +0 -0
  58. {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev2.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/stack/utils.py
@@ -4,9 +4,6 @@ from typing import List, Optional, Set, Tuple
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
 )
-from rasa.dialogue_understanding.patterns.continue_interrupted import (
-    ContinueInterruptedPatternFlowStackFrame,
-)
 from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding.stack.frames import (
     BaseFlowStackFrame,
@@ -221,38 +218,3 @@ def get_collect_steps_excluding_ask_before_filling_for_active_flow(
         for step in active_flow.get_collect_steps()
         if not step.ask_before_filling
     )
-
-
-def remove_digression_from_stack(stack: DialogueStack, flow_id: str) -> DialogueStack:
-    """Remove a specific flow frame from the stack and other frames that reference it.
-
-    The main use-case is to prevent duplicate digressions from being added to the stack.
-
-    Args:
-        stack: The dialogue stack.
-        flow_id: The flow to remove.
-
-    Returns:
-        The updated dialogue stack.
-    """
-    updated_stack = stack.copy()
-    original_frames = updated_stack.frames[:]
-    found_digression_index = -1
-    for index, frame in enumerate(original_frames):
-        if isinstance(frame, BaseFlowStackFrame) and frame.flow_id == flow_id:
-            updated_stack.frames.pop(index)
-            found_digression_index = index
-
-        # we also need to remove the `ContinueInterruptedPatternFlowStackFrame`
-        elif (
-            isinstance(frame, ContinueInterruptedPatternFlowStackFrame)
-            and frame.previous_flow_name == flow_id
-            and found_digression_index + 1 == index
-        ):
-            # we know that this frame is always added after the digressing flow frame
-            # that was blocked previously by action_block_digressions,
-            # so this check would occur after the digressing flow was popped.
-            # Therefore, we need to update the index dynamically before popping.
-            updated_stack.frames.pop(index - 1)
-
-    return updated_stack
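
For context on the removal above: remove_digression_from_stack relied on a subtle piece of index bookkeeping. It iterated over a snapshot of the frames while popping from the live list, so once the digressing flow frame was popped, every later frame in the live list sat one position to the left of its snapshot index, which is why the paired ContinueInterruptedPatternFlowStackFrame was popped at index - 1. A minimal sketch of that mechanic on a plain list, with toy strings standing in for Rasa stack frames:

# Toy version of the removed helper's loop. Not Rasa code; the strings
# stand in for BaseFlowStackFrame / ContinueInterruptedPatternFlowStackFrame.
frames = ["frame_a", "digression", "continue_interrupted", "frame_b"]
snapshot = frames[:]  # iterate over a copy while mutating the original

found = -1
for index, frame in enumerate(snapshot):
    if frame == "digression":
        frames.pop(index)  # later frames in `frames` now lag `snapshot` by one
        found = index
    elif frame == "continue_interrupted" and found + 1 == index:
        frames.pop(index - 1)  # compensate for the shift caused by the pop above

assert frames == ["frame_a", "frame_b"]
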
rasa/dialogue_understanding_test/io.py
@@ -329,14 +329,19 @@ def print_prompt(step: FailedTestStep) -> None:
         rich.print(
             f"[bold] prompt name [/bold]: {prompt_data[KEY_PROMPT_NAME]}"
         )
-        rich.print(
-            f"[bold] prompt tokens [/bold]: {prompt_data[KEY_PROMPT_TOKENS]}"
-        )
-        rich.print(
-            f"[bold] completion tokens[/bold]: "
-            f"{prompt_data[KEY_COMPLETION_TOKENS]}"
-        )
-        rich.print(f"[bold] latency [/bold]: {prompt_data[KEY_LATENCY]}")
+        if KEY_PROMPT_TOKENS in prompt_data:
+            rich.print(
+                f"[bold] prompt tokens [/bold]: {prompt_data[KEY_PROMPT_TOKENS]}"  # noqa: E501
+            )
+        if KEY_COMPLETION_TOKENS in prompt_data:
+            rich.print(
+                f"[bold] completion tokens[/bold]: "
+                f"{prompt_data[KEY_COMPLETION_TOKENS]}"
+            )
+        if KEY_LATENCY in prompt_data:
+            rich.print(
+                f"[bold] latency [/bold]: {prompt_data[KEY_LATENCY]}"
+            )
         if KEY_SYSTEM_PROMPT in prompt_data:
             rich.print(
                 f"[bold] system prompt [/bold]: "
rasa/document_retrieval/constants.py
@@ -0,0 +1,32 @@
+# keys for storing information in the message object
+from rasa.shared.constants import OPENAI_PROVIDER, PROVIDER_CONFIG_KEY
+from rasa.shared.utils.llm import DEFAULT_OPENAI_EMBEDDING_MODEL_NAME
+
+SEARCH_QUERY_KEY = "search_query"
+RETRIEVED_DOCUMENTS_KEY = "retrieved_documents"
+POST_PROCESSED_DOCUMENTS_KEY = "post_processed_documents"
+
+# config keys
+THRESHOLD_CONFIG_KEY = "threshold"
+K_CONFIG_KEY = "k"
+VECTOR_STORE_TYPE_CONFIG_KEY = "type"
+VECTOR_STORE_CONFIG_KEY = "vector_store"
+CONNECTOR_CONFIG_KEY = "connector"
+SOURCE_PROPERTY = "source"
+POST_PROCESSING_CONFIG_KEY = "post_processing"
+QUERY_REWRITING_CONFIG_KEY = "query_rewriting"
+USE_LLM_PROPERTY = "use_generative_llm"
+
+# default values
+DEFAULT_THRESHOLD = 0.0
+DEFAULT_K = 3
+DEFAULT_VECTOR_STORE_TYPE = "faiss"
+DEFAULT_EMBEDDINGS_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    "model": DEFAULT_OPENAI_EMBEDDING_MODEL_NAME,
+}
+DEFAULT_VECTOR_STORE = {
+    VECTOR_STORE_TYPE_CONFIG_KEY: DEFAULT_VECTOR_STORE_TYPE,
+    SOURCE_PROPERTY: "./docs",
+    THRESHOLD_CONFIG_KEY: DEFAULT_THRESHOLD,
+}
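
The new constants centralize the retrieval defaults: an OpenAI embeddings config and a FAISS vector store rooted at ./docs with a 0.0 score threshold. A sketch of how a user-supplied vector_store section might be merged over DEFAULT_VECTOR_STORE; the dict-unpacking merge is an assumption for illustration, and only the imported constants come from this module:

# Illustrative merge of user config over the module's defaults. The merge
# semantics are assumed, not taken from the package.
from rasa.document_retrieval.constants import (
    DEFAULT_VECTOR_STORE,
    K_CONFIG_KEY,
    THRESHOLD_CONFIG_KEY,
)

user_config = {THRESHOLD_CONFIG_KEY: 0.25, K_CONFIG_KEY: 5}

# later keys win, so user values override the defaults
effective = {**DEFAULT_VECTOR_STORE, **user_config}
# {'type': 'faiss', 'source': './docs', 'threshold': 0.25, 'k': 5}
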
rasa/document_retrieval/document_post_processor.py
@@ -0,0 +1,351 @@
+import asyncio
+import importlib.resources
+from enum import Enum
+from functools import lru_cache
+from typing import Any, Dict, Optional, Text
+
+import structlog
+from jinja2 import Template
+
+import rasa.shared.utils.io
+from rasa.core.information_retrieval import SearchResult, SearchResultList
+from rasa.dialogue_understanding.utils import add_prompt_to_message_parse_data
+from rasa.engine.storage.resource import Resource
+from rasa.engine.storage.storage import ModelStorage
+from rasa.shared.constants import (
+    LLM_CONFIG_KEY,
+    MODEL_CONFIG_KEY,
+    OPENAI_PROVIDER,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+    PROVIDER_CONFIG_KEY,
+    TEXT,
+    TIMEOUT_CONFIG_KEY,
+)
+from rasa.shared.core.trackers import DialogueStateTracker
+from rasa.shared.exceptions import FileIOException, ProviderClientAPIException
+from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_client import LLMClient
+from rasa.shared.providers.llm.llm_response import LLMResponse
+from rasa.shared.utils.health_check.health_check import perform_llm_health_check
+from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
+from rasa.shared.utils.llm import (
+    DEFAULT_OPENAI_GENERATE_MODEL_NAME,
+    DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    get_prompt_template,
+    llm_factory,
+    resolve_model_client_config,
+    tracker_as_readable_transcript,
+)
+
+TYPE_CONFIG_KEY = "type"
+EMBEDDING_MODEL_KEY = "embedding_model_name"
+
+DEFAULT_EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
+DOCUMENT_POST_PROCESSOR_PROMPT_FILE_NAME = (
+    "document_post_processor_prompt_template.jina2"
+)
+DEFAULT_LLM_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    MODEL_CONFIG_KEY: DEFAULT_OPENAI_GENERATE_MODEL_NAME,
+    "temperature": 0.3,
+    "max_tokens": DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    TIMEOUT_CONFIG_KEY: 5,
+}
+DEFAULT_DOCUMENT_POST_PROCESSOR_PROMPT_TEMPLATE = importlib.resources.read_text(
+    "rasa.document_retrieval",
+    "document_post_processor_prompt_template.jinja2",
+)
+
+structlogger = structlog.get_logger()
+
+
+class PostProcessingType(Enum):
+    PLAIN = "PLAIN"
+    AGGREGATED_SUMMARY = "AGGREGATED_SUMMARY"
+    INDIVIDUAL_SUMMARIES = "INDIVIDUAL_SUMMARIES"
+    BINARY_LLM = "BINARY_LLM"
+    BINARY_EMBEDDING_MODEL = "BINARY_EMBEDDING_MODEL"
+    FINAL_ANSWER = "FINAL_ANSWER"
+
+    def __str__(self) -> str:
+        return self.value
+
+
+class DocumentPostProcessor(LLMHealthCheckMixin):
+    @classmethod
+    def get_default_config(cls) -> Dict[str, Any]:
+        """The default config for the document post processor."""
+        return {
+            TYPE_CONFIG_KEY: PostProcessingType.PLAIN,
+            LLM_CONFIG_KEY: DEFAULT_LLM_CONFIG,
+            PROMPT_TEMPLATE_CONFIG_KEY: DEFAULT_DOCUMENT_POST_PROCESSOR_PROMPT_TEMPLATE,
+        }
+
+    def __init__(
+        self,
+        config: Dict[str, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        prompt_template: Optional[str] = None,
+    ):
+        self.config = {**self.get_default_config(), **config}
+        self.config[LLM_CONFIG_KEY] = resolve_model_client_config(
+            self.config.get(LLM_CONFIG_KEY), DocumentPostProcessor.__name__
+        )
+        self.prompt_template = prompt_template or get_prompt_template(
+            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
+            DEFAULT_DOCUMENT_POST_PROCESSOR_PROMPT_TEMPLATE,
+        )
+
+        self._model_storage = model_storage
+        self._resource = resource
+
+    @classmethod
+    def load(
+        cls,
+        config: Dict[Text, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        **kwargs: Any,
+    ) -> "DocumentPostProcessor":
+        """Load document post processor."""
+        llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
+        perform_llm_health_check(
+            llm_config,
+            DEFAULT_LLM_CONFIG,
+            "document_post_processor.load",
+            DocumentPostProcessor.__name__,
+        )
+
+        # load prompt template
+        prompt_template = None
+        try:
+            with model_storage.read_from(resource) as path:
+                prompt_template = rasa.shared.utils.io.read_file(
+                    path / DOCUMENT_POST_PROCESSOR_PROMPT_FILE_NAME
+                )
+        except (FileNotFoundError, FileIOException) as e:
+            structlogger.warning(
+                "document_post_processor.load_prompt_template.failed",
+                error=e,
+                resource=resource.name,
+            )
+
+        return DocumentPostProcessor(config, model_storage, resource, prompt_template)
+
+    def persist(self) -> None:
+        with self._model_storage.write_to(self._resource) as path:
+            rasa.shared.utils.io.write_text_file(
+                self.prompt_template, path / DOCUMENT_POST_PROCESSOR_PROMPT_FILE_NAME
+            )
+
+    async def process_documents(
+        self,
+        message: Message,
+        search_query: str,
+        documents: SearchResultList,
+        tracker: DialogueStateTracker,
+    ) -> SearchResultList:
+        processing_type = self.config.get(TYPE_CONFIG_KEY)
+
+        llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
+
+        if processing_type == PostProcessingType.AGGREGATED_SUMMARY.value:
+            return await self._create_aggregated_summary(documents, llm)
+
+        elif processing_type == PostProcessingType.INDIVIDUAL_SUMMARIES.value:
+            return await self._create_individual_summaries(documents, llm)
+
+        elif processing_type == PostProcessingType.BINARY_LLM.value:
+            return await self._check_documents_relevance_to_user_query(
+                message, search_query, documents, llm, tracker
+            )
+
+        elif processing_type == PostProcessingType.BINARY_EMBEDDING_MODEL.value:
+            return (
+                await self._check_documents_relevance_to_user_query_using_modern_bert(
+                    search_query,
+                    documents,
+                )
+            )

+        elif processing_type == PostProcessingType.PLAIN.value:
+            return documents
+
+        elif processing_type == PostProcessingType.FINAL_ANSWER.value:
+            return await self._generate_final_answer(message, documents, llm, tracker)
+
+        else:
+            raise ValueError(f"Invalid postprocessing type: {processing_type}")
+
+    @lru_cache
+    def compile_template(self, template: str) -> Template:
+        """Compile the prompt template.
+
+        Compiling the template is an expensive operation,
+        so we cache the result.
+        """
+        return Template(template)
+
+    def render_prompt(self, data: Dict) -> str:
+        # TODO: This should probably be fixed, as the default prompt template is empty
+        # If there are default templates for summarization they should be created,
+        # and ideally be initialized based on the processing type.
+        prompt_template = get_prompt_template(
+            self.config.get(PROMPT_TEMPLATE_CONFIG_KEY),
+            DEFAULT_DOCUMENT_POST_PROCESSOR_PROMPT_TEMPLATE,
+        )
+        return self.compile_template(prompt_template).render(**data)
+
+    async def _invoke_llm(self, prompt: str, llm: LLMClient) -> Optional[LLMResponse]:
+        try:
+            return await llm.acompletion(prompt)
+        except Exception as e:
+            # unfortunately, langchain does not wrap LLM exceptions which means
+            # we have to catch all exceptions here
+            structlogger.error("document_post_processor.llm.error", error=e)
+            raise ProviderClientAPIException(
+                message="LLM call exception", original_exception=e
+            )
+
+    async def _create_aggregated_summary(
+        self, documents: SearchResultList, llm: LLMClient
+    ) -> SearchResultList:
+        prompt = self.render_prompt(
+            {"retrieval_results": [doc.text for doc in documents.results]}
+        )
+
+        llm_response = await self._invoke_llm(prompt, llm)
+        aggregated_summary = LLMResponse.ensure_llm_response(llm_response)
+
+        aggregated_result = SearchResult(
+            text=aggregated_summary.choices[0], metadata={}
+        )
+
+        return SearchResultList(results=[aggregated_result], metadata={})
+
+    async def _create_individual_summaries(
+        self, documents: SearchResultList, llm: LLMClient
+    ) -> SearchResultList:
+        tasks = []
+
+        for doc in documents.results:
+            prompt_template = self.render_prompt({"retrieval_results": doc.text})
+            prompt = prompt_template.format(doc.text, llm)
+            tasks.append(asyncio.create_task(self._invoke_llm(prompt, llm)))
+
+        llm_responses = await asyncio.gather(*tasks)
+        summarized_contents = [
+            LLMResponse.ensure_llm_response(summary) for summary in llm_responses
+        ]
+
+        results = [
+            SearchResult(text=summary.choices[0], metadata={})
+            for summary in summarized_contents
+        ]
+        return SearchResultList(results=results, metadata={})
+
+    async def _check_documents_relevance_to_user_query(
+        self,
+        message: Message,
+        search_query: str,
+        documents: SearchResultList,
+        llm: LLMClient,
+        tracker: DialogueStateTracker,
+    ) -> SearchResultList:
+        # If no documents were retrieved from the vector store, the
+        # documents seem to be irrelevant. Respond with "NO".
+        if not documents.results:
+            return SearchResultList(
+                results=[
+                    SearchResult(
+                        text="NO",
+                        metadata={},
+                    )
+                ],
+                metadata={},
+            )
+
+        prompt_data = {
+            "search_query": search_query,
+            "relevant_documents": documents,
+            "conversation": tracker_as_readable_transcript(tracker, max_turns=10),
+        }
+
+        prompt = self.render_prompt(prompt_data)
+
+        llm_response = await self._invoke_llm(prompt, llm)
+        documents_relevance = LLMResponse.ensure_llm_response(llm_response)
+
+        aggregated_result = SearchResult(
+            text=documents_relevance.choices[0],
+            metadata={},
+        )
+
+        add_prompt_to_message_parse_data(
+            message=message,
+            component_name=self.__class__.__name__,
+            prompt_name="document_post_processor",
+            user_prompt=prompt,
+            llm_response=llm_response,
+        )
+        structlogger.debug(
+            "document_post_processor._check_documents_relevance_to_user_query",
+            prompt=prompt,
+            documents=[d.text for d in documents.results],
+            llm_response=llm_response,
+        )
+
+        return SearchResultList(results=[aggregated_result], metadata={})
+
+    async def _check_documents_relevance_to_user_query_using_modern_bert(
+        self,
+        search_query: str,
+        documents: SearchResultList,
+        threshold: float = 0.5,
+    ) -> SearchResultList:
+        import torch
+        from sentence_transformers import SentenceTransformer
+
+        self.model = SentenceTransformer(
+            self.config.get(EMBEDDING_MODEL_KEY, DEFAULT_EMBEDDING_MODEL_NAME),
+            trust_remote_code=True,
+        )
+
+        query_embeddings = self.model.encode(["search_query: " + search_query])
+        doc_embeddings = self.model.encode(
+            ["search_document: " + doc.text for doc in documents.results]
+        )
+
+        similarities = self.model.similarity(query_embeddings, doc_embeddings)
+
+        is_any_doc_relevant = torch.any(similarities > threshold).item()
+
+        return SearchResultList(
+            results=[
+                SearchResult(text="YES" if is_any_doc_relevant else "NO", metadata={})
+            ],
+            metadata={},
+        )
+
+    async def _generate_final_answer(
+        self,
+        message: Message,
+        documents: SearchResultList,
+        llm: LLMClient,
+        tracker: DialogueStateTracker,
+    ) -> SearchResultList:
+        input = {
+            "current_conversation": tracker_as_readable_transcript(tracker),
+            "relevant_documents": documents,
+            "user_message": message.get(TEXT),
+        }
+        prompt = self.render_prompt(input)
+        response = await self._invoke_llm(prompt, llm)
+        response_text = response.choices[0] if response else ""
+        search_result = SearchResult(text=response_text, metadata={})
+        results = SearchResultList(
+            results=[search_result],
+            metadata={},
+        )
+        return results
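
A hypothetical usage sketch for the new DocumentPostProcessor. Note that process_documents dispatches on string values (each branch compares against PostProcessingType.<MEMBER>.value), so the type entry is given here as the string "AGGREGATED_SUMMARY". The message, documents, tracker, model_storage, and resource arguments are assumed to be supplied by the surrounding component, and a configured LLM provider is required at call time:

from rasa.document_retrieval.document_post_processor import DocumentPostProcessor

async def summarize(message, documents, tracker, model_storage, resource):
    # config values are illustrative; defaults are merged in by __init__
    processor = DocumentPostProcessor(
        config={"type": "AGGREGATED_SUMMARY"},
        model_storage=model_storage,
        resource=resource,
    )
    # returns a SearchResultList containing a single aggregated summary
    return await processor.process_documents(
        message=message,
        search_query="example search query",
        documents=documents,
        tracker=tracker,
    )
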