rasa-pro 3.11.3a1.dev4__py3-none-any.whl → 3.11.3a1.dev6__py3-none-any.whl

This diff compares the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.

Note: this version of rasa-pro has been flagged as potentially problematic.

rasa/core/actions/action.py CHANGED
@@ -17,6 +17,15 @@ from jsonschema import Draft202012Validator

 import rasa.core
 import rasa.shared.utils.io
+from rasa.core.actions.constants import (
+    TEXT,
+    ELEMENTS,
+    QUICK_REPLIES,
+    BUTTONS,
+    ATTACHMENT,
+    IMAGE,
+    CUSTOM,
+)
 from rasa.core.actions.custom_action_executor import (
     CustomActionExecutor,
     NoEndpointCustomActionExecutor,
@@ -93,6 +102,7 @@ from rasa.shared.nlu.constants import (
     INTENT_NAME_KEY,
     INTENT_RANKING_KEY,
 )
+from rasa.shared.nlu.constants import PROMPTS
 from rasa.shared.utils.io import raise_warning
 from rasa.shared.utils.schemas.events import EVENTS_SCHEMA
 from rasa.utils.endpoints import ClientResponseError, EndpointConfig
@@ -255,18 +265,19 @@ def action_for_name_or_text(
 def create_bot_utterance(message: Dict[Text, Any]) -> BotUttered:
     """Create BotUttered event from message."""
     bot_message = BotUttered(
-        text=message.pop("text", None),
+        text=message.pop(TEXT, None),
         data={
-            "elements": message.pop("elements", None),
-            "quick_replies": message.pop("quick_replies", None),
-            "buttons": message.pop("buttons", None),
+            ELEMENTS: message.pop(ELEMENTS, None),
+            QUICK_REPLIES: message.pop(QUICK_REPLIES, None),
+            BUTTONS: message.pop(BUTTONS, None),
             # for legacy / compatibility reasons we need to set the image
             # to be the attachment if there is no other attachment (the
             # `.get` is intentional - no `pop` as we still need the image`
             # property to set it in the following line)
-            "attachment": message.pop("attachment", None) or message.get("image", None),
-            "image": message.pop("image", None),
-            "custom": message.pop("custom", None),
+            ATTACHMENT: message.pop(ATTACHMENT, None) or message.get(IMAGE, None),
+            IMAGE: message.pop(IMAGE, None),
+            CUSTOM: message.pop(CUSTOM, None),
+            PROMPTS: message.pop(PROMPTS, None),
        },
        metadata=message,
    )

rasa/core/actions/constants.py CHANGED
@@ -3,3 +3,11 @@ SELECTIVE_DOMAIN = "enable_selective_domain"

 SSL_CLIENT_CERT_FIELD = "ssl_client_cert"
 SSL_CLIENT_KEY_FIELD = "ssl_client_key"
+
+TEXT = "text"
+ELEMENTS = "elements"
+QUICK_REPLIES = "quick_replies"
+BUTTONS = "buttons"
+ATTACHMENT = "attachment"
+IMAGE = "image"
+CUSTOM = "custom"
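
Taken together, the two hunks above route all BotUttered payload keys through shared constants and add a new "prompts" entry. A minimal sketch of the resulting behavior (illustrative only; it assumes BotUttered keeps the data dict as passed):

    from rasa.core.actions.action import create_bot_utterance

    # "prompts" is popped out of the message dict and stored in the event's
    # structured data, next to buttons, images, and attachments
    event = create_bot_utterance(
        {"text": "Hi!", "prompts": {"ContextualResponseRephraser": []}}
    )
    assert event.text == "Hi!"
    assert event.data["prompts"] == {"ContextualResponseRephraser": []}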

rasa/core/channels/socketio.py CHANGED
@@ -218,7 +218,7 @@ class SocketIOInput(InputChannel):
             return None
         return SocketIOOutput(self.sio, self.bot_message_evt)

-    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str):
+    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str) -> None:
         if self.sio:
             await self.sio.emit("tracker", tracker_dump, room=sender_id)


rasa/core/nlg/contextual_response_rephraser.py CHANGED
@@ -2,6 +2,7 @@ from typing import Any, Dict, Optional, Text

 import structlog
 from jinja2 import Template
+
 from rasa import telemetry
 from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator
 from rasa.core.nlg.summarize import summarize_conversation
@@ -18,6 +19,12 @@ from rasa.shared.constants import (
 from rasa.shared.core.domain import KEY_RESPONSES_TEXT, Domain
 from rasa.shared.core.events import BotUttered, UserUttered
 from rasa.shared.core.trackers import DialogueStateTracker
+from rasa.shared.nlu.constants import (
+    PROMPTS,
+    KEY_USER_PROMPT,
+    KEY_LLM_RESPONSE_METADATA,
+)
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     DEFAULT_OPENAI_GENERATE_MODEL_NAME,
@@ -124,6 +131,48 @@ class ContextualResponseRephraser(
             ContextualResponseRephraser.__name__,
         )

+    @classmethod
+    def _add_prompt_and_llm_metadata_to_response(
+        cls,
+        response: Dict[str, Any],
+        prompt_name: str,
+        user_prompt: str,
+        llm_response: Optional["LLMResponse"] = None,
+    ) -> Dict[str, Any]:
+        """Stores the prompt and LLMResponse metadata to response.
+
+        Args:
+            response: The response to add the prompt and LLMResponse metadata to.
+            prompt_name: A name identifying prompt usage.
+            user_prompt: The user prompt that was sent to the LLM.
+            llm_response: The response object from the LLM (None if no response).
+        """
+        from rasa.dialogue_understanding.utils import record_commands_and_prompts
+
+        if not record_commands_and_prompts:
+            return response
+
+        prompt_data: Dict[Text, Any] = {
+            KEY_USER_PROMPT: user_prompt,
+        }
+
+        if llm_response is not None:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
+        else:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = None
+
+        prompt_tuple = (prompt_name, prompt_data)
+
+        component_name = cls.__name__
+        component_prompts = response.get(PROMPTS, {})
+        if component_name in component_prompts:
+            component_prompts[component_name].append(prompt_tuple)
+        else:
+            component_prompts[component_name] = [prompt_tuple]
+
+        response[PROMPTS] = component_prompts
+        return response
+
     def _last_message_if_human(self, tracker: DialogueStateTracker) -> Optional[str]:
         """Returns the latest message from the tracker.

@@ -142,20 +191,21 @@ class ContextualResponseRephraser(
                 return None
         return None

-    async def _generate_llm_response(self, prompt: str) -> Optional[str]:
-        """Use LLM to generate a response.
+    async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
+        """
+        Use LLM to generate a response, returning an LLMResponse object
+        containing both the generated text (choices) and metadata.

         Args:
-            prompt: the prompt to send to the LLM
+            prompt: The prompt to send to the LLM.

         Returns:
-            generated text
+            An LLMResponse object if successful, otherwise None.
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)

         try:
-            llm_response = await llm.acompletion(prompt)
-            return llm_response.choices[0]
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -255,11 +305,21 @@ class ContextualResponseRephraser(
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
             llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )
-        if not (updated_text := await self._generate_llm_response(prompt)):
-            # If the LLM fails to generate a response, we
-            # return the original response.
+        llm_response = await self._generate_llm_response(prompt)
+        llm_response = LLMResponse.ensure_llm_response(llm_response)
+
+        response = self._add_prompt_and_llm_metadata_to_response(
+            response=response,
+            prompt_name="rephrase_prompt",
+            user_prompt=prompt,
+            llm_response=llm_response,
+        )
+
+        if llm_response is None or not llm_response.choices:
+            # If the LLM fails to generate a response, return the original response.
             return response

+        updated_text = llm_response.choices[0]
         structlogger.debug(
             "nlg.rewrite.complete",
             response_text=response_text,
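
When prompt recording is enabled, `_add_prompt_and_llm_metadata_to_response` leaves the rephrased response with a structure along these lines (an illustrative sketch assembled from the hunk above; all values are placeholders):

    response = {
        "text": "Sure, I can help with that.",
        "prompts": {
            "ContextualResponseRephraser": [
                (
                    "rephrase_prompt",
                    {
                        "user_prompt": "<rendered rephrase prompt>",
                        # LLMResponse.to_dict(), or None if the call failed
                        "llm_response_metadata": {
                            "id": "chatcmpl-123",
                            "choices": ["Sure, I can help with that."],
                            "created": 1730000000,
                            "model": "gpt-4",
                            "usage": {
                                "prompt_tokens": 120,
                                "completion_tokens": 9,
                                "total_tokens": 129,
                            },
                            "additional_info": None,
                        },
                    },
                )
            ]
        },
    }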

rasa/core/policies/enterprise_search_policy.py CHANGED
@@ -2,6 +2,7 @@ import importlib.resources
 import json
 import re
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text
+
 import dotenv
 import structlog
 from jinja2 import Template
@@ -63,11 +64,17 @@ from rasa.shared.core.events import Event, UserUttered, BotUttered
 from rasa.shared.core.generator import TrackerWithCachedStates
 from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
 from rasa.shared.exceptions import RasaException, FileIOException
+from rasa.shared.nlu.constants import (
+    PROMPTS,
+    KEY_USER_PROMPT,
+    KEY_LLM_RESPONSE_METADATA,
+)
 from rasa.shared.nlu.training_data.training_data import TrainingData
 from rasa.shared.providers.embedding._langchain_embedding_client_adapter import (
     _LangchainEmbeddingClientAdapter,
 )
 from rasa.shared.providers.llm.llm_client import LLMClient
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.cli import print_error_and_exit
 from rasa.shared.utils.health_check.embeddings_health_check_mixin import (
     EmbeddingsHealthCheckMixin,
@@ -272,6 +279,50 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
         # Wrap the embedding client in the adapter
         return _LangchainEmbeddingClientAdapter(client)

+    @classmethod
+    def _add_prompt_and_llm_response_to_latest_message(
+        cls,
+        tracker: DialogueStateTracker,
+        prompt_name: str,
+        user_prompt: str,
+        llm_response: Optional[LLMResponse] = None,
+    ) -> None:
+        """Stores the prompt and LLMResponse metadata in the tracker.
+
+        Args:
+            tracker: The DialogueStateTracker containing the current conversation state.
+            prompt_name: A name identifying prompt usage.
+            user_prompt: The user prompt that was sent to the LLM.
+            llm_response: The response object from the LLM (None if no response).
+        """
+        from rasa.dialogue_understanding.utils import record_commands_and_prompts
+
+        if not record_commands_and_prompts:
+            return
+
+        if not tracker.latest_message:
+            return
+
+        parse_data = tracker.latest_message.parse_data
+        if parse_data is not None and PROMPTS not in parse_data:
+            parse_data[PROMPTS] = {}  # type: ignore[literal-required]
+
+        component_name = cls.__name__
+        component_prompts = parse_data[PROMPTS].get(component_name, [])  # type: ignore[literal-required]
+
+        prompt_data: Dict[Text, Any] = {
+            KEY_USER_PROMPT: user_prompt,
+        }
+
+        if llm_response is not None:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
+        else:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = None
+
+        prompt_tuple = (prompt_name, prompt_data)
+        component_prompts.append(prompt_tuple)
+        parse_data[PROMPTS][component_name] = component_prompts  # type: ignore[literal-required]
+
     def train(  # type: ignore[override]
         self,
         training_trackers: List[TrackerWithCachedStates],
@@ -498,13 +549,27 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po

         if self.use_llm:
             prompt = self._render_prompt(tracker, documents.results)
-            llm_answer = await self._generate_llm_answer(llm, prompt)
+            response = await self._generate_llm_answer(llm, prompt)
+            llm_response = LLMResponse.ensure_llm_response(response)
+
+            self._add_prompt_and_llm_response_to_latest_message(
+                tracker=tracker,
+                prompt_name="enterprise_search_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
+            )
+
+            if llm_response is None or not llm_response.choices:
+                logger.debug(f"{logger_key}.no_llm_response")
+                response = None
+            else:
+                llm_answer = llm_response.choices[0]

-            if self.citation_enabled:
-                llm_answer = self.post_process_citations(llm_answer)
+                if self.citation_enabled:
+                    llm_answer = self.post_process_citations(llm_answer)

-            logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
-            response = llm_answer
+                logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
+                response = llm_answer
         else:
             response = documents.results[0].metadata.get("answer", None)
             if not response:
@@ -516,7 +581,6 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy.predict_action_probabilities.no_llm",
                 search_results=documents,
             )
-
         if response is None:
             return self._create_prediction_internal_error(domain, tracker)

@@ -581,10 +645,18 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po

     async def _generate_llm_answer(
         self, llm: LLMClient, prompt: Text
-    ) -> Optional[Text]:
+    ) -> Optional[LLMResponse]:
+        """Fetches an LLM completion for the provided prompt.
+
+        Args:
+            llm: The LLM client used to get the completion.
+            prompt: The prompt text to send to the model.
+
+        Returns:
+            An LLMResponse object, or None if the call fails.
+        """
         try:
-            llm_response = await llm.acompletion(prompt)
-            llm_answer = llm_response.choices[0]
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -592,9 +664,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy._generate_llm_answer.llm_error",
                 error=e,
             )
-            llm_answer = None
-
-        return llm_answer
+            return None

     def _create_prediction(
         self,
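
The policy records the same kind of metadata, but on the latest user message rather than on the outgoing response. A hedged sketch of how the recorded prompts could be read back after a prediction (the helper below is hypothetical, not part of the release):

    from rasa.shared.core.trackers import DialogueStateTracker

    def dump_recorded_prompts(tracker: DialogueStateTracker) -> None:
        """Print any prompts recorded on the latest user message."""
        if not tracker.latest_message:
            return
        parse_data = tracker.latest_message.parse_data
        for component, entries in parse_data.get("prompts", {}).items():
            for prompt_name, prompt_data in entries:
                # e.g. "EnterpriseSearchPolicy enterprise_search_prompt"
                print(component, prompt_name)
                print(prompt_data["user_prompt"][:80])
                print(prompt_data["llm_response_metadata"])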

rasa/dialogue_understanding/constants.py ADDED
@@ -0,0 +1 @@
+RASA_RECORD_COMMANDS_AND_PROMPTS_ENV_VAR_NAME = "RASA_RECORD_COMMANDS_AND_PROMPTS"

rasa/dialogue_understanding/generator/command_generator.py CHANGED
@@ -26,8 +26,10 @@ from rasa.shared.nlu.constants import (
     PROMPTS,
     KEY_USER_PROMPT,
     KEY_SYSTEM_PROMPT,
+    KEY_LLM_RESPONSE_METADATA,
 )
 from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.llm import DEFAULT_MAX_USER_INPUT_CHARACTERS

 structlogger = structlog.get_logger()
@@ -399,6 +401,7 @@ class CommandGenerator:
         prompt_name: str,
         user_prompt: str,
         system_prompt: Optional[str] = None,
+        llm_response: Optional[LLMResponse] = None,
     ) -> None:
         """Add prompt to the message parse data.

@@ -411,14 +414,16 @@
                     "fill_slots_prompt",
                     {
                         "user_prompt": <prompt content>",
-                        "system_prompt": <prompt content>"
+                        "system_prompt": <prompt content>",
+                        "llm_response_metadata": <metadata dict from LLMResponse>
                     }
                 ),
                 (
                     "handle_flows_prompt",
                     {
                         "user_prompt": <prompt content>",
-                        "system_prompt": <prompt content>"
+                        "system_prompt": <prompt content>",
+                        "llm_response_metadata": <metadata dict from LLMResponse>
                     }
                 ),
             ],
@@ -427,7 +432,8 @@
                     "prompt_template",
                     {
                         "user_prompt": <prompt content>",
-                        "system_prompt": <prompt content>"
+                        "system_prompt": <prompt content>",
+                        "llm_response_metadata": <metadata dict from LLMResponse>
                     }
                 ),
             ]
@@ -440,13 +446,17 @@
         if not record_commands_and_prompts:
             return

-        prompt_tuple = (
-            prompt_name,
-            {
-                KEY_USER_PROMPT: user_prompt,
-                **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
-            },
-        )
+        prompt_data: Dict[Text, Any] = {
+            KEY_USER_PROMPT: user_prompt,
+            **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
+        }
+
+        if llm_response is not None:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
+        else:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = None
+
+        prompt_tuple = (prompt_name, prompt_data)

         if message.get(PROMPTS) is not None:
             prompts = message.get(PROMPTS)

rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -32,6 +32,7 @@ from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import FLOWS_IN_PROMPT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.nlu.training_data.training_data import TrainingData
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     allowed_values_for_slot,
@@ -304,22 +305,21 @@ class LLMBasedCommandGenerator(
         )
         return filtered_flows

-    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
+    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
         """Use LLM to generate a response.

         Args:
             prompt: The prompt to send to the LLM.

         Returns:
-            The generated text.
+            An LLMResponse object.

         Raises:
-            ProviderClientAPIException if an error during API call.
+            ProviderClientAPIException: If an error occurs during the LLM API call.
         """
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
         try:
-            llm_response = await llm.acompletion(prompt)
-            return llm_response.choices[0]
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
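
This is a breaking change for subclasses: `invoke_llm` now yields an `LLMResponse` rather than the completion text. A minimal sketch of how a custom generator would adapt (hypothetical subclass; `_predict` is an invented helper name):

    from typing import Optional, Text

    from rasa.dialogue_understanding.generator.llm_based_command_generator import (
        LLMBasedCommandGenerator,
    )
    from rasa.shared.providers.llm.llm_response import LLMResponse

    class MyCommandGenerator(LLMBasedCommandGenerator):
        async def _predict(self, prompt: Text) -> Optional[Text]:
            # invoke_llm now returns an LLMResponse; unwrap the first choice,
            # mirroring the pattern the built-in generators use below
            llm_response = LLMResponse.ensure_llm_response(
                await self.invoke_llm(prompt)
            )
            if llm_response is None or not llm_response.choices:
                return None
            return llm_response.choices[0]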

rasa/dialogue_understanding/generator/llm_command_generator.py CHANGED
@@ -10,6 +10,7 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.exceptions import ProviderClientAPIException
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import raise_deprecation_warning

 structlogger = structlog.get_logger()
@@ -53,7 +54,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
             **kwargs,
         )

-    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
+    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
         try:
             return await super().invoke_llm(prompt)
         except ProviderClientAPIException:

rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py CHANGED
@@ -51,6 +51,7 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import TEXT
 from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     get_prompt_template,
@@ -535,7 +536,12 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        actions = await self.invoke_llm(prompt)
+        response = await self.invoke_llm(prompt)
+        llm_response = LLMResponse.ensure_llm_response(response)
+        actions = None
+        if llm_response and llm_response.choices:
+            actions = llm_response.choices[0]
+
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_active_flow"
@@ -547,10 +553,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):

         if commands:
             self._add_prompt_to_message_parse_data(
-                message,
-                MultiStepLLMCommandGenerator.__name__,
-                "fill_slots_for_active_flow_prompt",
-                prompt,
+                message=message,
+                component_name=MultiStepLLMCommandGenerator.__name__,
+                prompt_name="fill_slots_for_active_flow_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
             )

         return commands
@@ -584,7 +591,12 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        actions = await self.invoke_llm(prompt)
+        response = await self.invoke_llm(prompt)
+        llm_response = LLMResponse.ensure_llm_response(response)
+        actions = None
+        if llm_response and llm_response.choices:
+            actions = llm_response.choices[0]
+
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_handling_flows"
@@ -598,10 +610,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):

         if commands:
             self._add_prompt_to_message_parse_data(
-                message,
-                MultiStepLLMCommandGenerator.__name__,
-                "handle_flows_prompt",
-                prompt,
+                message=message,
+                component_name=MultiStepLLMCommandGenerator.__name__,
+                prompt_name="handle_flows_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
             )

         return commands
@@ -668,7 +681,12 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        actions = await self.invoke_llm(prompt)
+        response = await self.invoke_llm(prompt)
+        llm_response = LLMResponse.ensure_llm_response(response)
+        actions = None
+        if llm_response and llm_response.choices:
+            actions = llm_response.choices[0]
+
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_newly_started_flow"
@@ -695,10 +713,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):

         if commands:
             self._add_prompt_to_message_parse_data(
-                message,
-                MultiStepLLMCommandGenerator.__name__,
-                "fill_slots_for_new_flow_prompt",
-                prompt,
+                message=message,
+                component_name=MultiStepLLMCommandGenerator.__name__,
+                prompt_name="fill_slots_for_new_flow_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
             )

         return commands

rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py CHANGED
@@ -46,6 +46,7 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import TEXT, LLM_COMMANDS, LLM_PROMPT
 from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     get_prompt_template,
@@ -264,13 +265,16 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )

-        action_list = await self.invoke_llm(flow_prompt)
+        response = await self.invoke_llm(flow_prompt)
+        llm_response = LLMResponse.ensure_llm_response(response)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
         # might return 'None' to indicate a failure to generate actions.
-        if action_list is None:
+        if llm_response is None or not llm_response.choices:
             return [ErrorCommand()]

+        action_list = llm_response.choices[0]
+
         log_llm(
             logger=structlogger,
             log_module="SingleStepLLMCommandGenerator",
@@ -285,10 +289,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             message, SingleStepLLMCommandGenerator.__name__, commands
         )
         self._add_prompt_to_message_parse_data(
-            message,
-            SingleStepLLMCommandGenerator.__name__,
-            "command_generator_prompt",
-            flow_prompt,
+            message=message,
+            component_name=SingleStepLLMCommandGenerator.__name__,
+            prompt_name="command_generator_prompt",
+            user_prompt=flow_prompt,
+            llm_response=llm_response,
         )

         return commands

rasa/dialogue_understanding/utils.py CHANGED
@@ -1,7 +1,14 @@
 from contextlib import contextmanager
 from typing import Generator

-record_commands_and_prompts = False
+from rasa.dialogue_understanding.constants import (
+    RASA_RECORD_COMMANDS_AND_PROMPTS_ENV_VAR_NAME,
+)
+from rasa.utils.common import get_bool_env_variable
+
+record_commands_and_prompts = get_bool_env_variable(
+    RASA_RECORD_COMMANDS_AND_PROMPTS_ENV_VAR_NAME, False
+)


 @contextmanager
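
Because the flag is now evaluated once at module import, the environment variable has to be set before `rasa.dialogue_understanding.utils` is first imported. A small sketch (assuming `get_bool_env_variable` accepts the "true"/"false" strings the model manager writes):

    import os

    os.environ["RASA_RECORD_COMMANDS_AND_PROMPTS"] = "true"

    # must happen after the variable is set; the flag is read at import time
    from rasa.dialogue_understanding import utils

    assert utils.record_commands_and_prompts is True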

rasa/model_manager/warm_rasa_process.py CHANGED
@@ -1,12 +1,16 @@
+import os
 import shlex
 import subprocess
-from rasa.__main__ import main
-import os
+import uuid
+from dataclasses import dataclass
 from typing import List
+
 import structlog
-from dataclasses import dataclass
-import uuid

+from rasa.__main__ import main
+from rasa.dialogue_understanding.constants import (
+    RASA_RECORD_COMMANDS_AND_PROMPTS_ENV_VAR_NAME,
+)
 from rasa.model_manager import config
 from rasa.model_manager.utils import ensure_base_directory_exists, logs_path

@@ -43,6 +47,7 @@ def _create_warm_rasa_process() -> WarmRasaProcess:

     envs = os.environ.copy()
     envs["RASA_TELEMETRY_ENABLED"] = "false"
+    envs[RASA_RECORD_COMMANDS_AND_PROMPTS_ENV_VAR_NAME] = "true"

     log_id = uuid.uuid4().hex
     log_path = logs_path(log_id)

rasa/shared/core/events.py CHANGED
@@ -2,14 +2,10 @@ import abc
 import copy
 import json
 import logging
-import structlog
 import re
-from abc import ABC
-
-import jsonpickle
 import time
 import uuid
-from dateutil import parser
+from abc import ABC
 from datetime import datetime
 from typing import (
     List,
@@ -24,11 +20,14 @@ from typing import (
     Tuple,
     TypeVar,
 )
+from typing import Union
+
+import jsonpickle
+import structlog
+from dateutil import parser

 import rasa.shared.utils.common
 import rasa.shared.utils.io
-from typing import Union
-
 from rasa.shared.constants import DOCS_URL_TRAINING_DATA
 from rasa.shared.core.constants import (
     LOOP_NAME,
@@ -62,7 +61,7 @@ from rasa.shared.nlu.constants import (
     ENTITY_ATTRIBUTE_END,
     FULL_RETRIEVAL_INTENT_NAME_KEY,
 )
-
+from rasa.shared.nlu.constants import PROMPTS

 if TYPE_CHECKING:
     from typing_extensions import TypedDict
@@ -98,6 +97,7 @@ if TYPE_CHECKING:
             ENTITIES: List[EntityPrediction],
             "message_id": Optional[Text],
             "metadata": Dict,
+            PROMPTS: Dict,
         },
         total=False,
     )

rasa/shared/nlu/constants.py CHANGED
@@ -6,6 +6,7 @@ PREDICTED_COMMANDS = "predicted_commands"
 PROMPTS = "prompts"
 KEY_USER_PROMPT = "user_prompt"
 KEY_SYSTEM_PROMPT = "system_prompt"
+KEY_LLM_RESPONSE_METADATA = "llm_response_metadata"
 LLM_COMMANDS = "llm_commands"  # needed for fine-tuning
 LLM_PROMPT = "llm_prompt"  # needed for fine-tuning
 FLOWS_FROM_SEMANTIC_SEARCH = "flows_from_semantic_search"

rasa/shared/providers/llm/llm_response.py CHANGED
@@ -1,5 +1,8 @@
 from dataclasses import dataclass, field, asdict
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Text, Any, Union
+import structlog
+
+structlogger = structlog.get_logger()


 @dataclass
@@ -16,6 +19,18 @@ class LLMUsage:
     def __post_init__(self) -> None:
         self.total_tokens = self.prompt_tokens + self.completion_tokens

+    @classmethod
+    def from_dict(cls, data: Dict[Text, Any]) -> "LLMUsage":
+        """
+        Creates an LLMUsage object from a dictionary.
+        If any keys are missing, they will default to zero
+        or whatever default you prefer.
+        """
+        return cls(
+            prompt_tokens=data.get("prompt_tokens"),
+            completion_tokens=data.get("completion_tokens"),
+        )
+
     def to_dict(self) -> dict:
         """Converts the LLMUsage dataclass instance into a dictionary."""
         return asdict(self)
@@ -42,6 +57,32 @@ class LLMResponse:
     """Optional dictionary for storing additional information related to the
     completion that may not be covered by other fields."""

+    @classmethod
+    def from_dict(cls, data: Dict[Text, Any]) -> "LLMResponse":
+        """
+        Creates an LLMResponse from a dictionary.
+        """
+        usage_data = data.get("usage", {})
+        usage_obj = LLMUsage.from_dict(usage_data) if usage_data else None
+
+        return cls(
+            id=data["id"],
+            choices=data["choices"],
+            created=data["created"],
+            model=data.get("model"),
+            usage=usage_obj,
+            additional_info=data.get("additional_info"),
+        )
+
+    @classmethod
+    def ensure_llm_response(cls, response: Union[str, "LLMResponse"]) -> "LLMResponse":
+        if isinstance(response, LLMResponse):
+            return response
+
+        structlogger.warn("llm_response.deprecated_response_type", response=response)
+        data = {"id": None, "choices": [response], "created": None}
+        return LLMResponse.from_dict(data)
+
     def to_dict(self) -> dict:
         """Converts the LLMResponse dataclass instance into a dictionary."""
         result = asdict(self)
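
`ensure_llm_response` is the compatibility shim the callers above rely on: a new-style client already returns an `LLMResponse` and passes through untouched, while a legacy plain-string return value is wrapped (with a deprecation warning) into a response whose only choice is the string itself. A quick illustration:

    from rasa.shared.providers.llm.llm_response import LLMResponse

    wrapped = LLMResponse.ensure_llm_response("StartFlow(transfer_money)")
    assert wrapped.choices == ["StartFlow(transfer_money)"]
    assert wrapped.id is None and wrapped.usage is None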

rasa/shared/utils/schemas/events.py CHANGED
@@ -161,7 +161,7 @@ FLOW_CANCELLED = {
     }
 }
 DIALOGUE_STACK_UPDATED = {
-    "properties": {"event": {"const": "stack"}, "update": {"type": "array"}}
+    "properties": {"event": {"const": "stack"}, "update": {"type": "string"}}
 }
 ROUTING_SESSION_ENDED = {"properties": {"event": {"const": "routing_session_ended"}}}

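
The schema fix reflects that a stack event's `update` field travels as a JSON-encoded patch string, not a decoded array. A hedged validation example (the patch value is made up):

    import json

    from jsonschema import validate

    from rasa.shared.utils.schemas.events import DIALOGUE_STACK_UPDATED

    event = {
        "event": "stack",
        # the stack patch is serialized to a string before it is emitted
        "update": json.dumps([{"op": "add", "path": "/0", "value": {}}]),
    }
    # passes under the corrected schema; the old "array" type rejected this
    validate(event, DIALOGUE_STACK_UPDATED)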

rasa/tracing/instrumentation/attribute_extractors.py CHANGED
@@ -578,6 +578,7 @@ def extract_attrs_for_run_step(
     tracker: DialogueStateTracker,
     available_actions: List[str],
     flows: FlowsList,
+    previous_step_id: Text,
 ) -> Dict[str, Any]:
     current_context = extract_current_context_attribute(stack)

@@ -586,6 +587,7 @@ def extract_attrs_for_run_step(
         "step_description": step.description if step.description else "None",
         "current_flow_id": flow.id,
         "current_context": json.dumps(current_context),
+        "previous_step_id": previous_step_id,
     }


rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.11.3a1.dev4"
+__version__ = "3.11.3a1.dev6"

rasa_pro-3.11.3a1.dev6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: rasa-pro
-Version: 3.11.3a1.dev4
+Version: 3.11.3a1.dev6
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Home-page: https://rasa.com
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework

rasa_pro-3.11.3a1.dev6.dist-info/RECORD CHANGED
@@ -92,7 +92,7 @@ rasa/cli/x.py,sha256=C7dLtYXAkD-uj7hNj7Pz5YbOupp2yRcMjQbsEVqXUJ8,6825
 rasa/constants.py,sha256=YrrBiJUc0cL5Xrsap6IioNbQ6dKaqDiueqHmMIYkpF0,1348
 rasa/core/__init__.py,sha256=DYHLve7F1yQBVOZTA63efVIwLiULMuihOfdpzw1j0os,457
 rasa/core/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/core/actions/action.py,sha256=3tXb_DAKEzguq5zDuV1j04Fd5uYvwQckc4GR_EoDVYE,45286
+rasa/core/actions/action.py,sha256=5AY1h4o4m14uKoGZgaNViwkREgabF5_x0Fww49upBIk,45492
 rasa/core/actions/action_clean_stack.py,sha256=xUP-2ipPsPAnAiwP17c-ezmHPSrV4JSUZr-eSgPQwIs,2279
 rasa/core/actions/action_exceptions.py,sha256=hghzXYN6VeHC-O_O7WiPesCNV86ZTkHgG90ZnQcbai8,724
 rasa/core/actions/action_hangup.py,sha256=wpXunkGC71krAYZD3BbqzlHLZxNg1mIviwWz0j9Go-c,994
@@ -101,7 +101,7 @@ rasa/core/actions/action_run_slot_rejections.py,sha256=F16a9aMJAw27Rh9wUJu0KYSAP
 rasa/core/actions/action_trigger_chitchat.py,sha256=cJcLg_RhfZx-JyomcBOJabnliuj8Fs1nLvONwPCIbpI,1084
 rasa/core/actions/action_trigger_flow.py,sha256=7pye_4iR_9xedyTntS9l49uEmTf5UXjE0hEFgOodfyw,3487
 rasa/core/actions/action_trigger_search.py,sha256=xKzSHZIi1bcadgzXJwtP_ZLWKz-ehmHUNmesR1brr0s,1064
-rasa/core/actions/constants.py,sha256=gfgdWmj-OJ5xTcTAS1OcXQ3dgcTiHO98NC-SGyKlTjs,161
+rasa/core/actions/constants.py,sha256=7fba-a21R58JMCc6RCr3FJsVZDZtNds-Jv1RHThPhj0,310
 rasa/core/actions/custom_action_executor.py,sha256=SWsy35tsWZTSTvYyXdSqSV8efz_f3OA-dYOh_I_QXy0,6169
 rasa/core/actions/direct_custom_actions_executor.py,sha256=IzxRnPF92zs3WX-p9DoFq51Vf0QwfE6prB_AlyEEllc,3746
 rasa/core/actions/e2e_stub_custom_action_executor.py,sha256=D-kECC1QjVLv4owNxstW2xJPPsXTGfGepvquMeWB_ec,2282
@@ -254,7 +254,7 @@ rasa/core/channels/rasa_chat.py,sha256=XGZ7QLyQHhB-m7EjetDNEBSjAa2mEFqU-e-FuS9z3
 rasa/core/channels/rest.py,sha256=YDBnbdrlvaYL7Efy3cm2LbbSm7cBAFDhmcypojHXbog,7227
 rasa/core/channels/rocketchat.py,sha256=HWOMxXLuwadYEYIMMP-z6RqAJzMGZDLklpgqLOipXF0,5998
 rasa/core/channels/slack.py,sha256=3b8OZQ_gih5XBwhQ1q4BbBUC1SCAPaO9AoJEn2NaoQE,24405
-rasa/core/channels/socketio.py,sha256=g8IfIFjcAVC1MZve2N8IKXVPpEQzpFaCQ4q3pECS0j4,13334
+rasa/core/channels/socketio.py,sha256=qTxwow7BA4XMwzlSKAh2W2amQiBqtL_3WqnUc0rjY_s,13342
 rasa/core/channels/telegram.py,sha256=5BrNECFM3qe9XjNpDb8Q9fbqCT5aKr5L6IH21W8sum8,10651
 rasa/core/channels/twilio.py,sha256=GsdjfplZdBj0fRB60bSggPF1DXFZ_x18V_dlcDy5VFs,5943
 rasa/core/channels/vier_cvg.py,sha256=PfvSluQqgJbP0JzZPFUvum3z7H55JPPeobcD-z5zCkw,13544
@@ -307,7 +307,7 @@ rasa/core/lock_store.py,sha256=fgdufUYXHEiTcD7NCCqgDAQRRtt7jrKafENHqFKOyi0,12504
 rasa/core/migrate.py,sha256=XNeYdiRytBmBNubOQ8KZOT_wR1o9aOpHHfBU9PCB2eg,14626
 rasa/core/nlg/__init__.py,sha256=0eQOZ0fB35b18oVhRFczcH30jJHgO8WXFhnbXGOxJek,240
 rasa/core/nlg/callback.py,sha256=rFkDe7CSAETASRefpERUT6-DHWPs0UXhx8x4tZ1QE0M,5238
-rasa/core/nlg/contextual_response_rephraser.py,sha256=RqYig6NFnaXcW5vkAUSb54XWoBkeVWm2WYDCsafthBY,11055
+rasa/core/nlg/contextual_response_rephraser.py,sha256=O6jGUs-vgy3ldhUYPLofwylZpbAObfFKLmpoQ8-CZqk,13173
 rasa/core/nlg/generator.py,sha256=YZ_rh--MeyzA6oXRqr_Ng-jcmPgbCmWMJJrquPmo__8,8436
 rasa/core/nlg/interpolator.py,sha256=Dc-J2Vf6vPPUbwIgZQm3AJDGvMaFTsh9Citd4CYuA9U,5189
 rasa/core/nlg/response.py,sha256=aHpy9BgjO7ub6v-sVPiQqutUA_7-UD1l3DJGVeQyp4k,5888
@@ -315,7 +315,7 @@ rasa/core/nlg/summarize.py,sha256=JO6VCfM_RnU0QX8Us42YkNOxC0ESKV1xcVH_sCW27ZU,21
 rasa/core/persistor.py,sha256=0BZvrA1xObxVtADWLVapj4NOmvqIEen1LKoMOdtZ63s,20337
 rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/core/policies/ensemble.py,sha256=AjNOEy2Iubbe-LdKaoFUXG8ch6yPrg3bTvcTcAPmeOs,12959
-rasa/core/policies/enterprise_search_policy.py,sha256=nG1vgZO5woxvXCZWayYXQzZkmxPemfsL0c62QkZcgcI,34126
+rasa/core/policies/enterprise_search_policy.py,sha256=JZgzBI6TB0joUZnhBc08ADrA66PPNBCcSW6ARAUm5ew,36718
 rasa/core/policies/enterprise_search_prompt_template.jinja2,sha256=dCS_seyBGxMQoMsOjjvPp0dd31OSzZCJSZeev1FJK5Q,1187
 rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2,sha256=vRQBs3q13UmvRRgqA8-DmRtM7tqZP2ngwMVJ4gy7lE0,3302
 rasa/core/policies/flow_policy.py,sha256=wGb1l_59cGM9ZaexSIK5uXFi618739oNfLOxx2FC0_Y,7490
@@ -378,21 +378,22 @@ rasa/dialogue_understanding/commands/skip_question_command.py,sha256=bSrUFOHUz1o
 rasa/dialogue_understanding/commands/start_flow_command.py,sha256=a0Yk8xpBpFgC3Hkh4J8kAudz4s4ZLQWuoDq_a63lQXM,3309
 rasa/dialogue_understanding/commands/user_silence_command.py,sha256=QtqsMU5mrbUp5dla2yGSpxXfIfi_h6Eu72mTDZQ_aTU,1724
 rasa/dialogue_understanding/commands/utils.py,sha256=OiyLFGEsrfFSIJcvBY6lTIIXqDY9OxaikVGtcl4Kokk,1911
+rasa/dialogue_understanding/constants.py,sha256=YcELaIss69Hnroclvn90Dl4Suk3S6e3t0UoIbUaXG2A,83
 rasa/dialogue_understanding/generator/__init__.py,sha256=Ykeb2wQ1DuiUWAWO0hLIPSTK1_Ktiq9DZXF6D3ugN78,764
-rasa/dialogue_understanding/generator/command_generator.py,sha256=Egdy-g46BGBw-iP-dKBM3sca-X-2SyBQL5NPyKTiHWw,15974
+rasa/dialogue_understanding/generator/command_generator.py,sha256=Hc_19NVERoEU3pEREWU8RxJSdTxc-JXzRKVYGy5UYk4,16572
 rasa/dialogue_understanding/generator/constants.py,sha256=9Nwjo2Qobioetr9SyyQxsGvEPSbKCVS5ZX1GGJtbA0E,716
 rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
 rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=MkwUgQA9xRlAQUdWF2cBEX2tW2PQhBsq2Jsy2vmqWY4,17891
-rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=hzHUUMPmIZaLZkFRBgVK42l2nTUn04H4W8GpBBF1XIs,17554
-rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=yQ8aAMsTKGSARroJq0TfKVLe3ShYl8K8oklDk_KGies,2459
+rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=VO3ZrotELyfKY_LEw8FJ4bPGTRjYbUvQy4Q6Z5rcPCI,17592
+rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=QpNXhjB9ugtPV8XAHmKjbJtOiI1yE9rC2osbsI_A4ZY,2529
 rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
 rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
-rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=zw1N0UyEOzYfgm3sFP8ptZ92fSLszwiACM4Vqwt8lIo,33527
+rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=CrTEgkhky6s5O7NohqZ9gCD8G0YLq4rKM49ujhrNzr4,34418
 rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=pzd1q-syU_QuqTRcfd_GsXyOJaxfApqh_LsOKuEN46g,9332
 rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
-rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=hhFnxzc8lji7UZsFaVK-GTkyJ-34jaN-IhWcebDJhBI,18493
+rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=prDAi8i6PrkkljkfI1qh7kL0BXiRzxLvl4XBcMaqqqI,18780
 rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/patterns/cancel.py,sha256=IQ4GVHNnNCqwKRLlAqBtLsgolcbPPnHsHdb3aOAFhEs,3868
 rasa/dialogue_understanding/patterns/cannot_handle.py,sha256=pg0zJHl-hDBnl6y9IyxZzW57yuMdfD8xI8eiK6EVrG8,1406
@@ -424,7 +425,7 @@ rasa/dialogue_understanding/stack/frames/flow_stack_frame.py,sha256=W4mEmihIN5Bi
 rasa/dialogue_understanding/stack/frames/pattern_frame.py,sha256=EVrYWv5dCP7XTvNV-HqtOOrseP-IkF0jD2_JacAvIYw,235
 rasa/dialogue_understanding/stack/frames/search_frame.py,sha256=rJ9og28k_udUIjP-2Z5xeb_2T5HvCzwDCnxVG9K7lws,728
 rasa/dialogue_understanding/stack/utils.py,sha256=ysH6-IeMwNnKbF1__uMlq6I8zaGXFdMEpw1iYdEz4kA,7650
-rasa/dialogue_understanding/utils.py,sha256=ENXT_1ALY1Ev6Gs8jNz3dm3TC91Y5psp2Np6_L4cHXI,332
+rasa/dialogue_understanding/utils.py,sha256=tw9O_fhuspk64v99B5_lwNZjBIMlpjIKekpyFzMylJ8,566
 rasa/dialogue_understanding_test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding_test/constants.py,sha256=rZhBIQV5jFzPTuDtK5WSwS8YKKDLaZ7TMQsaSQwNA2g,486
 rasa/dialogue_understanding_test/du_test_case.py,sha256=Is3brWanixDNXKq_Kr43tcUc4PjoiN-IfJBRwKnL4hU,3656
@@ -520,7 +521,7 @@ rasa/model_manager/socket_bridge.py,sha256=klKaFA-PKNfha5ir0xKqba3Al6igYu3cD7BLI
 rasa/model_manager/studio_jwt_auth.py,sha256=eZ_srnbL2sKIKgx0OZIp29NbIrH2J8PlI8Or0lLg_Xo,2644
 rasa/model_manager/trainer_service.py,sha256=90WYl4fclgPLcLfFgDOtai9VahZx_ikn20PIMg_eSQM,10347
 rasa/model_manager/utils.py,sha256=tgj215CsJreqc4Ym8tAvv-hBieAC94nL0c4caPWIcZM,2643
-rasa/model_manager/warm_rasa_process.py,sha256=xFNP-ANZfUBKs_Sur2deAT2qqatWD3_XZJcUgQy2iiQ,5716
+rasa/model_manager/warm_rasa_process.py,sha256=L6nYjI1vgEjT5zSc13HkS8t-16t7iOGkKZnXuNRf5sc,5887
 rasa/model_service.py,sha256=nj0wNoByYWg5WVd5GtIc5V-RhpVR_xspi-MeNQxurLE,3753
 rasa/model_testing.py,sha256=h0QUpJu6p_TDse3aHjCfYwI6OGH47b3Iuo5Ot0HQADM,14959
 rasa/model_training.py,sha256=gvmJ6bN6TdX6H6qnO5y14I_aYeqi_h1Dxfpavks3paU,21687
@@ -591,7 +592,7 @@ rasa/shared/core/command_payload_reader.py,sha256=Vhiop9LWFawaEruRifBBrVmoEJ-fj1
 rasa/shared/core/constants.py,sha256=WNFzABG-eiVREBL6aDZAmcNDiSmuSbvWuxXIMoX2Iv8,5704
 rasa/shared/core/conversation.py,sha256=tw1fD2XB3gOdQjDI8hHo5TAAmE2JYNogQGWe3rE929w,1385
 rasa/shared/core/domain.py,sha256=SsRLbLIEZ-coPTEwr-XxU_O-X-0mR466YLvXJJOAEpc,81247
-rasa/shared/core/events.py,sha256=6yuOrZs8hZaR0FV1nC58l1u6qE4fegwrvL5nH1w7xY4,83719
+rasa/shared/core/events.py,sha256=989wHh_6d6XF8PQkeCZfY3MNgePCbLK9BiY8JQVXmQ0,83790
 rasa/shared/core/flows/__init__.py,sha256=HszhIvEARpmyxABFc1MKYvj8oy04WiZW1xmCdToakbs,181
 rasa/shared/core/flows/flow.py,sha256=XzF9RUxLNyiGndnpvECV4pMczzc6g7UtgwokyXAoaTY,21496
 rasa/shared/core/flows/flow_path.py,sha256=xstwahZBU5cfMY46mREA4NoOGlKLBRAqeP_mJ3UZqOI,2283
@@ -643,7 +644,7 @@ rasa/shared/importers/rasa.py,sha256=877EU8qPZSMBk5VAVAAUhfsh6vatRJrYOqWz1YGR6p8
 rasa/shared/importers/remote_importer.py,sha256=fKLQskaCVPpD5cCMQ9sR71cZZlSIP-SSv3J3o2kra2w,7696
 rasa/shared/importers/utils.py,sha256=Gi3BM5RUr-9nX_Ujf-g-tt19_bKPizmQIi6eAflDAmo,1289
 rasa/shared/nlu/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/nlu/constants.py,sha256=KUYpaGAjwBwdUV8TZupei-xWAcb8RmaqhXNF8SMVwqU,1773
+rasa/shared/nlu/constants.py,sha256=oq-eaTMXRvT1mE8pFhxf1Jvc8vlZGIeOSdY7YQlKd2Q,1825
 rasa/shared/nlu/interpreter.py,sha256=eCNJp61nQYTGVf4aJi8SCWb46jxZY6-C1M1LFxMyQTM,188
 rasa/shared/nlu/training_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/nlu/training_data/entities_parser.py,sha256=fC-VIso07so6E9b6KrQXOBC-ZUGCQGvnMvzVwiAO1GQ,6729
@@ -695,7 +696,7 @@ rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=A6sg2bvulNczuzu1J0V7
 rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=1oiUIXr_U5ldyBQZ8cnrV3P7Qw9kMw1yvaVg6mjKkHU,3940
 rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=llko2DfOpiLMpHxnW26I1Hb1wTn7VmZ_yu43GRXhqwQ,6815
 rasa/shared/providers/llm/llm_client.py,sha256=6-gMsEJqquhUPGXzNiq_ybM_McLWxAJ_QhbmWcLnb_Q,2358
-rasa/shared/providers/llm/llm_response.py,sha256=Ltmc8yk9cAqtK8QgwfZZywudM5ZQsT4y_AKAQ3q05hA,1490
+rasa/shared/providers/llm/llm_response.py,sha256=rdXMBxbyz4vKnG-0b-NPsyiA1rehrvkU6Tjx1usX2BE,2871
 rasa/shared/providers/llm/openai_llm_client.py,sha256=uDdcugBcO3sfxbduc00eqaZdrJP0VFX5dkBd2Dem47M,4844
 rasa/shared/providers/llm/rasa_llm_client.py,sha256=SpgWn3uHHEezIcyvMfi468zRLw_W8VF6sIs-VIhElPc,3357
 rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=98FaF0-lYnytC46ulhrCAQjUKy9TI0U2QILml__UCzc,9170
@@ -717,7 +718,7 @@ rasa/shared/utils/pykwalify_extensions.py,sha256=4W8gde8C6QpGCY_t9IEmaZSgjMuie1x
 rasa/shared/utils/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/utils/schemas/config.yml,sha256=czxSADw9hOIZdhvFP8pVUQo810hs9_C8ZGfCPx17taM,27
 rasa/shared/utils/schemas/domain.yml,sha256=b2k4ZYSV-QL3hGjDaRg8rfoqaTh4hbhDc_hBlMB8cuI,3409
-rasa/shared/utils/schemas/events.py,sha256=9sg_w4VeFMksyl-uscUht1TErf1gfKR56agyYSvl2c4,6912
+rasa/shared/utils/schemas/events.py,sha256=T8kSex2UpgmToqib6KyrgNYBK5WS8OwqdO4Jv-TEJ4I,6913
 rasa/shared/utils/schemas/model_config.yml,sha256=OravyVWalSwjiXYRarRzg0tiRnUFHe1q4-5Wj1TEeFk,811
 rasa/shared/utils/schemas/stories.yml,sha256=DV3wAFnv1leD7kV-FH-GQihF1QX5oKHc8Eb24mxjizc,4737
 rasa/shared/utils/yaml.py,sha256=HpG4whRyFMEJ39YEMd-X1HBJL6C2cAwvPlMGzqq74z0,37638
@@ -735,7 +736,7 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/tracing/config.py,sha256=kA-xEY2oAc07gw1RzGeMuNnDKd_ZrVXT_B63pxGW-uI,12860
 rasa/tracing/constants.py,sha256=N_MJLStE3IkmPKQCQv42epd3jdBMJ4Ith1dVO65N5ho,2425
 rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/tracing/instrumentation/attribute_extractors.py,sha256=zGbDKfULtSfdxAVUK1tM45QF4X5OoIAxV5AMKUBF50Y,26006
+rasa/tracing/instrumentation/attribute_extractors.py,sha256=YntngMpBuKfuipydmhR6zmeS_1N0_nhiwazeRHFrdTc,26080
 rasa/tracing/instrumentation/instrumentation.py,sha256=5g_Hp9CE7bqIKUVfLcpGan0s2SK3h5rikjumpADs4SY,51103
 rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=8AdMOy_2mlKnlmt-muV8-eoT8jA52GXDzM0avejfg8A,4821
 rasa/tracing/instrumentation/metrics.py,sha256=ByfKshoxNOqjKZwKTulqL71s5b3WugqLfjha3So0OEU,10534
@@ -776,9 +777,9 @@ rasa/utils/train_utils.py,sha256=f1NWpp5y6al0dzoQyyio4hc4Nf73DRoRSHDzEK6-C4E,212
 rasa/utils/url_tools.py,sha256=JQcHL2aLqLHu82k7_d9imUoETCm2bmlHaDpOJ-dKqBc,1218
 rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
 rasa/validator.py,sha256=wl5IKiyDmk6FlDcGO2Js-H-gHPeqVqUJ6hB4fgN0xjI,66796
-rasa/version.py,sha256=GA1IzBOw2Is9Gh8r2ooq5d5oRHY4Cr5V2c_YGCN_jaI,124
-rasa_pro-3.11.3a1.dev4.dist-info/METADATA,sha256=UoBAUZRnGQGLH0j91R1nxARNIGeoD7wlzdqqMuPmb_U,10798
-rasa_pro-3.11.3a1.dev4.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
-rasa_pro-3.11.3a1.dev4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-rasa_pro-3.11.3a1.dev4.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
-rasa_pro-3.11.3a1.dev4.dist-info/RECORD,,
+rasa/version.py,sha256=Bhg94N2gHc9Q25ztAiy105xQbjhAUGrjG2rKzFAvRpg,124
+rasa_pro-3.11.3a1.dev6.dist-info/METADATA,sha256=ab_MAK0yJM6BOlDfR49clYgd1lmlHG-MFTZztDSaIGs,10798
+rasa_pro-3.11.3a1.dev6.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+rasa_pro-3.11.3a1.dev6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+rasa_pro-3.11.3a1.dev6.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+rasa_pro-3.11.3a1.dev6.dist-info/RECORD,,