rasa-pro 3.11.3a1.dev4__py3-none-any.whl → 3.11.3a1.dev5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

This version of rasa-pro has been flagged as potentially problematic.

@@ -93,6 +93,7 @@ from rasa.shared.nlu.constants import (
     INTENT_NAME_KEY,
     INTENT_RANKING_KEY,
 )
+from rasa.shared.nlu.constants import PROMPTS
 from rasa.shared.utils.io import raise_warning
 from rasa.shared.utils.schemas.events import EVENTS_SCHEMA
 from rasa.utils.endpoints import ClientResponseError, EndpointConfig
@@ -267,6 +268,7 @@ def create_bot_utterance(message: Dict[Text, Any]) -> BotUttered:
             "attachment": message.pop("attachment", None) or message.get("image", None),
             "image": message.pop("image", None),
             "custom": message.pop("custom", None),
+            PROMPTS: message.pop(PROMPTS, None),
         },
         metadata=message,
     )
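
Because `message.pop(PROMPTS, None)` removes the key before the remaining `message` dict is passed as `metadata`, recorded prompts now travel in the `BotUttered` event's data payload rather than in its metadata. A minimal stand-alone sketch of that pop-versus-keep behaviour, using plain dicts rather than the Rasa classes:

    # hypothetical message dict as assembled by an NLG component
    message = {"text": "Done!", "prompts": {"ContextualResponseRephraser": []}}

    data = {
        "text": message.pop("text", None),
        "prompts": message.pop("prompts", None),  # moved into the event's data
    }
    metadata = message  # whatever remains; no longer contains "prompts"

    assert data["prompts"] is not None and "prompts" not in metadata
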
@@ -218,7 +218,7 @@ class SocketIOInput(InputChannel):
             return None
         return SocketIOOutput(self.sio, self.bot_message_evt)

-    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str):
+    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str) -> None:
         if self.sio:
             await self.sio.emit("tracker", tracker_dump, room=sender_id)

@@ -2,9 +2,11 @@ from typing import Any, Dict, Optional, Text

 import structlog
 from jinja2 import Template
+
 from rasa import telemetry
 from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator
 from rasa.core.nlg.summarize import summarize_conversation
+from rasa.dialogue_understanding.utils import record_commands_and_prompts
 from rasa.shared.constants import (
     LLM_CONFIG_KEY,
     MODEL_CONFIG_KEY,
@@ -18,6 +20,12 @@ from rasa.shared.constants import (
 from rasa.shared.core.domain import KEY_RESPONSES_TEXT, Domain
 from rasa.shared.core.events import BotUttered, UserUttered
 from rasa.shared.core.trackers import DialogueStateTracker
+from rasa.shared.nlu.constants import (
+    PROMPTS,
+    KEY_USER_PROMPT,
+    KEY_LLM_RESPONSE_METADATA,
+)
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     DEFAULT_OPENAI_GENERATE_MODEL_NAME,
@@ -124,6 +132,44 @@ class ContextualResponseRephraser(
             ContextualResponseRephraser.__name__,
         )

+    @classmethod
+    def _add_prompt_and_llm_metadata_to_response(
+        cls,
+        response: Dict[str, Any],
+        prompt_name: str,
+        user_prompt: str,
+        llm_response: Optional["LLMResponse"] = None,
+    ) -> Dict[str, Any]:
+        """Stores the prompt and LLMResponse metadata to response.
+
+        Args:
+            response: The response to add the prompt and LLMResponse metadata to.
+            prompt_name: A name identifying prompt usage.
+            user_prompt: The user prompt that was sent to the LLM.
+            llm_response: The response object from the LLM (None if no response).
+        """
+        if not record_commands_and_prompts:
+            return response
+
+        prompt_data: Dict[Text, Any] = {
+            KEY_USER_PROMPT: user_prompt,
+        }
+
+        if llm_response is not None:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
+
+        prompt_tuple = (prompt_name, prompt_data)
+
+        component_name = cls.__name__
+        existing_prompts = response.get(PROMPTS, {})
+        if component_name in existing_prompts:
+            existing_prompts[component_name].append(prompt_tuple)
+        else:
+            existing_prompts[component_name] = [prompt_tuple]
+
+        response[PROMPTS] = existing_prompts
+        return response
+
     def _last_message_if_human(self, tracker: DialogueStateTracker) -> Optional[str]:
         """Returns the latest message from the tracker.

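
The new helper groups recorded prompts by component name and appends `(prompt_name, prompt_data)` tuples, so repeated calls accumulate rather than overwrite. A stand-alone sketch of that accumulation logic with plain dicts (constant values inlined, helper name hypothetical):

    from typing import Any, Dict

    PROMPTS = "prompts"  # value of rasa.shared.nlu.constants.PROMPTS

    def add_prompt(response: Dict[str, Any], component: str,
                   prompt_name: str, prompt_data: Dict[str, Any]) -> Dict[str, Any]:
        # group per component, append per call -- mirrors the method above
        response.setdefault(PROMPTS, {}).setdefault(component, []).append(
            (prompt_name, prompt_data)
        )
        return response

    response: Dict[str, Any] = {"text": "rephrased answer"}
    add_prompt(response, "ContextualResponseRephraser", "rephrase_prompt",
               {"user_prompt": "Rephrase: ...", "llm_response_metadata": {"model": "gpt-4"}})
    assert len(response["prompts"]["ContextualResponseRephraser"]) == 1

Note that in the rephrase path below, this helper runs before the empty-response check, so failed LLM calls are recorded as well (with `user_prompt` only).
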
@@ -142,20 +188,24 @@ class ContextualResponseRephraser(
             return None
         return None

-    async def _generate_llm_response(self, prompt: str) -> Optional[str]:
-        """Use LLM to generate a response.
+    async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
+        """
+        Use LLM to generate a response, returning an LLMResponse object
+        containing both the generated text (choices) and metadata.

         Args:
-            prompt: the prompt to send to the LLM
+            prompt: The prompt to send to the LLM.

         Returns:
-            generated text
+            An LLMResponse object if successful, otherwise None.
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)

         try:
-            llm_response = await llm.acompletion(prompt)
-            return llm_response.choices[0]
+            raw_response = await llm.acompletion(prompt)
+            response_dict = raw_response.to_dict()
+            return LLMResponse.from_dict(response_dict)
+
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -255,11 +305,20 @@ class ContextualResponseRephraser(
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
             llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )
-        if not (updated_text := await self._generate_llm_response(prompt)):
-            # If the LLM fails to generate a response, we
-            # return the original response.
+        llm_response = await self._generate_llm_response(prompt)
+
+        response = self._add_prompt_and_llm_metadata_to_response(
+            response=response,
+            prompt_name="rephrase_prompt",
+            user_prompt=prompt,
+            llm_response=llm_response,
+        )
+
+        if llm_response is None or not llm_response.choices:
+            # If the LLM fails to generate a response, return the original response.
             return response

+        updated_text = llm_response.choices[0]
         structlogger.debug(
             "nlg.rewrite.complete",
             response_text=response_text,
@@ -2,6 +2,7 @@ import importlib.resources
 import json
 import re
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text
+
 import dotenv
 import structlog
 from jinja2 import Template
@@ -37,6 +38,7 @@ from rasa.dialogue_understanding.stack.frames import (
     SearchStackFrame,
 )
 from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame
+from rasa.dialogue_understanding.utils import record_commands_and_prompts
 from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
@@ -63,11 +65,17 @@ from rasa.shared.core.events import Event, UserUttered, BotUttered
 from rasa.shared.core.generator import TrackerWithCachedStates
 from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
 from rasa.shared.exceptions import RasaException, FileIOException
+from rasa.shared.nlu.constants import (
+    PROMPTS,
+    KEY_USER_PROMPT,
+    KEY_LLM_RESPONSE_METADATA,
+)
 from rasa.shared.nlu.training_data.training_data import TrainingData
 from rasa.shared.providers.embedding._langchain_embedding_client_adapter import (
     _LangchainEmbeddingClientAdapter,
 )
 from rasa.shared.providers.llm.llm_client import LLMClient
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.cli import print_error_and_exit
 from rasa.shared.utils.health_check.embeddings_health_check_mixin import (
     EmbeddingsHealthCheckMixin,
@@ -272,6 +280,45 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
         # Wrap the embedding client in the adapter
         return _LangchainEmbeddingClientAdapter(client)

+    @classmethod
+    def _store_prompt_and_llm_response_in_tracker(
+        cls,
+        tracker: DialogueStateTracker,
+        prompt_name: str,
+        user_prompt: str,
+        llm_response: Optional[LLMResponse] = None,
+    ) -> None:
+        """Stores the prompt and LLMResponse metadata in the tracker.
+
+        Args:
+            tracker: The DialogueStateTracker containing the current conversation state.
+            prompt_name: A name identifying prompt usage.
+            user_prompt: The user prompt that was sent to the LLM.
+            llm_response: The response object from the LLM (None if no response).
+        """
+        if not record_commands_and_prompts:
+            return
+
+        if not tracker.latest_message:
+            return
+
+        parse_data = tracker.latest_message.parse_data
+        if parse_data is not None and PROMPTS not in parse_data:
+            parse_data[PROMPTS] = {}  # type: ignore[literal-required]
+
+        component_name = cls.__name__
+        existing_prompts = parse_data[PROMPTS].get(component_name, [])  # type: ignore[literal-required]
+
+        prompt_data: Dict[Text, Any] = {
+            KEY_USER_PROMPT: user_prompt,
+        }
+        if llm_response is not None:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
+
+        prompt_tuple = (prompt_name, prompt_data)
+        existing_prompts.append(prompt_tuple)
+        parse_data[PROMPTS][component_name] = existing_prompts  # type: ignore[literal-required]
+
     def train(  # type: ignore[override]
         self,
         training_trackers: List[TrackerWithCachedStates],
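
This is the tracker-side twin of the rephraser helper: prompts land in the `parse_data` of the latest user message rather than on the bot response. One review observation: the `parse_data is not None` guard only protects the initialization; the subsequent unconditional `parse_data[PROMPTS]` accesses would still raise if `parse_data` were ever `None`. A sketch of reading the recorded data back, e.g. in a test (shape taken from the code above, values illustrative):

    parse_data = {
        "prompts": {
            "EnterpriseSearchPolicy": [
                (
                    "enterprise_search_prompt",
                    {
                        "user_prompt": "Answer based on: ...",
                        "llm_response_metadata": {"id": "r-1", "model": "gpt-4"},
                    },
                ),
            ],
        },
    }

    for component, entries in parse_data["prompts"].items():
        for prompt_name, prompt_data in entries:
            print(component, prompt_name, prompt_data.get("llm_response_metadata"))
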
@@ -498,13 +545,26 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po

         if self.use_llm:
             prompt = self._render_prompt(tracker, documents.results)
-            llm_answer = await self._generate_llm_answer(llm, prompt)
+            llm_response = await self._generate_llm_answer(llm, prompt)
+
+            self._store_prompt_and_llm_response_in_tracker(
+                tracker=tracker,
+                prompt_name="enterprise_search_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
+            )

-            if self.citation_enabled:
-                llm_answer = self.post_process_citations(llm_answer)
+            if llm_response is None or not llm_response.choices:
+                logger.debug(f"{logger_key}.no_llm_response")
+                response = None
+            else:
+                llm_answer = llm_response.choices[0]

-            logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
-            response = llm_answer
+                if self.citation_enabled:
+                    llm_answer = self.post_process_citations(llm_answer)
+
+                logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
+                response = llm_answer
         else:
             response = documents.results[0].metadata.get("answer", None)
             if not response:
@@ -516,7 +576,6 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy.predict_action_probabilities.no_llm",
                 search_results=documents,
             )
-
         if response is None:
             return self._create_prediction_internal_error(domain, tracker)

@@ -581,10 +640,21 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po

     async def _generate_llm_answer(
         self, llm: LLMClient, prompt: Text
-    ) -> Optional[Text]:
+    ) -> Optional[LLMResponse]:
+        """Fetches an LLM completion for the provided prompt.
+
+        Args:
+            llm: The LLM client used to get the completion.
+            prompt: The prompt text to send to the model.
+
+        Returns:
+            An LLMResponse object, or None if the call fails.
+        """
         try:
-            llm_response = await llm.acompletion(prompt)
-            llm_answer = llm_response.choices[0]
+            raw_response = await llm.acompletion(prompt)
+            response_dict = raw_response.to_dict()
+            return LLMResponse.from_dict(response_dict)
+
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -592,9 +662,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy._generate_llm_answer.llm_error",
                 error=e,
             )
-            llm_answer = None
-
-            return llm_answer
+            return None

     def _create_prediction(
         self,
@@ -26,8 +26,10 @@ from rasa.shared.nlu.constants import (
     PROMPTS,
     KEY_USER_PROMPT,
     KEY_SYSTEM_PROMPT,
+    KEY_LLM_RESPONSE_METADATA,
 )
 from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.llm import DEFAULT_MAX_USER_INPUT_CHARACTERS

 structlogger = structlog.get_logger()
@@ -399,6 +401,7 @@ class CommandGenerator:
         prompt_name: str,
         user_prompt: str,
         system_prompt: Optional[str] = None,
+        llm_response: Optional[LLMResponse] = None,
     ) -> None:
         """Add prompt to the message parse data.

@@ -411,14 +414,16 @@ class CommandGenerator:
                 "fill_slots_prompt",
                 {
                     "user_prompt": <prompt content>",
-                    "system_prompt": <prompt content>"
+                    "system_prompt": <prompt content>",
+                    "llm_response_metadata": <metadata dict from LLMResponse>
                 }
             ),
             (
                 "handle_flows_prompt",
                 {
                     "user_prompt": <prompt content>",
-                    "system_prompt": <prompt content>"
+                    "system_prompt": <prompt content>",
+                    "llm_response_metadata": <metadata dict from LLMResponse>
                 }
             ),
         ],
@@ -427,7 +432,8 @@ class CommandGenerator:
             "prompt_template",
             {
                 "user_prompt": <prompt content>",
-                "system_prompt": <prompt content>"
+                "system_prompt": <prompt content>",
+                "llm_response_metadata": <metadata dict from LLMResponse>
             }
         ),
     ]
@@ -440,13 +446,15 @@ class CommandGenerator:
         if not record_commands_and_prompts:
             return

-        prompt_tuple = (
-            prompt_name,
-            {
-                KEY_USER_PROMPT: user_prompt,
-                **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
-            },
-        )
+        prompt_data: Dict[Text, Any] = {
+            KEY_USER_PROMPT: user_prompt,
+            **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
+        }
+
+        if llm_response is not None:
+            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
+
+        prompt_tuple = (prompt_name, prompt_data)

         if message.get(PROMPTS) is not None:
             prompts = message.get(PROMPTS)
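
The tuple shape is unchanged; the construction is split apart so the optional LLM metadata can be merged in. A stand-alone equivalent (constants inlined, function name hypothetical) showing the three possible payloads:

    from typing import Any, Dict, Optional

    def build_prompt_data(
        user_prompt: str,
        system_prompt: Optional[str] = None,
        llm_response_metadata: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        data: Dict[str, Any] = {"user_prompt": user_prompt}
        if system_prompt:
            data["system_prompt"] = system_prompt
        if llm_response_metadata is not None:
            data["llm_response_metadata"] = llm_response_metadata
        return data

    build_prompt_data("fill slots ...")                                  # user prompt only
    build_prompt_data("fill slots ...", system_prompt="You are ...")     # + system prompt
    build_prompt_data("fill slots ...", llm_response_metadata={"id": "r-1"})  # + metadata
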
@@ -32,6 +32,7 @@ from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import FLOWS_IN_PROMPT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.nlu.training_data.training_data import TrainingData
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     allowed_values_for_slot,
@@ -304,22 +305,23 @@ class LLMBasedCommandGenerator(
         )
         return filtered_flows

-    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
+    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
         """Use LLM to generate a response.

         Args:
             prompt: The prompt to send to the LLM.

         Returns:
-            The generated text.
+            An LLMResponse object.

         Raises:
-            ProviderClientAPIException if an error during API call.
+            ProviderClientAPIException: If an error occurs during the LLM API call.
         """
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
         try:
-            llm_response = await llm.acompletion(prompt)
-            return llm_response.choices[0]
+            raw_response = await llm.acompletion(prompt)
+            response_dict = raw_response.to_dict()
+            return LLMResponse.from_dict(response_dict)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -10,6 +10,7 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.exceptions import ProviderClientAPIException
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import raise_deprecation_warning

 structlogger = structlog.get_logger()
@@ -53,7 +54,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
             **kwargs,
         )

-    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
+    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
         try:
             return await super().invoke_llm(prompt)
         except ProviderClientAPIException:
@@ -535,7 +535,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        actions = await self.invoke_llm(prompt)
+        llm_response = await self.invoke_llm(prompt)
+        actions = None
+        if llm_response and llm_response.choices:
+            actions = llm_response.choices[0]
+
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_active_flow"
@@ -547,10 +551,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):

         if commands:
             self._add_prompt_to_message_parse_data(
-                message,
-                MultiStepLLMCommandGenerator.__name__,
-                "fill_slots_for_active_flow_prompt",
-                prompt,
+                message=message,
+                component_name=MultiStepLLMCommandGenerator.__name__,
+                prompt_name="fill_slots_for_active_flow_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
             )

         return commands
@@ -584,7 +589,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        actions = await self.invoke_llm(prompt)
+        llm_response = await self.invoke_llm(prompt)
+        actions = None
+        if llm_response and llm_response.choices:
+            actions = llm_response.choices[0]
+
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_handling_flows"
@@ -598,10 +607,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):

         if commands:
             self._add_prompt_to_message_parse_data(
-                message,
-                MultiStepLLMCommandGenerator.__name__,
-                "handle_flows_prompt",
-                prompt,
+                message=message,
+                component_name=MultiStepLLMCommandGenerator.__name__,
+                prompt_name="handle_flows_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
             )

         return commands
@@ -668,7 +678,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        actions = await self.invoke_llm(prompt)
+        llm_response = await self.invoke_llm(prompt)
+        actions = None
+        if llm_response and llm_response.choices:
+            actions = llm_response.choices[0]
+
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_newly_started_flow"
@@ -695,10 +709,11 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):

         if commands:
             self._add_prompt_to_message_parse_data(
-                message,
-                MultiStepLLMCommandGenerator.__name__,
-                "fill_slots_for_new_flow_prompt",
-                prompt,
+                message=message,
+                component_name=MultiStepLLMCommandGenerator.__name__,
+                prompt_name="fill_slots_for_new_flow_prompt",
+                user_prompt=prompt,
+                llm_response=llm_response,
             )

         return commands
@@ -264,13 +264,15 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )

-        action_list = await self.invoke_llm(flow_prompt)
+        llm_response = await self.invoke_llm(flow_prompt)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
         # might return 'None' to indicate a failure to generate actions.
-        if action_list is None:
+        if llm_response is None or not llm_response.choices:
             return [ErrorCommand()]

+        action_list = llm_response.choices[0]
+
         log_llm(
             logger=structlogger,
             log_module="SingleStepLLMCommandGenerator",
@@ -285,10 +287,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             message, SingleStepLLMCommandGenerator.__name__, commands
         )
         self._add_prompt_to_message_parse_data(
-            message,
-            SingleStepLLMCommandGenerator.__name__,
-            "command_generator_prompt",
-            flow_prompt,
+            message=message,
+            component_name=SingleStepLLMCommandGenerator.__name__,
+            prompt_name="command_generator_prompt",
+            user_prompt=flow_prompt,
+            llm_response=llm_response,
         )

         return commands
@@ -2,14 +2,10 @@ import abc
 import copy
 import json
 import logging
-import structlog
 import re
-from abc import ABC
-
-import jsonpickle
 import time
 import uuid
-from dateutil import parser
+from abc import ABC
 from datetime import datetime
 from typing import (
     List,
@@ -24,11 +20,14 @@ from typing import (
     Tuple,
     TypeVar,
 )
+from typing import Union
+
+import jsonpickle
+import structlog
+from dateutil import parser

 import rasa.shared.utils.common
 import rasa.shared.utils.io
-from typing import Union
-
 from rasa.shared.constants import DOCS_URL_TRAINING_DATA
 from rasa.shared.core.constants import (
     LOOP_NAME,
@@ -62,7 +61,7 @@ from rasa.shared.nlu.constants import (
     ENTITY_ATTRIBUTE_END,
     FULL_RETRIEVAL_INTENT_NAME_KEY,
 )
-
+from rasa.shared.nlu.constants import PROMPTS

 if TYPE_CHECKING:
     from typing_extensions import TypedDict
@@ -98,6 +97,7 @@ if TYPE_CHECKING:
             ENTITIES: List[EntityPrediction],
             "message_id": Optional[Text],
             "metadata": Dict,
+            PROMPTS: Dict,
         },
         total=False,
     )
@@ -6,6 +6,7 @@ PREDICTED_COMMANDS = "predicted_commands"
 PROMPTS = "prompts"
 KEY_USER_PROMPT = "user_prompt"
 KEY_SYSTEM_PROMPT = "system_prompt"
+KEY_LLM_RESPONSE_METADATA = "llm_response_metadata"
 LLM_COMMANDS = "llm_commands"  # needed for fine-tuning
 LLM_PROMPT = "llm_prompt"  # needed for fine-tuning
 FLOWS_FROM_SEMANTIC_SEARCH = "flows_from_semantic_search"
@@ -1,5 +1,5 @@
 from dataclasses import dataclass, field, asdict
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Text, Any


 @dataclass
@@ -16,6 +16,18 @@ class LLMUsage:
     def __post_init__(self) -> None:
         self.total_tokens = self.prompt_tokens + self.completion_tokens

+    @classmethod
+    def from_dict(cls, data: Dict[Text, Any]) -> "LLMUsage":
+        """
+        Creates an LLMUsage object from a dictionary.
+        If any keys are missing, they will default to zero
+        or whatever default you prefer.
+        """
+        return cls(
+            prompt_tokens=data.get("prompt_tokens"),
+            completion_tokens=data.get("completion_tokens"),
+        )
+
     def to_dict(self) -> dict:
         """Converts the LLMUsage dataclass instance into a dictionary."""
         return asdict(self)
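
Worth flagging in review: the docstring promises zero-defaults, but `dict.get()` without a second argument returns `None`, and `__post_init__` (shown above) immediately adds the two fields together. A minimal repro of the resulting pitfall, assuming token counts are plain ints:

    usage = {"prompt_tokens": 12}  # "completion_tokens" missing

    prompt_tokens = usage.get("prompt_tokens")          # 12
    completion_tokens = usage.get("completion_tokens")  # None, not 0

    # LLMUsage.__post_init__ would then evaluate 12 + None -> TypeError.
    # data.get("completion_tokens", 0) would match the documented behaviour.
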
@@ -42,6 +54,23 @@ class LLMResponse:
     """Optional dictionary for storing additional information related to the
     completion that may not be covered by other fields."""

+    @classmethod
+    def from_dict(cls, data: Dict[Text, Any]) -> "LLMResponse":
+        """
+        Creates an LLMResponse from a dictionary.
+        """
+        usage_data = data.get("usage")
+        usage_obj = LLMUsage.from_dict(usage_data) if usage_data else None
+
+        return cls(
+            id=data["id"],
+            choices=data["choices"],
+            created=data["created"],
+            model=data.get("model"),
+            usage=usage_obj,
+            additional_info=data.get("additional_info"),
+        )
+
     def to_dict(self) -> dict:
         """Converts the LLMResponse dataclass instance into a dictionary."""
         result = asdict(self)
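
For reference, a self-contained mirror of the two dataclasses (field names and `__post_init__` taken from this diff; the defaults are assumptions) illustrating the dict round trip that the new `_generate_llm_answer` and `invoke_llm` implementations rely on:

    from dataclasses import dataclass
    from typing import Any, Dict, List, Optional

    @dataclass
    class LLMUsage:
        prompt_tokens: int
        completion_tokens: int
        total_tokens: int = 0

        def __post_init__(self) -> None:
            self.total_tokens = self.prompt_tokens + self.completion_tokens

    @dataclass
    class LLMResponse:
        id: str
        choices: List[str]
        created: int
        model: Optional[str] = None
        usage: Optional[LLMUsage] = None
        additional_info: Optional[Dict[str, Any]] = None

    data = {
        "id": "resp-1",
        "choices": ["StartFlow(transfer_money)"],
        "created": 1700000000,
        "usage": {"prompt_tokens": 100, "completion_tokens": 7},
    }
    usage_data = data.get("usage")
    response = LLMResponse(
        id=data["id"],            # note: from_dict indexes "id", "choices" and
        choices=data["choices"],  # "created" directly, so a provider payload
        created=data["created"],  # missing any of them raises KeyError
        model=data.get("model"),
        usage=LLMUsage(**usage_data) if usage_data else None,
        additional_info=data.get("additional_info"),
    )
    assert response.choices[0] == "StartFlow(transfer_money)"
    assert response.usage is not None and response.usage.total_tokens == 107
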
@@ -161,7 +161,7 @@ FLOW_CANCELLED = {
     }
 }
 DIALOGUE_STACK_UPDATED = {
-    "properties": {"event": {"const": "stack"}, "update": {"type": "array"}}
+    "properties": {"event": {"const": "stack"}, "update": {"type": "string"}}
 }
 ROUTING_SESSION_ENDED = {"properties": {"event": {"const": "routing_session_ended"}}}

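
The one-word schema change presumably reflects how these events actually serialize: the `update` field of a stack event carries a JSON-Patch document dumped to a string, which the previous `array` type would reject. An illustrative payload (values hypothetical):

    import json

    event = {
        "event": "stack",
        "update": json.dumps(
            [{"op": "add", "path": "/0", "value": {"frame_id": "XYZ12345"}}]
        ),
    }
    assert isinstance(event["update"], str)  # now validates as "type": "string"
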
@@ -578,6 +578,7 @@ def extract_attrs_for_run_step(
     tracker: DialogueStateTracker,
     available_actions: List[str],
     flows: FlowsList,
+    previous_step_id: Text,
 ) -> Dict[str, Any]:
     current_context = extract_current_context_attribute(stack)

@@ -586,6 +587,7 @@ def extract_attrs_for_run_step(
         "step_description": step.description if step.description else "None",
         "current_flow_id": flow.id,
         "current_context": json.dumps(current_context),
+        "previous_step_id": previous_step_id,
     }

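
Tracing spans for `run_step` now also record which step preceded the current one. A sketch of the resulting attribute dict (values illustrative):

    attrs = {
        "step_description": "None",
        "current_flow_id": "transfer_money",
        "current_context": "{}",
        "previous_step_id": "ask_amount",
    }
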
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.11.3a1.dev4"
+__version__ = "3.11.3a1.dev5"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: rasa-pro
-Version: 3.11.3a1.dev4
+Version: 3.11.3a1.dev5
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Home-page: https://rasa.com
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
@@ -92,7 +92,7 @@ rasa/cli/x.py,sha256=C7dLtYXAkD-uj7hNj7Pz5YbOupp2yRcMjQbsEVqXUJ8,6825
 rasa/constants.py,sha256=YrrBiJUc0cL5Xrsap6IioNbQ6dKaqDiueqHmMIYkpF0,1348
 rasa/core/__init__.py,sha256=DYHLve7F1yQBVOZTA63efVIwLiULMuihOfdpzw1j0os,457
 rasa/core/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/core/actions/action.py,sha256=3tXb_DAKEzguq5zDuV1j04Fd5uYvwQckc4GR_EoDVYE,45286
+rasa/core/actions/action.py,sha256=H4Mr5WvXHwgZ5ws7ZyAlQ_ESx5ryZvv7cAgNjtyYCn8,45381
 rasa/core/actions/action_clean_stack.py,sha256=xUP-2ipPsPAnAiwP17c-ezmHPSrV4JSUZr-eSgPQwIs,2279
 rasa/core/actions/action_exceptions.py,sha256=hghzXYN6VeHC-O_O7WiPesCNV86ZTkHgG90ZnQcbai8,724
 rasa/core/actions/action_hangup.py,sha256=wpXunkGC71krAYZD3BbqzlHLZxNg1mIviwWz0j9Go-c,994
@@ -254,7 +254,7 @@ rasa/core/channels/rasa_chat.py,sha256=XGZ7QLyQHhB-m7EjetDNEBSjAa2mEFqU-e-FuS9z3
 rasa/core/channels/rest.py,sha256=YDBnbdrlvaYL7Efy3cm2LbbSm7cBAFDhmcypojHXbog,7227
 rasa/core/channels/rocketchat.py,sha256=HWOMxXLuwadYEYIMMP-z6RqAJzMGZDLklpgqLOipXF0,5998
 rasa/core/channels/slack.py,sha256=3b8OZQ_gih5XBwhQ1q4BbBUC1SCAPaO9AoJEn2NaoQE,24405
-rasa/core/channels/socketio.py,sha256=g8IfIFjcAVC1MZve2N8IKXVPpEQzpFaCQ4q3pECS0j4,13334
+rasa/core/channels/socketio.py,sha256=qTxwow7BA4XMwzlSKAh2W2amQiBqtL_3WqnUc0rjY_s,13342
 rasa/core/channels/telegram.py,sha256=5BrNECFM3qe9XjNpDb8Q9fbqCT5aKr5L6IH21W8sum8,10651
 rasa/core/channels/twilio.py,sha256=GsdjfplZdBj0fRB60bSggPF1DXFZ_x18V_dlcDy5VFs,5943
 rasa/core/channels/vier_cvg.py,sha256=PfvSluQqgJbP0JzZPFUvum3z7H55JPPeobcD-z5zCkw,13544
@@ -307,7 +307,7 @@ rasa/core/lock_store.py,sha256=fgdufUYXHEiTcD7NCCqgDAQRRtt7jrKafENHqFKOyi0,12504
 rasa/core/migrate.py,sha256=XNeYdiRytBmBNubOQ8KZOT_wR1o9aOpHHfBU9PCB2eg,14626
 rasa/core/nlg/__init__.py,sha256=0eQOZ0fB35b18oVhRFczcH30jJHgO8WXFhnbXGOxJek,240
 rasa/core/nlg/callback.py,sha256=rFkDe7CSAETASRefpERUT6-DHWPs0UXhx8x4tZ1QE0M,5238
-rasa/core/nlg/contextual_response_rephraser.py,sha256=RqYig6NFnaXcW5vkAUSb54XWoBkeVWm2WYDCsafthBY,11055
+rasa/core/nlg/contextual_response_rephraser.py,sha256=YGBmSyXnaZMXC6AHQNuDEnP19Ak_rP6FzZHTTCdTk9E,13134
 rasa/core/nlg/generator.py,sha256=YZ_rh--MeyzA6oXRqr_Ng-jcmPgbCmWMJJrquPmo__8,8436
 rasa/core/nlg/interpolator.py,sha256=Dc-J2Vf6vPPUbwIgZQm3AJDGvMaFTsh9Citd4CYuA9U,5189
 rasa/core/nlg/response.py,sha256=aHpy9BgjO7ub6v-sVPiQqutUA_7-UD1l3DJGVeQyp4k,5888
@@ -315,7 +315,7 @@ rasa/core/nlg/summarize.py,sha256=JO6VCfM_RnU0QX8Us42YkNOxC0ESKV1xcVH_sCW27ZU,21
 rasa/core/persistor.py,sha256=0BZvrA1xObxVtADWLVapj4NOmvqIEen1LKoMOdtZ63s,20337
 rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/core/policies/ensemble.py,sha256=AjNOEy2Iubbe-LdKaoFUXG8ch6yPrg3bTvcTcAPmeOs,12959
-rasa/core/policies/enterprise_search_policy.py,sha256=nG1vgZO5woxvXCZWayYXQzZkmxPemfsL0c62QkZcgcI,34126
+rasa/core/policies/enterprise_search_policy.py,sha256=-gcmJ8rS0tFMHXs_w_RTlVhhXHss_VqyS44OfkcWhEw,36674
 rasa/core/policies/enterprise_search_prompt_template.jinja2,sha256=dCS_seyBGxMQoMsOjjvPp0dd31OSzZCJSZeev1FJK5Q,1187
 rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2,sha256=vRQBs3q13UmvRRgqA8-DmRtM7tqZP2ngwMVJ4gy7lE0,3302
 rasa/core/policies/flow_policy.py,sha256=wGb1l_59cGM9ZaexSIK5uXFi618739oNfLOxx2FC0_Y,7490
@@ -379,20 +379,20 @@ rasa/dialogue_understanding/commands/start_flow_command.py,sha256=a0Yk8xpBpFgC3H
 rasa/dialogue_understanding/commands/user_silence_command.py,sha256=QtqsMU5mrbUp5dla2yGSpxXfIfi_h6Eu72mTDZQ_aTU,1724
 rasa/dialogue_understanding/commands/utils.py,sha256=OiyLFGEsrfFSIJcvBY6lTIIXqDY9OxaikVGtcl4Kokk,1911
 rasa/dialogue_understanding/generator/__init__.py,sha256=Ykeb2wQ1DuiUWAWO0hLIPSTK1_Ktiq9DZXF6D3ugN78,764
-rasa/dialogue_understanding/generator/command_generator.py,sha256=Egdy-g46BGBw-iP-dKBM3sca-X-2SyBQL5NPyKTiHWw,15974
+rasa/dialogue_understanding/generator/command_generator.py,sha256=RCrfvsvIGl9TlhJtiicHoondNb5DAjNvlo3zv0qZ_1w,16500
 rasa/dialogue_understanding/generator/constants.py,sha256=9Nwjo2Qobioetr9SyyQxsGvEPSbKCVS5ZX1GGJtbA0E,716
 rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
 rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=MkwUgQA9xRlAQUdWF2cBEX2tW2PQhBsq2Jsy2vmqWY4,17891
-rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=hzHUUMPmIZaLZkFRBgVK42l2nTUn04H4W8GpBBF1XIs,17554
-rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=yQ8aAMsTKGSARroJq0TfKVLe3ShYl8K8oklDk_KGies,2459
+rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=O9sIoDTup2g7l1Uqy6LqMBi-hwZ3OpJk90ZwzhltMtc,17707
+rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=QpNXhjB9ugtPV8XAHmKjbJtOiI1yE9rC2osbsI_A4ZY,2529
 rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
 rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
-rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=zw1N0UyEOzYfgm3sFP8ptZ92fSLszwiACM4Vqwt8lIo,33527
+rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=E80aJUrvDgsWcAF3IphpYVT2x0-DcsI-ISwOY38vOlg,34172
 rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=pzd1q-syU_QuqTRcfd_GsXyOJaxfApqh_LsOKuEN46g,9332
 rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
-rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=hhFnxzc8lji7UZsFaVK-GTkyJ-34jaN-IhWcebDJhBI,18493
+rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=a72P3SzuSaF0Mmm4b3k4jT4zOGE_RFXBDRlHOI7Px0g,18656
 rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/patterns/cancel.py,sha256=IQ4GVHNnNCqwKRLlAqBtLsgolcbPPnHsHdb3aOAFhEs,3868
 rasa/dialogue_understanding/patterns/cannot_handle.py,sha256=pg0zJHl-hDBnl6y9IyxZzW57yuMdfD8xI8eiK6EVrG8,1406
@@ -591,7 +591,7 @@ rasa/shared/core/command_payload_reader.py,sha256=Vhiop9LWFawaEruRifBBrVmoEJ-fj1
 rasa/shared/core/constants.py,sha256=WNFzABG-eiVREBL6aDZAmcNDiSmuSbvWuxXIMoX2Iv8,5704
 rasa/shared/core/conversation.py,sha256=tw1fD2XB3gOdQjDI8hHo5TAAmE2JYNogQGWe3rE929w,1385
 rasa/shared/core/domain.py,sha256=SsRLbLIEZ-coPTEwr-XxU_O-X-0mR466YLvXJJOAEpc,81247
-rasa/shared/core/events.py,sha256=6yuOrZs8hZaR0FV1nC58l1u6qE4fegwrvL5nH1w7xY4,83719
+rasa/shared/core/events.py,sha256=989wHh_6d6XF8PQkeCZfY3MNgePCbLK9BiY8JQVXmQ0,83790
 rasa/shared/core/flows/__init__.py,sha256=HszhIvEARpmyxABFc1MKYvj8oy04WiZW1xmCdToakbs,181
 rasa/shared/core/flows/flow.py,sha256=XzF9RUxLNyiGndnpvECV4pMczzc6g7UtgwokyXAoaTY,21496
 rasa/shared/core/flows/flow_path.py,sha256=xstwahZBU5cfMY46mREA4NoOGlKLBRAqeP_mJ3UZqOI,2283
@@ -643,7 +643,7 @@ rasa/shared/importers/rasa.py,sha256=877EU8qPZSMBk5VAVAAUhfsh6vatRJrYOqWz1YGR6p8
 rasa/shared/importers/remote_importer.py,sha256=fKLQskaCVPpD5cCMQ9sR71cZZlSIP-SSv3J3o2kra2w,7696
 rasa/shared/importers/utils.py,sha256=Gi3BM5RUr-9nX_Ujf-g-tt19_bKPizmQIi6eAflDAmo,1289
 rasa/shared/nlu/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/nlu/constants.py,sha256=KUYpaGAjwBwdUV8TZupei-xWAcb8RmaqhXNF8SMVwqU,1773
+rasa/shared/nlu/constants.py,sha256=oq-eaTMXRvT1mE8pFhxf1Jvc8vlZGIeOSdY7YQlKd2Q,1825
 rasa/shared/nlu/interpreter.py,sha256=eCNJp61nQYTGVf4aJi8SCWb46jxZY6-C1M1LFxMyQTM,188
 rasa/shared/nlu/training_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/nlu/training_data/entities_parser.py,sha256=fC-VIso07so6E9b6KrQXOBC-ZUGCQGvnMvzVwiAO1GQ,6729
@@ -695,7 +695,7 @@ rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=A6sg2bvulNczuzu1J0V7
 rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=1oiUIXr_U5ldyBQZ8cnrV3P7Qw9kMw1yvaVg6mjKkHU,3940
 rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=llko2DfOpiLMpHxnW26I1Hb1wTn7VmZ_yu43GRXhqwQ,6815
 rasa/shared/providers/llm/llm_client.py,sha256=6-gMsEJqquhUPGXzNiq_ybM_McLWxAJ_QhbmWcLnb_Q,2358
-rasa/shared/providers/llm/llm_response.py,sha256=Ltmc8yk9cAqtK8QgwfZZywudM5ZQsT4y_AKAQ3q05hA,1490
+rasa/shared/providers/llm/llm_response.py,sha256=HedtASFXW2GFWS4OAmk-wSjn5dRDFWB8dAkAO2Kdd_M,2426
 rasa/shared/providers/llm/openai_llm_client.py,sha256=uDdcugBcO3sfxbduc00eqaZdrJP0VFX5dkBd2Dem47M,4844
 rasa/shared/providers/llm/rasa_llm_client.py,sha256=SpgWn3uHHEezIcyvMfi468zRLw_W8VF6sIs-VIhElPc,3357
 rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=98FaF0-lYnytC46ulhrCAQjUKy9TI0U2QILml__UCzc,9170
@@ -717,7 +717,7 @@ rasa/shared/utils/pykwalify_extensions.py,sha256=4W8gde8C6QpGCY_t9IEmaZSgjMuie1x
 rasa/shared/utils/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/utils/schemas/config.yml,sha256=czxSADw9hOIZdhvFP8pVUQo810hs9_C8ZGfCPx17taM,27
 rasa/shared/utils/schemas/domain.yml,sha256=b2k4ZYSV-QL3hGjDaRg8rfoqaTh4hbhDc_hBlMB8cuI,3409
-rasa/shared/utils/schemas/events.py,sha256=9sg_w4VeFMksyl-uscUht1TErf1gfKR56agyYSvl2c4,6912
+rasa/shared/utils/schemas/events.py,sha256=T8kSex2UpgmToqib6KyrgNYBK5WS8OwqdO4Jv-TEJ4I,6913
 rasa/shared/utils/schemas/model_config.yml,sha256=OravyVWalSwjiXYRarRzg0tiRnUFHe1q4-5Wj1TEeFk,811
 rasa/shared/utils/schemas/stories.yml,sha256=DV3wAFnv1leD7kV-FH-GQihF1QX5oKHc8Eb24mxjizc,4737
 rasa/shared/utils/yaml.py,sha256=HpG4whRyFMEJ39YEMd-X1HBJL6C2cAwvPlMGzqq74z0,37638
@@ -735,7 +735,7 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/tracing/config.py,sha256=kA-xEY2oAc07gw1RzGeMuNnDKd_ZrVXT_B63pxGW-uI,12860
 rasa/tracing/constants.py,sha256=N_MJLStE3IkmPKQCQv42epd3jdBMJ4Ith1dVO65N5ho,2425
 rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/tracing/instrumentation/attribute_extractors.py,sha256=zGbDKfULtSfdxAVUK1tM45QF4X5OoIAxV5AMKUBF50Y,26006
+rasa/tracing/instrumentation/attribute_extractors.py,sha256=YntngMpBuKfuipydmhR6zmeS_1N0_nhiwazeRHFrdTc,26080
 rasa/tracing/instrumentation/instrumentation.py,sha256=5g_Hp9CE7bqIKUVfLcpGan0s2SK3h5rikjumpADs4SY,51103
 rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=8AdMOy_2mlKnlmt-muV8-eoT8jA52GXDzM0avejfg8A,4821
 rasa/tracing/instrumentation/metrics.py,sha256=ByfKshoxNOqjKZwKTulqL71s5b3WugqLfjha3So0OEU,10534
@@ -776,9 +776,9 @@ rasa/utils/train_utils.py,sha256=f1NWpp5y6al0dzoQyyio4hc4Nf73DRoRSHDzEK6-C4E,212
 rasa/utils/url_tools.py,sha256=JQcHL2aLqLHu82k7_d9imUoETCm2bmlHaDpOJ-dKqBc,1218
 rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
 rasa/validator.py,sha256=wl5IKiyDmk6FlDcGO2Js-H-gHPeqVqUJ6hB4fgN0xjI,66796
-rasa/version.py,sha256=GA1IzBOw2Is9Gh8r2ooq5d5oRHY4Cr5V2c_YGCN_jaI,124
-rasa_pro-3.11.3a1.dev4.dist-info/METADATA,sha256=UoBAUZRnGQGLH0j91R1nxARNIGeoD7wlzdqqMuPmb_U,10798
-rasa_pro-3.11.3a1.dev4.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
-rasa_pro-3.11.3a1.dev4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-rasa_pro-3.11.3a1.dev4.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
-rasa_pro-3.11.3a1.dev4.dist-info/RECORD,,
+rasa/version.py,sha256=30hhHbpQCDfDXVFyr3PTuEccWoj0mUrtnPGHp39doZ0,124
+rasa_pro-3.11.3a1.dev5.dist-info/METADATA,sha256=m_N49daQs1B-kgsaTa3rRAlRqpG2oNRc1LHIA7Oc6JA,10798
+rasa_pro-3.11.3a1.dev5.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+rasa_pro-3.11.3a1.dev5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+rasa_pro-3.11.3a1.dev5.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+rasa_pro-3.11.3a1.dev5.dist-info/RECORD,,