rasa-pro 3.12.14__py3-none-any.whl → 3.12.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro has been flagged as potentially problematic; consult the advisory on the package registry for more details.

@@ -1137,12 +1137,15 @@ class ActionSendText(Action):
1137
1137
  tracker: "DialogueStateTracker",
1138
1138
  domain: "Domain",
1139
1139
  metadata: Optional[Dict[Text, Any]] = None,
1140
+ create_bot_uttered_event: bool = True,
1140
1141
  ) -> List[Event]:
1141
1142
  """Runs action. Please see parent class for the full docstring."""
1142
1143
  fallback = {"text": ""}
1143
1144
  metadata_copy = copy.deepcopy(metadata) if metadata else {}
1144
1145
  message = metadata_copy.get("message", fallback)
1145
- return [create_bot_utterance(message)]
1146
+ if create_bot_uttered_event:
1147
+ return [create_bot_utterance(message)]
1148
+ return []
1146
1149
 
1147
1150
 
1148
1151
  class ActionExtractSlots(Action):
@@ -5,7 +5,10 @@ from jinja2 import Template
5
5
 
6
6
  from rasa import telemetry
7
7
  from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator
8
- from rasa.core.nlg.summarize import summarize_conversation
8
+ from rasa.core.nlg.summarize import (
9
+ _count_multiple_utterances_as_single_turn,
10
+ summarize_conversation,
11
+ )
9
12
  from rasa.shared.constants import (
10
13
  LLM_CONFIG_KEY,
11
14
  MAX_COMPLETION_TOKENS_CONFIG_KEY,
@@ -55,6 +58,7 @@ RESPONSE_SUMMARISE_CONVERSATION_KEY = "summarize_conversation"
55
58
  DEFAULT_REPHRASE_ALL = False
56
59
  DEFAULT_SUMMARIZE_HISTORY = True
57
60
  DEFAULT_MAX_HISTORICAL_TURNS = 5
61
+ DEFAULT_COUNT_MULTIPLE_UTTERANCES_AS_SINGLE_TURN = True
58
62
 
59
63
  DEFAULT_LLM_CONFIG = {
60
64
  PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
@@ -72,6 +76,7 @@ its meaning. Use simple {{language}}.
72
76
  Context / previous conversation with the user:
73
77
  {{history}}
74
78
 
79
+ Last user message:
75
80
  {{current_input}}
76
81
 
77
82
  Suggested AI Response: {{suggested_response}}
@@ -124,6 +129,11 @@ class ContextualResponseRephraser(
124
129
  "max_historical_turns", DEFAULT_MAX_HISTORICAL_TURNS
125
130
  )
126
131
 
132
+ self.count_multiple_utterances_as_single_turn = self.nlg_endpoint.kwargs.get(
133
+ "count_multiple_utterances_as_single_turn",
134
+ DEFAULT_COUNT_MULTIPLE_UTTERANCES_AS_SINGLE_TURN,
135
+ )
136
+
127
137
  self.llm_config = resolve_model_client_config(
128
138
  self.nlg_endpoint.kwargs.get(LLM_CONFIG_KEY),
129
139
  ContextualResponseRephraser.__name__,
@@ -262,8 +272,16 @@ class ContextualResponseRephraser(
262
272
  Returns:
263
273
  The history for the prompt.
264
274
  """
275
+ # Count multiple utterances by bot/user as single turn in conversation history
276
+ turns_wrapper = (
277
+ _count_multiple_utterances_as_single_turn
278
+ if self.count_multiple_utterances_as_single_turn
279
+ else None
280
+ )
265
281
  llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)
266
- return await summarize_conversation(tracker, llm, max_turns=5)
282
+ return await summarize_conversation(
283
+ tracker, llm, max_turns=5, turns_wrapper=turns_wrapper
284
+ )
267
285
 
268
286
  async def rephrase(
269
287
  self,
@@ -285,19 +303,26 @@ class ContextualResponseRephraser(
285
303
 
286
304
  prompt_template_text = self._template_for_response_rephrasing(response)
287
305
 
288
- # Retrieve inputs for the dynamic prompt
289
- latest_message = self._last_message_if_human(tracker)
290
- current_input = f"{USER}: {latest_message}" if latest_message else ""
306
+ # Last user message (=current input) should always be in prompt if available
307
+ last_message_by_user = getattr(tracker.latest_message, "text", "")
308
+ current_input = (
309
+ f"{USER}: {last_message_by_user}" if last_message_by_user else ""
310
+ )
291
311
 
292
312
  # Only summarise conversation history if flagged
293
313
  if self.summarize_history:
294
314
  history = await self._create_history(tracker)
295
315
  else:
296
- # make sure the transcript/history contains the last user utterance
316
+ # Count multiple utterances by bot/user as single turn
317
+ turns_wrapper = (
318
+ _count_multiple_utterances_as_single_turn
319
+ if self.count_multiple_utterances_as_single_turn
320
+ else None
321
+ )
297
322
  max_turns = max(self.max_historical_turns, 1)
298
- history = tracker_as_readable_transcript(tracker, max_turns=max_turns)
299
- # the history already contains the current input
300
- current_input = ""
323
+ history = tracker_as_readable_transcript(
324
+ tracker, max_turns=max_turns, turns_wrapper=turns_wrapper
325
+ )
301
326
 
302
327
  prompt = Template(prompt_template_text).render(
303
328
  history=history,
@@ -1,4 +1,5 @@
1
- from typing import Optional
1
+ from itertools import groupby
2
+ from typing import Callable, List, Optional
2
3
 
3
4
  import structlog
4
5
  from jinja2 import Template
@@ -23,20 +24,49 @@ SUMMARY_PROMPT_TEMPLATE = Template(_DEFAULT_SUMMARIZER_TEMPLATE)
23
24
  MAX_TURNS_DEFAULT = 20
24
25
 
25
26
 
27
+ def _count_multiple_utterances_as_single_turn(transcript: List[str]) -> List[str]:
28
+ """Counts multiple utterances as a single turn.
29
+
30
+ Args:
31
+ transcript: the lines of the transcript
32
+
33
+ Returns:
34
+ transcript: with multiple utterances counted as a single turn
35
+ """
36
+ if not transcript:
37
+ return []
38
+
39
+ def get_speaker_label(line: str) -> str:
40
+ return line.partition(": ")[0] if ": " in line else ""
41
+
42
+ modified_transcript = [
43
+ f"{speaker}: {' '.join(line.partition(': ')[2] for line in group)}"
44
+ for speaker, group in groupby(transcript, key=get_speaker_label)
45
+ if speaker
46
+ ]
47
+
48
+ return modified_transcript
49
+
50
+
26
51
  def _create_summarization_prompt(
27
- tracker: DialogueStateTracker, max_turns: Optional[int]
52
+ tracker: DialogueStateTracker,
53
+ max_turns: Optional[int],
54
+ turns_wrapper: Optional[Callable[[List[str]], List[str]]],
28
55
  ) -> str:
29
56
  """Creates an LLM prompt to summarize the conversation in the tracker.
30
57
 
31
58
  Args:
32
59
  tracker: tracker of the conversation to be summarized
33
60
  max_turns: maximum number of turns to summarize
61
+ turns_wrapper: optional function to wrap the turns
34
62
 
35
63
 
36
64
  Returns:
37
65
  The prompt to summarize the conversation.
38
66
  """
39
- transcript = tracker_as_readable_transcript(tracker, max_turns=max_turns)
67
+ transcript = tracker_as_readable_transcript(
68
+ tracker, max_turns=max_turns, turns_wrapper=turns_wrapper
69
+ )
40
70
  return SUMMARY_PROMPT_TEMPLATE.render(
41
71
  conversation=transcript,
42
72
  )
@@ -46,6 +76,7 @@ async def summarize_conversation(
46
76
  tracker: DialogueStateTracker,
47
77
  llm: LLMClient,
48
78
  max_turns: Optional[int] = MAX_TURNS_DEFAULT,
79
+ turns_wrapper: Optional[Callable[[List[str]], List[str]]] = None,
49
80
  ) -> str:
50
81
  """Summarizes the dialogue using the LLM.
51
82
 
@@ -53,11 +84,12 @@ async def summarize_conversation(
53
84
  tracker: the tracker to summarize
54
85
  llm: the LLM to use for summarization
55
86
  max_turns: maximum number of turns to summarize
87
+ turns_wrapper: optional function to wrap the turns
56
88
 
57
89
  Returns:
58
90
  The summary of the dialogue.
59
91
  """
60
- prompt = _create_summarization_prompt(tracker, max_turns)
92
+ prompt = _create_summarization_prompt(tracker, max_turns, turns_wrapper)
61
93
  try:
62
94
  llm_response = await llm.acompletion(prompt)
63
95
  summarization = llm_response.choices[0].strip()
@@ -66,6 +98,8 @@ async def summarize_conversation(
66
98
  )
67
99
  return summarization
68
100
  except Exception as e:
69
- transcript = tracker_as_readable_transcript(tracker, max_turns=max_turns)
101
+ transcript = tracker_as_readable_transcript(
102
+ tracker, max_turns=max_turns, turns_wrapper=turns_wrapper
103
+ )
70
104
  structlogger.error("summarization.error", error=e)
71
105
  return transcript
@@ -1,6 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import Any, Dict, List, Text
3
+ from typing import Any, Dict, List, Optional, Text
4
4
 
5
5
  import rasa.dialogue_understanding.processor.command_processor
6
6
  from rasa.engine.graph import ExecutionContext, GraphComponent
@@ -37,8 +37,8 @@ class CommandProcessorComponent(GraphComponent):
37
37
  self,
38
38
  tracker: DialogueStateTracker,
39
39
  flows: FlowsList,
40
- story_graph: StoryGraph,
41
- domain: Domain,
40
+ domain: Optional[Domain] = None,
41
+ story_graph: Optional[StoryGraph] = None,
42
42
  ) -> List[Event]:
43
43
  """Execute commands to update tracker state."""
44
44
  return rasa.dialogue_understanding.processor.command_processor.execute_commands(
@@ -396,7 +396,9 @@ class DefaultV1Recipe(Recipe):
396
396
  return preprocessors
397
397
 
398
398
  def _get_needs_from_args(
399
- self, component: Type[GraphComponent], fn_name: str
399
+ self,
400
+ component: Type[GraphComponent],
401
+ fn_name: str,
400
402
  ) -> Dict[str, str]:
401
403
  """Get the needed arguments from the method on the component.
402
404
 
@@ -434,6 +436,7 @@ class DefaultV1Recipe(Recipe):
434
436
  parameters = {
435
437
  name
436
438
  for name, param in sig.parameters.items()
439
+ # only consider parameters which are positional or keyword
437
440
  if param.kind == param.POSITIONAL_OR_KEYWORD
438
441
  }
439
442
 
@@ -752,8 +755,28 @@ class DefaultV1Recipe(Recipe):
752
755
  predict_config, predict_nodes, train_nodes, preprocessors
753
756
  )
754
757
 
758
+ # The `story_graph_provider` is only needed if the intentless policy is used.
759
+ # If it is not used, we can remove it from the nodes as it slows down the
760
+ # loading time if users have a large number of stories.
761
+ if not self._intentless_policy_used(predict_nodes):
762
+ # Removes the `story_graph_provider` from the nodes
763
+ predict_nodes.pop("story_graph_provider", None)
764
+ if "command_processor" in predict_nodes:
765
+ # Removes story_graph from the command processor inputs
766
+ predict_nodes["command_processor"].needs.pop("story_graph", None)
767
+
755
768
  return predict_nodes
756
769
 
770
+ @staticmethod
771
+ def _intentless_policy_used(nodes: Dict[Text, SchemaNode]) -> bool:
772
+ """Checks if the intentless policy is used in the nodes."""
773
+ from rasa.core import IntentlessPolicy
774
+
775
+ for schema_node in nodes.values():
776
+ if schema_node.matches_type(IntentlessPolicy):
777
+ return True
778
+ return False
779
+
757
780
  def _add_nlu_predict_nodes(
758
781
  self,
759
782
  last_run_node: Text,
@@ -924,7 +947,8 @@ class DefaultV1Recipe(Recipe):
924
947
  predict_nodes["command_processor"] = SchemaNode(
925
948
  **DEFAULT_PREDICT_KWARGS,
926
949
  needs=self._get_needs_from_args(
927
- CommandProcessorComponent, "execute_commands"
950
+ CommandProcessorComponent,
951
+ "execute_commands",
928
952
  ),
929
953
  uses=CommandProcessorComponent,
930
954
  fn="execute_commands",
rasa/shared/utils/llm.py CHANGED
@@ -9,6 +9,7 @@ from typing import (
9
9
  Any,
10
10
  Callable,
11
11
  Dict,
12
+ List,
12
13
  Literal,
13
14
  Optional,
14
15
  Text,
@@ -193,6 +194,7 @@ def tracker_as_readable_transcript(
193
194
  human_prefix: str = USER,
194
195
  ai_prefix: str = AI,
195
196
  max_turns: Optional[int] = 20,
197
+ turns_wrapper: Optional[Callable[[List[str]], List[str]]] = None,
196
198
  ) -> str:
197
199
  """Creates a readable dialogue from a tracker.
198
200
 
@@ -201,6 +203,7 @@ def tracker_as_readable_transcript(
201
203
  human_prefix: the prefix to use for human utterances
202
204
  ai_prefix: the prefix to use for ai utterances
203
205
  max_turns: the maximum number of turns to include in the transcript
206
+ turns_wrapper: optional function to wrap the turns in a custom way
204
207
 
205
208
  Example:
206
209
  >>> tracker = Tracker(
@@ -237,8 +240,11 @@ def tracker_as_readable_transcript(
237
240
  elif isinstance(event, BotUttered):
238
241
  transcript.append(f"{ai_prefix}: {sanitize_message_for_prompt(event.text)}")
239
242
 
240
- if max_turns:
241
- transcript = transcript[-max_turns:]
243
+ # turns_wrapper to count multiple utterances by bot/user as single turn
244
+ if turns_wrapper:
245
+ transcript = turns_wrapper(transcript)
246
+ # otherwise, just take the last `max_turns` lines of the transcript
247
+ transcript = transcript[-max_turns if max_turns is not None else None :]
242
248
 
243
249
  return "\n".join(transcript)
244
250
 
@@ -680,7 +686,6 @@ def get_prompt_template(
680
686
  Returns:
681
687
  The prompt template.
682
688
  """
683
-
684
689
  try:
685
690
  if jinja_file_path is not None:
686
691
  prompt_template = rasa.shared.utils.io.read_file(jinja_file_path)
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
1
1
  # this file will automatically be changed,
2
2
  # do not add anything but the version number here!
3
- __version__ = "3.12.14"
3
+ __version__ = "3.12.16"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rasa-pro
3
- Version: 3.12.14
3
+ Version: 3.12.16
4
4
  Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
5
5
  Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
6
6
  Author: Rasa Technologies GmbH
@@ -92,7 +92,7 @@ rasa/cli/x.py,sha256=C7dLtYXAkD-uj7hNj7Pz5YbOupp2yRcMjQbsEVqXUJ8,6825
92
92
  rasa/constants.py,sha256=5OMUcJ_gjn8qglY37DeUS4g5xe2VZAiLIv8IKwIGWJ0,1364
93
93
  rasa/core/__init__.py,sha256=wTSmsFlgK0Ylvuyq20q9APwpT5xyVJYZfzhs4rrkciM,456
94
94
  rasa/core/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
95
- rasa/core/actions/action.py,sha256=_QfY3ngSF2sf2Y3QDPJo7Nd6F_FA6_zDWgw1OQSLkEk,42676
95
+ rasa/core/actions/action.py,sha256=N57qGlqJHXD_jbN3R6WxMrz-AxW96gQ5GPZssmwTlvg,42782
96
96
  rasa/core/actions/action_clean_stack.py,sha256=xUP-2ipPsPAnAiwP17c-ezmHPSrV4JSUZr-eSgPQwIs,2279
97
97
  rasa/core/actions/action_exceptions.py,sha256=hghzXYN6VeHC-O_O7WiPesCNV86ZTkHgG90ZnQcbai8,724
98
98
  rasa/core/actions/action_hangup.py,sha256=o5iklHG-F9IcRgWis5C6AumVXznxzAV3o9zdduhozEM,994
@@ -318,11 +318,11 @@ rasa/core/lock_store.py,sha256=weupfBiYMz-B_N-LAONCvp-po1uPRdie9imLYn7hFDU,12504
318
318
  rasa/core/migrate.py,sha256=h1dOpXxmVmZlbLVGy1yOU_Obp2KzRiOiL0iuEacA0Cg,14618
319
319
  rasa/core/nlg/__init__.py,sha256=jZuQAhOUcxO-KqqHGqICHSY3oDeXlUiGr2trQDYfG6o,240
320
320
  rasa/core/nlg/callback.py,sha256=0zDQsOa3uV66G3smCVQ9cUdvj-it8tFneIzqShM7NeI,5208
321
- rasa/core/nlg/contextual_response_rephraser.py,sha256=usLMDY5BBbpUJYmx6hcVs0kzCHfr1cX-MCNuQWfnCm4,13678
321
+ rasa/core/nlg/contextual_response_rephraser.py,sha256=kW4C54rWh17a011hMesR1AzYPOurDNdbUIh775O4qsQ,14567
322
322
  rasa/core/nlg/generator.py,sha256=iMTqt0sPRMc55ontZU1svQVPKixDojBXN-cFuOvLMGo,11647
323
323
  rasa/core/nlg/interpolator.py,sha256=hEOhqfMXrAqTZiqjg2t6ZfTK6DJQ5IiX4tJIz2b8Fbw,5190
324
324
  rasa/core/nlg/response.py,sha256=SecKyoBQjEnZr4t-Gg5fkUpkozwGT2lzswIKgD63Dac,7248
325
- rasa/core/nlg/summarize.py,sha256=liXcbJMBm0NaaSH0LwlSs1l0dTby0OEprSzeKeyRyv0,2109
325
+ rasa/core/nlg/summarize.py,sha256=Snm3u80JTIwUuFLGpnLi3AdOnnVxFoBFbNlAfQLG7pE,3193
326
326
  rasa/core/nlg/translate.py,sha256=ZXRvysqXGdtHBJ7x3YkW6zfmnb9DuEGHCMTL41v-M8M,2112
327
327
  rasa/core/persistor.py,sha256=7LCZHAwCM-xrUI38aaJ5dkxJvLdJXWI1TEUKsBo4_EE,21295
328
328
  rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -437,7 +437,7 @@ rasa/dialogue_understanding/patterns/user_silence.py,sha256=xP-QMnd-MsybH5z4g01h
437
437
  rasa/dialogue_understanding/patterns/validate_slot.py,sha256=hqd5AEGT3M3HLNhMwuI9W9kZNCvgU6GyI-2xc2b4kz8,2085
438
438
  rasa/dialogue_understanding/processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
439
439
  rasa/dialogue_understanding/processor/command_processor.py,sha256=3Q7vux5xN1oMwF7SPpbgm4934G_M9nF1dLDPiw_pAIk,29952
440
- rasa/dialogue_understanding/processor/command_processor_component.py,sha256=nvp_q-vM2ZEa7sbNMjRhEeuvmCwVWQl1ckTf0UAXuH4,1606
440
+ rasa/dialogue_understanding/processor/command_processor_component.py,sha256=rkErI_Uo7s3LsEojUSGSRbWGyGaX7GtGOYSJn0V-TI4,1650
441
441
  rasa/dialogue_understanding/stack/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
442
442
  rasa/dialogue_understanding/stack/dialogue_stack.py,sha256=cYV6aQeh0EuOJHODDqK3biqXozYTX8baPgLwHhPxFqs,5244
443
443
  rasa/dialogue_understanding/stack/frames/__init__.py,sha256=wczg4PXtwGlCcRWT4gdtwgO-ZHVDcEYG11qDMe5hRNw,656
@@ -496,7 +496,7 @@ rasa/engine/loader.py,sha256=fCE3L3uweuGrluyisqkbayeIxqo1NUE-EnlwjwwaFcY,1922
496
496
  rasa/engine/recipes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
497
497
  rasa/engine/recipes/config_files/default_config.yml,sha256=E1sAW6Qq_T0QXBDK8NzkhkmSESX9g8Op85h5aCVbYlA,1194
498
498
  rasa/engine/recipes/default_components.py,sha256=pFPA5ljoqQcy7s8mhQDEFFKY-2nqEiGfmr5Db06hVjU,3948
499
- rasa/engine/recipes/default_recipe.py,sha256=K35i3COhsmH2bK9Tl5RpoF4pFl-TXPWSeBSI4cwD118,46509
499
+ rasa/engine/recipes/default_recipe.py,sha256=Lh0N-DJmEKpMz9gLJT82f7lR7oXV5mGM09b9FVOiJ9o,47596
500
500
  rasa/engine/recipes/graph_recipe.py,sha256=vG5HkGWgJh2_F7IBKnylKZ5LJMpGx0THWbO7QQ-ElmE,3391
501
501
  rasa/engine/recipes/recipe.py,sha256=aIzV78BiUEE8B8hY5pkNPu-85CwSjCb6YaJvurz7n6c,3346
502
502
  rasa/engine/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -758,7 +758,7 @@ rasa/shared/utils/health_check/embeddings_health_check_mixin.py,sha256=ASOzDtI3i
758
758
  rasa/shared/utils/health_check/health_check.py,sha256=izixrbc9BxFSsjzwoIw9U0w0VKSX5gMwhey8bcwe1wc,9709
759
759
  rasa/shared/utils/health_check/llm_health_check_mixin.py,sha256=ANP5Q68TRX8p4wWkRCAISsWBV1iYYeGnqWILnR1NawE,957
760
760
  rasa/shared/utils/io.py,sha256=AhuECoXGO367NvWRCBu99utEtTQnyxWVJyKOOpLePpg,15917
761
- rasa/shared/utils/llm.py,sha256=FNZvQ96vJ-_JWJsbkMB7DcKpKqoRWqbRIXaHH1fuD74,31872
761
+ rasa/shared/utils/llm.py,sha256=XF_zF0fobuTiFDMvpRCs8BJ3kCXSXMqzyWaP9rxaYRs,32257
762
762
  rasa/shared/utils/pykwalify_extensions.py,sha256=2fvaysurCST_EMelCsECzkBgvClKYbdHb2Ty9rZhszw,1846
763
763
  rasa/shared/utils/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
764
764
  rasa/shared/utils/schemas/config.yml,sha256=czxSADw9hOIZdhvFP8pVUQo810hs9_C8ZGfCPx17taM,27
@@ -822,9 +822,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
822
822
  rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
823
823
  rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
824
824
  rasa/validator.py,sha256=524VlFTYK0B3iXYveVD6BDC3K0j1QfpzJ9O-TAWczmc,83166
825
- rasa/version.py,sha256=PMp-1E28OWut004RIQzUYhsHS7UgOt8Jqa62KVG7EK8,118
826
- rasa_pro-3.12.14.dist-info/METADATA,sha256=HSngZfB44mZjD5FQS4gF8mVWm_v4EPKTU5PSNwJQAzk,10609
827
- rasa_pro-3.12.14.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
828
- rasa_pro-3.12.14.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
829
- rasa_pro-3.12.14.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
830
- rasa_pro-3.12.14.dist-info/RECORD,,
825
+ rasa/version.py,sha256=07xkyNZWFnv5AhIz6R_2NuxaE4Mp0k4OyT6ZE4N3CmI,118
826
+ rasa_pro-3.12.16.dist-info/METADATA,sha256=wpF6aeIqVLR9V5eCk9H7gvSaeyvA34-ztviscgSr4F8,10609
827
+ rasa_pro-3.12.16.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
828
+ rasa_pro-3.12.16.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
829
+ rasa_pro-3.12.16.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
830
+ rasa_pro-3.12.16.dist-info/RECORD,,