rasa-pro 3.12.20__py3-none-any.whl → 3.12.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

rasa/core/policies/enterprise_search_policy.py CHANGED
@@ -1,7 +1,9 @@
+import glob
 import importlib.resources
 import json
+import os.path
 import re
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text, Tuple
 
 import dotenv
 import structlog
@@ -162,6 +164,8 @@ DEFAULT_ENTERPRISE_SEARCH_PROMPT_WITH_CITATION_TEMPLATE = importlib.resources.re
     "rasa.core.policies", "enterprise_search_prompt_with_citation_template.jinja2"
 )
 
+_ENTERPRISE_SEARCH_CITATION_PATTERN = re.compile(r"\[([^\]]+)\]")
+
 
 class VectorStoreConnectionError(RasaException):
     """Exception raised for errors in connecting to the vector store."""
@@ -378,9 +382,11 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
 
         if store_type == DEFAULT_VECTOR_STORE_TYPE:
             logger.info("enterprise_search_policy.train.faiss")
+            docs_folder = self.vector_store_config.get(SOURCE_PROPERTY)
+            self._validate_documents_folder(docs_folder)
             with self._model_storage.write_to(self._resource) as path:
                 self.vector_store = FAISS_Store(
-                    docs_folder=self.vector_store_config.get(SOURCE_PROPERTY),
+                    docs_folder=docs_folder,
                     embeddings=embeddings,
                     index_path=path,
                     create_index=True,
@@ -760,6 +766,33 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
             result[domain.index_for_action(action_name)] = score  # type: ignore[assignment]
         return result
 
+    @classmethod
+    def _validate_documents_folder(cls, docs_folder: str) -> None:
+        if not os.path.exists(docs_folder) or not os.path.isdir(docs_folder):
+            error_message = (
+                f"Document source directory does not exist or is not a "
+                f"directory: '{docs_folder}'. "
+                "Please specify a valid path to the documents source directory in the "
+                "vector_store configuration."
+            )
+            logger.error(
+                "enterprise_search_policy.train.faiss.invalid_source_directory",
+                message=error_message,
+            )
+            print_error_and_exit(error_message)
+
+        docs = glob.glob(os.path.join(docs_folder, "*.txt"), recursive=True)
+        if not docs or len(docs) < 1:
+            error_message = (
+                f"Document source directory is empty: '{docs_folder}'. "
+                "Please add documents to this directory or specify a different one."
+            )
+            logger.error(
+                "enterprise_search_policy.train.faiss.source_directory_empty",
+                message=error_message,
+            )
+            print_error_and_exit(error_message)
+
     @classmethod
     def load(
         cls,
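
Training a FAISS-backed Enterprise Search policy now fails fast when the configured document source is missing, is not a directory, or contains no *.txt files. A minimal standalone sketch of the same check (plain Python, not the rasa API; the folder name is hypothetical):

    import glob
    import os.path

    def validate_docs_folder(docs_folder: str) -> None:
        # the folder must exist and be a directory
        if not os.path.exists(docs_folder) or not os.path.isdir(docs_folder):
            raise SystemExit(f"Not a valid directory: '{docs_folder}'")
        # and it must contain at least one .txt document
        if not glob.glob(os.path.join(docs_folder, "*.txt")):
            raise SystemExit(f"No .txt documents found in: '{docs_folder}'")

    # validate_docs_folder("docs/")  # exits with an error message if the folder is invalid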
@@ -833,7 +866,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
             return None
 
         source = merged_config.get(VECTOR_STORE_PROPERTY, {}).get(SOURCE_PROPERTY)
-        if not source:
+        if not source or not os.path.exists(source) or not os.path.isdir(source):
             return None
 
         docs = FAISS_Store.load_documents(source)
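
At load time the same condition is handled defensively: if the configured source path is missing or not a directory, the policy returns None instead of trying to load documents from it. A sketch of that guard with a hypothetical config dict (the literal keys stand in for VECTOR_STORE_PROPERTY and SOURCE_PROPERTY):

    import os.path

    merged_config = {"vector_store": {"source": "./docs"}}

    source = merged_config.get("vector_store", {}).get("source")
    if not source or not os.path.exists(source) or not os.path.isdir(source):
        # mirrors the new guard: skip loading rather than fail on a bad path
        print("skipping document load")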
@@ -870,10 +903,18 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
 
     @staticmethod
     def post_process_citations(llm_answer: str) -> str:
-        """Post-process the LLM answer.
-
-        Re-writes the bracketed numbers to start from 1 and
-        re-arranges the sources to follow the enumeration order.
+        """Post-processes the LLM answer to correctly number and sort citations and
+        sources.
+
+        - Handles both single `[1]` and grouped `[1, 3]` citations.
+        - Rewrites the numbers in square brackets in the answer text to start from 1
+          and be sorted within each group.
+        - Reorders the sources according to the order of their first appearance
+          in the text.
+        - Removes citations from the text that point to sources missing from
+          the source list.
+        - Keeps sources that are not cited in the text, placing them at the end
+          of the list.
 
         Args:
             llm_answer: The LLM answer.
@@ -887,77 +928,160 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
 
         # Split llm_answer into answer and citations
         try:
-            answer, citations = llm_answer.rsplit("Sources:", 1)
+            answer_part, sources_part = llm_answer.rsplit("Sources:", 1)
         except ValueError:
-            # if there is no "Sources:" in the llm_answer
-            return llm_answer
-
-        # Find all source references in the answer
-        pattern = r"\[\s*(\d+(?:\s*,\s*\d+)*)\s*\]"
-        matches = re.findall(pattern, answer)
-        old_source_indices = [
-            int(num.strip()) for match in matches for num in match.split(",")
-        ]
+            # if there is no "Sources:" separator, return the original llm_answer
+            return llm_answer.strip()
+
+        # Parse the sources block to extract valid sources and other lines
+        valid_sources, other_source_lines = EnterpriseSearchPolicy._parse_sources_block(
+            sources_part
+        )
+
+        # Find all unique, valid citations in the answer text in their order
+        # of appearance
+        cited_order = EnterpriseSearchPolicy._get_cited_order(
+            answer_part, valid_sources
+        )
+
+        # Create a mapping from the old source numbers to the new, sequential numbers.
+        # For example, if the citation order in the text was [3, 1, 2], this map
+        # becomes {3: 1, 1: 2, 2: 3}. This allows for a quick lookup when rewriting
+        # the citations
+        renumbering_map = {
+            old_num: new_num + 1 for new_num, old_num in enumerate(cited_order)
+        }
+
+        # Rewrite the citations in the answer text based on the renumbering map
+        processed_answer = EnterpriseSearchPolicy._rewrite_answer_citations(
+            answer_part, renumbering_map
+        )
+
+        # Build the new list of sources
+        new_sources_list = EnterpriseSearchPolicy._build_final_sources_list(
+            cited_order,
+            renumbering_map,
+            valid_sources,
+            other_source_lines,
+        )
+
+        if len(new_sources_list) > 0:
+            processed_answer += "\nSources:\n" + "\n".join(new_sources_list)
 
-        # Map old source references to the correct enumeration
-        renumber_mapping = {num: idx + 1 for idx, num in enumerate(old_source_indices)}
-
-        # remove whitespace from original source citations in answer
-        for match in matches:
-            answer = answer.replace(f"[{match}]", f"[{match.replace(' ', '')}]")
-
-        new_answer = []
-        for word in answer.split():
-            matches = re.findall(pattern, word)
-            if matches:
-                for match in matches:
-                    if "," in match:
-                        old_indices = [
-                            int(num.strip()) for num in match.split(",") if num
-                        ]
-                        new_indices = [
-                            renumber_mapping[old_index]
-                            for old_index in old_indices
-                            if old_index in renumber_mapping
-                        ]
-                        if not new_indices:
-                            continue
-
-                        word = word.replace(
-                            match, f"{', '.join(map(str, new_indices))}"
-                        )
-                    else:
-                        old_index = int(match.strip("[].,:;?!"))
-                        new_index = renumber_mapping.get(old_index)
-                        if not new_index:
-                            continue
-
-                        word = word.replace(str(old_index), str(new_index))
-            new_answer.append(word)
-
-        # join the words
-        joined_answer = " ".join(new_answer)
-        joined_answer += "\nSources:\n"
-
-        new_sources: List[str] = []
-
-        for line in citations.split("\n"):
-            pattern = r"(?<=\[)\d+"
-            match = re.search(pattern, line)
+        return processed_answer
+
+    @staticmethod
+    def _parse_sources_block(sources_part: str) -> Tuple[Dict[int, str], List[str]]:
+        """Parses the sources block from the LLM response.
+        Returns a tuple containing:
+        - A dictionary of valid sources matching the "[1] ..." format,
+          where the key is the source number
+        - A list of other source lines that do not match the specified format
+        """
+        valid_sources: Dict[int, str] = {}
+        other_source_lines: List[str] = []
+        source_line_pattern = re.compile(r"^\s*\[(\d+)\](.*)")
+
+        source_lines = sources_part.strip().split("\n")
+
+        for line in source_lines:
+            line = line.strip()
+            if not line:
+                continue
+
+            match = source_line_pattern.match(line)
             if match:
-                old_index = int(match.group(0))
-                new_index = renumber_mapping[old_index]
-                # replace only the first occurrence of the old index
-                line = line.replace(f"[{old_index}]", f"[{new_index}]", 1)
+                num = int(match.group(1))
+                valid_sources[num] = line
+            else:
+                other_source_lines.append(line)
+
+        return valid_sources, other_source_lines
+
+    @staticmethod
+    def _get_cited_order(
+        answer_part: str, available_sources: Dict[int, str]
+    ) -> List[int]:
+        """Find all unique, valid citations in the answer text in their order
+        # of appearance
+        """
+        cited_order: List[int] = []
+        seen_indices = set()
+
+        for match in _ENTERPRISE_SEARCH_CITATION_PATTERN.finditer(answer_part):
+            content = match.group(1)
+            indices_str = [s.strip() for s in content.split(",")]
+            for index_str in indices_str:
+                if index_str.isdigit():
+                    index = int(index_str)
+                    if index in available_sources and index not in seen_indices:
+                        cited_order.append(index)
+                        seen_indices.add(index)
+
+        return cited_order
+
+    @staticmethod
+    def _rewrite_answer_citations(
+        answer_part: str, renumber_map: Dict[int, int]
+    ) -> str:
+        """Rewrites the citations in the answer text based on the renumbering map."""
+
+        def replacer(match: re.Match) -> str:
+            content = match.group(1)
+            old_indices_str = [s.strip() for s in content.split(",")]
+            new_indices = [
+                renumber_map[int(s)]
+                for s in old_indices_str
+                if s.isdigit() and int(s) in renumber_map
+            ]
+            if not new_indices:
+                return ""
+
+            return f"[{', '.join(map(str, sorted(list(set(new_indices)))))}]"
+
+        processed_answer = _ENTERPRISE_SEARCH_CITATION_PATTERN.sub(
+            replacer, answer_part
+        )
+
+        # Clean up formatting after replacements
+        processed_answer = re.sub(r"\s+([,.?])", r"\1", processed_answer)
+        processed_answer = processed_answer.replace("[]", " ")
+        processed_answer = re.sub(r"\s+", " ", processed_answer)
+        processed_answer = processed_answer.strip()
+
+        return processed_answer
+
+    @staticmethod
+    def _build_final_sources_list(
+        cited_order: List[int],
+        renumbering_map: Dict[int, int],
+        valid_sources: Dict[int, str],
+        other_source_lines: List[str],
+    ) -> List[str]:
+        """Builds the final list of sources based on the cited order and
+        renumbering map.
+        """
+        new_sources_list: List[str] = []
+
+        # First, add the sorted, used sources
+        for old_num in cited_order:
+            new_num = renumbering_map[old_num]
+            source_line = valid_sources[old_num]
+            new_sources_list.append(
+                source_line.replace(f"[{old_num}]", f"[{new_num}]", 1)
+            )
 
-                # insert the line into the new_index position
-                new_sources.insert(new_index - 1, line)
-            elif line.strip():
-                new_sources.append(line)
+        # Then, add the unused but validly numbered sources
+        used_source_nums = set(cited_order)
+        # Sort by number to ensure a consistent order for uncited sources
+        for num, line in sorted(valid_sources.items()):
+            if num not in used_source_nums:
+                new_sources_list.append(line)
 
-        joined_sources = "\n".join(new_sources)
+        # Finally, add any other source lines
+        new_sources_list.extend(other_source_lines)
 
-        return joined_answer + joined_sources
+        return new_sources_list
 
     @classmethod
     def _perform_health_checks(
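
Taken together, the new helpers renumber citations by order of first appearance, drop citations that do not point at a listed source, and reorder the source list to match. A small usage sketch (assuming rasa-pro 3.12.21 is installed; the input text is made up):

    from rasa.core.policies.enterprise_search_policy import EnterpriseSearchPolicy

    raw = (
        "Refunds take 5 days [3] and fees may apply [1, 3].\n"
        "Sources:\n"
        "[1] refund_policy.txt\n"
        "[3] fees.txt"
    )
    print(EnterpriseSearchPolicy.post_process_citations(raw))
    # Expected, per the logic above:
    # Refunds take 5 days [1] and fees may apply [1, 2].
    # Sources:
    # [1] fees.txt
    # [2] refund_policy.txt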
rasa/dialogue_understanding/processor/command_processor.py CHANGED
@@ -64,12 +64,6 @@ from rasa.shared.nlu.constants import COMMANDS
 
 structlogger = structlog.get_logger()
 
-CANNOT_HANDLE_REASON = (
-    "A command generator attempted to set a slot "
-    "with a value extracted by an extractor "
-    "that is incompatible with the slot mapping type."
-)
-
 
 def contains_command(commands: List[Command], typ: Type[Command]) -> bool:
     """Check if a list of commands contains a command of a given type.
@@ -588,6 +582,11 @@ def clean_up_slot_command(
             "command_processor.clean_up_slot_command.skip_command_slot_not_in_domain",
             command=command,
         )
+        resulting_commands.append(
+            CannotHandleCommand(
+                reason="The slot predicted by the LLM is not defined in the domain."
+            )
+        )
         return resulting_commands
 
     if not should_slot_be_set(slot, command, resulting_commands):
@@ -606,7 +605,10 @@ def clean_up_slot_command(
         for command in resulting_commands
     )
 
-    cannot_handle = CannotHandleCommand(reason=CANNOT_HANDLE_REASON)
+    cannot_handle = CannotHandleCommand(
+        reason="A command generator attempted to set a slot with a value extracted "
+        "by an extractor that is incompatible with the slot mapping type."
+    )
     if not slot_command_exists_already and cannot_handle not in resulting_commands:
         resulting_commands.append(cannot_handle)
 
@@ -640,9 +642,9 @@ def clean_up_slot_command(
         resulting_commands.append(command)
         return resulting_commands
 
-    if (slot := tracker.slots.get(command.name)) is not None and slot.value == str(
-        command.value
-    ):
+    if (slot := tracker.slots.get(command.name)) is not None and str(
+        slot.value
+    ) == str(command.value):
         # the slot is already set, we don't need to set it again
         structlogger.debug(
             "command_processor.clean_up_slot_command.skip_command_slot_already_set",
rasa/llm_fine_tuning/annotation_module.py CHANGED
@@ -9,8 +9,8 @@ from rasa.e2e_test.e2e_test_case import ActualStepOutput, TestCase, TestStep, Te
 from rasa.e2e_test.e2e_test_runner import TEST_TURNS_TYPE, E2ETestRunner
 from rasa.llm_fine_tuning.conversations import Conversation, ConversationStep
 from rasa.llm_fine_tuning.storage import StorageContext
-from rasa.shared.core.constants import USER
-from rasa.shared.core.events import UserUttered
+from rasa.shared.core.constants import BOT, USER
+from rasa.shared.core.events import BotUttered, UserUttered
 from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import FinetuningDataPreparationException
 from rasa.shared.nlu.constants import LLM_COMMANDS, LLM_PROMPT
@@ -83,16 +83,18 @@ def generate_conversation(
         Conversation.
     """
     steps = []
-    tracker_event_indices = [
-        i for i, event in enumerate(tracker.events) if isinstance(event, UserUttered)
-    ]
-
-    if len(test_case.steps) != len(tracker_event_indices):
-        raise FinetuningDataPreparationException(
-            "Number of test case steps and tracker events do not match."
-        )
 
     if assertions_used:
+        tracker_event_indices = [
+            i
+            for i, event in enumerate(tracker.events)
+            if isinstance(event, UserUttered)
+        ]
+        if len(test_case.steps) != len(tracker_event_indices):
+            raise FinetuningDataPreparationException(
+                "Number of test case steps and tracker events do not match."
+            )
+
         # we only have user steps, extract the bot response from the bot uttered
         # events of the test turn
@@ -110,8 +112,30 @@ def generate_conversation(
             )
             steps.extend(_create_bot_test_steps(test_turns[i]))
     else:
+        tracker_event_indices = [
+            i
+            for i, event in enumerate(tracker.events)
+            if isinstance(event, UserUttered) or isinstance(event, BotUttered)
+        ]
+
+        # Generally, we expect one or more bot response(s) for each user utterance
+        # in the test case, so that we can evaluate the actual bot response.
+        # If the test case ends with one or more user utterance(s) instead,
+        # we should thus trim those from the test case steps.
+        # This only applies to test cases that have at least one bot utterance;
+        # otherwise, all test case steps would be removed.
+        has_bot_utterance = any(step.actor == BOT for step in test_case.steps)
+        i = len(test_case.steps)
+        if has_bot_utterance:
+            while i > 0 and test_case.steps[i - 1].actor == USER:
+                i -= 1
+        test_case_steps = test_case.steps[:i]
+
+        # If the number of test case steps and tracker events differ,
+        # using zip ensures we only process pairs that exist in both lists.
+        # Prevents index errors and ensures we don't process unmatched steps or events.
         for i, (original_step, tracker_event_index) in enumerate(
-            zip(test_case.steps, tracker_event_indices)
+            zip(test_case_steps, tracker_event_indices)
         ):
             if original_step.actor == USER:
                 previous_turn = _get_previous_actual_step_output(test_turns, i)
@@ -127,6 +151,14 @@ def generate_conversation(
             else:
                 steps.append(original_step)
 
+    # the tracker should only include events up to the last bot utterance
+    # so that the resulting transcript ends with the last bot utterance too
+    # only applies to test cases that have at least one bot utterance
+    if has_bot_utterance and test_case.steps and test_case.steps[-1].actor == USER:
+        event_to_go_to = tracker_event_indices[len(test_case_steps)] - 1
+        timestamp = tracker.events[event_to_go_to].timestamp
+        tracker = tracker.travel_back_in_time(timestamp)
+
     # Some messages in an e2e test case could be mapped to commands via
     # 'NLUCommandAdapter', e.g. the message will not be annotated with a prompt and
     # commands pair. Only convert steps that have a prompt and commands present into a
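
Net effect of the new else-branch logic: when a test case contains at least one bot turn but ends with user turns, the trailing user steps are dropped before pairing steps with tracker events, and the tracker is rolled back so the transcript also ends on the last bot utterance. A standalone sketch of the trimming step, using made-up actor labels:

    USER, BOT = "user", "bot"
    actors = [USER, BOT, USER, BOT, USER, USER]  # actor of each test case step

    has_bot_utterance = any(a == BOT for a in actors)
    i = len(actors)
    if has_bot_utterance:
        # walk back over trailing user steps so the case ends on a bot step
        while i > 0 and actors[i - 1] == USER:
            i -= 1
    trimmed = actors[:i]
    print(trimmed)  # ['user', 'bot', 'user', 'bot']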
rasa/shared/core/constants.py CHANGED
@@ -181,6 +181,7 @@ class SetSlotExtractor(Enum):
 # the keys for `State` (USER, PREVIOUS_ACTION, SLOTS, ACTIVE_LOOP)
 # represent the origin of a `SubState`
 USER = "user"
+BOT = "bot"
 SLOTS = "slots"
 
 USE_TEXT_FOR_FEATURIZATION = "use_text_for_featurization"
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.12.20"
+__version__ = "3.12.21"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rasa-pro
-Version: 3.12.20
+Version: 3.12.21
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
 Author: Rasa Technologies GmbH
@@ -327,7 +327,7 @@ rasa/core/nlg/translate.py,sha256=ZXRvysqXGdtHBJ7x3YkW6zfmnb9DuEGHCMTL41v-M8M,21
 rasa/core/persistor.py,sha256=7LCZHAwCM-xrUI38aaJ5dkxJvLdJXWI1TEUKsBo4_EE,21295
 rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/core/policies/ensemble.py,sha256=XoHxU0jcb_io_LBOpjJffylzqtGEB7CH9ivhRyO8pDc,12960
-rasa/core/policies/enterprise_search_policy.py,sha256=cb48JXKZq7FBCNc4yE-VhSVNyvMRTiBEQxQNtTru9pQ,37152
+rasa/core/policies/enterprise_search_policy.py,sha256=QKN8mLEjDqVgLb78z3w_IJWanvaSacj4_-BxEUBNBKw,41961
 rasa/core/policies/enterprise_search_prompt_template.jinja2,sha256=dCS_seyBGxMQoMsOjjvPp0dd31OSzZCJSZeev1FJK5Q,1187
 rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2,sha256=va9rpP97dN3PKoJZOVfyuISt3cPBlb10Pqyz25RwO_Q,3294
 rasa/core/policies/flow_policy.py,sha256=597G62hrLF_CAMCvu-TPRldFnjMP2XEIkhcIaPWcQAc,7489
@@ -436,7 +436,7 @@ rasa/dialogue_understanding/patterns/skip_question.py,sha256=fJ1MC0WEEtS-BpnGJEf
 rasa/dialogue_understanding/patterns/user_silence.py,sha256=xP-QMnd-MsybH5z4g01hBv4OLOHcw6m3rc26LQfe2zo,1140
 rasa/dialogue_understanding/patterns/validate_slot.py,sha256=hqd5AEGT3M3HLNhMwuI9W9kZNCvgU6GyI-2xc2b4kz8,2085
 rasa/dialogue_understanding/processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/dialogue_understanding/processor/command_processor.py,sha256=wxTHxslb2jHGMEQ9CMwEbabhbb5iZPXOvYX_GsCNY1c,30017
+rasa/dialogue_understanding/processor/command_processor.py,sha256=dSakuFOQuYwatYafiFCbr3P_g59BnjPAC1iBb2wfSKk,30169
 rasa/dialogue_understanding/processor/command_processor_component.py,sha256=rkErI_Uo7s3LsEojUSGSRbWGyGaX7GtGOYSJn0V-TI4,1650
 rasa/dialogue_understanding/stack/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/stack/dialogue_stack.py,sha256=cYV6aQeh0EuOJHODDqK3biqXozYTX8baPgLwHhPxFqs,5244
@@ -534,7 +534,7 @@ rasa/hooks.py,sha256=5ZMrqNz323w56MMY6E8jeZ_YXgRqq8p-yi18S2XOmbo,4061
 rasa/jupyter.py,sha256=TCYVD4QPQIMmfA6ZwDUBOBTAECwCwbU2XOkosodLO9k,1782
 rasa/keys,sha256=2Stg1fstgJ203cOoW1B2gGMY29fhEnjIfTVxKv_fqPo,101
 rasa/llm_fine_tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/llm_fine_tuning/annotation_module.py,sha256=PKYbYtgGQ0tm0PnLANzC3FDTjeRkLH_AJi4RUAkgx4Q,9533
+rasa/llm_fine_tuning/annotation_module.py,sha256=7vKwesRLvtKQAt9etHIT51HN8D21dSR3smNY7aIbGx4,11267
 rasa/llm_fine_tuning/conversations.py,sha256=qzoTFQiwADmzL9mocqML4a-nAgEu6hlOSE3K87LvhM0,4272
 rasa/llm_fine_tuning/llm_data_preparation_module.py,sha256=Vh6HHDvH1ueaNgBWnzIA7ymcTwHpqVvKxIPAnMKZtyY,7153
 rasa/llm_fine_tuning/paraphrasing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -628,7 +628,7 @@ rasa/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/constants.py,sha256=u9GnSSQYRjYN_mjd7XHMGgoVc6ipoiZQuLt3bFOF0O0,12264
 rasa/shared/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/core/command_payload_reader.py,sha256=puHYsp9xbX0YQm2L1NDBItOFmdzI7AzmfGefgcHiCc0,3871
-rasa/shared/core/constants.py,sha256=gwIZHjQYafHnBlMe9_jUiIPm17hxYG9R1MOCtxeC1Ns,6337
+rasa/shared/core/constants.py,sha256=kirMr9_Ls18szCCo3U80fx6AAkLu73tu2OGaLlu4U9s,6349
 rasa/shared/core/conversation.py,sha256=0nUhcbQkPDnO3_Rig7oiinrWmPy5fsVQs_U6Fx1hG5c,1384
 rasa/shared/core/domain.py,sha256=piJu4Kr2exC9ehC3e2oNaxPxXkeIhOYoQJQQOuzMw18,81638
 rasa/shared/core/events.py,sha256=kTUWSpDepj3kpjjXveYXz3h2XcIQV3Sq8h7MTbx5fMw,86489
@@ -822,9 +822,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
 rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
 rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
 rasa/validator.py,sha256=524VlFTYK0B3iXYveVD6BDC3K0j1QfpzJ9O-TAWczmc,83166
-rasa/version.py,sha256=Rxqvg0GxwQ7GJ1Bk3AZXfIXCMz_pbaBjJE_a9rih1Y8,118
-rasa_pro-3.12.20.dist-info/METADATA,sha256=pedBTuBkPO6rXzgZSv3EDHKuYjfVOpNqQhKORJ2MbZg,10609
-rasa_pro-3.12.20.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
-rasa_pro-3.12.20.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-rasa_pro-3.12.20.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
-rasa_pro-3.12.20.dist-info/RECORD,,
+rasa/version.py,sha256=qwNQwF1ZW-TW5WmAk9DusZovcswu5IOz6HUAl37Qbfk,118
+rasa_pro-3.12.21.dist-info/METADATA,sha256=CPpfxe6f-18tMn3193QUqhGe6v_u9QFSW58VENd40hU,10609
+rasa_pro-3.12.21.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+rasa_pro-3.12.21.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+rasa_pro-3.12.21.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+rasa_pro-3.12.21.dist-info/RECORD,,