rasa-pro 3.12.0.dev7__py3-none-any.whl → 3.12.0.dev9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (27)
  1. rasa/dialogue_understanding/commands/can_not_handle_command.py +2 -2
  2. rasa/dialogue_understanding/commands/cancel_flow_command.py +2 -2
  3. rasa/dialogue_understanding/commands/change_flow_command.py +2 -2
  4. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +2 -2
  5. rasa/dialogue_understanding/commands/clarify_command.py +2 -2
  6. rasa/dialogue_understanding/commands/human_handoff_command.py +2 -2
  7. rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
  8. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +2 -2
  9. rasa/dialogue_understanding/commands/set_slot_command.py +2 -2
  10. rasa/dialogue_understanding/commands/skip_question_command.py +2 -2
  11. rasa/dialogue_understanding/commands/start_flow_command.py +2 -2
  12. rasa/dialogue_understanding/commands/utils.py +1 -1
  13. rasa/dialogue_understanding/generator/command_parser.py +1 -1
  14. rasa/dialogue_understanding/generator/llm_based_command_generator.py +2 -1
  15. rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2 +39 -41
  16. rasa/dialogue_understanding_test/du_test_case.py +88 -12
  17. rasa/dialogue_understanding_test/du_test_result.py +96 -19
  18. rasa/dialogue_understanding_test/io.py +73 -16
  19. rasa/shared/providers/llm/_base_litellm_client.py +40 -0
  20. rasa/shared/utils/llm.py +86 -1
  21. rasa/tracing/instrumentation/attribute_extractors.py +10 -0
  22. rasa/version.py +1 -1
  23. {rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/METADATA +1 -1
  24. {rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/RECORD +27 -27
  25. {rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/NOTICE +0 -0
  26. {rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/WHEEL +0 -0
  27. {rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/commands/can_not_handle_command.py CHANGED
@@ -74,7 +74,7 @@ class CannotHandleCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "CannotHandle()"
+        return "cannot handle"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> CannotHandleCommand:
@@ -86,4 +86,4 @@ class CannotHandleCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"CannotHandle\(\)"
+        return r"^cannot handle$"
rasa/dialogue_understanding/commands/cancel_flow_command.py CHANGED
@@ -124,7 +124,7 @@ class CancelFlowCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "CancelFlow()"
+        return "cancel"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> CancelFlowCommand:
@@ -133,4 +133,4 @@ class CancelFlowCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"CancelFlow\(\)"
+        return r"^cancel$"
rasa/dialogue_understanding/commands/change_flow_command.py CHANGED
@@ -48,7 +48,7 @@ class ChangeFlowCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "ChangeFlow()"
+        return "change"
 
     @staticmethod
     def from_dsl(match: re.Match, **kwargs: Any) -> ChangeFlowCommand:
@@ -57,4 +57,4 @@ class ChangeFlowCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"ChangeFlow\(\)"
+        return r"^change"
rasa/dialogue_understanding/commands/chit_chat_answer_command.py CHANGED
@@ -59,7 +59,7 @@ class ChitChatAnswerCommand(FreeFormAnswerCommand):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "ChitChat()"
+        return "chat"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> ChitChatAnswerCommand:
@@ -68,4 +68,4 @@ class ChitChatAnswerCommand(FreeFormAnswerCommand):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"ChitChat\(\)"
+        return r"^chat$"
rasa/dialogue_understanding/commands/clarify_command.py CHANGED
@@ -89,7 +89,7 @@ class ClarifyCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return f"Clarify({', '.join(self.options)})"
+        return f"clarify {' '.join(self.options)}"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> Optional[ClarifyCommand]:
@@ -99,4 +99,4 @@ class ClarifyCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"Clarify\(([\"\'a-zA-Z0-9_, ]*)\)"
+        return r"^clarify([\"\'a-zA-Z0-9_, ]*)$"
rasa/dialogue_understanding/commands/human_handoff_command.py CHANGED
@@ -66,7 +66,7 @@ class HumanHandoffCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "HumanHandoff()"
+        return "hand over"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> HumanHandoffCommand:
@@ -75,4 +75,4 @@ class HumanHandoffCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"HumanHandoff\(\)"
+        return r"^hand over$"
rasa/dialogue_understanding/commands/knowledge_answer_command.py CHANGED
@@ -59,7 +59,7 @@ class KnowledgeAnswerCommand(FreeFormAnswerCommand):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "SearchAndReply()"
+        return "search"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> KnowledgeAnswerCommand:
@@ -68,4 +68,4 @@ class KnowledgeAnswerCommand(FreeFormAnswerCommand):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"SearchAndReply\(\)"
+        return r"^search$"
rasa/dialogue_understanding/commands/repeat_bot_messages_command.py CHANGED
@@ -60,7 +60,7 @@ class RepeatBotMessagesCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "RepeatLastBotMessages()"
+        return "repeat message"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> RepeatBotMessagesCommand:
@@ -69,4 +69,4 @@ class RepeatBotMessagesCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"RepeatLastBotMessages\(\)"
+        return r"^repeat message$"
rasa/dialogue_understanding/commands/set_slot_command.py CHANGED
@@ -170,7 +170,7 @@ class SetSlotCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return f"SetSlot({self.name}, {self.value})"
+        return f"set {self.name} {self.value}"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> SetSlotCommand:
@@ -181,4 +181,4 @@ class SetSlotCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"""SetSlot\(['"]?([a-zA-Z_][a-zA-Z0-9_-]*)['"]?, ?['"]?(.*)['"]?\)"""
+        return r"""^set ['"]?([a-zA-Z_][a-zA-Z0-9_-]*)['"]? ['"]?(.+?)['"]?$"""
rasa/dialogue_understanding/commands/skip_question_command.py CHANGED
@@ -75,7 +75,7 @@ class SkipQuestionCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return "SkipQuestion()"
+        return "skip"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> SkipQuestionCommand:
@@ -84,4 +84,4 @@ class SkipQuestionCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"SkipQuestion\(\)"
+        return r"^skip$"
rasa/dialogue_understanding/commands/start_flow_command.py CHANGED
@@ -110,7 +110,7 @@ class StartFlowCommand(Command):
 
     def to_dsl(self) -> str:
         """Converts the command to a DSL string."""
-        return f"StartFlow({self.flow})"
+        return f"start {self.flow}"
 
     @classmethod
     def from_dsl(cls, match: re.Match, **kwargs: Any) -> Optional[StartFlowCommand]:
@@ -119,4 +119,4 @@ class StartFlowCommand(Command):
 
     @staticmethod
     def regex_pattern() -> str:
-        return r"StartFlow\(['\"]?([a-zA-Z0-9_-]+)['\"]?\)"
+        return r"^start ['\"]?([a-zA-Z0-9_-]+)['\"]?$"
rasa/dialogue_understanding/commands/utils.py CHANGED
@@ -27,7 +27,7 @@ def extract_cleaned_options(options_str: str) -> List[str]:
     """Extract and clean options from a string."""
     return sorted(
         opt.strip().strip('"').strip("'")
-        for opt in options_str.split(",")
+        for opt in options_str.split(" ")
         if opt.strip()
     )
 
rasa/dialogue_understanding/generator/command_parser.py CHANGED
@@ -125,7 +125,7 @@ def _parse_standard_commands(
     commands: List[Command] = []
     for command_clz in standard_commands:
         pattern = _get_compiled_pattern(command_clz.regex_pattern())
-        if match := pattern.search(action):
+        if match := pattern.search(action.strip()):
             parsed_command = command_clz.from_dsl(match, **kwargs)
             if _additional_parsing_fn := _get_additional_parsing_logic(command_clz):
                 parsed_command = _additional_parsing_fn(parsed_command, flows, **kwargs)
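Taken together, these changes replace the `StartFlow(...)` / `SetSlot(...)` call syntax with lowercase keyword lines and make the parser strip each action line before matching. Below is a minimal, standalone sketch of how the new patterns behave; the regexes are copied from the diffs above, while the real parser additionally calls each command's `from_dsl` and cleans clarify options via `extract_cleaned_options`:

```python
import re

# Patterns copied from the updated regex_pattern() methods above.
PATTERNS = {
    "start flow": r"^start ['\"]?([a-zA-Z0-9_-]+)['\"]?$",
    "set slot": r"""^set ['"]?([a-zA-Z_][a-zA-Z0-9_-]*)['"]? ['"]?(.+?)['"]?$""",
    "clarify": r"^clarify([\"\'a-zA-Z0-9_, ]*)$",
    "cancel flow": r"^cancel$",
}


def parse_action(action: str):
    # Mirrors _parse_standard_commands: strip the raw line before matching.
    action = action.strip()
    for name, pattern in PATTERNS.items():
        if match := re.compile(pattern).search(action):
            return name, match.groups()
    return None


print(parse_action("start transfer_money"))
# ('start flow', ('transfer_money',))
print(parse_action("set transfer_money_recipient Freddy"))
# ('set slot', ('transfer_money_recipient', 'Freddy'))
print(parse_action("  cancel  "))
# ('cancel flow', ())
print(parse_action("clarify add_contact remove_contact"))
# ('clarify', (' add_contact remove_contact',)) -- the captured options still need splitting
```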
rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -357,7 +357,8 @@ class LLMBasedCommandGenerator(
                     "slots": slots_with_info,
                 }
             )
-        return result
+
+        return sorted(result, key=lambda x: x["name"])
 
     @staticmethod
     def is_extractable(
rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2 CHANGED
@@ -1,60 +1,58 @@
 Your task is to analyze the current conversation context and generate a list of actions to start new business processes that we call flows, to extract slots, or respond to small talk and knowledge requests.
 
-These are the flows that can be started, with their description and slots:
+
+## Available Actions:
+* Starting a flow, described by "start flow_name". For example, "start transfer_money" or "start list_contacts"
+* Slot setting, described by "set slot_name slot_value". For example, "set transfer_money_recipient Freddy". Can be used to correct and change previously set values.
+* Cancelling the current flow, described by "cancel"
+* Clarifying which flow should be started in ambiguous cases. For example, "clarify list_contacts add_contact remove_contact" if the user just wrote "contacts" and there are multiple potential candidates.
+* Skipping the current question when the user explicitly asks for it, described by "skip".
+* Responding to knowledge-oriented user messages, described by "search"
+* Responding to a casual, non-task-oriented user message, described by "chat".
+* Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one, described by "hand over".
+
+
+## General Tips
+* Do not fill slots with abstract values or placeholders.
+* Only use information provided by the user.
+* Use clarification in ambiguous cases.
+* Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
+* Strictly adhere to the provided action format.
+* For categorical slots try to match the user message with potential slot values. Use "other" if you cannot match it
+* Focus on the last message and take it one step at a time.
+* Use the previous conversation steps only to aid understanding.
+
+
+## Available Flows:
 {% for flow in available_flows %}
-{{ flow.name }}: {{ flow.description }}
+* {{ flow.name }}: {{ flow.description }}
 {% for slot in flow.slots -%}
-slot: {{ slot.name }}{% if slot.description %} ({{ slot.description }}){% endif %}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
+* {{ slot.name }}{% if slot.description %} ({{ slot.description }}){% endif %}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
 {% endfor %}
 {%- endfor %}
 
-===
-Here is what happened previously in the conversation:
-{{ current_conversation }}
 
-===
+## Current State
 {% if current_flow != None %}
 You are currently in the flow "{{ current_flow }}".
 You have just asked the user for the slot "{{ current_slot }}"{% if current_slot_description %} ({{ current_slot_description }}){% endif %}.
 
 {% if flow_slots|length > 0 %}
-Here are the slots of the currently active flow:
+Here are the slots of the flow "{{ current_flow }}":
 {% for slot in flow_slots -%}
-- name: {{ slot.name }}, value: {{ slot.value }}, type: {{ slot.type }}, description: {{ slot.description}}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
+* name: {{ slot.name }}, value: {{ slot.value }}, type: {{ slot.type }}, description: {{ slot.description}}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
 {% endfor %}
 {% endif %}
 {% else %}
-You are currently not in any flow and so there are no active slots.
-This means you can only set a slot if you first start a flow that requires that slot.
-{% endif %}
-If you start a flow, first start the flow and then optionally fill that flow's slots with information the user provided in their message.
-
-The user just said """{{ user_message }}""".
-
-===
-Based on this information generate a list of actions you want to take. Your job is to start flows and to fill slots where appropriate. Any logic of what happens afterwards is handled by the flow engine. These are your available actions:
-* Slot setting, described by "SetSlot(slot_name, slot_value)". An example would be "SetSlot(recipient, Freddy)"
-* Starting another flow, described by "StartFlow(flow_name)". An example would be "StartFlow(transfer_money)"
-* Cancelling the current flow, described by "CancelFlow()"
-* Clarifying which flow should be started. An example would be Clarify(list_contacts, add_contact, remove_contact) if the user just wrote "contacts" and there are multiple potential candidates. It also works with a single flow name to confirm you understood correctly, as in Clarify(transfer_money).
-* Intercepting and handle user messages with the intent to bypass the current step in the flow, described by "SkipQuestion()". Examples of user skip phrases are: "Go to the next question", "Ask me something else".
-* Responding to knowledge-oriented user messages, described by "SearchAndReply()"
-* Responding to a casual, non-task-oriented user message, described by "ChitChat()".
-* Handing off to a human, in case the user seems frustrated or explicitly asks to speak to one, described by "HumanHandoff()".
-{% if is_repeat_command_enabled %}
-* Repeat the last bot messages, described by "RepeatLastBotMessages()". This is useful when the user asks to repeat the last bot messages.
+You are currently not inside any flow.
 {% endif %}
 
-===
-Write out the actions you want to take, one per line, in the order they should take place.
-Do not fill slots with abstract values or placeholders.
-Only use information provided by the user.
-Only start a flow if it's completely clear what the user wants. Imagine you were a person reading this message. If it's not 100% clear, clarify the next step.
-Don't be overly confident. Take a conservative approach and clarify before proceeding.
-If the user asks for two things which seem contradictory, clarify before starting a flow.
-If it's not clear whether the user wants to skip the step or to cancel the flow, cancel the flow.
-Strictly adhere to the provided action types listed above.
-Focus on the last message and take it one step at a time.
-Use the previous conversation steps only to aid understanding.
-
-Your action list:
+
+## Conversation History
+{{ current_conversation }}
+
+
+## Task
+Create an action list with one action per line in response to the users last message: """{{ user_message }}""".
+
+Your action list:
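The rewritten template asks the model for one action per line in the new lowercase format. For illustration, a possible reply for a money-transfer request and the trivial line split applied to it; flow and slot names are taken from the examples in this diff, the amount is invented:

```python
# Illustrative only: a possible model reply in the new action format.
example_response = """\
start transfer_money
set transfer_money_recipient Freddy
set transfer_money_amount_of_money 50"""

actions = [line.strip() for line in example_response.splitlines() if line.strip()]
print(actions)
# ['start transfer_money', 'set transfer_money_recipient Freddy',
#  'set transfer_money_amount_of_money 50']
```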
rasa/dialogue_understanding_test/du_test_case.py CHANGED
@@ -1,6 +1,6 @@
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, Field
 
 from rasa.dialogue_understanding.commands import Command
 from rasa.dialogue_understanding.generator.command_parser import parse_commands
@@ -20,10 +20,17 @@ from rasa.dialogue_understanding_test.constants import (
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.nlu.constants import (
     KEY_COMPONENT_NAME,
+    KEY_LATENCY,
+    KEY_LLM_RESPONSE_METADATA,
     KEY_PROMPT_NAME,
+    KEY_SYSTEM_PROMPT,
     KEY_USER_PROMPT,
 )
 
+KEY_USAGE = "usage"
+KEY_PROMPT_TOKENS = "prompt_tokens"
+KEY_COMPLETION_TOKENS = "completion_tokens"
+
 
 class DialogueUnderstandingOutput(BaseModel):
     """Output containing prompts and generated commands by component.
@@ -62,8 +69,6 @@ class DialogueUnderstandingOutput(BaseModel):
     # List of prompts
     prompts: Optional[List[Dict[str, Any]]] = None
 
-    model_config = ConfigDict(frozen=True)
-
     def get_predicted_commands(self) -> List[Command]:
         """Get all commands from the output."""
         return [
@@ -82,15 +87,31 @@ class DialogueUnderstandingOutput(BaseModel):
             if predicted_commands
         ]
 
-    def get_component_name_to_user_prompts(self) -> Dict[str, List[Tuple[str, str]]]:
-        """Return a dictionary of component names to a list of prompts.
+    def get_component_name_to_prompt_info(self) -> Dict[str, List[Dict[str, Any]]]:
+        """Return a dictionary of component names to prompt information.
 
-        The prompts are represented as tuples of (prompt_name, user_prompt).
+        The prompt information includes the prompt name, user prompt, system prompt,
+        latency, and usage information.
+        The return dict is of the form:
+        {
+            "component_name": [
+                {
+                    "prompt_name": "...",
+                    "user_prompt": "...",
+                    "system_prompt": "...",
+                    "latency": 0.1,
+                    "prompt_tokens": 10,
+                    "completion_tokens": 20
+                },
+                ...
+            ],
+            ...
+        }
         """
        if self.prompts is None:
            return {}
 
-        data: Dict[str, List[Tuple[str, str]]] = {}
+        data: Dict[str, List[Dict[str, Any]]] = {}
        relevant_component_names = self.get_component_names_that_predicted_commands()
 
        for prompt_data in self.prompts:
@@ -99,12 +120,31 @@ class DialogueUnderstandingOutput(BaseModel):
            if component_name not in relevant_component_names:
                continue
 
-            prompt_name = prompt_data[KEY_PROMPT_NAME]
-            user_prompt = prompt_data[KEY_USER_PROMPT]
-
            if component_name not in data:
                data[component_name] = []
-            data[component_name].append((prompt_name, user_prompt))
+
+            prompt_info = {
+                KEY_PROMPT_NAME: prompt_data[KEY_PROMPT_NAME],
+                KEY_USER_PROMPT: prompt_data[KEY_USER_PROMPT],
+            }
+
+            latency = prompt_data.get(KEY_LLM_RESPONSE_METADATA, {}).get(KEY_LATENCY)
+            if latency:
+                prompt_info[KEY_LATENCY] = latency
+
+            if prompt_data.get(KEY_SYSTEM_PROMPT):
+                prompt_info[KEY_SYSTEM_PROMPT] = prompt_data[KEY_SYSTEM_PROMPT]
+
+            usage_object = prompt_data.get(KEY_LLM_RESPONSE_METADATA, {}).get(KEY_USAGE)
+            if usage_object:
+                if usage_object.get(KEY_PROMPT_TOKENS):
+                    prompt_info[KEY_PROMPT_TOKENS] = usage_object.get(KEY_PROMPT_TOKENS)
+                if usage_object.get(KEY_COMPLETION_TOKENS):
+                    prompt_info[KEY_COMPLETION_TOKENS] = usage_object.get(
+                        KEY_COMPLETION_TOKENS
+                    )
+
+            data[component_name].append(prompt_info)
 
        return data
 
@@ -149,7 +189,7 @@ class DialogueUnderstandingTestStep(BaseModel):
        Args:
            step: Dictionary containing the step.
            flows: List of flows.
-            custom_commands: Custom commands to use in the test case.
+            custom_command_classes: Custom commands to use in the test case.
            remove_default_commands: Default commands to remove from the test case.
 
        Returns:
@@ -210,6 +250,42 @@ class DialogueUnderstandingTestStep(BaseModel):
 
        return ""
 
+    def get_latencies(self) -> List[float]:
+        if self.dialogue_understanding_output is None:
+            return []
+
+        prompts = self.dialogue_understanding_output.get_component_name_to_prompt_info()
+
+        return [
+            prompt_data.get(KEY_LATENCY, 0.0)
+            for prompt in prompts.values()
+            for prompt_data in prompt
+        ]
+
+    def get_completion_tokens(self) -> List[int]:
+        if self.dialogue_understanding_output is None:
+            return []
+
+        prompts = self.dialogue_understanding_output.get_component_name_to_prompt_info()
+
+        return [
+            prompt_data.get(KEY_COMPLETION_TOKENS, 0)
+            for prompt in prompts.values()
+            for prompt_data in prompt
+        ]
+
+    def get_prompt_tokens(self) -> List[int]:
+        if self.dialogue_understanding_output is None:
+            return []
+
+        prompts = self.dialogue_understanding_output.get_component_name_to_prompt_info()
+
+        return [
+            prompt_data.get(KEY_PROMPT_TOKENS, 0)
+            for prompt in prompts.values()
+            for prompt_data in prompt
+        ]
+
 
 class DialogueUnderstandingTestCase(BaseModel):
     name: str
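The shape of `get_component_name_to_prompt_info()` and the way the new step-level getters flatten it can be pictured with a hand-written example; component and prompt names below are placeholders and the numbers are invented:

```python
# Hypothetical prompt info, shaped like the docstring above describes.
prompt_info_by_component = {
    "SingleStepLLMCommandGenerator": [
        {
            "prompt_name": "command_generator_prompt",  # placeholder name
            "user_prompt": "...",
            "latency": 0.42,
            "prompt_tokens": 1336,
            "completion_tokens": 12,
        }
    ]
}

# Same aggregation as get_latencies() / get_prompt_tokens() / get_completion_tokens().
latencies = [
    entry.get("latency", 0.0)
    for entries in prompt_info_by_component.values()
    for entry in entries
]
print(latencies)  # [0.42]
```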
rasa/dialogue_understanding_test/du_test_result.py CHANGED
@@ -1,6 +1,8 @@
+import copy
 import typing
 from typing import Any, Dict, List, Optional, Text
 
+import numpy as np
 from pydantic import BaseModel
 
 from rasa.dialogue_understanding.commands import Command
@@ -9,6 +11,10 @@ from rasa.dialogue_understanding_test.du_test_case import (
     DialogueUnderstandingTestStep,
 )
 from rasa.dialogue_understanding_test.utils import get_command_comparison
+from rasa.shared.nlu.constants import (
+    KEY_SYSTEM_PROMPT,
+    KEY_USER_PROMPT,
+)
 
 if typing.TYPE_CHECKING:
     from rasa.dialogue_understanding_test.command_metric_calculation import (
@@ -25,6 +31,9 @@ OUTPUT_USER_UTTERANCES_ACCURACY = "user_utterances_accuracy"
 OUTPUT_NUMBER_OF_PASSED_USER_UTTERANCES = "number_of_passed_user_utterances"
 OUTPUT_NUMBER_OF_FAILED_USER_UTTERANCES = "number_of_failed_user_utterances"
 OUTPUT_COMMAND_METRICS = "command_metrics"
+OUTPUT_LATENCY_METRICS = "latency"
+OUTPUT_COMPLETION_TOKEN_METRICS = "completion_token"
+OUTPUT_PROMPT_TOKEN_METRICS = "prompt_token"
 OUTPUT_NAMES_OF_FAILED_TESTS = "names_of_failed_tests"
 OUTPUT_NAMES_OF_PASSED_TESTS = "names_of_passed_tests"
 OUTPUT_LLM_COMMAND_GENERATOR_CONFIG = "llm_command_generator_config"
@@ -50,7 +59,7 @@ class FailedTestStep(BaseModel):
     error_line: int
     pass_status: bool
     command_generators: List[str]
-    prompt: Optional[Dict[str, Any]] = None
+    prompts: Optional[Dict[str, List[Dict[str, Any]]]] = None
     expected_commands: List[Command]
     predicted_commands: Dict[str, List[Command]]
     conversation_with_diff: List[str]
@@ -66,14 +75,14 @@ class FailedTestStep(BaseModel):
        line_number = step.line or -1
 
        predicted_commands: Dict[str, List[Command]] = {}
-        prompts: Optional[Dict[str, Any]] = None
+        prompts: Optional[Dict[str, List[Dict[str, Any]]]] = None
        command_generators: List[str] = []
 
        if step.dialogue_understanding_output:
            predicted_commands = step.dialogue_understanding_output.commands
            command_generators = step.dialogue_understanding_output.get_component_names_that_predicted_commands()  # noqa: E501
            prompts = (
-                step.dialogue_understanding_output.get_component_name_to_user_prompts()
+                step.dialogue_understanding_output.get_component_name_to_prompt_info()
            )
 
        step_index = test_case.steps.index(step)
@@ -89,13 +98,13 @@ class FailedTestStep(BaseModel):
            error_line=line_number,
            pass_status=False,
            command_generators=command_generators,
-            prompt=prompts,
+            prompts=prompts,
            expected_commands=step.commands or [],
            predicted_commands=predicted_commands,
            conversation_with_diff=conversation_with_diff,
        )
 
-    def to_dict(self, output_prompt: bool) -> Dict[Text, Any]:
+    def to_dict(self, output_prompt: bool) -> Dict[str, Any]:
        step_info = {
            "file": self.file,
            "test_case": self.test_case_name,
@@ -115,19 +124,17 @@ class FailedTestStep(BaseModel):
            ],
        }
 
-        if output_prompt and self.prompt:
-            step_info["prompts"] = [
-                {
-                    component: [
-                        {
-                            "prompt_name": prompt_name,
-                            "prompt_content": prompt_content,
-                        }
-                        for prompt_name, prompt_content in prompts
-                    ],
-                }
-                for component, prompts in self.prompt.items()
-            ]
+        if output_prompt and self.prompts:
+            step_info["prompts"] = copy.deepcopy(self.prompts)
+        elif self.prompts:
+            prompts = copy.deepcopy(self.prompts)
+            # remove user and system prompts
+            for prompt_data in prompts.values():
+                for prompt_info in prompt_data:
+                    prompt_info.pop(KEY_USER_PROMPT, None)
+                    prompt_info.pop(KEY_SYSTEM_PROMPT, None)
+
+            step_info["prompts"] = prompts
 
        return step_info
 
@@ -153,6 +160,9 @@ class DialogueUnderstandingTestSuiteResult:
        self.names_of_passed_tests: List[str] = []
        self.failed_test_steps: List[FailedTestStep] = []
        self.llm_config: Optional[Dict[str, Any]] = None
+        self.latency_metrics: Dict[str, float] = {}
+        self.prompt_token_metrics: Dict[str, float] = {}
+        self.completion_token_metrics: Dict[str, float] = {}
 
    @classmethod
    def from_results(
@@ -206,6 +216,16 @@ class DialogueUnderstandingTestSuiteResult:
            failing_test_results
        )
 
+        instance.latency_metrics = cls.get_latency_metrics(
+            failing_test_results, passing_test_results
+        )
+        instance.prompt_token_metrics = cls.get_prompt_token_metrics(
+            failing_test_results, passing_test_results
+        )
+        instance.completion_token_metrics = cls.get_completion_token_metrics(
+            failing_test_results, passing_test_results
+        )
+
        instance.llm_config = llm_config
 
        return instance
@@ -264,7 +284,60 @@ class DialogueUnderstandingTestSuiteResult:
 
        return failed_test_steps
 
-    def to_dict(self, output_prompt: bool = False) -> Dict[Text, Any]:
+    @staticmethod
+    def _calculate_percentiles(values: List[float]) -> Dict[str, float]:
+        return {
+            "p50": float(np.percentile(values, 50)) if values else 0.0,
+            "p90": float(np.percentile(values, 90)) if values else 0.0,
+            "p99": float(np.percentile(values, 99)) if values else 0.0,
+        }
+
+    @classmethod
+    def get_latency_metrics(
+        cls,
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+        passing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> Dict[str, float]:
+        latencies = [
+            latency
+            for result in failing_test_results + passing_test_results
+            for step in result.test_case.steps
+            for latency in step.get_latencies()
+        ]
+
+        return cls._calculate_percentiles(latencies)
+
+    @classmethod
+    def get_prompt_token_metrics(
+        cls,
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+        passing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> Dict[str, float]:
+        tokens = [
+            token_count
+            for result in failing_test_results + passing_test_results
+            for step in result.test_case.steps
+            for token_count in step.get_prompt_tokens()
+        ]
+
+        return cls._calculate_percentiles(tokens)
+
+    @classmethod
+    def get_completion_token_metrics(
+        cls,
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+        passing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> Dict[str, float]:
+        tokens = [
+            token_count
+            for result in failing_test_results + passing_test_results
+            for step in result.test_case.steps
+            for token_count in step.get_completion_tokens()
+        ]
+
+        return cls._calculate_percentiles(tokens)
+
+    def to_dict(self, output_prompt: bool = False) -> Dict[str, Any]:
        """Builds a dictionary for writing test results to a YML file.
 
        Args:
@@ -292,6 +365,10 @@ class DialogueUnderstandingTestSuiteResult:
 
        result_dict[OUTPUT_COMMAND_METRICS] = cmd_metrics_output
 
+        result_dict[OUTPUT_LATENCY_METRICS] = self.latency_metrics
+        result_dict[OUTPUT_PROMPT_TOKEN_METRICS] = self.prompt_token_metrics
+        result_dict[OUTPUT_COMPLETION_TOKEN_METRICS] = self.completion_token_metrics
+
        result_dict[OUTPUT_NAMES_OF_PASSED_TESTS] = self.names_of_passed_tests
        result_dict[OUTPUT_NAMES_OF_FAILED_TESTS] = self.names_of_failed_tests
 
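The new suite-level metrics are plain p50/p90/p99 percentiles over all per-prompt values, zeroed when no data is available. A small standalone version of that roll-up (values invented):

```python
from typing import Dict, List

import numpy as np


def calculate_percentiles(values: List[float]) -> Dict[str, float]:
    # Same shape as _calculate_percentiles above: p50/p90/p99, zeros when empty.
    return {
        "p50": float(np.percentile(values, 50)) if values else 0.0,
        "p90": float(np.percentile(values, 90)) if values else 0.0,
        "p99": float(np.percentile(values, 99)) if values else 0.0,
    }


print(calculate_percentiles([0.61, 0.66, 0.70, 0.75]))
print(calculate_percentiles([]))  # {'p50': 0.0, 'p90': 0.0, 'p99': 0.0}
```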
rasa/dialogue_understanding_test/io.py CHANGED
@@ -7,6 +7,10 @@ import rich
 import rasa.shared.data
 from rasa.dialogue_understanding_test.command_metric_calculation import CommandMetrics
 from rasa.dialogue_understanding_test.constants import SCHEMA_FILE_PATH
+from rasa.dialogue_understanding_test.du_test_case import (
+    KEY_COMPLETION_TOKENS,
+    KEY_PROMPT_TOKENS,
+)
 from rasa.dialogue_understanding_test.du_test_result import (
     DialogueUnderstandingTestSuiteResult,
     FailedTestStep,
@@ -31,6 +35,12 @@ from rasa.e2e_test.utils.io import (
     validate_test_case,
 )
 from rasa.shared.core.flows import FlowsList
+from rasa.shared.nlu.constants import (
+    KEY_LATENCY,
+    KEY_PROMPT_NAME,
+    KEY_SYSTEM_PROMPT,
+    KEY_USER_PROMPT,
+)
 from rasa.shared.utils.yaml import (
     read_schema_file,
     validate_yaml_content_using_schema,
@@ -188,13 +198,11 @@ def print_test_results(
     """Print the result of the test run.
 
     Example output (truncated for brevity):
-    =================================================================== FAILURES
-    ===================================================================
+    ====== FAILURES ======
 
-    ---------------- test_case:
-    /Users/milos/Projects/rasa-calm-demo/dialogue_understanding_tests/
-    immediate_cancellation_and_start_of_new_flow.yml::
-    user immediately cancels and starts new flow -----------------
+    ---------------- test_case: rasa-calm-demo/dialogue_understanding_tests/
+    immediate_cancellation_and_start_of_new_flow.yml::user immediately cancels
+    and starts new flow -----------------
 
     == failure starting at user message 'I want to send money'.
 
@@ -208,7 +216,7 @@ def print_test_results(
     SetSlot(transfer_money_amount_of_money, 878) |
 
     ...
-
+    ====== COMMAND METRICS ======
     set slot (2 commands in total):
     tp: 0 fp: 1 fn: 2
     precision: 0.0000
@@ -221,9 +229,22 @@ def print_test_results(
     recall : 1.0000
     f1 : 1.0000
 
-    =================================== 1 failed test cases, 0 passed test cases
-    ===================================
-    ======== 2 failed user steps, 1 passed user steps (accuracy: 0.3333) ========
+    ...
+    ====== LATENCY METRICS ======
+    p50: 0.00065571
+    p90: 0.00074687
+    p99: 0.00077837
+    ====== PROMPT TOKEN METRICS ======
+    p50: 1336.00
+    p90: 1389.50
+    p99: 1401.65
+    ====== COMPLETION TOKEN METRICS ======
+    p50: 12.00
+    p90: 15.80
+    p99: 16.88
+
+    ====== 1 failed test cases, 0 passed test cases ======
+    ====== 2 failed user steps, 1 passed user steps (accuracy: 0.3333) ======
 
     Args:
        test_suite_result: Test results suite containing the test results.
@@ -253,6 +274,7 @@ def print_test_results(
    print_failed_cases(test_suite_result, output_prompt=output_prompt)
 
    print_command_summary(test_suite_result.command_metrics)
+    print_latency_and_token_metrics(test_suite_result)
    print_final_line(test_suite_result)
 
 
@@ -290,16 +312,33 @@ def print_failed_cases(
 
 
 def print_prompt(step: FailedTestStep) -> None:
-    if step.prompt is None:
+    if step.prompts is None:
        return
-    prompt_data = step.prompt
+    prompts = step.prompts
 
    rich.print("\n[red3]-- PROMPT(s) --[/red3]")
-    for component, prompts in prompt_data.items():
+    for component, component_prompts in prompts.items():
        rich.print(f"[bold]{component}[/bold]")
-        for subcomponent, prompt in prompts:
-            rich.print(f" [bold]{subcomponent}[/bold]")
-            rich.print(f" {prompt}")
+        for prompt_data in component_prompts:
+            rich.print(
+                f"[bold] prompt name      [/bold]: {prompt_data[KEY_PROMPT_NAME]}"
+            )
+            rich.print(
+                f"[bold] prompt tokens    [/bold]: {prompt_data[KEY_PROMPT_TOKENS]}"
+            )
+            rich.print(
+                f"[bold] completion tokens[/bold]: "
+                f"{prompt_data[KEY_COMPLETION_TOKENS]}"
+            )
+            rich.print(f"[bold] latency          [/bold]: {prompt_data[KEY_LATENCY]}")
+            if KEY_SYSTEM_PROMPT in prompt_data:
+                rich.print(
+                    f"[bold] system prompt    [/bold]: "
+                    f"{prompt_data[KEY_SYSTEM_PROMPT]}"
+                )
+            rich.print(
+                f"[bold] user prompt      [/bold]: {prompt_data[KEY_USER_PROMPT]}"
+            )
 
 
 def print_command_summary(metrics: Dict[str, CommandMetrics]) -> None:
@@ -331,6 +370,24 @@ def print_command_summary(metrics: Dict[str, CommandMetrics]) -> None:
    )
 
 
+def print_latency_and_token_metrics(
+    result: DialogueUnderstandingTestSuiteResult,
+) -> None:
+    """Print the latency and token metrics."""
+    print()
+    rasa.shared.utils.cli.print_info(rasa.shared.utils.cli.pad("LATENCY METRICS"))
+    for key, value in result.latency_metrics.items():
+        rasa.shared.utils.cli.print_info(f"{key}: {value:.8f}")
+    rasa.shared.utils.cli.print_info(rasa.shared.utils.cli.pad("PROMPT TOKEN METRICS"))
+    for key, value in result.prompt_token_metrics.items():
+        rasa.shared.utils.cli.print_info(f"{key}: {value:.2f}")
+    rasa.shared.utils.cli.print_info(
+        rasa.shared.utils.cli.pad("COMPLETION TOKEN METRICS")
+    )
+    for key, value in result.completion_token_metrics.items():
+        rasa.shared.utils.cli.print_info(f"{key}: {value:.2f}")
+
+
 def print_final_line(test_suite_result: DialogueUnderstandingTestSuiteResult) -> None:
     """Print the final line of the test output.
 
 
@@ -181,6 +181,46 @@ class _BaseLiteLLMClient:
181
181
  )
182
182
  raise ProviderClientAPIException(e, message)
183
183
 
184
+ @suppress_logs(log_level=logging.WARNING)
185
+ async def acompletion_with_system(
186
+ self, formatted_messages: Union[List[str], str]
187
+ ) -> LLMResponse:
188
+ """Asynchronously generate completions for given list of messages.
189
+
190
+ Args:
191
+ messages: List of messages or a single message to generate the
192
+ completion for.
193
+
194
+ Returns:
195
+ List of message completions.
196
+
197
+ Raises:
198
+ ProviderClientAPIException: If the API request fails.
199
+ """
200
+ try:
201
+ # formatted_messages = self._format_messages(messages)
202
+ arguments = resolve_environment_variables(self._completion_fn_args)
203
+ response = await acompletion(messages=formatted_messages, **arguments)
204
+ return self._format_response(response)
205
+ except Exception as e:
206
+ message = ""
207
+ from rasa.shared.providers.llm.self_hosted_llm_client import (
208
+ SelfHostedLLMClient,
209
+ )
210
+
211
+ if isinstance(self, SelfHostedLLMClient):
212
+ message = (
213
+ "If you are using 'provider=self-hosted' to call a hosted vllm "
214
+ "server make sure your config is correctly setup. You should have "
215
+ "the following mandatory keys in your config: "
216
+ "provider=self-hosted; "
217
+ "model='<your-vllm-model-name>'; "
218
+ "api_base='your-hosted-vllm-serv'."
219
+ "In case you are getting OpenAI connection errors, such as missing "
220
+ "API key, your configuration is incorrect."
221
+ )
222
+ raise ProviderClientAPIException(e, message)
223
+
184
224
  def _format_messages(self, messages: Union[List[str], str]) -> List[Dict[str, str]]:
185
225
  """Formats messages (or a single message) to OpenAI format."""
186
226
  if isinstance(messages, str):
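The new acompletion_with_system is handed messages that are already in chat format; the commented-out `_format_messages` call suggests it deliberately skips the plain-string formatting used elsewhere in the client. A rough usage sketch, assuming `client` is an already constructed LiteLLM-based client instance:

```python
# Hypothetical pre-formatted chat messages, including a system message.
formatted_messages = [
    {"role": "system", "content": "You are a command generator."},
    {"role": "user", "content": "I want to send money"},
]


async def generate(client):
    # `client` is assumed to be an instantiated subclass of _BaseLiteLLMClient.
    return await client.acompletion_with_system(formatted_messages)

# Run with: asyncio.run(generate(client)); client construction is provider-specific.
```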
rasa/shared/utils/llm.py CHANGED
@@ -6,6 +6,7 @@ from typing import (
     Any,
     Callable,
     Dict,
+    List,
     Optional,
     Text,
     Type,
@@ -237,6 +238,90 @@ def tracker_as_readable_transcript(
     return "\n".join(transcript)
 
 
+def sanitize_command_for_prompt(cmd_dict):
+    command = ""
+    if cmd_dict["command"] == "start flow":
+        command = f"start {cmd_dict['flow']}"
+    elif cmd_dict["command"] == "set slot":
+        command = f"set {cmd_dict['name']} {cmd_dict['value']}"
+    elif cmd_dict["command"] == "skip question":
+        command = "skip"
+    elif cmd_dict["command"] == "clarify":
+        command = f"clarify {' '.join(cmd_dict['options'])}"
+    elif cmd_dict["command"] == "knowledge":
+        command = "search"
+    elif cmd_dict["command"] == "chitchat":
+        command = "chat"
+    elif cmd_dict["command"] == "cancel flow":
+        command = "cancel"
+    else:
+        command = cmd_dict["command"]
+
+    return command
+
+
+def tracker_as_message_list(
+    tracker: "DialogueStateTracker",
+    max_turns: Optional[int] = 20,
+) -> List[str]:
+    """Creates a readable dialogue from a tracker.
+
+    Args:
+        tracker: the tracker to convert
+        max_turns: the maximum number of turns to include in the transcript
+
+    Example:
+        >>> tracker = Tracker(
+        ...     sender_id="test",
+        ...     slots=[],
+        ...     events=[
+        ...         UserUttered("hello"),
+        ...         BotUttered("hi"),
+        ...     ],
+        ... )
+        >>> tracker_as_readable_transcript(tracker)
+        USER: hello
+        AI: hi
+
+    Returns:
+        A string representing the transcript of the tracker
+    """
+    messages = []
+
+    # using `applied_events` rather than `events` means that only events after the
+    # most recent `Restart` or `SessionStarted` are included in the transcript
+    # last_commands = None
+    for event in tracker.applied_events():
+        if isinstance(event, UserUttered):
+            if event.has_triggered_error:
+                first_error = event.error_commands[0]
+                error_type = first_error.get("error_type")
+                message = ERROR_PLACEHOLDER.get(
+                    error_type, ERROR_PLACEHOLDER["default"]
+                )
+            else:
+                message = sanitize_message_for_prompt(event.text)
+            # last_commands = event.commands
+            messages.append({"role": "user", "content": message})
+            # messages.append({"role": "system", "content": ' \n '.join([sanitize_command_for_prompt(cmd) for cmd in last_commands])})  # noqa: E501
+            # transcript.append(f"{human_prefix}: {message}")
+
+        elif isinstance(event, BotUttered):
+            messages.append(
+                {
+                    "role": "assistant",
+                    "content": f"{sanitize_message_for_prompt(event.text)}",
+                }
+            )
+            # transcript.append(f"{ai_prefix}: {sanitize_message_for_prompt(event.text)}")  # noqa: E501
+
+    if max_turns:
+        messages = messages[-max_turns:]
+        # transcript = transcript[-max_turns:]
+
+    return messages
+
+
 def sanitize_message_for_prompt(text: Optional[str]) -> str:
     """Removes new lines from a string.
 
@@ -680,7 +765,7 @@ def allowed_values_for_slot(slot: Slot) -> Union[str, None]:
     if isinstance(slot, BooleanSlot):
         return str([True, False])
     if isinstance(slot, CategoricalSlot):
-        return str([v for v in slot.values if v != "__other__"])
+        return str([v for v in slot.values if v != "__other__"] + ["other"])
     else:
        return None
 
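The two helpers added to llm.py feed a chat-style prompt: `sanitize_command_for_prompt` renders previously predicted commands in the new lowercase DSL, and `tracker_as_message_list` turns tracker events into role/content messages. A sketch of their outputs (command dicts and message texts invented):

```python
# Command dicts shaped the way the branches of sanitize_command_for_prompt expect.
example_commands = [
    {"command": "start flow", "flow": "transfer_money"},
    {"command": "set slot", "name": "transfer_money_recipient", "value": "Freddy"},
    {"command": "chitchat"},
]
# sanitize_command_for_prompt(cmd) for each dict yields:
#   "start transfer_money", "set transfer_money_recipient Freddy", "chat"

# tracker_as_message_list returns OpenAI-style role/content dicts, e.g.:
example_messages = [
    {"role": "user", "content": "I want to send money"},
    {"role": "assistant", "content": "Who would you like to send money to?"},
]
```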
rasa/tracing/instrumentation/attribute_extractors.py CHANGED
@@ -24,12 +24,15 @@ from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding_test.du_test_result import (
     KEY_TEST_CASES_ACCURACY,
     KEY_USER_UTTERANCES_ACCURACY,
+    OUTPUT_COMPLETION_TOKEN_METRICS,
+    OUTPUT_LATENCY_METRICS,
     OUTPUT_NAMES_OF_FAILED_TESTS,
     OUTPUT_NAMES_OF_PASSED_TESTS,
     OUTPUT_NUMBER_OF_FAILED_TESTS,
     OUTPUT_NUMBER_OF_FAILED_USER_UTTERANCES,
     OUTPUT_NUMBER_OF_PASSED_TESTS,
     OUTPUT_NUMBER_OF_PASSED_USER_UTTERANCES,
+    OUTPUT_PROMPT_TOKEN_METRICS,
     OUTPUT_TEST_CASES_ACCURACY,
     OUTPUT_USER_UTTERANCES_ACCURACY,
     DialogueUnderstandingTestSuiteResult,
@@ -636,6 +639,13 @@ def extract_attrs_for_du_print_test_results(
     for key, value in test_suite_result.llm_config.items():
         attributes_dict[f"llm_config_0_{key}"] = value
 
+    for key, value in test_suite_result.latency_metrics.items():
+        attributes_dict[f"{OUTPUT_LATENCY_METRICS}_{key}"] = value
+    for key, value in test_suite_result.prompt_token_metrics.items():
+        attributes_dict[f"{OUTPUT_PROMPT_TOKEN_METRICS}_{key}"] = value
+    for key, value in test_suite_result.completion_token_metrics.items():
+        attributes_dict[f"{OUTPUT_COMPLETION_TOKEN_METRICS}_{key}"] = value
+
     return attributes_dict
 
 
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.12.0.dev7"
+__version__ = "3.12.0.dev9"
{rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: rasa-pro
-Version: 3.12.0.dev7
+Version: 3.12.0.dev9
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Home-page: https://rasa.com
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
{rasa_pro-3.12.0.dev7.dist-info → rasa_pro-3.12.0.dev9.dist-info}/RECORD CHANGED
@@ -364,36 +364,36 @@ rasa/dialogue_understanding/coexistence/intent_based_router.py,sha256=JlYBZdScnh
 rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=UTLMZpynSRt8qnQSSV-dTgFQbBHaMEgrLIUWCMyH1BE,11354
 rasa/dialogue_understanding/coexistence/router_template.jinja2,sha256=CHWFreN0sv1EbPh-hf5AlCt3zxy2_llX1Pdn9Q11Y18,357
 rasa/dialogue_understanding/commands/__init__.py,sha256=F-pLETYRUjhIkjjDfXGUuPsK_ac1HcLmJkrUUP0RhME,2259
-rasa/dialogue_understanding/commands/can_not_handle_command.py,sha256=2ferTQRUR_Ibr2Qa-Dma71DzVT6Tfy3-6iUhFrFFeZ4,2831
-rasa/dialogue_understanding/commands/cancel_flow_command.py,sha256=Um2ouQkfR_Za66uRiKjjZKI5fuYG76uvk3hFW3w4XV8,4625
-rasa/dialogue_understanding/commands/change_flow_command.py,sha256=8tjULfNQXsH3ZWTsMlrzbVgHLU26catdHTWzcYgSBWI,1735
-rasa/dialogue_understanding/commands/chit_chat_answer_command.py,sha256=GtYU3FtpwoAQti2_zW_T3x7iJABu2Yyb0eqUd7takhg,2152
-rasa/dialogue_understanding/commands/clarify_command.py,sha256=y5iEESwalNmQ7MZDbS5ut5KZwqEp_4GF6kBrvwUlRBE,3432
+rasa/dialogue_understanding/commands/can_not_handle_command.py,sha256=z7aGn7aBMdXTsn7lzN1RvXlYi8WN7gtFm21qcJF8Hko,2829
+rasa/dialogue_understanding/commands/cancel_flow_command.py,sha256=Tc5xXQw_P8XlIApL6CblFf1nnfnWGUqsMSIkVV_K_bE,4613
+rasa/dialogue_understanding/commands/change_flow_command.py,sha256=gQyL1UqRNPdYXKVQi3hlAHOdpcCWLkhL34M3A_PzSR0,1722
+rasa/dialogue_understanding/commands/chit_chat_answer_command.py,sha256=JnOMAaWhyynlTTzLpi663BOSs8hYNZyxrjUnSq0Qle8,2140
+rasa/dialogue_understanding/commands/clarify_command.py,sha256=7ZlgA_gmOCj0xcP3sCNn7frk75SqiuWpS0O0HWuxjNE,3428
 rasa/dialogue_understanding/commands/command.py,sha256=riYYO0Lvm7T2vydDOfFFi7IquJWdyX_jH8d7qX0PB2c,2993
 rasa/dialogue_understanding/commands/correct_slots_command.py,sha256=lnSfYXNdTwQflOeMRXh1rV4qn6Bgx85S1yaXHtiKKAs,10461
 rasa/dialogue_understanding/commands/error_command.py,sha256=LTEsxkGGGZR6wEEGuTtQ4K4EK_u2UFhNK4eAKyPfyME,2436
 rasa/dialogue_understanding/commands/free_form_answer_command.py,sha256=XlQrHXrcOemzu1LHZiDhBAluiSlnUQ2V7ET5Z-aG7gc,224
 rasa/dialogue_understanding/commands/handle_code_change_command.py,sha256=Cp2e1iD0zacXmljJ8vDXHJu9Fp6BwB7cGx8NF748akw,2192
-rasa/dialogue_understanding/commands/human_handoff_command.py,sha256=WlGswi5Wlc3jsXp6hsU3ANMdZMqGk33rxeX0kAyiwgw,2285
-rasa/dialogue_understanding/commands/knowledge_answer_command.py,sha256=S2AP2_H-KFnSRUKqG3kz083dR_JTXKMWkjhzi9mEDPQ,2166
+rasa/dialogue_understanding/commands/human_handoff_command.py,sha256=bxSTNbLz05nN_gL14TsVural4S9-Du1teVXy1JDASP8,2275
+rasa/dialogue_understanding/commands/knowledge_answer_command.py,sha256=IvFcV8rhnDchzvWfJncJ34OFkXHiQ3yWytkGFWNuSho,2146
 rasa/dialogue_understanding/commands/noop_command.py,sha256=aIaLBjSV84qy9X4aGlJfMIYhF57maH5CiKNWL_-giD4,1485
-rasa/dialogue_understanding/commands/repeat_bot_messages_command.py,sha256=9SaRXsqdgC5p-2JIa8T3fsc6sVcsLf1FL90HtjW5Uek,2243
+rasa/dialogue_understanding/commands/repeat_bot_messages_command.py,sha256=6oitCpsYtLjOUBGe4cIx2OOZnw9ygi4nKwWOg66R2aA,2225
 rasa/dialogue_understanding/commands/restart_command.py,sha256=vvmucwlVtfh6VMgdOn5hZfsP9U5HhfbDeBSG2IndX0Y,1639
 rasa/dialogue_understanding/commands/session_end_command.py,sha256=ZecUpYZDTX_68_kV1Hv4i317bbeBeVHHyhW_A7r5yzs,1770
 rasa/dialogue_understanding/commands/session_start_command.py,sha256=FA4yocMnFt5bn2dmXj48S4Pq_yTlEnOBxgK_mq-qAxg,1704
-rasa/dialogue_understanding/commands/set_slot_command.py,sha256=NgrN3IgzjCCIWNjrySdSUyQBQSlomwdqYUFOE1HZSRw,5994
-rasa/dialogue_understanding/commands/skip_question_command.py,sha256=45NvtkqMaNX05T2KmTInwGSTWAeRokgdF7CXWpHnL48,2608
-rasa/dialogue_understanding/commands/start_flow_command.py,sha256=yQJNiAey5ETInxwYSNARabCL2f2QDVKuBQrIyvtPt1A,3793
+rasa/dialogue_understanding/commands/set_slot_command.py,sha256=ENpMuAbrlNA0hX3ABbIxQPNAL4a7sk7HZvNTZGhoiRE,5982
+rasa/dialogue_understanding/commands/skip_question_command.py,sha256=ZPstoD-A2jnW8AszKsZ10WtDEeAtoCh6swESwk7fpDA,2588
+rasa/dialogue_understanding/commands/start_flow_command.py,sha256=WYgynvL1sqlOr_I5VXI5OHqWaSRUexvFUvEsgI9WvPw,3783
 rasa/dialogue_understanding/commands/user_silence_command.py,sha256=DQjRfZk09sV1o2emnLkmX7cZpsJwBHNeJGBDQVkejjY,1686
-rasa/dialogue_understanding/commands/utils.py,sha256=vz3fNF31cCIUoWQ6HP6_nI4E30MBFIfacCZb6QKx634,1748
+rasa/dialogue_understanding/commands/utils.py,sha256=OV6hJjlb2WWOHCaIxtRXbbKHKSNGX2het_KZNoomrBI,1748
 rasa/dialogue_understanding/constants.py,sha256=YcELaIss69Hnroclvn90Dl4Suk3S6e3t0UoIbUaXG2A,83
 rasa/dialogue_understanding/generator/__init__.py,sha256=Ykeb2wQ1DuiUWAWO0hLIPSTK1_Ktiq9DZXF6D3ugN78,764
 rasa/dialogue_understanding/generator/command_generator.py,sha256=OTyE9_CcorpjTOgdI-u0J9ryBwlDdd2foJLA3jQJ9_Y,11959
-rasa/dialogue_understanding/generator/command_parser.py,sha256=HWLKJoXBOQVByk__rVbReJJ8u7au9Wit0MXvuHEAZHg,6422
+rasa/dialogue_understanding/generator/command_parser.py,sha256=E-P1giFoSzPwgg7FqPEx0-4XWUAKidn8HT2Bn70DzmM,6430
 rasa/dialogue_understanding/generator/constants.py,sha256=ntP5xmTlS3b_6uVT7LKvWzxbmf5_9tWZ5eFY1RLBtqU,716
 rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
 rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=wlGnMj17-X1-siQmdSvOd7K61sRzBf82MQEL2pqDQMI,17891
-rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=o8Dt-uIrgKqsRGvlMy0TpqLhf2DHSE4O5BDuHpKR5EQ,16161
+rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=uQKQFNoayY5klvhPERje7jQmsM4oULlYQNlULx7fsko,16195
 rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=QpNXhjB9ugtPV8XAHmKjbJtOiI1yE9rC2osbsI_A4ZY,2529
 rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
@@ -401,7 +401,7 @@ rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha2
 rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=U8Sj6KxQZw5v-RdkuH-5fJYEvKDfIG2SnPXfsEZuG2Y,31034
 rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=-DFGkeRKQwU0-0ZJpVjxvgAUGq8pK9NXavAUONT0Wo8,9383
 rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
+rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2,sha256=k6s9Gj4ULCwfuqRbX41yW0aJwQN_T_ShfWlGeZDT308,2863
 rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=KznnZzf3RtvRK8sSfMw4tCaWzXMJf0Mw66S73lA22uk,15724
 rasa/dialogue_understanding/generator/utils.py,sha256=fRACEJ2bEgEgSq90TUrQFLTrLqtJqtbb64mgAXklMQ4,1911
 rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -441,11 +441,11 @@ rasa/dialogue_understanding_test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRk
 rasa/dialogue_understanding_test/command_comparison.py,sha256=LvCZGgZVFpKjWqaZE5OqPClM5xDNdFZQ4FslvNerB7s,1812
 rasa/dialogue_understanding_test/command_metric_calculation.py,sha256=ys1BobRxqEhsfKk5Op9OB_IXUIsGGSiK6ox8246W-9E,3889
 rasa/dialogue_understanding_test/constants.py,sha256=G63FEzswDUOonTxoXQicEJwI6ICkSx3YP1ILkGH1ijw,790
-rasa/dialogue_understanding_test/du_test_case.py,sha256=P82xu4wkyIrhPdQWHp2wMYG6YVkUgaDFNSSDDN6Znv8,11277
-rasa/dialogue_understanding_test/du_test_result.py,sha256=ITnY5rtPAW-DTPXQA9yp3gyFfKRUjIZ_IVBTc3AjReY,11861
+rasa/dialogue_understanding_test/du_test_case.py,sha256=dRb3htpUpSKzArQZOgW8a3J2Wi85oVTLjk2gq4OELQU,13801
+rasa/dialogue_understanding_test/du_test_result.py,sha256=TlpmimfnwtNfu5UMFLogjSZrkfpBYrHgCeMOQdXQjhw,14822
 rasa/dialogue_understanding_test/du_test_runner.py,sha256=ZG-TNfu-Ak9l_gg9NNadzKzARgICJ9wlsYooCBi1WKU,11943
 rasa/dialogue_understanding_test/du_test_schema.yml,sha256=zgIhb6PE8LnoigVmv4NbU3cjSsr2SkGoO-5Xh4Et9KA,4767
-rasa/dialogue_understanding_test/io.py,sha256=ah26adacAU57YX7rhL3fMLNKeVddVURzUaWddXl8N0k,12707
+rasa/dialogue_understanding_test/io.py,sha256=nXmauJp14yMPvKTCHbSnIgvd1u8B_piSCXwxYpfSvWQ,14656
 rasa/dialogue_understanding_test/test_case_simulation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding_test/test_case_simulation/exception.py,sha256=RJV8CfoGKmfpC3d28y7IBKfmcAZSm2Vs6p0GkiCHlcc,1034
 rasa/dialogue_understanding_test/test_case_simulation/test_case_tracker_simulator.py,sha256=hL7NisfRWe_QuiETH1hkqN6U7IRi8fZ2ggQ4JleHnzk,12994
@@ -715,7 +715,7 @@ rasa/shared/providers/embedding/huggingface_local_embedding_client.py,sha256=Zo3
 rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=Y5XNwWl1kxoOLs8ZMY3nhPn__XSPOeg8ED-dQQuegF4,4517
 rasa/shared/providers/embedding/openai_embedding_client.py,sha256=XNRGE7apo2v3kWRrtgxE-Gq4rvNko3IiXtvgC4krDYE,5429
 rasa/shared/providers/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/providers/llm/_base_litellm_client.py,sha256=Kwa8s96g5AijiKiOO9-UKaLWYJ4iHEuyepsc5ZkTEP4,10004
+rasa/shared/providers/llm/_base_litellm_client.py,sha256=CwxV-3zAFAgxYAfHkbUhAvFWlcbd2u8aqb8X5WdT8FU,11703
 rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=HBm5f_bPwOXP2N-zEMfnhAdMQxDHGHp8JKC62gnj4mg,12976
 rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=Gu9l-RNaKXAUsEmG8DIIGSPHtW9vMEyVNWvOVADiJcU,3940
 rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=hPYGMZUUjpnRCFNvHSSuaKK_OxHV2spr9eItkwYYnBI,6816
@@ -737,7 +737,7 @@ rasa/shared/utils/health_check/embeddings_health_check_mixin.py,sha256=ASOzDtI3i
 rasa/shared/utils/health_check/health_check.py,sha256=izixrbc9BxFSsjzwoIw9U0w0VKSX5gMwhey8bcwe1wc,9709
 rasa/shared/utils/health_check/llm_health_check_mixin.py,sha256=ANP5Q68TRX8p4wWkRCAISsWBV1iYYeGnqWILnR1NawE,957
 rasa/shared/utils/io.py,sha256=AhuECoXGO367NvWRCBu99utEtTQnyxWVJyKOOpLePpg,15917
-rasa/shared/utils/llm.py,sha256=6mfcRdKZVvB7JLVN_2oj6tnIgPfiPqQ2hYnTyK8mwP4,25545
+rasa/shared/utils/llm.py,sha256=rgYEjdbMXp0nUJXrhXmvCChXZzH3QtADY9IXM4EeAPE,28455
 rasa/shared/utils/pykwalify_extensions.py,sha256=36vfuD9gSreZvMIZ_qIPpykXhYGuZu2BpoEMVX50Is4,883
 rasa/shared/utils/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/utils/schemas/config.yml,sha256=czxSADw9hOIZdhvFP8pVUQo810hs9_C8ZGfCPx17taM,27
@@ -760,7 +760,7 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/tracing/config.py,sha256=j5N6s-GX3idNH9FO-0z10KduVg2ovzsE-u5ve87249U,12860
 rasa/tracing/constants.py,sha256=N_MJLStE3IkmPKQCQv42epd3jdBMJ4Ith1dVO65N5ho,2425
 rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/tracing/instrumentation/attribute_extractors.py,sha256=yzs6mMCvJgPMxnuESJBn_QfRZq8lyUfhicVa4Y1-ONw,28955
+rasa/tracing/instrumentation/attribute_extractors.py,sha256=-LjX91Y94Jalccm5tMpbYvXF25abg0NVFr0MKJJOB-Q,29478
 rasa/tracing/instrumentation/instrumentation.py,sha256=K4h_u3VY-eKPKUP8UXKEHsCy5UpXw0QGQ2MdlQFBeWE,51861
 rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=RgixI0FVIzBz19E3onidUpSEwjkAh8paA5_w07PMzFo,4821
 rasa/tracing/instrumentation/metrics.py,sha256=N4Zxo8P84p4VH6vGai1oRurIUifXPtMrZh1BlpPB7kg,10534
@@ -801,9 +801,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
 rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
 rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
 rasa/validator.py,sha256=Uh5R1JDmIRl0aNprh9FfHND8UKiNJTNYBrMdBDVxEFM,67516
-rasa/version.py,sha256=ZidWAktMLMvwbHEMlVU7E2f6M0gBaoHOhrxbP9X0u-g,122
-rasa_pro-3.12.0.dev7.dist-info/METADATA,sha256=YNfDp8Z3qUiOSQ4He_M5ddASCrUenlBybBk7LTv3XEE,10730
-rasa_pro-3.12.0.dev7.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
-rasa_pro-3.12.0.dev7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-rasa_pro-3.12.0.dev7.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
-rasa_pro-3.12.0.dev7.dist-info/RECORD,,
+rasa/version.py,sha256=qcAHxWgFY5aqvH50NTP65caloSkSJzqEtt_Ot16T45I,122
+rasa_pro-3.12.0.dev9.dist-info/METADATA,sha256=krUZcbUYLAse0nHTYflyjwWmCcRvkm0qHLwUNe2C9gU,10730
+rasa_pro-3.12.0.dev9.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+rasa_pro-3.12.0.dev9.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+rasa_pro-3.12.0.dev9.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+rasa_pro-3.12.0.dev9.dist-info/RECORD,,