rasa-pro 3.14.0.dev1__py3-none-any.whl → 3.14.0.dev20250818__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

Files changed (147)
  1. rasa/api.py +0 -5
  2. rasa/cli/arguments/default_arguments.py +0 -12
  3. rasa/cli/arguments/run.py +0 -2
  4. rasa/cli/dialogue_understanding_test.py +0 -4
  5. rasa/cli/e2e_test.py +0 -4
  6. rasa/cli/inspect.py +0 -3
  7. rasa/cli/llm_fine_tuning.py +0 -5
  8. rasa/cli/run.py +0 -4
  9. rasa/cli/shell.py +0 -3
  10. rasa/constants.py +0 -6
  11. rasa/core/actions/action.py +2 -42
  12. rasa/core/agent.py +0 -16
  13. rasa/core/available_endpoints.py +0 -30
  14. rasa/core/channels/inspector/dist/assets/{arc-2e78c586.js → arc-1ddec37b.js} +1 -1
  15. rasa/core/channels/inspector/dist/assets/{blockDiagram-38ab4fdb-806b712e.js → blockDiagram-38ab4fdb-18af387c.js} +1 -1
  16. rasa/core/channels/inspector/dist/assets/{c4Diagram-3d4e48cf-0745efa9.js → c4Diagram-3d4e48cf-250127a3.js} +1 -1
  17. rasa/core/channels/inspector/dist/assets/channel-59f6d54b.js +1 -0
  18. rasa/core/channels/inspector/dist/assets/{classDiagram-70f12bd4-7bd1082b.js → classDiagram-70f12bd4-c3388b34.js} +1 -1
  19. rasa/core/channels/inspector/dist/assets/{classDiagram-v2-f2320105-d937ba49.js → classDiagram-v2-f2320105-9c893a82.js} +1 -1
  20. rasa/core/channels/inspector/dist/assets/clone-26177ddb.js +1 -0
  21. rasa/core/channels/inspector/dist/assets/{createText-2e5e7dd3-a2a564ca.js → createText-2e5e7dd3-c111213b.js} +1 -1
  22. rasa/core/channels/inspector/dist/assets/{edges-e0da2a9e-b5256940.js → edges-e0da2a9e-812a729d.js} +1 -1
  23. rasa/core/channels/inspector/dist/assets/{erDiagram-9861fffd-e6883ad2.js → erDiagram-9861fffd-fd5051bc.js} +1 -1
  24. rasa/core/channels/inspector/dist/assets/{flowDb-956e92f1-e576fc02.js → flowDb-956e92f1-3287ac02.js} +1 -1
  25. rasa/core/channels/inspector/dist/assets/{flowDiagram-66a62f08-2e298d01.js → flowDiagram-66a62f08-692fb0b2.js} +1 -1
  26. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-29c03f5a.js +1 -0
  27. rasa/core/channels/inspector/dist/assets/{flowchart-elk-definition-4a651766-dd7b150a.js → flowchart-elk-definition-4a651766-008376f1.js} +1 -1
  28. rasa/core/channels/inspector/dist/assets/{ganttDiagram-c361ad54-5b79575c.js → ganttDiagram-c361ad54-df330a69.js} +1 -1
  29. rasa/core/channels/inspector/dist/assets/{gitGraphDiagram-72cf32ee-3016f40a.js → gitGraphDiagram-72cf32ee-e03676fb.js} +1 -1
  30. rasa/core/channels/inspector/dist/assets/{graph-3e19170f.js → graph-46fad2ba.js} +1 -1
  31. rasa/core/channels/inspector/dist/assets/{index-3862675e-eb9c86de.js → index-3862675e-a484ac55.js} +1 -1
  32. rasa/core/channels/inspector/dist/assets/index-a003633f.js +1335 -0
  33. rasa/core/channels/inspector/dist/assets/{infoDiagram-f8f76790-b4280e4d.js → infoDiagram-f8f76790-3f9e6ec2.js} +1 -1
  34. rasa/core/channels/inspector/dist/assets/{journeyDiagram-49397b02-556091f8.js → journeyDiagram-49397b02-79f72383.js} +1 -1
  35. rasa/core/channels/inspector/dist/assets/{layout-08436411.js → layout-aad098e5.js} +1 -1
  36. rasa/core/channels/inspector/dist/assets/{line-683c4f3b.js → line-219ab7ae.js} +1 -1
  37. rasa/core/channels/inspector/dist/assets/{linear-cee6d791.js → linear-2cddbe62.js} +1 -1
  38. rasa/core/channels/inspector/dist/assets/{mindmap-definition-fc14e90a-a0bf0b1a.js → mindmap-definition-fc14e90a-1d41ed99.js} +1 -1
  39. rasa/core/channels/inspector/dist/assets/{pieDiagram-8a3498a8-3730d5c4.js → pieDiagram-8a3498a8-cc496ee8.js} +1 -1
  40. rasa/core/channels/inspector/dist/assets/{quadrantDiagram-120e2f19-12a20fed.js → quadrantDiagram-120e2f19-84d32884.js} +1 -1
  41. rasa/core/channels/inspector/dist/assets/{requirementDiagram-deff3bca-b9732102.js → requirementDiagram-deff3bca-c0deb984.js} +1 -1
  42. rasa/core/channels/inspector/dist/assets/{sankeyDiagram-04a897e0-a2e72776.js → sankeyDiagram-04a897e0-b9d7fd62.js} +1 -1
  43. rasa/core/channels/inspector/dist/assets/{sequenceDiagram-704730f1-8b7a76bb.js → sequenceDiagram-704730f1-7d517565.js} +1 -1
  44. rasa/core/channels/inspector/dist/assets/{stateDiagram-587899a1-e65853ac.js → stateDiagram-587899a1-98ef9b27.js} +1 -1
  45. rasa/core/channels/inspector/dist/assets/{stateDiagram-v2-d93cdb3a-6f58a44b.js → stateDiagram-v2-d93cdb3a-cee70748.js} +1 -1
  46. rasa/core/channels/inspector/dist/assets/{styles-6aaf32cf-df25b934.js → styles-6aaf32cf-3f9d1c96.js} +1 -1
  47. rasa/core/channels/inspector/dist/assets/{styles-9a916d00-88357141.js → styles-9a916d00-67471923.js} +1 -1
  48. rasa/core/channels/inspector/dist/assets/{styles-c10674c1-d600174d.js → styles-c10674c1-bd093fb7.js} +1 -1
  49. rasa/core/channels/inspector/dist/assets/{svgDrawCommon-08f97a94-4adc3e0b.js → svgDrawCommon-08f97a94-675794e8.js} +1 -1
  50. rasa/core/channels/inspector/dist/assets/{timeline-definition-85554ec2-42816fa1.js → timeline-definition-85554ec2-0ac67617.js} +1 -1
  51. rasa/core/channels/inspector/dist/assets/{xychartDiagram-e933f94c-621eb66a.js → xychartDiagram-e933f94c-c018dc37.js} +1 -1
  52. rasa/core/channels/inspector/dist/index.html +1 -1
  53. rasa/core/channels/inspector/src/components/DialogueStack.tsx +5 -7
  54. rasa/core/channels/inspector/src/helpers/formatters.ts +3 -24
  55. rasa/core/channels/inspector/src/theme/base/styles.ts +1 -19
  56. rasa/core/channels/inspector/src/types.ts +0 -4
  57. rasa/core/constants.py +0 -4
  58. rasa/core/policies/enterprise_search_policy.py +2 -4
  59. rasa/core/policies/flow_policy.py +2 -2
  60. rasa/core/policies/flows/flow_executor.py +35 -374
  61. rasa/core/processor.py +1 -6
  62. rasa/core/run.py +1 -8
  63. rasa/core/utils.py +1 -21
  64. rasa/dialogue_understanding/commands/__init__.py +0 -8
  65. rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -97
  66. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +0 -11
  67. rasa/dialogue_understanding/commands/knowledge_answer_command.py +0 -11
  68. rasa/dialogue_understanding/commands/start_flow_command.py +8 -129
  69. rasa/dialogue_understanding/commands/utils.py +2 -6
  70. rasa/dialogue_understanding/generator/command_parser.py +0 -4
  71. rasa/dialogue_understanding/generator/llm_based_command_generator.py +12 -50
  72. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +6 -7
  73. rasa/dialogue_understanding/generator/single_step/search_ready_llm_command_generator.py +6 -7
  74. rasa/dialogue_understanding/generator/single_step/single_step_based_llm_command_generator.py +2 -41
  75. rasa/dialogue_understanding/patterns/continue_interrupted.py +1 -163
  76. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +7 -51
  77. rasa/dialogue_understanding/stack/dialogue_stack.py +2 -123
  78. rasa/dialogue_understanding/stack/frames/flow_stack_frame.py +0 -57
  79. rasa/dialogue_understanding/stack/utils.py +2 -3
  80. rasa/dialogue_understanding_test/du_test_runner.py +2 -7
  81. rasa/e2e_test/e2e_test_runner.py +0 -5
  82. rasa/server.py +0 -10
  83. rasa/shared/constants.py +0 -5
  84. rasa/shared/core/constants.py +1 -12
  85. rasa/shared/core/domain.py +5 -5
  86. rasa/shared/core/events.py +0 -319
  87. rasa/shared/core/flows/flows_list.py +2 -2
  88. rasa/shared/core/flows/flows_yaml_schema.json +186 -101
  89. rasa/shared/core/flows/steps/call.py +5 -51
  90. rasa/shared/core/flows/validation.py +7 -45
  91. rasa/shared/core/flows/yaml_flows_io.py +3 -3
  92. rasa/shared/providers/llm/_base_litellm_client.py +7 -39
  93. rasa/shared/providers/llm/litellm_router_llm_client.py +4 -8
  94. rasa/shared/providers/llm/llm_client.py +3 -7
  95. rasa/shared/providers/llm/llm_response.py +0 -49
  96. rasa/shared/providers/llm/self_hosted_llm_client.py +4 -8
  97. rasa/shared/utils/llm.py +5 -28
  98. rasa/shared/utils/schemas/events.py +0 -42
  99. rasa/studio/upload.py +7 -4
  100. rasa/tracing/instrumentation/instrumentation.py +2 -4
  101. rasa/utils/common.py +0 -53
  102. rasa/version.py +1 -1
  103. {rasa_pro-3.14.0.dev1.dist-info → rasa_pro-3.14.0.dev20250818.dist-info}/METADATA +2 -3
  104. {rasa_pro-3.14.0.dev1.dist-info → rasa_pro-3.14.0.dev20250818.dist-info}/RECORD +107 -143
  105. rasa/agents/__init__.py +0 -0
  106. rasa/agents/agent_factory.py +0 -122
  107. rasa/agents/agent_manager.py +0 -162
  108. rasa/agents/constants.py +0 -31
  109. rasa/agents/core/__init__.py +0 -0
  110. rasa/agents/core/agent_protocol.py +0 -108
  111. rasa/agents/core/types.py +0 -70
  112. rasa/agents/exceptions.py +0 -8
  113. rasa/agents/protocol/__init__.py +0 -5
  114. rasa/agents/protocol/a2a/__init__.py +0 -0
  115. rasa/agents/protocol/a2a/a2a_agent.py +0 -51
  116. rasa/agents/protocol/mcp/__init__.py +0 -0
  117. rasa/agents/protocol/mcp/mcp_base_agent.py +0 -697
  118. rasa/agents/protocol/mcp/mcp_open_agent.py +0 -275
  119. rasa/agents/protocol/mcp/mcp_task_agent.py +0 -447
  120. rasa/agents/schemas/__init__.py +0 -6
  121. rasa/agents/schemas/agent_input.py +0 -24
  122. rasa/agents/schemas/agent_output.py +0 -26
  123. rasa/agents/schemas/agent_tool_result.py +0 -51
  124. rasa/agents/schemas/agent_tool_schema.py +0 -112
  125. rasa/agents/templates/__init__.py +0 -0
  126. rasa/agents/templates/mcp_open_agent_prompt_template.jinja2 +0 -15
  127. rasa/agents/templates/mcp_task_agent_prompt_template.jinja2 +0 -13
  128. rasa/agents/utils.py +0 -72
  129. rasa/core/available_agents.py +0 -196
  130. rasa/core/channels/inspector/dist/assets/channel-c436ca7c.js +0 -1
  131. rasa/core/channels/inspector/dist/assets/clone-50dd656b.js +0 -1
  132. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-2b2aeaf8.js +0 -1
  133. rasa/core/channels/inspector/dist/assets/index-1bd9135e.js +0 -1353
  134. rasa/core/policies/flows/mcp_tool_executor.py +0 -240
  135. rasa/dialogue_understanding/commands/continue_agent_command.py +0 -91
  136. rasa/dialogue_understanding/commands/restart_agent_command.py +0 -146
  137. rasa/dialogue_understanding/generator/prompt_templates/agent_command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +0 -61
  138. rasa/dialogue_understanding/generator/prompt_templates/agent_command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +0 -61
  139. rasa/dialogue_understanding/generator/prompt_templates/agent_command_prompt_v3_claude_3_5_sonnet_20240620_template.jinja2 +0 -81
  140. rasa/dialogue_understanding/generator/prompt_templates/agent_command_prompt_v3_gpt_4o_2024_11_20_template.jinja2 +0 -81
  141. rasa/shared/agents/__init__.py +0 -0
  142. rasa/shared/agents/utils.py +0 -35
  143. rasa/shared/utils/mcp/__init__.py +0 -0
  144. rasa/shared/utils/mcp/server_connection.py +0 -157
  145. {rasa_pro-3.14.0.dev1.dist-info → rasa_pro-3.14.0.dev20250818.dist-info}/NOTICE +0 -0
  146. {rasa_pro-3.14.0.dev1.dist-info → rasa_pro-3.14.0.dev20250818.dist-info}/WHEEL +0 -0
  147. {rasa_pro-3.14.0.dev1.dist-info → rasa_pro-3.14.0.dev20250818.dist-info}/entry_points.txt +0 -0
rasa/shared/providers/llm/_base_litellm_client.py CHANGED
@@ -21,7 +21,7 @@ from rasa.shared.providers._ssl_verification_utils import (
     ensure_ssl_certificates_for_litellm_non_openai_based_clients,
     ensure_ssl_certificates_for_litellm_openai_based_clients,
 )
-from rasa.shared.providers.llm.llm_response import LLMResponse, LLMToolCall, LLMUsage
+from rasa.shared.providers.llm.llm_response import LLMResponse, LLMUsage
 from rasa.shared.utils.io import resolve_environment_variables, suppress_logs
 
 structlogger = structlog.get_logger()
@@ -126,9 +126,7 @@ class _BaseLiteLLMClient:
         raise ProviderClientValidationError(event_info)
 
     @suppress_logs(log_level=logging.WARNING)
-    def completion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
-    ) -> LLMResponse:
+    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
         """Synchronously generate completions for given list of messages.
 
         Args:
@@ -140,7 +138,6 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
 
         Returns:
             List of message completions.
@@ -150,19 +147,15 @@
         """
         try:
             formatted_messages = self._get_formatted_messages(messages)
-            arguments = cast(
-                Dict[str, Any], resolve_environment_variables(self._completion_fn_args)
-            )
-            response = completion(
-                messages=formatted_messages, **{**arguments, **kwargs}
-            )
+            arguments = resolve_environment_variables(self._completion_fn_args)
+            response = completion(messages=formatted_messages, **arguments)
             return self._format_response(response)
         except Exception as e:
             raise ProviderClientAPIException(e)
 
     @suppress_logs(log_level=logging.WARNING)
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
+        self, messages: Union[List[dict], List[str], str]
     ) -> LLMResponse:
         """Asynchronously generate completions for given list of messages.
 
@@ -175,7 +168,6 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
 
         Returns:
             List of message completions.
@@ -185,12 +177,8 @@
         """
         try:
             formatted_messages = self._get_formatted_messages(messages)
-            arguments = cast(
-                Dict[str, Any], resolve_environment_variables(self._completion_fn_args)
-            )
-            response = await acompletion(
-                messages=formatted_messages, **{**arguments, **kwargs}
-            )
+            arguments = resolve_environment_variables(self._completion_fn_args)
+            response = await acompletion(messages=formatted_messages, **arguments)
             return self._format_response(response)
         except Exception as e:
             message = ""
@@ -258,32 +246,12 @@
             else 0
         )
         formatted_response.usage = LLMUsage(prompt_tokens, completion_tokens)
-
-        # Extract tool calls from all choices
-        formatted_response.tool_calls = self._extract_tool_calls(response)
-
         structlogger.debug(
             "base_litellm_client.formatted_response",
             formatted_response=formatted_response.to_dict(),
         )
         return formatted_response
 
-    def _extract_tool_calls(self, response: Any) -> List[LLMToolCall]:
-        """Extract tool calls from response choices.
-
-        Args:
-            response: List of response choices from LiteLLM
-
-        Returns:
-            List of LLMToolCall objects, empty if no tool calls found
-        """
-        return [
-            LLMToolCall.from_litellm(tool_call)
-            for choice in response.choices
-            if choice.message.tool_calls
-            for tool_call in choice.message.tool_calls
-        ]
-
     def _format_text_completion_response(self, response: Any) -> LLMResponse:
         """Parses the LiteLLM text completion response to Rasa format."""
         formatted_response = LLMResponse(
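
A note on the change above: with **kwargs gone from completion() and acompletion(), nothing can be overridden per call any more; sampling parameters and similar options must be baked into the client's configured completion arguments (self._completion_fn_args). A minimal usage sketch against the narrowed interface; client construction is elided and the message shape follows the docstring above:

    # Sketch only: `client` is any object implementing the narrowed interface.
    response = client.completion(
        [{"role": "user", "content": "Summarize the last conversation."}]
    )
    print(response.choices[0])      # first generated completion
    if response.usage is not None:  # LLMUsage, when the provider reports it
        print(response.usage.to_dict())
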
rasa/shared/providers/llm/litellm_router_llm_client.py CHANGED
@@ -122,9 +122,7 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         raise ProviderClientAPIException(e)
 
     @suppress_logs(log_level=logging.WARNING)
-    def completion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
-    ) -> LLMResponse:
+    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
         """
         Synchronously generate completions for given list of messages.
 
@@ -142,7 +140,6 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
         Returns:
             List of message completions.
         Raises:
@@ -153,7 +150,7 @@
         try:
             formatted_messages = self._format_messages(messages)
             response = self.router_client.completion(
-                messages=formatted_messages, **{**self._completion_fn_args, **kwargs}
+                messages=formatted_messages, **self._completion_fn_args
             )
             return self._format_response(response)
         except Exception as e:
@@ -161,7 +158,7 @@
 
     @suppress_logs(log_level=logging.WARNING)
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
+        self, messages: Union[List[dict], List[str], str]
     ) -> LLMResponse:
         """
         Asynchronously generate completions for given list of messages.
@@ -180,7 +177,6 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
         Returns:
             List of message completions.
         Raises:
@@ -191,7 +187,7 @@
         try:
             formatted_messages = self._format_messages(messages)
             response = await self.router_client.acompletion(
-                messages=formatted_messages, **{**self._completion_fn_args, **kwargs}
+                messages=formatted_messages, **self._completion_fn_args
             )
             return self._format_response(response)
         except Exception as e:
rasa/shared/providers/llm/llm_client.py CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, Dict, List, Protocol, Union, runtime_checkable
+from typing import Dict, List, Protocol, Union, runtime_checkable
 
 from rasa.shared.providers.llm.llm_response import LLMResponse
 
@@ -32,9 +32,7 @@ class LLMClient(Protocol):
         """
         ...
 
-    def completion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
-    ) -> LLMResponse:
+    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
         """
         Synchronously generate completions for given list of messages.
 
@@ -50,14 +48,13 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
         Returns:
             LLMResponse
         """
         ...
 
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
+        self, messages: Union[List[dict], List[str], str]
     ) -> LLMResponse:
         """
         Asynchronously generate completions for given list of messages.
@@ -74,7 +71,6 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
         Returns:
             LLMResponse
         """
rasa/shared/providers/llm/llm_response.py CHANGED
@@ -1,14 +1,9 @@
 import functools
-import json
 import time
 from dataclasses import asdict, dataclass, field
 from typing import Any, Awaitable, Callable, Dict, List, Optional, Text, Union
 
 import structlog
-from litellm.utils import ChatCompletionMessageToolCall
-from pydantic import BaseModel
-
-from rasa.shared.constants import KEY_TOOL_CALLS
 
 structlogger = structlog.get_logger()
 
@@ -43,37 +38,6 @@ class LLMUsage:
         return asdict(self)
 
 
-class LLMToolCall(BaseModel):
-    """A class representing a response from an LLM tool call."""
-
-    id: str
-    """The ID of the tool call."""
-
-    tool_name: str
-    """The name of the tool that was called."""
-
-    tool_args: Dict[str, Any]
-    """The arguments passed to the tool call."""
-
-    type: str = "function"
-    """The type of the tool call."""
-
-    @classmethod
-    def from_dict(cls, data: Dict[Text, Any]) -> "LLMToolCall":
-        """Creates an LLMToolResponse from a dictionary."""
-        return cls(**data)
-
-    @classmethod
-    def from_litellm(cls, data: ChatCompletionMessageToolCall) -> "LLMToolCall":
-        """Creates an LLMToolResponse from a dictionary."""
-        return cls(
-            id=data.id,
-            tool_name=data.function.name,
-            tool_args=json.loads(data.function.arguments),
-            type=data.type,
-        )
-
-
 @dataclass
 class LLMResponse:
     id: str
@@ -98,22 +62,12 @@ class LLMResponse:
     latency: Optional[float] = None
     """Optional field to store the latency of the LLM API call."""
 
-    tool_calls: Optional[List[LLMToolCall]] = None
-    """The list of tool calls the model generated for the input prompt."""
-
     @classmethod
     def from_dict(cls, data: Dict[Text, Any]) -> "LLMResponse":
         """Creates an LLMResponse from a dictionary."""
         usage_data = data.get("usage", {})
         usage_obj = LLMUsage.from_dict(usage_data) if usage_data else None
 
-        tool_calls_data = data.get(KEY_TOOL_CALLS, [])
-        tool_calls_obj = (
-            [LLMToolCall.from_dict(tool) for tool in tool_calls_data]
-            if tool_calls_data
-            else None
-        )
-
         return cls(
             id=data["id"],
             choices=data["choices"],
@@ -122,7 +76,6 @@
             usage=usage_obj,
             additional_info=data.get("additional_info"),
             latency=data.get("latency"),
-            tool_calls=tool_calls_obj,
         )
 
     @classmethod
@@ -139,8 +92,6 @@
         result = asdict(self)
         if self.usage:
             result["usage"] = self.usage.to_dict()
-        if self.tool_calls:
-            result[KEY_TOOL_CALLS] = [tool.model_dump() for tool in self.tool_calls]
         return result
 
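
With tool_calls gone, an LLMResponse round-trips through from_dict()/to_dict() using only the remaining fields. A sketch of the round trip; the payload keys are the ones visible in this diff and may not be the complete set the dataclass defines:

    payload = {
        "id": "chatcmpl-123",
        "choices": ["Hello!"],
        "usage": {"prompt_tokens": 12, "completion_tokens": 3},
        "latency": 0.42,
    }
    response = LLMResponse.from_dict(payload)
    assert response.usage.prompt_tokens == 12
    assert "tool_calls" not in response.to_dict()  # key is no longer emitted
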
rasa/shared/providers/llm/self_hosted_llm_client.py CHANGED
@@ -237,7 +237,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
         raise ProviderClientAPIException(e)
 
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
+        self, messages: Union[List[dict], List[str], str]
     ) -> LLMResponse:
         """Asynchronous completion of the model with the given messages.
 
@@ -255,18 +255,15 @@
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            **kwargs: Additional parameters to pass to the completion call.
 
         Returns:
             The completion response.
         """
         if self._use_chat_completions_endpoint:
-            return await super().acompletion(messages, **kwargs)
+            return await super().acompletion(messages)
         return await self._atext_completion(messages)
 
-    def completion(
-        self, messages: Union[List[dict], List[str], str], **kwargs: Any
-    ) -> LLMResponse:
+    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
         """Completion of the model with the given messages.
 
         Method overrides the base class method to call the appropriate
@@ -276,13 +273,12 @@
 
         Args:
             messages: The messages to be used for completion.
-            **kwargs: Additional parameters to pass to the completion call.
 
         Returns:
             The completion response.
         """
         if self._use_chat_completions_endpoint:
-            return super().completion(messages, **kwargs)
+            return super().completion(messages)
         return self._text_completion(messages)
 
     @staticmethod
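
The self-hosted client now has a single point of variation: a boolean chosen at construction decides between the chat-completions and text-completions paths, and no per-call argument can change that. A stripped-down, runnable sketch of the same dispatch pattern (names and bodies are ours):

    class EndpointDispatchSketch:
        """Generic version of the dispatch in SelfHostedLLMClient above."""

        def __init__(self, use_chat_completions_endpoint: bool = True) -> None:
            # Fixed at construction; per-call overrides no longer exist.
            self._use_chat_completions_endpoint = use_chat_completions_endpoint

        def completion(self, message: str) -> str:
            if self._use_chat_completions_endpoint:
                return f"[chat endpoint] {message}"
            return f"[text endpoint] {message}"

    assert EndpointDispatchSketch(False).completion("hi").startswith("[text")
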
rasa/shared/utils/llm.py CHANGED
@@ -49,15 +49,7 @@ from rasa.shared.constants import (
     RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_TOO_LONG,
     ROUTER_CONFIG_KEY,
 )
-from rasa.shared.core.events import (
-    AgentCancelled,
-    AgentCompleted,
-    AgentInterrupted,
-    AgentResumed,
-    AgentStarted,
-    BotUttered,
-    UserUttered,
-)
+from rasa.shared.core.events import BotUttered, UserUttered
 from rasa.shared.core.slots import BooleanSlot, CategoricalSlot, Slot
 from rasa.shared.engine.caching import get_local_cache_location
 from rasa.shared.exceptions import (
@@ -120,7 +112,7 @@ DEPLOYMENT_CENTRIC_PROVIDERS = [AZURE_OPENAI_PROVIDER]
 
 # Placeholder messages used in the transcript for
 # instances where user input results in an error
-ERROR_PLACEHOLDER: Dict[str, str] = {
+ERROR_PLACEHOLDER = {
     RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_TOO_LONG: "[User sent really long message]",
     RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_EMPTY: "",
     "default": "[User input triggered an error]",
@@ -233,7 +225,6 @@ def tracker_as_readable_transcript(
     ai_prefix: str = AI,
     max_turns: Optional[int] = 20,
     turns_wrapper: Optional[Callable[[List[str]], List[str]]] = None,
-    highlight_agent_turns: bool = False,
 ) -> str:
     """Creates a readable dialogue from a tracker.
 
@@ -243,7 +234,6 @@
         ai_prefix: the prefix to use for ai utterances
         max_turns: the maximum number of turns to include in the transcript
         turns_wrapper: optional function to wrap the turns in a custom way
-        highlight_agent_turns: whether to highlight agent turns in the transcript
 
     Example:
         >>> tracker = Tracker(
@@ -261,9 +251,7 @@
     Returns:
         A string representing the transcript of the tracker
     """
-    transcript: List[str] = []
-
-    current_ai_prefix = ai_prefix
+    transcript = []
 
     # using `applied_events` rather than `events` means that only events after the
     # most recent `Restart` or `SessionStarted` are included in the transcript
@@ -278,20 +266,9 @@
             else:
                 message = sanitize_message_for_prompt(event.text)
             transcript.append(f"{human_prefix}: {message}")
-        elif isinstance(event, BotUttered):
-            transcript.append(
-                f"{current_ai_prefix}: {sanitize_message_for_prompt(event.text)}"
-            )
 
-        if highlight_agent_turns:
-            if isinstance(event, AgentStarted) or isinstance(event, AgentResumed):
-                current_ai_prefix = event.agent_id
-            elif (
-                isinstance(event, AgentCompleted)
-                or isinstance(event, AgentCancelled)
-                or isinstance(event, AgentInterrupted)
-            ):
-                current_ai_prefix = ai_prefix
+        elif isinstance(event, BotUttered):
+            transcript.append(f"{ai_prefix}: {sanitize_message_for_prompt(event.text)}")
 
     # turns_wrapper to count multiple utterances by bot/user as single turn
     if turns_wrapper:
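
With the agent-prefix bookkeeping removed, every bot turn is rendered under the single ai_prefix again. A sketch of the resulting output, assuming the module's default USER/AI prefixes and a hypothetical tracker with three turns:

    print(tracker_as_readable_transcript(tracker, max_turns=20))
    # USER: I want to transfer money
    # AI: How much would you like to transfer?
    # USER: 50 dollars
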
rasa/shared/utils/schemas/events.py CHANGED
@@ -160,43 +160,6 @@ FLOW_CANCELLED = {
         "step_id": {"type": "string"},
     }
 }
-AGENT_STARTED = {
-    "properties": {
-        "event": {"const": "agent_started"},
-        "agent_id": {"type": "string"},
-        "flow_id": {"type": "string"},
-    }
-}
-AGENT_COMPLETED = {
-    "properties": {
-        "event": {"const": "agent_completed"},
-        "agent_id": {"type": "string"},
-        "flow_id": {"type": "string"},
-        "status": {"type": "string"},
-    }
-}
-AGENT_INTERRUPTED = {
-    "properties": {
-        "event": {"const": "agent_interrupted"},
-        "agent_id": {"type": "string"},
-        "flow_id": {"type": "string"},
-    }
-}
-AGENT_RESUMED = {
-    "properties": {
-        "event": {"const": "agent_resumed"},
-        "agent_id": {"type": "string"},
-        "flow_id": {"type": "string"},
-    }
-}
-AGENT_CANCELLED = {
-    "properties": {
-        "event": {"const": "agent_cancelled"},
-        "agent_id": {"type": "string"},
-        "flow_id": {"type": "string"},
-        "reason": {"type": "string"},
-    }
-}
 DIALOGUE_STACK_UPDATED = {
     "properties": {"event": {"const": "stack"}, "update": {"type": "string"}}
 }
@@ -241,11 +204,6 @@ EVENT_SCHEMA = {
     FLOW_RESUMED,
     FLOW_COMPLETED,
     FLOW_CANCELLED,
-    AGENT_STARTED,
-    AGENT_COMPLETED,
-    AGENT_INTERRUPTED,
-    AGENT_RESUMED,
-    AGENT_CANCELLED,
     DIALOGUE_STACK_UPDATED,
     ROUTING_SESSION_ENDED,
     SESSION_ENDED,
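
Each entry here is a plain JSON Schema fragment, so a serialized event can be checked against one of the surviving sub-schemas directly. An illustrative check using the jsonschema package (Rasa's actual validation path may differ):

    from jsonschema import validate

    DIALOGUE_STACK_UPDATED = {
        "properties": {"event": {"const": "stack"}, "update": {"type": "string"}}
    }

    # A serialized stack event as it might appear in a tracker dump;
    # validate() raises ValidationError on mismatch, returns None on success.
    validate(
        instance={"event": "stack", "update": "[]"},
        schema=DIALOGUE_STACK_UPDATED,
    )
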
rasa/studio/upload.py CHANGED
@@ -115,9 +115,10 @@ def run_validation(args: argparse.Namespace) -> None:
     """
     from rasa.validator import Validator
 
+    training_data_paths = args.data if isinstance(args.data, list) else [args.data]
     training_data_importer = TrainingDataImporter.load_from_dict(
         domain_path=args.domain,
-        training_data_paths=[args.data],
+        training_data_paths=training_data_paths,
         config_path=args.config,
         expand_env_vars=False,
     )
@@ -263,8 +264,9 @@
     domain_from_files = importer.get_user_domain().as_dict()
     domain = extract_values(domain_from_files, DOMAIN_KEYS)
 
+    training_data_paths = data_path if isinstance(data_path, list) else [str(data_path)]
     flow_importer = FlowSyncImporter.load_from_dict(
-        training_data_paths=[str(data_path)], expand_env_vars=False
+        training_data_paths=training_data_paths, expand_env_vars=False
     )
 
     flows = list(flow_importer.get_user_flows())
@@ -272,7 +274,7 @@
         flows = read_yaml(flows_yaml, expand_env_vars=False)
 
     nlu_importer = TrainingDataImporter.load_from_dict(
-        training_data_paths=[str(data_path)], expand_env_vars=False
+        training_data_paths=training_data_paths, expand_env_vars=False
     )
     nlu_data = nlu_importer.get_nlu_data()
     nlu_examples = nlu_data.filter_training_examples(
@@ -349,9 +351,10 @@
         "rasa.studio.upload.nlu_data_read",
         event_info="Found DM1 assistant data, parsing...",
     )
+    training_data_paths = args.data if isinstance(args.data, list) else [args.data]
     importer = TrainingDataImporter.load_from_dict(
         domain_path=args.domain,
-        training_data_paths=[args.data],
+        training_data_paths=training_data_paths,
         config_path=args.config,
         expand_env_vars=False,
     )
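
All three normalizations above share one idiom: pass a list through unchanged, wrap a scalar into a single-element list. The old [args.data] form silently produced a nested list whenever the argument was already a list. The idiom in isolation as a hypothetical helper (the name is ours):

    from typing import List, Union

    def as_path_list(data: Union[str, List[str]]) -> List[str]:
        """Normalize a single path or a list of paths to a flat list."""
        return data if isinstance(data, list) else [str(data)]

    assert as_path_list("data/flows.yml") == ["data/flows.yml"]
    assert as_path_list(["a.yml", "b.yml"]) == ["a.yml", "b.yml"]
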
rasa/tracing/instrumentation/instrumentation.py CHANGED
@@ -1088,15 +1088,13 @@
 ) -> None:
     def tracing_advance_flows_until_next_action_wrapper(fn: Callable) -> Callable:
         @functools.wraps(fn)
-        async def wrapper(
+        def wrapper(
             tracker: DialogueStateTracker,
             available_actions: List[str],
             flows: FlowsList,
         ) -> FlowActionPrediction:
             with tracer.start_as_current_span(f"{module_name}.{fn.__name__}") as span:
-                prediction: FlowActionPrediction = await fn(
-                    tracker, available_actions, flows
-                )
+                prediction: FlowActionPrediction = fn(tracker, available_actions, flows)
 
                 span.set_attributes(
                     {
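
The change tracks the wrapped function becoming synchronous; the tracing pattern itself is unchanged: wrap the callable, open a span, record attributes, return the result. A generic, self-contained version of the same pattern against the OpenTelemetry API (decorator name is ours):

    import functools
    from typing import Any, Callable

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)

    def traced(fn: Callable[..., Any]) -> Callable[..., Any]:
        """Wrap a sync callable in a span, in the style of the diff above."""

        @functools.wraps(fn)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            with tracer.start_as_current_span(fn.__name__) as span:
                result = fn(*args, **kwargs)
                span.set_attribute("result_type", type(result).__name__)
                return result

        return wrapper
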
rasa/utils/common.py CHANGED
@@ -34,9 +34,7 @@ from rasa.constants import (
     ENV_LOG_LEVEL_KAFKA,
     ENV_LOG_LEVEL_LIBRARIES,
     ENV_LOG_LEVEL_MATPLOTLIB,
-    ENV_LOG_LEVEL_MCP,
     ENV_LOG_LEVEL_RABBITMQ,
-    ENV_MCP_LOGGING_ENABLED,
 )
 from rasa.shared.constants import DEFAULT_LOG_LEVEL, ENV_LOG_LEVEL, TCP_PROTOCOL
 from rasa.shared.exceptions import RasaException
@@ -283,7 +281,6 @@ def configure_library_logging() -> None:
     update_kafka_log_level(library_log_level)
     update_rabbitmq_log_level(library_log_level)
     update_websockets_log_level(library_log_level)
-    update_mcp_log_level()
 
 
 def update_apscheduler_log_level() -> None:
@@ -418,56 +415,6 @@ def update_websockets_log_level(library_log_level: Text) -> None:
     logging.getLogger("websockets").propagate = False
 
 
-def update_mcp_log_level() -> None:
-    """Set the log level for MCP-related loggers.
-
-    This function configures logging levels for MCP (Model Context Protocol) related
-    loggers to reduce noise from HTTP and MCP client libraries.
-
-    Environment Variables:
-        LOG_LEVEL_MCP: Set the log level for MCP-related loggers.
-            Valid values: DEBUG, INFO, WARNING, ERROR, CRITICAL
-            Default: ERROR
-
-        MCP_LOGGING_ENABLED: Enable or disable MCP logging completely.
-            Valid values: true, false
-            Default: true
-
-    Examples:
-        # Show only ERROR and above for MCP logs
-        export LOG_LEVEL_MCP=ERROR
-
-        # Show DEBUG level MCP logs (very verbose)
-        export LOG_LEVEL_MCP=DEBUG
-
-        # Completely disable MCP logging
-        export MCP_LOGGING_ENABLED=false
-    """
-    # Check if MCP logging is completely disabled
-    mcp_logging_enabled = (
-        os.environ.get(ENV_MCP_LOGGING_ENABLED, "true").lower() == "true"
-    )
-
-    # Default to ERROR level for MCP logs to reduce noise
-    mcp_log_level: Union[int, str] = os.environ.get(ENV_LOG_LEVEL_MCP, "ERROR")
-    if not mcp_logging_enabled:
-        # Completely disable MCP logging
-        mcp_log_level = logging.CRITICAL + 1  # Higher than CRITICAL to disable all logs
-
-    # MCP client and HTTP-related loggers that are commonly noisy
-    mcp_loggers = [
-        "mcp.client.streamable_http",
-        "mcp.client",
-        "httpcore.connection",
-        "httpcore.http11",
-        "httpx",
-    ]
-
-    for logger_name in mcp_loggers:
-        logging.getLogger(logger_name).setLevel(mcp_log_level)
-        logging.getLogger(logger_name).propagate = False
-
-
 def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
     """Sorts a list of dictionaries by their first key."""
     return sorted(dicts, key=lambda d: next(iter(d.keys())))
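
If the HTTP-client loggers that the removed helper silenced become noisy again after this upgrade, the same effect is a few lines of stdlib logging (logger names taken from the removed code):

    import logging

    for name in ("httpx", "httpcore.connection", "httpcore.http11"):
        logging.getLogger(name).setLevel(logging.ERROR)
        logging.getLogger(name).propagate = False
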
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.14.0.dev1"
+__version__ = "3.14.0.dev20250818"
{rasa_pro-3.14.0.dev1.dist-info → rasa_pro-3.14.0.dev20250818.dist-info}/METADATA CHANGED
@@ -1,13 +1,13 @@
 Metadata-Version: 2.3
 Name: rasa-pro
-Version: 3.14.0.dev1
+Version: 3.14.0.dev20250818
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
 Author: Rasa Technologies GmbH
 Author-email: hi@rasa.com
 Maintainer: Tom Bocklisch
 Maintainer-email: tom@rasa.com
-Requires-Python: >=3.10,<3.12
+Requires-Python: >=3.9.2,<3.12
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: Programming Language :: Python :: 3
@@ -67,7 +67,6 @@ Requires-Dist: langcodes (>=3.5.0,<4.0.0)
 Requires-Dist: litellm (>=1.69.0,<1.70.0)
 Requires-Dist: matplotlib (>=3.9.4,<3.10.0)
 Requires-Dist: mattermostwrapper (>=2.2,<2.3)
-Requires-Dist: mcp (>=1.12.0,<1.13.0)
 Requires-Dist: networkx (>=3.1,<3.2)
 Requires-Dist: numpy (>=1.26.4,<1.27.0)
 Requires-Dist: onnxruntime (==1.19.2) ; extra == "pii"