rasa-pro 3.12.17__py3-none-any.whl → 3.12.18.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


rasa/__init__.py CHANGED
@@ -5,5 +5,11 @@ from rasa import version
  # define the version before the other imports since these need it
  __version__ = version.__version__

+ from litellm.integrations.langfuse.langfuse import LangFuseLogger
+
+ from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
+
+ # Monkey-patch the init method as early as possible before the class is used
+ LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore

  logging.getLogger(__name__).addHandler(logging.NullHandler())
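
The new lines rebind LangFuseLogger.__init__ at import time, before any code can instantiate the class. A minimal self-contained sketch of that pattern, with a made-up ThirdPartyLogger standing in for LiteLLM's class:

    # Hypothetical stand-in class; the real target is LiteLLM's LangFuseLogger.
    class ThirdPartyLogger:
        def __init__(self) -> None:
            # Pretend this performs a slow network call we want to skip.
            raise RuntimeError("slow network call in __init__")

    def fixed_init(self) -> None:
        # Replacement initializer: no network call, just set required state.
        self.ready = True

    # Rebind before any instance exists; every later instantiation uses the fix.
    ThirdPartyLogger.__init__ = fixed_init

    logger = ThirdPartyLogger()
    assert logger.ready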
rasa/core/policies/intentless_policy.py CHANGED
@@ -721,7 +721,9 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
  final_response_examples.append(resp)

  llm_response = await self.generate_answer(
- final_response_examples, conversation_samples, history
+ final_response_examples,
+ conversation_samples,
+ history,
  )
  if not llm_response:
  structlogger.debug("intentless_policy.prediction.skip_llm_fail")
rasa/dialogue_understanding/coexistence/llm_based_router.py CHANGED
@@ -171,7 +171,6 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
  **kwargs: Any,
  ) -> "LLMBasedRouter":
  """Loads trained component (see parent class for full docstring)."""
-
  # Perform health check on the resolved LLM client config
  llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
  cls.perform_llm_health_check(
rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -1,6 +1,8 @@
  from abc import ABC, abstractmethod
+ from asyncio import Lock
  from functools import lru_cache
  from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
+ from uuid import UUID, uuid4

  import structlog
  from jinja2 import Environment, Template, select_autoescape
@@ -89,6 +91,9 @@ class LLMBasedCommandGenerator(
  else:
  self.flow_retrieval = None

+ self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
+ self._lock = Lock()
+
  ### Abstract methods
  @staticmethod
  @abstractmethod
@@ -225,8 +230,7 @@ class LLMBasedCommandGenerator(

  @lru_cache
  def compile_template(self, template: str) -> Template:
- """
- Compile the prompt template and register custom filters.
+ """Compile the prompt template and register custom filters.
  Compiling the template is an expensive operation,
  so we cache the result.
  """
@@ -328,7 +332,9 @@ class LLMBasedCommandGenerator(

  @measure_llm_latency
  async def invoke_llm(
- self, prompt: Union[List[dict], List[str], str]
+ self,
+ prompt: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> Optional[LLMResponse]:
  """Use LLM to generate a response.

@@ -341,6 +347,7 @@ class LLMBasedCommandGenerator(
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.

  Returns:
  An LLMResponse object.
@@ -352,7 +359,7 @@ class LLMBasedCommandGenerator(
  self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
  )
  try:
- return await llm.acompletion(prompt)
+ return await llm.acompletion(prompt, metadata)
  except Exception as e:
  # unfortunately, langchain does not wrap LLM exceptions which means
  # we have to catch all exceptions here
@@ -655,3 +662,7 @@ class LLMBasedCommandGenerator(
  def get_default_llm_config() -> Dict[str, Any]:
  """Get the default LLM config for the command generator."""
  return DEFAULT_LLM_CONFIG
+
+ async def _get_or_create_session_id(self, sender_id: str) -> UUID:
+ async with self._lock:
+ return self.sender_id_to_session_id_mapping.setdefault(sender_id, uuid4())
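
The new _get_or_create_session_id helper gives each sender a stable UUID, created lazily and guarded by an asyncio.Lock so concurrent turns from the same sender cannot race on first insertion. A self-contained sketch of the same pattern (class and names invented for illustration):

    import asyncio
    from typing import Dict
    from uuid import UUID, uuid4

    class SessionIds:
        """Hypothetical stand-in for the mapping kept on the command generator."""

        def __init__(self) -> None:
            self._mapping: Dict[str, UUID] = {}
            self._lock = asyncio.Lock()

        async def get_or_create(self, sender_id: str) -> UUID:
            # setdefault inserts a fresh UUID only on first sight of the sender.
            async with self._lock:
                return self._mapping.setdefault(sender_id, uuid4())

    async def main() -> None:
        sessions = SessionIds()
        first = await sessions.get_or_create("user-1")
        second = await sessions.get_or_create("user-1")
        assert first == second  # same sender keeps the same session id

    asyncio.run(main())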
rasa/dialogue_understanding/generator/llm_command_generator.py CHANGED
@@ -55,7 +55,9 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
  )

  async def invoke_llm(
- self, prompt: Union[List[dict], List[str], str]
+ self,
+ prompt: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> Optional[LLMResponse]:
  try:
  return await super().invoke_llm(prompt)
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py CHANGED
@@ -42,6 +42,9 @@ from rasa.engine.storage.resource import Resource
  from rasa.engine.storage.storage import ModelStorage
  from rasa.shared.constants import (
  EMBEDDINGS_CONFIG_KEY,
+ LANGFUSE_CUSTOM_METADATA_DICT,
+ LANGFUSE_METADATA_SESSION_ID,
+ LANGFUSE_TAGS,
  RASA_PATTERN_CANNOT_HANDLE_NOT_SUPPORTED,
  ROUTE_TO_CALM_SLOT,
  )
@@ -107,7 +110,7 @@ structlogger = structlog.get_logger()
  )
  @deprecated(
  reason=(
- "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
+ "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
  "Rasa `4.0.0`."
  )
  )
@@ -492,7 +495,20 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
  prompt=prompt,
  )

- response = await self.invoke_llm(prompt)
+ if tracker:
+ session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+ else:
+ session_id = "unknown"
+ metadata = {
+ LANGFUSE_METADATA_SESSION_ID: session_id,
+ LANGFUSE_CUSTOM_METADATA_DICT: {
+ "component": self.__class__.__name__,
+ "function": "_predict_commands_for_active_flow",
+ },
+ LANGFUSE_TAGS: [self.__class__.__name__],
+ }
+
+ response = await self.invoke_llm(prompt, metadata)
  llm_response = LLMResponse.ensure_llm_response(response)
  actions = None
  if llm_response and llm_response.choices:
@@ -546,8 +562,20 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
  ".prompt_rendered",
  prompt=prompt,
  )
+ if tracker:
+ session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+ else:
+ session_id = "unknown"
+ metadata = {
+ LANGFUSE_METADATA_SESSION_ID: session_id,
+ LANGFUSE_CUSTOM_METADATA_DICT: {
+ "component": self.__class__.__name__,
+ "function": "_predict_commands_for_handling_flows",
+ },
+ LANGFUSE_TAGS: [self.__class__.__name__],
+ }

- response = await self.invoke_llm(prompt)
+ response = await self.invoke_llm(prompt, metadata)
  llm_response = LLMResponse.ensure_llm_response(response)
  actions = None
  if llm_response and llm_response.choices:
@@ -636,8 +664,20 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
  flow=newly_started_flow.id,
  prompt=prompt,
  )
+ if tracker:
+ session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+ else:
+ session_id = "unknown"
+ metadata = {
+ LANGFUSE_METADATA_SESSION_ID: session_id,
+ LANGFUSE_CUSTOM_METADATA_DICT: {
+ "component": self.__class__.__name__,
+ "function": "_predict_commands_for_newly_started_flow",
+ },
+ LANGFUSE_TAGS: [self.__class__.__name__],
+ }

- response = await self.invoke_llm(prompt)
+ response = await self.invoke_llm(prompt, metadata)
  llm_response = LLMResponse.ensure_llm_response(response)
  actions = None
  if llm_response and llm_response.choices:
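
All three call sites above build the same three-key metadata dict, varying only the "function" entry. A hypothetical helper (not part of this release) that would express the shared shape:

    from typing import Any, Dict, Optional

    def build_langfuse_metadata(
        session_id: str, component: str, function: Optional[str] = None
    ) -> Dict[str, Any]:
        # Keys mirror LANGFUSE_METADATA_SESSION_ID, LANGFUSE_CUSTOM_METADATA_DICT
        # and LANGFUSE_TAGS from rasa/shared/constants.py.
        trace_metadata: Dict[str, Any] = {"component": component}
        if function is not None:
            trace_metadata["function"] = function
        return {
            "session_id": session_id,
            "trace_metadata": trace_metadata,
            "tags": [component],
        }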
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py CHANGED
@@ -47,6 +47,9 @@ from rasa.shared.constants import (
  AWS_BEDROCK_PROVIDER,
  AZURE_OPENAI_PROVIDER,
  EMBEDDINGS_CONFIG_KEY,
+ LANGFUSE_CUSTOM_METADATA_DICT,
+ LANGFUSE_METADATA_SESSION_ID,
+ LANGFUSE_TAGS,
  MAX_COMPLETION_TOKENS_CONFIG_KEY,
  PROMPT_TEMPLATE_CONFIG_KEY,
  ROUTE_TO_CALM_SLOT,
@@ -366,7 +369,17 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
  prompt=flow_prompt,
  )

- response = await self.invoke_llm(flow_prompt)
+ if tracker:
+ session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+ else:
+ session_id = "unknown"
+ metadata = {
+ LANGFUSE_METADATA_SESSION_ID: session_id,
+ LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+ LANGFUSE_TAGS: [self.__class__.__name__],
+ }
+
+ response = await self.invoke_llm(flow_prompt, metadata)
  llm_response = LLMResponse.ensure_llm_response(response)
  # The check for 'None' maintains compatibility with older versions
  # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
rasa/hooks.py CHANGED
@@ -1,8 +1,20 @@
  import argparse
  import logging
+ import os
  from typing import TYPE_CHECKING, List, Optional, Text, Union

+ import litellm
  import pluggy
+ import structlog
+
+ from rasa.shared.providers.constants import (
+ LANGFUSE_CALLBACK_NAME,
+ LANGFUSE_HOST_ENV_VAR,
+ LANGFUSE_PROJECT_ID_ENV_VAR,
+ LANGFUSE_PUBLIC_KEY_ENV_VAR,
+ LANGFUSE_SECRET_KEY_ENV_VAR,
+ RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR,
+ )

  # IMPORTANT: do not import anything from rasa here - use scoped imports
  # this avoids circular imports, as the hooks are used in different places
@@ -18,6 +30,7 @@ if TYPE_CHECKING:

  hookimpl = pluggy.HookimplMarker("rasa")
  logger = logging.getLogger(__name__)
+ structlogger = structlog.get_logger()


  @hookimpl  # type: ignore[misc]
@@ -57,6 +70,8 @@ def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Tex
  config.configure_tracing(tracer_provider)
  config.configure_metrics(endpoints_file)

+ _init_langfuse_integration()
+
  return endpoints_file


@@ -115,3 +130,43 @@ def after_server_stop() -> None:

  if anon_pipeline is not None:
  anon_pipeline.stop()
+
+
+ def _is_langfuse_integration_enabled() -> bool:
+ return (
+ os.environ.get(RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR, "false").lower()
+ == "true"
+ )
+
+
+ def _init_langfuse_integration() -> None:
+ if not _is_langfuse_integration_enabled():
+ structlogger.info(
+ "hooks._init_langfuse_integration.disabled",
+ event_info="Langfuse integration is disabled.",
+ )
+ return
+
+ if (
+ not os.environ.get(LANGFUSE_HOST_ENV_VAR)
+ or not os.environ.get(LANGFUSE_PROJECT_ID_ENV_VAR)
+ or not os.environ.get(LANGFUSE_PUBLIC_KEY_ENV_VAR)
+ or not os.environ.get(LANGFUSE_SECRET_KEY_ENV_VAR)
+ ):
+ structlogger.warning(
+ "hooks._init_langfuse_integration.missing_langfuse_keys",
+ event_info=(
+ "Langfuse integration is enabled, but some environment variables "
+ "are missing. Please set LANGFUSE_HOST, LANGFUSE_PROJECT_ID, "
+ "LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment "
+ "variables to use Langfuse integration."
+ ),
+ )
+ return
+
+ litellm.success_callback = [LANGFUSE_CALLBACK_NAME]
+ litellm.failure_callback = [LANGFUSE_CALLBACK_NAME]
+ structlogger.info(
+ "hooks.langfuse_callbacks_initialized",
+ event_info="Langfuse integration initialized.",
+ )
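
_init_langfuse_integration() only registers the callbacks when the feature gate and all four Langfuse variables are present. A sketch of the environment a deployment would set before starting Rasa (all values are placeholders):

    import os

    os.environ["RASA_LANGFUSE_INTEGRATION_ENABLED"] = "true"  # feature gate, checked first
    os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"
    os.environ["LANGFUSE_PROJECT_ID"] = "my-project-id"  # placeholder
    os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."      # placeholder
    os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."      # placeholder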
rasa/llm_fine_tuning/utils.py CHANGED
@@ -1,6 +1,6 @@
  from contextlib import contextmanager
  from datetime import datetime
- from typing import Callable, Generator, List, Union
+ from typing import Any, Callable, Dict, Generator, List, Optional, Union

  import structlog

@@ -24,7 +24,9 @@ def make_mock_invoke_llm(commands: str) -> Callable:
  """

  async def _mock_invoke_llm(
- self: LLMBasedCommandGenerator, prompt: Union[List[dict], List[str], str]
+ self: LLMBasedCommandGenerator,
+ prompt: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> LLMResponse:
  structlogger.debug(
  f"LLM call intercepted, response mocked. "
rasa/monkey_patches.py ADDED
@@ -0,0 +1,91 @@
+ import os
+ import traceback
+ from typing import Any, Optional
+
+ from litellm.secret_managers.main import str_to_bool
+ from packaging.version import Version
+
+
+ def litellm_langfuse_logger_init_fixed(
+ self: Any,  # we should not import LangfuseLogger class before we patch it
+ langfuse_public_key: Optional[str] = None,
+ langfuse_secret: Optional[str] = None,
+ langfuse_host: str = "https://cloud.langfuse.com",
+ flush_interval: int = 1,
+ ) -> None:
+ """Monkeypatched version of LangfuseLogger.__init__ from the LiteLLM library.
+
+ This patched version removes a call that fetched the `project_id` from
+ Langfuse Cloud even when it was already set via environment variables.
+ In the original implementation, this call was made *before* initializing
+ the LangfuseClient, which caused the application to freeze for up to 60 seconds.
+
+ By removing this premature call, the monkeypatch avoids the unnecessary network
+ request and prevents the timeout/freeze issue.
+
+ This workaround can be removed once the underlying bug is resolved in LiteLLM:
+ https://github.com/BerriAI/litellm/issues/7732
+ """
+ try:
+ import langfuse
+ from langfuse import Langfuse
+ except Exception as e:
+ raise Exception(
+ f"\033[91mLangfuse not installed, try running 'pip install langfuse' "
+ f"to fix this error: {e}\n{traceback.format_exc()}\033[0m"
+ )
+ # Instance variables
+ self.secret_key = langfuse_secret or os.getenv("LANGFUSE_SECRET_KEY", "")
+ self.public_key = langfuse_public_key or os.getenv("LANGFUSE_PUBLIC_KEY", "")
+
+ self.langfuse_host = langfuse_host or os.getenv(
+ "LANGFUSE_HOST", "https://cloud.langfuse.com"
+ )
+ self.langfuse_host.replace("http://", "https://")
+ if not self.langfuse_host.startswith("https://"):
+ self.langfuse_host = "https://" + self.langfuse_host
+
+ self.langfuse_release = os.getenv("LANGFUSE_RELEASE")
+ self.langfuse_debug = os.getenv("LANGFUSE_DEBUG")
+ self.langfuse_flush_interval = (
+ os.getenv("LANGFUSE_FLUSH_INTERVAL") or flush_interval
+ )
+
+ parameters = {
+ "public_key": self.public_key,
+ "secret_key": self.secret_key,
+ "host": self.langfuse_host,
+ "release": self.langfuse_release,
+ "debug": self.langfuse_debug,
+ "flush_interval": self.langfuse_flush_interval,  # flush interval in seconds
+ }
+
+ if Version(langfuse.version.__version__) >= Version("2.6.0"):
+ parameters["sdk_integration"] = "litellm"
+
+ self.Langfuse = Langfuse(**parameters)
+
+ if os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY") is not None:
+ upstream_langfuse_debug = (
+ str_to_bool(self.upstream_langfuse_debug)
+ if self.upstream_langfuse_debug is not None
+ else None
+ )
+ self.upstream_langfuse_secret_key = os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY")
+ self.upstream_langfuse_public_key = os.getenv("UPSTREAM_LANGFUSE_PUBLIC_KEY")
+ self.upstream_langfuse_host = os.getenv("UPSTREAM_LANGFUSE_HOST")
+ self.upstream_langfuse_release = os.getenv("UPSTREAM_LANGFUSE_RELEASE")
+ self.upstream_langfuse_debug = os.getenv("UPSTREAM_LANGFUSE_DEBUG")
+ self.upstream_langfuse = Langfuse(
+ public_key=self.upstream_langfuse_public_key,
+ secret_key=self.upstream_langfuse_secret_key,
+ host=self.upstream_langfuse_host,
+ release=self.upstream_langfuse_release,
+ debug=(
+ upstream_langfuse_debug
+ if upstream_langfuse_debug is not None
+ else False
+ ),
+ )
+ else:
+ self.upstream_langfuse = None
rasa/shared/constants.py CHANGED
@@ -342,3 +342,8 @@ ROLE_SYSTEM = "system"
  # Used for key values in ValidateSlotPatternFlowStackFrame
  REFILL_UTTER = "refill_utter"
  REJECTIONS = "rejections"
+
+ LANGFUSE_METADATA_USER_ID = "trace_user_id"
+ LANGFUSE_METADATA_SESSION_ID = "session_id"
+ LANGFUSE_CUSTOM_METADATA_DICT = "trace_metadata"
+ LANGFUSE_TAGS = "tags"
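
These constants name the metadata keys that LiteLLM's Langfuse callback reads. An illustrative dict built with them (all values invented):

    metadata = {
        "session_id": "8f0c0f4e-...",  # LANGFUSE_METADATA_SESSION_ID: groups calls into one session
        "trace_metadata": {"component": "CompactLLMCommandGenerator"},  # LANGFUSE_CUSTOM_METADATA_DICT
        "tags": ["CompactLLMCommandGenerator"],  # LANGFUSE_TAGS: filterable trace tags
    }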
rasa/shared/providers/constants.py CHANGED
@@ -4,3 +4,12 @@ LITE_LLM_API_KEY_FIELD = "api_key"
  LITE_LLM_API_VERSION_FIELD = "api_version"
  LITE_LLM_MODEL_FIELD = "model"
  LITE_LLM_AZURE_AD_TOKEN = "azure_ad_token"
+
+ # Enable or disable Langfuse integration
+ RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR = "RASA_LANGFUSE_INTEGRATION_ENABLED"
+ # Langfuse configuration
+ LANGFUSE_CALLBACK_NAME = "langfuse"
+ LANGFUSE_HOST_ENV_VAR = "LANGFUSE_HOST"
+ LANGFUSE_PROJECT_ID_ENV_VAR = "LANGFUSE_PROJECT_ID"
+ LANGFUSE_PUBLIC_KEY_ENV_VAR = "LANGFUSE_PUBLIC_KEY"
+ LANGFUSE_SECRET_KEY_ENV_VAR = "LANGFUSE_SECRET_KEY"
rasa/shared/providers/llm/_base_litellm_client.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations

  import logging
  from abc import abstractmethod
- from typing import Any, Dict, List, Union, cast
+ from typing import Any, Dict, List, Optional, Union, cast

  import structlog
  from litellm import acompletion, completion, validate_environment
@@ -126,7 +126,11 @@ class _BaseLiteLLMClient:
  raise ProviderClientValidationError(event_info)

  @suppress_logs(log_level=logging.WARNING)
- def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
+ def completion(
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
+ ) -> LLMResponse:
  """Synchronously generate completions for given list of messages.

  Args:
@@ -138,6 +142,7 @@ class _BaseLiteLLMClient:
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.

  Returns:
  List of message completions.
@@ -155,7 +160,9 @@ class _BaseLiteLLMClient:

  @suppress_logs(log_level=logging.WARNING)
  async def acompletion(
- self, messages: Union[List[dict], List[str], str]
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> LLMResponse:
  """Asynchronously generate completions for given list of messages.

@@ -168,6 +175,7 @@ class _BaseLiteLLMClient:
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.

  Returns:
  List of message completions.
@@ -178,7 +186,9 @@ class _BaseLiteLLMClient:
  try:
  formatted_messages = self._get_formatted_messages(messages)
  arguments = resolve_environment_variables(self._completion_fn_args)
- response = await acompletion(messages=formatted_messages, **arguments)
+ response = await acompletion(
+ messages=formatted_messages, metadata=metadata, **arguments
+ )
  return self._format_response(response)
  except Exception as e:
  message = ""
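
With this change the metadata dict travels through acompletion into litellm, which hands it to logging callbacks such as the Langfuse integration. A hedged sketch of the equivalent direct call (model name and values are placeholders; running it requires a configured provider API key):

    import asyncio
    import litellm

    async def main() -> None:
        response = await litellm.acompletion(
            model="gpt-4o-mini",  # hypothetical model choice
            messages=[{"role": "user", "content": "hello"}],
            # Forwarded to logging callbacks rather than the model itself.
            metadata={"session_id": "demo-session", "tags": ["demo"]},
        )
        print(response.choices[0].message.content)

    asyncio.run(main())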
rasa/shared/providers/llm/litellm_router_llm_client.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations

  import logging
- from typing import Any, Dict, List, Union
+ from typing import Any, Dict, List, Optional, Union

  import structlog

@@ -122,9 +122,12 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
  raise ProviderClientAPIException(e)

  @suppress_logs(log_level=logging.WARNING)
- def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
- """
- Synchronously generate completions for given list of messages.
+ def completion(
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
+ ) -> LLMResponse:
+ """Synchronously generate completions for given list of messages.

  Method overrides the base class method to call the appropriate
  completion method based on the configuration. If the chat completions
@@ -140,8 +143,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.
+
  Returns:
  List of message completions.
+
  Raises:
  ProviderClientAPIException: If the API request fails.
  """
@@ -158,10 +164,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):

  @suppress_logs(log_level=logging.WARNING)
  async def acompletion(
- self, messages: Union[List[dict], List[str], str]
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> LLMResponse:
- """
- Asynchronously generate completions for given list of messages.
+ """Asynchronously generate completions for given list of messages.

  Method overrides the base class method to call the appropriate
  completion method based on the configuration. If the chat completions
@@ -177,8 +184,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.
+
  Returns:
  List of message completions.
+
  Raises:
  ProviderClientAPIException: If the API request fails.
  """
rasa/shared/providers/llm/llm_client.py CHANGED
@@ -1,21 +1,19 @@
  from __future__ import annotations

- from typing import Dict, List, Protocol, Union, runtime_checkable
+ from typing import Any, Dict, List, Optional, Protocol, Union, runtime_checkable

  from rasa.shared.providers.llm.llm_response import LLMResponse


  @runtime_checkable
  class LLMClient(Protocol):
- """
- Protocol for an LLM client that specifies the interface for interacting
+ """Protocol for an LLM client that specifies the interface for interacting
  with the API.
  """

  @classmethod
  def from_config(cls, config: dict) -> LLMClient:
- """
- Initializes the llm client with the given configuration.
+ """Initializes the llm client with the given configuration.

  This class method should be implemented to parse the given
  configuration and create an instance of an llm client.
@@ -24,17 +22,24 @@ class LLMClient(Protocol):

  @property
  def config(self) -> Dict:
- """
- Returns the configuration for that the llm client is initialized with.
+ """Returns the configuration for that the llm client is initialized with.

  This property should be implemented to return a dictionary containing
  the configuration settings for the llm client.
  """
  ...

- def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
- """
- Synchronously generate completions for given list of messages.
+ def completion(
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
+ ) -> LLMResponse:
+ """Synchronously generate completions for given list of messages.
+ def completion(
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
+ ) -> LLMResponse:

  This method should be implemented to take a list of messages (as
  strings) and return a list of completions (as strings).
@@ -48,16 +53,19 @@ class LLMClient(Protocol):
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.
+
  Returns:
  LLMResponse
  """
  ...

  async def acompletion(
- self, messages: Union[List[dict], List[str], str]
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> LLMResponse:
- """
- Asynchronously generate completions for given list of messages.
+ """Asynchronously generate completions for given list of messages.

  This method should be implemented to take a list of messages (as
  strings) and return a list of completions (as strings).
@@ -71,14 +79,15 @@ class LLMClient(Protocol):
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.
+
  Returns:
  LLMResponse
  """
  ...

  def validate_client_setup(self, *args, **kwargs) -> None:  # type: ignore
- """
- Perform client setup validation.
+ """Perform client setup validation.

  This method should be implemented to validate whether the client can be
  used with the parameters provided through configuration or environment
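
Since LLMClient is a runtime-checkable Protocol, any class with matching methods satisfies it. A minimal sketch of a conforming test double (EchoResponse is a stand-in for rasa's LLMResponse; all names invented):

    from typing import Any, Dict, List, Optional, Union

    class EchoResponse:
        def __init__(self, text: str) -> None:
            self.choices = [text]

    class EchoLLMClient:
        @classmethod
        def from_config(cls, config: dict) -> "EchoLLMClient":
            return cls()

        @property
        def config(self) -> Dict:
            return {}

        def completion(
            self,
            messages: Union[List[dict], List[str], str],
            metadata: Optional[Dict[str, Any]] = None,
        ) -> EchoResponse:
            # Echo the prompt back; metadata is accepted but unused here.
            return EchoResponse(str(messages))

        async def acompletion(
            self,
            messages: Union[List[dict], List[str], str],
            metadata: Optional[Dict[str, Any]] = None,
        ) -> EchoResponse:
            return self.completion(messages, metadata)

        def validate_client_setup(self, *args: Any, **kwargs: Any) -> None:
            pass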
rasa/shared/providers/llm/self_hosted_llm_client.py CHANGED
@@ -237,7 +237,9 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
  raise ProviderClientAPIException(e)

  async def acompletion(
- self, messages: Union[List[dict], List[str], str]
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> LLMResponse:
  """Asynchronous completion of the model with the given messages.

@@ -255,6 +257,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
  - a list of messages. Each message is a string and will be formatted
  as a user message.
  - a single message as a string which will be formatted as user message.
+ metadata: Optional metadata to be passed to the LLM call.

  Returns:
  The completion response.
@@ -263,7 +266,11 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
  return await super().acompletion(messages)
  return await self._atext_completion(messages)

- def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
+ def completion(
+ self,
+ messages: Union[List[dict], List[str], str],
+ metadata: Optional[Dict[str, Any]] = None,
+ ) -> LLMResponse:
  """Completion of the model with the given messages.

  Method overrides the base class method to call the appropriate
@@ -273,6 +280,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
  Args:
  messages: The messages to be used for completion.
+ metadata: Optional metadata to be passed to the LLM call.

  Returns:
  The completion response.
rasa/tracing/instrumentation/attribute_extractors.py CHANGED
@@ -372,6 +372,7 @@ def extract_llm_config(
  def extract_attrs_for_llm_based_command_generator(
  self: "LLMBasedCommandGenerator",
  prompt: str,
+ metadata: Optional[Dict[str, Any]] = None,
  ) -> Dict[str, Any]:
  from rasa.dialogue_understanding.generator.flow_retrieval import (
  DEFAULT_EMBEDDINGS_CONFIG,
@@ -387,8 +388,7 @@ def extract_attrs_for_llm_based_command_generator(


  def extract_attrs_for_contextual_response_rephraser(
- self: Any,
- prompt: str,
+ self: Any, prompt: str
  ) -> Dict[str, Any]:
  from rasa.core.nlg.contextual_response_rephraser import DEFAULT_LLM_CONFIG

rasa/version.py CHANGED
@@ -1,3 +1,3 @@
  # this file will automatically be changed,
  # do not add anything but the version number here!
- __version__ = "3.12.17"
+ __version__ = "3.12.18.dev1"
rasa_pro-3.12.18.dev1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: rasa-pro
- Version: 3.12.17
+ Version: 3.12.18.dev1
  Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
  Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
  Author: Rasa Technologies GmbH
@@ -63,6 +63,7 @@ Requires-Dist: keras (==2.14.0)
  Requires-Dist: langchain (>=0.2.17,<0.3.0)
  Requires-Dist: langchain-community (>=0.2.19,<0.3.0)
  Requires-Dist: langcodes (>=3.5.0,<4.0.0)
+ Requires-Dist: langfuse (>=2.60.2,<2.61.0)
  Requires-Dist: litellm (>=1.69.0,<1.70.0)
  Requires-Dist: matplotlib (>=3.7,<3.8)
  Requires-Dist: mattermostwrapper (>=2.2,<2.3)
rasa_pro-3.12.18.dev1.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- rasa/__init__.py,sha256=YXG8RzVxiSJ__v-AewtV453YoCbmzWlHsU_4S0O2XpE,206
+ rasa/__init__.py,sha256=1LPdnp38vsouYw0bt_C0Q0mfLeDKAUaeiNdqMZaihCg,495
  rasa/__main__.py,sha256=OmUXcaA9l7KR_eSYCwaCSetuczxjrcN2taNnZ2ZUTbA,6472
  rasa/anonymization/__init__.py,sha256=Z-ZUW2ofZGfI6ysjYIS7U0JL4JSzDNOkHiiXK488Zik,86
  rasa/anonymization/anonymisation_rule_yaml_reader.py,sha256=8u8ZWfbpJuyUagrfth3IGfQXVlVz31esqExfDdasxZM,3171
@@ -335,7 +335,7 @@ rasa/core/policies/flows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  rasa/core/policies/flows/flow_exceptions.py,sha256=_FQuN-cerQDM1pivce9bz4zylh5UYkljvYS1gjDukHI,1527
  rasa/core/policies/flows/flow_executor.py,sha256=sT7ZFrm_CKVKBv5SO0M_QE984ZFw8t6trm8dMxCXbv8,25649
  rasa/core/policies/flows/flow_step_result.py,sha256=agjPrD6lahGSe2ViO5peBeoMdI9ngVGRSgtytgxmJmg,1360
- rasa/core/policies/intentless_policy.py,sha256=zxqlhawgqIjLCGkCzw1iOqq1iPCb8dPZFcJ-mTVrQjY,36511
+ rasa/core/policies/intentless_policy.py,sha256=TgLSmw8Ih9BeVSva88hy6N5xdYTTkFZNy07pLZyoR_8,36536
  rasa/core/policies/intentless_prompt_template.jinja2,sha256=KhIL3cruMmkxhrs5oVbqgSvK6ZiN_6TQ_jXrgtEB-ZY,677
  rasa/core/policies/memoization.py,sha256=CX2d3yP7FehSMW92Wi9NYLZei7tBzoT3T6yybu-Nb5s,19377
  rasa/core/policies/policy.py,sha256=5SUnPajSTSf8PzB1-jFbQPtsvR-zLN-xkjeotWOxuJc,27432
@@ -365,7 +365,7 @@ rasa/dialogue_understanding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
  rasa/dialogue_understanding/coexistence/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/dialogue_understanding/coexistence/constants.py,sha256=RpgLKMG4s7AgII0fRV0siS0Zh2QVI0OVRunhgm4q_j4,94
  rasa/dialogue_understanding/coexistence/intent_based_router.py,sha256=JlYBZdScnhflLK__i4bG0-PIkuFv0B7L4yOdnLgYWAY,7609
- rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=Bl38ZdQWJesb3NeR7sUvoQXXRzDTwSoLqnsNf_hH5rw,11897
+ rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=9oCQl2KhjdJX-_StWNb7ptMAbCfoYfBHOvDI8TzsHqY,11896
  rasa/dialogue_understanding/coexistence/router_template.jinja2,sha256=CHWFreN0sv1EbPh-hf5AlCt3zxy2_llX1Pdn9Q11Y18,357
  rasa/dialogue_understanding/commands/__init__.py,sha256=F-pLETYRUjhIkjjDfXGUuPsK_ac1HcLmJkrUUP0RhME,2259
  rasa/dialogue_understanding/commands/can_not_handle_command.py,sha256=fKOj9ScLxuaFO9Iw0p7og_4zMiw2weBdx322rBKlnCI,3519
@@ -400,19 +400,19 @@ rasa/dialogue_understanding/generator/command_parser.py,sha256=wf6FSgqBw5F0legg0
  rasa/dialogue_understanding/generator/constants.py,sha256=ulqmLIwrBOZLyhsCChI_4CdOnA0I8MfuBxxuKGyFp7U,1130
  rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
  rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=DavL-37e0tksMWkxvFImoqlsmYeYeSdDN3u7wZI0K-8,17817
- rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=P1Hwjt8ph2oQQ2PzWaaBRcU36ia4mN21nTzhLtEF5Wc,23586
- rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=z7jhIJ3W_5GFH-p15kVoWbigMIoY8fIJjc_j_uX7yxw,2581
+ rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=LURXYTU2uYuRhEYlz4rSZyqQJWydb7NxtlNT-fno39Q,24057
+ rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=E5byrCC_6r_GJm_HIosN_Se00NmXmnTCdOzaHMwTu6A,2641
  rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
  rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=LopAxEaY1PRNf28k_2tO1DTnPWVfh7S1qXJo6sSbPyw,32539
+ rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=0rpQonIcwSjcUWCLjJ5DKf1Z5XBJiDoJ6cC7Rj6NtAM,34088
  rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=cisxLlPVQXgbWMAz9xSxBvrOz4HO-f0G3CFVjJ2wt-g,10876
  rasa/dialogue_understanding/generator/prompt_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
  rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2,sha256=z-cnFVfIE_kEnY1o52YE2CdCWwgYTv7R3xVxsjXWlnw,3808
  rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2,sha256=4076ARsy0E0iADBX6li19IoM3F4F-2wK3bL6UEOvCdo,3620
  rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=Lm688m3m_Z2ZvIrpTQlmxeA2Op-S0ViSPk3wIknyCmM,22413
+ rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=cd_B0KYyYWl_HbjHekGGLlVzOyxjA096cA6rA2nArC8,22889
  rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=RWTPdeBfdGUmdFSUzdQejcbJJLhc_815G0g6AabTK04,5100
  rasa/dialogue_understanding/generator/utils.py,sha256=jxtb-AfngN59y2rHynqJDK80xM_yooEvr3aW1MWl6H0,2760
  rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -530,7 +530,7 @@ rasa/graph_components/providers/training_tracker_provider.py,sha256=FaCWHJA69EpM
  rasa/graph_components/validators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/graph_components/validators/default_recipe_validator.py,sha256=iOVoB7zVTKes8EYW110fz8ZvtgoDcCX25GlUsiESS18,24457
  rasa/graph_components/validators/finetuning_validator.py,sha256=VfCGytnweijKBG8bAqYp7zKZB2aRgi2ZI8R0eou5Ev4,12865
- rasa/hooks.py,sha256=5ZMrqNz323w56MMY6E8jeZ_YXgRqq8p-yi18S2XOmbo,4061
+ rasa/hooks.py,sha256=xQLqqPpebL04AuKZiYJEZaBJyubTdGetCW7cvmjXg7o,5804
  rasa/jupyter.py,sha256=TCYVD4QPQIMmfA6ZwDUBOBTAECwCwbU2XOkosodLO9k,1782
  rasa/keys,sha256=2Stg1fstgJ203cOoW1B2gGMY29fhEnjIfTVxKv_fqPo,101
  rasa/llm_fine_tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -545,7 +545,7 @@ rasa/llm_fine_tuning/paraphrasing/rephrased_user_message.py,sha256=cOEmZ71yDXW9-
  rasa/llm_fine_tuning/paraphrasing_module.py,sha256=DIimTsamitS2k-Mes3OCBc0KPK52pSMRPOH_N7TcTIk,4574
  rasa/llm_fine_tuning/storage.py,sha256=wSurHOYh_hk0rNiHQIcXEdXqakB9M4UiCRlrT8S4WZs,5776
  rasa/llm_fine_tuning/train_test_split_module.py,sha256=z1sFYN3-5rmABiJqOjabLMEbkLK8bNfrXkooLCKDZM4,16832
- rasa/llm_fine_tuning/utils.py,sha256=wAhiwh-CF-gxxRkNI_Mr4wJ4T7HA7jrLjsxjCxosyeE,2357
+ rasa/llm_fine_tuning/utils.py,sha256=0bhNnVyITjf6i7u-jG0ZsppKM6ZNokwGfR6MT4I6KeY,2438
  rasa/markers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/markers/marker.py,sha256=TCLhJ-wHcvVlajIsaMm_NOqL_H6X553Oey5UZ05uCSc,9147
  rasa/markers/marker_base.py,sha256=7yxUQB2Sw7issHZBFF9mcPvO9IhabywExAEvK_mm0Ks,33467
@@ -564,6 +564,7 @@ rasa/model_manager/warm_rasa_process.py,sha256=2vg8gBEUvPrr6C5W-fxtWWSajksrOaT83
  rasa/model_service.py,sha256=XXCaiLj2xq58n05W3R1jmTIv-V8f_7PG30kVpRxf71Y,3727
  rasa/model_testing.py,sha256=eZw7l8Zz3HkH_ZPBurY93HzzudHdoQn8HBnDdZSysAY,14929
  rasa/model_training.py,sha256=10cw_CIN3q05gmTHqUdLgsfSlmyWPL0dSkrkflYbOmA,22071
+ rasa/monkey_patches.py,sha256=pZTDKQ8GNzeiUWeJ2MneUuremSNVScL7oXeMAEd4o4Y,3687
  rasa/nlu/__init__.py,sha256=D0IYuTK_ZQ_F_9xsy0bXxVCAtU62Fzvp8S7J9tmfI_c,123
  rasa/nlu/classifiers/__init__.py,sha256=Qvrf7_rfiMxm2Vt2fClb56R3QFExf7WPdFdL-AOvgsk,118
  rasa/nlu/classifiers/classifier.py,sha256=9fm1mORuFf1vowYIXmqE9yLRKdSC4nGQW7UqNZQipKY,133
@@ -625,7 +626,7 @@ rasa/nlu/utils/spacy_utils.py,sha256=5EnHR-MVAZhGbg2rq8VpOu7I0tagV3ThRTlM0-WO2Cg
  rasa/plugin.py,sha256=cSmFhSWr5WQyYXdJOWwgH4ra_2kbhoNLZAtnqcsGny4,3071
  rasa/server.py,sha256=0GQ9rML75EOuRDpUHZjz-uYbkSbnNuK0SRIGQJeiR-I,59599
  rasa/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/constants.py,sha256=u9GnSSQYRjYN_mjd7XHMGgoVc6ipoiZQuLt3bFOF0O0,12264
+ rasa/shared/constants.py,sha256=uVK0EeY99Bh8xV8mUHNVVEyvJKfWOsdAO1MSFi-4WBU,12425
  rasa/shared/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/core/command_payload_reader.py,sha256=puHYsp9xbX0YQm2L1NDBItOFmdzI7AzmfGefgcHiCc0,3871
  rasa/shared/core/constants.py,sha256=gwIZHjQYafHnBlMe9_jUiIPm17hxYG9R1MOCtxeC1Ns,6337
@@ -724,7 +725,7 @@ rasa/shared/providers/_configs/self_hosted_llm_client_config.py,sha256=l2JnypPXF
  rasa/shared/providers/_configs/utils.py,sha256=u2Ram05YwQ7-frm_r8n9rafjZoF8i0qSC7XjYQRuPgo,3732
  rasa/shared/providers/_ssl_verification_utils.py,sha256=vUnP0vocf0GQ0wG8IQpPcCet4c1C9-wQWQNckNWbDBk,4165
  rasa/shared/providers/_utils.py,sha256=EZIrz3ugcI-9PWgC7v0VMUNYondAAOeeRLIE8ZmResw,5886
- rasa/shared/providers/constants.py,sha256=hgV8yNGxIbID_2h65OoSfSjIE4UkazrsqRg4SdkPAmI,234
+ rasa/shared/providers/constants.py,sha256=yF9giGO8xWCrW9dzUW-7wX-y6sh7hlbYzHYKFayrF7A,613
  rasa/shared/providers/embedding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/providers/embedding/_base_litellm_embedding_client.py,sha256=1CUYxps_TrLVyPsPfOw7iDS502fDePseBIKnqc3ncwQ,9005
  rasa/shared/providers/embedding/_langchain_embedding_client_adapter.py,sha256=IR2Rb3ReJ9C9sxOoOGRXgtz8STWdMREs_4AeSMKFjl4,2135
@@ -736,15 +737,15 @@ rasa/shared/providers/embedding/huggingface_local_embedding_client.py,sha256=Zo3
  rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=eafDk6IgQtL_kiKgpa6sJs1oATyRi2NT2leUFQsED2s,4551
  rasa/shared/providers/embedding/openai_embedding_client.py,sha256=XNRGE7apo2v3kWRrtgxE-Gq4rvNko3IiXtvgC4krDYE,5429
  rasa/shared/providers/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/providers/llm/_base_litellm_client.py,sha256=Ua5Kt6VGe5vRzSzWWWx2Q3LH2PCDd8V7V4zfYD464yU,11634
+ rasa/shared/providers/llm/_base_litellm_client.py,sha256=X9SmcPTFYqmttyUfre_1lMtkTRLtPGg78fRiREOJYtk,11967
  rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=ui85vothxR2P_-eLc4nLgbpjnpEKY2BXnIjLxBZoYz8,12504
  rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=xx-o-NX_mtx6AszK--ZRj8n8JyEJuVu1-42dt8AynBM,4083
- rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=_6vAdPLAVSI_sBJLaXLnE87M-0ip_klfQ78fQ_pyoyI,7947
- rasa/shared/providers/llm/llm_client.py,sha256=-hTCRsL-A3GCMRHtcyCgcCyra-9OJ8GUC-mURoRXH0k,3242
+ rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=kF8yqwxBNjcIYz022yv0gP5RqnJzx6bfG-hcpK5ovKE,8217
+ rasa/shared/providers/llm/llm_client.py,sha256=11xgWbjV8brvQN-EZPjZHNofImY8JKlRmrbOD7UaL-o,3651
  rasa/shared/providers/llm/llm_response.py,sha256=8mOpZdmh4-3yM7aOmNO0yEYUmRDErfoP7ZDMUuHr2Cc,3504
  rasa/shared/providers/llm/openai_llm_client.py,sha256=rSdLj29Hl1Wm5G6Uwo77j4WqogK_3QIbTA7fyt63YAg,5013
  rasa/shared/providers/llm/rasa_llm_client.py,sha256=44Tvtnkq4mxDIxtdrGUkwBWAvX1OLaswqmpAsyBH8e8,3504
- rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=X3QyA5nZbQap0tomg0dQozbY39Ry0y-lLnj-EowK6dI,10270
+ rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=85jnA7AO2W4OqV0874N5YBzTafVeYtiRbaRyzyA_lKA,10544
  rasa/shared/providers/mappings.py,sha256=QSD3XWvhYCtBLNpGycN30vEnLULYIaqCsAtmfPfSZ3U,3674
  rasa/shared/providers/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/providers/router/_base_litellm_router_client.py,sha256=JV9lYnhIG_CWMtPB5nofjNdRO5V-Wl0DH-HyPm__eJ0,11003
@@ -781,7 +782,7 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/tracing/config.py,sha256=32X2rqAiHe0e-Iijb5AivjqDs2j03n8xx5mo07NBMI4,12964
  rasa/tracing/constants.py,sha256=-3vlfI9v_D8f-KB5tuiqBHhszu2WofFQOyjKBn28gyg,2889
  rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/tracing/instrumentation/attribute_extractors.py,sha256=hkdnqIn8PkD1ykxGbPMv-TPHbhtLgOoMQGmwcvfhi2c,29471
+ rasa/tracing/instrumentation/attribute_extractors.py,sha256=-w80ZDIF85aEb2OkYqZ75VssbfCWfC7Yq78i-cuc0TU,29513
  rasa/tracing/instrumentation/instrumentation.py,sha256=BPI5OoZFbl90kVJzlKEz-eD8cf-CaX_x1t4V9XBhDKo,53625
  rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=RgixI0FVIzBz19E3onidUpSEwjkAh8paA5_w07PMzFo,4821
  rasa/tracing/instrumentation/metrics.py,sha256=DI_qIS6sz5KYU4QDcPKfnHxKLL_Ma3wV6diH4_vg85c,12051
@@ -822,9 +823,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
  rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
  rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
  rasa/validator.py,sha256=524VlFTYK0B3iXYveVD6BDC3K0j1QfpzJ9O-TAWczmc,83166
- rasa/version.py,sha256=G6pCaFUa8m0gOyB-7v39b5KKj9mYAA-nDF6xByqy_fA,118
- rasa_pro-3.12.17.dist-info/METADATA,sha256=sijEl4PGvymYJ1TbvZKHjU47lRUsZ8zuSSTCqX-qbWA,10609
- rasa_pro-3.12.17.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
- rasa_pro-3.12.17.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
- rasa_pro-3.12.17.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
- rasa_pro-3.12.17.dist-info/RECORD,,
+ rasa/version.py,sha256=mZsw8hlhY0sCzlC9OLh9z0BUADqzJrmfWQZYJBAC6hw,123
+ rasa_pro-3.12.18.dev1.dist-info/METADATA,sha256=jqVLmSY0RXPf0Xe9hpL83-BAo0SUj4bzwDiTjhTBX8s,10657
+ rasa_pro-3.12.18.dev1.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+ rasa_pro-3.12.18.dev1.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+ rasa_pro-3.12.18.dev1.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+ rasa_pro-3.12.18.dev1.dist-info/RECORD,,