rasa-pro 3.12.10.dev1__py3-none-any.whl → 3.12.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro might be problematic.

rasa/__init__.py CHANGED
@@ -5,11 +5,5 @@ from rasa import version
  # define the version before the other imports since these need it
  __version__ = version.__version__

- from litellm.integrations.langfuse.langfuse import LangFuseLogger
-
- from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
-
- # Monkey-patch the init method as early as possible before the class is used
- LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore

  logging.getLogger(__name__).addHandler(logging.NullHandler())
@@ -898,7 +898,7 @@ class RemoteAction(Action):
          draft["buttons"].extend(buttons)

          # Avoid overwriting `draft` values with empty values
-         response = {k: v for k, v in response.items() if v}
+         response = {k: v for k, v in response.items() if v is not None}
          draft.update(response)
          bot_messages.append(create_bot_utterance(draft))

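The filter change above is behavioral, not cosmetic: `if v` discards every falsy value, while `if v is not None` discards only explicit `None`. A minimal Python sketch of the difference (illustrative values, not taken from the package):

    response = {"text": "", "buttons": [], "image": None, "custom": {"a": 1}}

    # old filter: empty-but-meaningful values vanish, so they can never
    # overwrite the corresponding `draft` entries
    {k: v for k, v in response.items() if v}
    # -> {'custom': {'a': 1}}

    # new filter: only None means "no value"
    {k: v for k, v in response.items() if v is not None}
    # -> {'text': '', 'buttons': [], 'custom': {'a': 1}}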
@@ -6,7 +6,18 @@ import uuid
  from collections import defaultdict
  from dataclasses import asdict
  from datetime import datetime, timedelta, timezone
- from typing import Any, Awaitable, Callable, Dict, List, Optional, Set, Text, Union
+ from typing import (
+     Any,
+     Awaitable,
+     Callable,
+     Dict,
+     List,
+     Optional,
+     Set,
+     Text,
+     Tuple,
+     Union,
+ )

  import structlog
  from jsonschema import ValidationError, validate
@@ -76,35 +87,45 @@ class Conversation:

      @staticmethod
      def get_metadata(activity: Dict[Text, Any]) -> Optional[Dict[Text, Any]]:
-         """Get metadata from the activity."""
-         return asdict(map_call_params(activity["parameters"]))
+         """Get metadata from the activity.
+
+         ONLY used for activities NOT for events (see _handle_event)."""
+         return activity.get("parameters")

      @staticmethod
-     def _handle_event(event: Dict[Text, Any]) -> Text:
-         """Handle start and DTMF event and return the corresponding text."""
+     def _handle_event(event: Dict[Text, Any]) -> Tuple[Text, Dict[Text, Any]]:
+         """Handle events and return a tuple of text and metadata.
+
+         Args:
+             event: The event to handle.
+
+         Returns:
+             Tuple of text and metadata.
+             text is either /session_start or /vaig_event_<event_name>
+             metadata is a dictionary with the event parameters.
+         """
          structlogger.debug("audiocodes.handle.event", event_payload=event)
          if "name" not in event:
              structlogger.warning(
                  "audiocodes.handle.event.no_name_key", event_payload=event
              )
-             return ""
+             return "", {}

          if event["name"] == EVENT_START:
              text = f"{INTENT_MESSAGE_PREFIX}{USER_INTENT_SESSION_START}"
+             metadata = asdict(map_call_params(event.get("parameters", {})))
          elif event["name"] == EVENT_DTMF:
              text = f"{INTENT_MESSAGE_PREFIX}vaig_event_DTMF"
-             event_params = {"value": event["value"]}
-             text += json.dumps(event_params)
+             metadata = {"value": event["value"]}
          else:
              # handle other events described by Audiocodes
              # https://techdocs.audiocodes.com/voice-ai-connect/#VAIG_Combined/inactivity-detection.htm?TocPath=Bot%2520integration%257CReceiving%2520notifications%257C_____3
              text = f"{INTENT_MESSAGE_PREFIX}vaig_event_{event['name']}"
-             event_params = {**event.get("parameters", {})}
+             metadata = {**event.get("parameters", {})}
              if "value" in event:
-                 event_params["value"] = event["value"]
-             text += json.dumps(event_params)
+                 metadata["value"] = event["value"]

-         return text
+         return text, metadata

      def is_active_conversation(self, now: datetime, delta: timedelta) -> bool:
          """Check if the conversation is active."""
@@ -139,21 +160,29 @@ class Conversation:
                  structlogger.warning(
                      "audiocodes.handle.activities.duplicate_activity",
                      activity_id=activity[ACTIVITY_ID_KEY],
+                     event_info=(
+                         "Audiocodes might send duplicate activities if the bot has not "
+                         "responded to the previous one or responded too late. Please "
+                         "consider enabling the `use_websocket` option to use"
+                         " Audiocodes Asynchronous API."
+                     ),
                  )
                  continue
              self.activity_ids.append(activity[ACTIVITY_ID_KEY])
              if activity["type"] == ACTIVITY_MESSAGE:
                  text = activity["text"]
+                 metadata = self.get_metadata(activity)
              elif activity["type"] == ACTIVITY_EVENT:
-                 text = self._handle_event(activity)
+                 text, metadata = self._handle_event(activity)
              else:
                  structlogger.warning(
                      "audiocodes.handle.activities.unknown_activity_type",
                      activity=activity,
                  )
+                 continue
+
              if not text:
                  continue
-             metadata = self.get_metadata(activity)
              user_msg = UserMessage(
                  text=text,
                  input_channel=input_channel_name,
@@ -392,30 +421,41 @@ class AudiocodesInput(InputChannel):
              "audiocodes.on_activities.no_conversation", request=request.json
          )
          return response.json({})
-     elif conversation.ws:
+
+     if self.use_websocket:
+         # send an empty response for this request
+         # activities are processed in the background
+         # chat response is sent via the websocket
          ac_output: Union[WebsocketOutput, AudiocodesOutput] = WebsocketOutput(
              conversation.ws, conversation_id
          )
-         response_json = {}
-     else:
-         # handle non websocket case where messages get returned in json
-         ac_output = AudiocodesOutput()
-         response_json = {
+         self._create_task(
+             conversation_id,
+             conversation.handle_activities(
+                 request.json,
+                 input_channel_name=self.name(),
+                 output_channel=ac_output,
+                 on_new_message=on_new_message,
+             ),
+         )
+         return response.json({})
+
+     # without websockets, this becomes a blocking call
+     # and the response is sent back to the Audiocodes server
+     # after the activities are processed
+     ac_output = AudiocodesOutput()
+     await conversation.handle_activities(
+         request.json,
+         input_channel_name=self.name(),
+         output_channel=ac_output,
+         on_new_message=on_new_message,
+     )
+     return response.json(
+         {
              "conversation": conversation_id,
              "activities": ac_output.messages,
          }
-
-     # start a background task to handle activities
-     self._create_task(
-         conversation_id,
-         conversation.handle_activities(
-             request.json,
-             input_channel_name=self.name(),
-             output_channel=ac_output,
-             on_new_message=on_new_message,
-         ),
-     )
      )
-     return response.json(response_json)

  @ac_webhook.route(
      "/conversation/<conversation_id>/disconnect", methods=["POST"]
@@ -715,9 +715,7 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
              final_response_examples.append(resp)

          llm_response = await self.generate_answer(
-             final_response_examples,
-             conversation_samples,
-             history,
+             final_response_examples, conversation_samples, history
          )
          if not llm_response:
              structlogger.debug("intentless_policy.prediction.skip_llm_fail")
@@ -166,6 +166,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
          **kwargs: Any,
      ) -> "LLMBasedRouter":
          """Loads trained component (see parent class for full docstring)."""
+
          # Perform health check on the resolved LLM client config
          llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
          cls.perform_llm_health_check(
@@ -1,8 +1,6 @@
  from abc import ABC, abstractmethod
- from asyncio import Lock
  from functools import lru_cache
  from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
- from uuid import UUID, uuid4

  import structlog
  from jinja2 import Environment, Template, select_autoescape
@@ -91,9 +89,6 @@ class LLMBasedCommandGenerator(
          else:
              self.flow_retrieval = None

-         self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
-         self._lock = Lock()
-
      ### Abstract methods
      @staticmethod
      @abstractmethod
@@ -230,7 +225,8 @@ class LLMBasedCommandGenerator(

      @lru_cache
      def compile_template(self, template: str) -> Template:
-         """Compile the prompt template and register custom filters.
+         """
+         Compile the prompt template and register custom filters.
          Compiling the template is an expensive operation,
          so we cache the result.
          """
@@ -332,9 +328,7 @@ class LLMBasedCommandGenerator(

      @measure_llm_latency
      async def invoke_llm(
-         self,
-         prompt: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
+         self, prompt: Union[List[dict], List[str], str]
      ) -> Optional[LLMResponse]:
          """Use LLM to generate a response.

@@ -347,7 +341,6 @@ class LLMBasedCommandGenerator(
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.

          Returns:
              An LLMResponse object.
@@ -359,7 +352,7 @@ class LLMBasedCommandGenerator(
              self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
          )
          try:
-             return await llm.acompletion(prompt, metadata)
+             return await llm.acompletion(prompt)
          except Exception as e:
              # unfortunately, langchain does not wrap LLM exceptions which means
              # we have to catch all exceptions here
@@ -662,7 +655,3 @@ class LLMBasedCommandGenerator(
      def get_default_llm_config() -> Dict[str, Any]:
          """Get the default LLM config for the command generator."""
          return DEFAULT_LLM_CONFIG
-
-     async def _get_or_create_session_id(self, sender_id: str) -> UUID:
-         async with self._lock:
-             return self.sender_id_to_session_id_mapping.setdefault(sender_id, uuid4())
@@ -55,9 +55,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
          )

      async def invoke_llm(
-         self,
-         prompt: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
+         self, prompt: Union[List[dict], List[str], str]
      ) -> Optional[LLMResponse]:
          try:
              return await super().invoke_llm(prompt)
@@ -42,9 +42,6 @@ from rasa.engine.storage.resource import Resource
  from rasa.engine.storage.storage import ModelStorage
  from rasa.shared.constants import (
      EMBEDDINGS_CONFIG_KEY,
-     LANGFUSE_CUSTOM_METADATA_DICT,
-     LANGFUSE_METADATA_SESSION_ID,
-     LANGFUSE_TAGS,
      RASA_PATTERN_CANNOT_HANDLE_NOT_SUPPORTED,
      ROUTE_TO_CALM_SLOT,
  )
@@ -110,7 +107,7 @@ structlogger = structlog.get_logger()
  )
  @deprecated(
      reason=(
-         "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
+         "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
          "Rasa `4.0.0`."
      )
  )
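(The removed and added lines above appear identical because the change seems to be whitespace-only, which this rendering cannot show.)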
@@ -495,20 +492,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
              prompt=prompt,
          )

-         if tracker:
-             session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-         else:
-             session_id = "unknown"
-         metadata = {
-             LANGFUSE_METADATA_SESSION_ID: session_id,
-             LANGFUSE_CUSTOM_METADATA_DICT: {
-                 "component": self.__class__.__name__,
-                 "function": "_predict_commands_for_active_flow",
-             },
-             LANGFUSE_TAGS: [self.__class__.__name__],
-         }
-
-         response = await self.invoke_llm(prompt, metadata)
+         response = await self.invoke_llm(prompt)
          llm_response = LLMResponse.ensure_llm_response(response)
          actions = None
          if llm_response and llm_response.choices:
@@ -562,20 +546,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
              ".prompt_rendered",
              prompt=prompt,
          )
-         if tracker:
-             session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-         else:
-             session_id = "unknown"
-         metadata = {
-             LANGFUSE_METADATA_SESSION_ID: session_id,
-             LANGFUSE_CUSTOM_METADATA_DICT: {
-                 "component": self.__class__.__name__,
-                 "function": "_predict_commands_for_handling_flows",
-             },
-             LANGFUSE_TAGS: [self.__class__.__name__],
-         }

-         response = await self.invoke_llm(prompt, metadata)
+         response = await self.invoke_llm(prompt)
          llm_response = LLMResponse.ensure_llm_response(response)
          actions = None
          if llm_response and llm_response.choices:
@@ -664,20 +636,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
              flow=newly_started_flow.id,
              prompt=prompt,
          )
-         if tracker:
-             session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-         else:
-             session_id = "unknown"
-         metadata = {
-             LANGFUSE_METADATA_SESSION_ID: session_id,
-             LANGFUSE_CUSTOM_METADATA_DICT: {
-                 "component": self.__class__.__name__,
-                 "function": "_predict_commands_for_newly_started_flow",
-             },
-             LANGFUSE_TAGS: [self.__class__.__name__],
-         }

-         response = await self.invoke_llm(prompt, metadata)
+         response = await self.invoke_llm(prompt)
          llm_response = LLMResponse.ensure_llm_response(response)
          actions = None
          if llm_response and llm_response.choices:
@@ -47,9 +47,6 @@ from rasa.shared.constants import (
      AWS_BEDROCK_PROVIDER,
      AZURE_OPENAI_PROVIDER,
      EMBEDDINGS_CONFIG_KEY,
-     LANGFUSE_CUSTOM_METADATA_DICT,
-     LANGFUSE_METADATA_SESSION_ID,
-     LANGFUSE_TAGS,
      MAX_TOKENS_CONFIG_KEY,
      PROMPT_TEMPLATE_CONFIG_KEY,
      ROUTE_TO_CALM_SLOT,
@@ -369,17 +366,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
              prompt=flow_prompt,
          )

-         if tracker:
-             session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-         else:
-             session_id = "unknown"
-         metadata = {
-             LANGFUSE_METADATA_SESSION_ID: session_id,
-             LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-             LANGFUSE_TAGS: [self.__class__.__name__],
-         }
-
-         response = await self.invoke_llm(flow_prompt, metadata)
+         response = await self.invoke_llm(flow_prompt)
          llm_response = LLMResponse.ensure_llm_response(response)
          # The check for 'None' maintains compatibility with older versions
          # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
rasa/hooks.py CHANGED
@@ -1,20 +1,8 @@
  import argparse
  import logging
- import os
  from typing import TYPE_CHECKING, List, Optional, Text, Union

- import litellm
  import pluggy
- import structlog
-
- from rasa.shared.providers.constants import (
-     LANGFUSE_CALLBACK_NAME,
-     LANGFUSE_HOST_ENV_VAR,
-     LANGFUSE_PROJECT_ID_ENV_VAR,
-     LANGFUSE_PUBLIC_KEY_ENV_VAR,
-     LANGFUSE_SECRET_KEY_ENV_VAR,
-     RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR,
- )

  # IMPORTANT: do not import anything from rasa here - use scoped imports
  # this avoids circular imports, as the hooks are used in different places
@@ -30,7 +18,6 @@ if TYPE_CHECKING:

  hookimpl = pluggy.HookimplMarker("rasa")
  logger = logging.getLogger(__name__)
- structlogger = structlog.get_logger()


  @hookimpl  # type: ignore[misc]
@@ -70,8 +57,6 @@ def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Tex
          config.configure_tracing(tracer_provider)
          config.configure_metrics(endpoints_file)

-         _init_langfuse_integration()
-
          return endpoints_file

@@ -130,43 +115,3 @@ def after_server_stop() -> None:

      if anon_pipeline is not None:
          anon_pipeline.stop()
-
-
- def _is_langfuse_integration_enabled() -> bool:
-     return (
-         os.environ.get(RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR, "false").lower()
-         == "true"
-     )
-
-
- def _init_langfuse_integration() -> None:
-     if not _is_langfuse_integration_enabled():
-         structlogger.info(
-             "hooks._init_langfuse_integration.disabled",
-             event_info="Langfuse integration is disabled.",
-         )
-         return
-
-     if (
-         not os.environ.get(LANGFUSE_HOST_ENV_VAR)
-         or not os.environ.get(LANGFUSE_PROJECT_ID_ENV_VAR)
-         or not os.environ.get(LANGFUSE_PUBLIC_KEY_ENV_VAR)
-         or not os.environ.get(LANGFUSE_SECRET_KEY_ENV_VAR)
-     ):
-         structlogger.warning(
-             "hooks._init_langfuse_integration.missing_langfuse_keys",
-             event_info=(
-                 "Langfuse integration is enabled, but some environment variables "
-                 "are missing. Please set LANGFUSE_HOST, LANGFUSE_PROJECT_ID, "
-                 "LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment "
-                 "variables to use Langfuse integration."
-             ),
-         )
-         return
-
-     litellm.success_callback = [LANGFUSE_CALLBACK_NAME]
-     litellm.failure_callback = [LANGFUSE_CALLBACK_NAME]
-     structlogger.info(
-         "hooks.langfuse_callbacks_initialized",
-         event_info="Langfuse integration initialized.",
-     )
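Together with the changes in `rasa/__init__.py`, the constants modules, and the deleted `rasa/monkey_patches.py` below, this removes the entire opt-in Langfuse integration that 3.12.10.dev1 had wired into LiteLLM. LiteLLM's callback hooks themselves are unchanged, so users who depended on the integration could, in principle, restore it from application code; a hedged sketch mirroring the removed lines (configuring the LANGFUSE_* environment variables remains the caller's responsibility):

    import litellm

    # mirrors the body of the removed _init_langfuse_integration()
    litellm.success_callback = ["langfuse"]
    litellm.failure_callback = ["langfuse"]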
rasa/keys ADDED
@@ -0,0 +1 @@
+ {"segment": "CcvVD1I68Nkkxrv93cIqv1twIwrwG8nz", "sentry": "a283f1fde04347b099c8d729109dd450@o251570"}
@@ -1,9 +1,11 @@
  from __future__ import annotations

  import logging
+ import shutil
  import typing
  from collections import OrderedDict
  from enum import Enum
+ from pathlib import Path
  from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Type

  import numpy as np
@@ -43,6 +45,10 @@ if typing.TYPE_CHECKING:

  CONFIG_FEATURES = "features"

+ TAGGERS_DIR = "taggers"
+ CRFSUITE_MODEL_FILE_NAME = "model.crfsuite"
+ PLAIN_CRF_MODEL_FILE_NAME = "model.txt"
+

  class CRFToken:
      def __init__(
@@ -419,19 +425,11 @@ class CRFEntityExtractor(GraphComponent, EntityExtractorMixin):
          """Loads trained component (see parent class for full docstring)."""
          try:
              with model_storage.read_from(resource) as model_dir:
-                 dataset = rasa.shared.utils.io.read_json_file(
-                     model_dir / "crf_dataset.json"
-                 )
                  crf_order = rasa.shared.utils.io.read_json_file(
                      model_dir / "crf_order.json"
                  )

-                 dataset = [
-                     [CRFToken.create_from_dict(token_data) for token_data in sub_list]
-                     for sub_list in dataset
-                 ]
-
-                 entity_taggers = cls.train_model(dataset, config, crf_order)
+                 entity_taggers = cls._load_taggers(model_dir, config)

                  entity_extractor = cls(config, model_storage, resource, entity_taggers)
                  entity_extractor.crf_order = crf_order
@@ -443,19 +441,71 @@ class CRFEntityExtractor(GraphComponent, EntityExtractorMixin):
              )
              return cls(config, model_storage, resource)

+     @classmethod
+     def _load_taggers(
+         cls, model_dir: Path, config: Dict[Text, Any]
+     ) -> Dict[str, "CRF"]:
+         """
+         Load taggers from model directory that persists trained binary
+         `model.crfsuite` files.
+         """
+         import pycrfsuite
+         import sklearn_crfsuite
+
+         # Get tagger directories
+         taggers_base = model_dir / TAGGERS_DIR
+         if not taggers_base.exists():
+             return {}
+
+         taggers_dirs = [
+             directory for directory in taggers_base.iterdir() if directory.is_dir()
+         ]
+
+         entity_taggers: Dict[str, "CRF"] = {}
+
+         for tagger_dir in taggers_dirs:
+             # Instantiate sklearns CRF wrapper for the pycrfsuite's Tagger
+             entity_tagger = sklearn_crfsuite.CRF(
+                 algorithm="lbfgs",
+                 # coefficient for L1 penalty
+                 c1=config["L1_c"],
+                 # coefficient for L2 penalty
+                 c2=config["L2_c"],
+                 # stop earlier
+                 max_iterations=config["max_iterations"],
+                 # include transitions that are possible, but not observed
+                 all_possible_transitions=True,
+             )
+
+             # Load pycrfsuite tagger from the persisted binary model.crfsuite file
+             entity_tagger._tagger = pycrfsuite.Tagger()
+             entity_tagger._tagger.open(str(tagger_dir / CRFSUITE_MODEL_FILE_NAME))
+
+             entity_taggers[tagger_dir.name] = entity_tagger
+
+         return entity_taggers
+
      def persist(self, dataset: List[List[CRFToken]]) -> None:
          """Persist this model into the passed directory."""
          with self._model_storage.write_to(self._resource) as model_dir:
-             data_to_store = [
-                 [token.to_dict() for token in sub_list] for sub_list in dataset
-             ]
-
-             rasa.shared.utils.io.dump_obj_as_json_to_file(
-                 model_dir / "crf_dataset.json", data_to_store
-             )
              rasa.shared.utils.io.dump_obj_as_json_to_file(
                  model_dir / "crf_order.json", self.crf_order
              )
+             if self.entity_taggers is not None:
+                 for tag_name, entity_tagger in self.entity_taggers.items():
+                     # Create the directories for storing the CRF model
+                     tagger_dir = model_dir / TAGGERS_DIR / tag_name
+                     tagger_dir.mkdir(parents=True, exist_ok=True)
+                     # Create a plain text version of the CRF model
+                     entity_tagger.tagger_.dump(
+                         str(tagger_dir / PLAIN_CRF_MODEL_FILE_NAME)
+                     )
+                     # Persist binary version of the model.crfsuite
+                     shutil.copy2(
+                         src=entity_tagger.modelfile.name,
+                         dst=tagger_dir / CRFSUITE_MODEL_FILE_NAME,
+                     )

      @classmethod
      def _crf_tokens_to_features(
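Instead of re-training the CRF from a persisted JSON dataset on every load, the extractor now writes each trained tagger's binary `model.crfsuite` (plus a human-readable `model.txt`) under `taggers/<tag_name>/` and reopens the binary directly. A persisted model can also be inspected standalone with python-crfsuite; a sketch with an illustrative path and feature dict (the real feature set comes from the extractor's training config):

    import pycrfsuite

    tagger = pycrfsuite.Tagger()
    tagger.open("taggers/entity/model.crfsuite")  # hypothetical tag name
    # items must use the same features the model was trained on
    print(tagger.tag([{"low": "berlin"}]))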
rasa/shared/constants.py CHANGED
@@ -339,8 +339,3 @@ ROLE_SYSTEM = "system"
  # Used for key values in ValidateSlotPatternFlowStackFrame
  REFILL_UTTER = "refill_utter"
  REJECTIONS = "rejections"
-
- LANGFUSE_METADATA_USER_ID = "trace_user_id"
- LANGFUSE_METADATA_SESSION_ID = "session_id"
- LANGFUSE_CUSTOM_METADATA_DICT = "trace_metadata"
- LANGFUSE_TAGS = "tags"
@@ -4,12 +4,3 @@ LITE_LLM_API_KEY_FIELD = "api_key"
  LITE_LLM_API_VERSION_FIELD = "api_version"
  LITE_LLM_MODEL_FIELD = "model"
  LITE_LLM_AZURE_AD_TOKEN = "azure_ad_token"
-
- # Enable or disable Langfuse integration
- RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR = "RASA_LANGFUSE_INTEGRATION_ENABLED"
- # Langfuse configuration
- LANGFUSE_CALLBACK_NAME = "langfuse"
- LANGFUSE_HOST_ENV_VAR = "LANGFUSE_HOST"
- LANGFUSE_PROJECT_ID_ENV_VAR = "LANGFUSE_PROJECT_ID"
- LANGFUSE_PUBLIC_KEY_ENV_VAR = "LANGFUSE_PUBLIC_KEY"
- LANGFUSE_SECRET_KEY_ENV_VAR = "LANGFUSE_SECRET_KEY"
@@ -2,7 +2,7 @@ from __future__ import annotations

  import logging
  from abc import abstractmethod
- from typing import Any, Dict, List, Optional, Union, cast
+ from typing import Any, Dict, List, Union, cast

  import structlog
  from litellm import acompletion, completion, validate_environment
@@ -123,11 +123,7 @@ class _BaseLiteLLMClient:
          raise ProviderClientValidationError(event_info)

      @suppress_logs(log_level=logging.WARNING)
-     def completion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
-     ) -> LLMResponse:
+     def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
          """Synchronously generate completions for given list of messages.

          Args:
@@ -139,7 +135,6 @@ class _BaseLiteLLMClient:
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.

          Returns:
              List of message completions.
@@ -157,9 +152,7 @@ class _BaseLiteLLMClient:

      @suppress_logs(log_level=logging.WARNING)
      async def acompletion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
+         self, messages: Union[List[dict], List[str], str]
      ) -> LLMResponse:
          """Asynchronously generate completions for given list of messages.

@@ -172,7 +165,6 @@ class _BaseLiteLLMClient:
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.

          Returns:
              List of message completions.
@@ -183,9 +175,7 @@ class _BaseLiteLLMClient:
          try:
              formatted_messages = self._get_formatted_messages(messages)
              arguments = resolve_environment_variables(self._completion_fn_args)
-             response = await acompletion(
-                 messages=formatted_messages, metadata=metadata, **arguments
-             )
+             response = await acompletion(messages=formatted_messages, **arguments)
              return self._format_response(response)
          except Exception as e:
              message = ""
@@ -1,7 +1,7 @@
  from __future__ import annotations

  import logging
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Union

  import structlog

@@ -122,12 +122,9 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
              raise ProviderClientAPIException(e)

      @suppress_logs(log_level=logging.WARNING)
-     def completion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
-     ) -> LLMResponse:
-         """Synchronously generate completions for given list of messages.
+     def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
+         """
+         Synchronously generate completions for given list of messages.

          Method overrides the base class method to call the appropriate
          completion method based on the configuration. If the chat completions
@@ -143,11 +140,8 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.
-
          Returns:
              List of message completions.
-
          Raises:
              ProviderClientAPIException: If the API request fails.
          """
@@ -164,11 +158,10 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):

      @suppress_logs(log_level=logging.WARNING)
      async def acompletion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
+         self, messages: Union[List[dict], List[str], str]
      ) -> LLMResponse:
-         """Asynchronously generate completions for given list of messages.
+         """
+         Asynchronously generate completions for given list of messages.

          Method overrides the base class method to call the appropriate
          completion method based on the configuration. If the chat completions
@@ -184,11 +177,8 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.
-
          Returns:
              List of message completions.
-
          Raises:
              ProviderClientAPIException: If the API request fails.
          """
@@ -1,19 +1,21 @@
  from __future__ import annotations

- from typing import Any, Dict, List, Optional, Protocol, Union, runtime_checkable
+ from typing import Dict, List, Protocol, Union, runtime_checkable

  from rasa.shared.providers.llm.llm_response import LLMResponse


  @runtime_checkable
  class LLMClient(Protocol):
-     """Protocol for an LLM client that specifies the interface for interacting
+     """
+     Protocol for an LLM client that specifies the interface for interacting
      with the API.
      """

      @classmethod
      def from_config(cls, config: dict) -> LLMClient:
-         """Initializes the llm client with the given configuration.
+         """
+         Initializes the llm client with the given configuration.

          This class method should be implemented to parse the given
          configuration and create an instance of an llm client.
@@ -22,24 +24,17 @@ class LLMClient(Protocol):

      @property
      def config(self) -> Dict:
-         """Returns the configuration for that the llm client is initialized with.
+         """
+         Returns the configuration for that the llm client is initialized with.

          This property should be implemented to return a dictionary containing
          the configuration settings for the llm client.
          """
          ...

-     def completion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
-     ) -> LLMResponse:
-         """Synchronously generate completions for given list of messages.
-     def completion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
-     ) -> LLMResponse:
+     def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
+         """
+         Synchronously generate completions for given list of messages.

          This method should be implemented to take a list of messages (as
          strings) and return a list of completions (as strings).
@@ -53,19 +48,16 @@ class LLMClient(Protocol):
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.
-
          Returns:
              LLMResponse
          """
          ...

      async def acompletion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
+         self, messages: Union[List[dict], List[str], str]
      ) -> LLMResponse:
-         """Asynchronously generate completions for given list of messages.
+         """
+         Asynchronously generate completions for given list of messages.

          This method should be implemented to take a list of messages (as
          strings) and return a list of completions (as strings).
@@ -79,15 +71,14 @@ class LLMClient(Protocol):
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.
-
          Returns:
              LLMResponse
          """
          ...

      def validate_client_setup(self, *args, **kwargs) -> None:  # type: ignore
-         """Perform client setup validation.
+         """
+         Perform client setup validation.

          This method should be implemented to validate whether the client can be
          used with the parameters provided through configuration or environment
@@ -237,9 +237,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
              raise ProviderClientAPIException(e)

      async def acompletion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
+         self, messages: Union[List[dict], List[str], str]
      ) -> LLMResponse:
          """Asynchronous completion of the model with the given messages.

@@ -257,7 +255,6 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
              - a list of messages. Each message is a string and will be formatted
                as a user message.
              - a single message as a string which will be formatted as user message.
-             metadata: Optional metadata to be passed to the LLM call.

          Returns:
              The completion response.
@@ -266,11 +263,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
              return await super().acompletion(messages)
          return await self._atext_completion(messages)

-     def completion(
-         self,
-         messages: Union[List[dict], List[str], str],
-         metadata: Optional[Dict[str, Any]] = None,
-     ) -> LLMResponse:
+     def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
          """Completion of the model with the given messages.

          Method overrides the base class method to call the appropriate
@@ -280,7 +273,6 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):

          Args:
              messages: The messages to be used for completion.
-             metadata: Optional metadata to be passed to the LLM call.

          Returns:
              The completion response.
@@ -372,7 +372,6 @@ def extract_llm_config(
  def extract_attrs_for_llm_based_command_generator(
      self: "LLMBasedCommandGenerator",
      prompt: str,
-     metadata: Optional[Dict[str, Any]] = None,
  ) -> Dict[str, Any]:
      from rasa.dialogue_understanding.generator.flow_retrieval import (
          DEFAULT_EMBEDDINGS_CONFIG,
@@ -388,7 +387,8 @@ def extract_attrs_for_llm_based_command_generator(


  def extract_attrs_for_contextual_response_rephraser(
-     self: Any, prompt: str
+     self: Any,
+     prompt: str,
  ) -> Dict[str, Any]:
      from rasa.core.nlg.contextual_response_rephraser import DEFAULT_LLM_CONFIG

rasa/version.py CHANGED
@@ -1,3 +1,3 @@
  # this file will automatically be changed,
  # do not add anything but the version number here!
- __version__ = "3.12.10.dev1"
+ __version__ = "3.12.11"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: rasa-pro
- Version: 3.12.10.dev1
+ Version: 3.12.11
  Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
  Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
  Author: Rasa Technologies GmbH
@@ -63,7 +63,6 @@ Requires-Dist: keras (==2.14.0)
  Requires-Dist: langchain (>=0.2.17,<0.3.0)
  Requires-Dist: langchain-community (>=0.2.19,<0.3.0)
  Requires-Dist: langcodes (>=3.5.0,<4.0.0)
- Requires-Dist: langfuse (>=2.60.2,<2.61.0)
  Requires-Dist: litellm (>=1.68.0,<1.69.0)
  Requires-Dist: matplotlib (>=3.7,<3.8)
  Requires-Dist: mattermostwrapper (>=2.2,<2.3)
@@ -1,4 +1,4 @@
- rasa/__init__.py,sha256=1LPdnp38vsouYw0bt_C0Q0mfLeDKAUaeiNdqMZaihCg,495
+ rasa/__init__.py,sha256=YXG8RzVxiSJ__v-AewtV453YoCbmzWlHsU_4S0O2XpE,206
  rasa/__main__.py,sha256=OmUXcaA9l7KR_eSYCwaCSetuczxjrcN2taNnZ2ZUTbA,6472
  rasa/anonymization/__init__.py,sha256=Z-ZUW2ofZGfI6ysjYIS7U0JL4JSzDNOkHiiXK488Zik,86
  rasa/anonymization/anonymisation_rule_yaml_reader.py,sha256=8u8ZWfbpJuyUagrfth3IGfQXVlVz31esqExfDdasxZM,3171
@@ -92,7 +92,7 @@ rasa/cli/x.py,sha256=C7dLtYXAkD-uj7hNj7Pz5YbOupp2yRcMjQbsEVqXUJ8,6825
  rasa/constants.py,sha256=m6If7alC5obaHU-JQWXEBo4mooVwIMzNRTjyTzzZSVg,1306
  rasa/core/__init__.py,sha256=wTSmsFlgK0Ylvuyq20q9APwpT5xyVJYZfzhs4rrkciM,456
  rasa/core/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/core/actions/action.py,sha256=2mDvSi1pSWipDWhprEFjDXf-X9yoID9DQEvmf0rQcJM,42664
+ rasa/core/actions/action.py,sha256=_QfY3ngSF2sf2Y3QDPJo7Nd6F_FA6_zDWgw1OQSLkEk,42676
  rasa/core/actions/action_clean_stack.py,sha256=xUP-2ipPsPAnAiwP17c-ezmHPSrV4JSUZr-eSgPQwIs,2279
  rasa/core/actions/action_exceptions.py,sha256=hghzXYN6VeHC-O_O7WiPesCNV86ZTkHgG90ZnQcbai8,724
  rasa/core/actions/action_hangup.py,sha256=o5iklHG-F9IcRgWis5C6AumVXznxzAV3o9zdduhozEM,994
@@ -268,7 +268,7 @@ rasa/core/channels/telegram.py,sha256=TKVknsk3U9tYeY1a8bzlhqkltWmZfGSOvrcmwa9qoz
  rasa/core/channels/twilio.py,sha256=2BTQpyx0b0yPpc0A2BHYfxLPgodrLGLs8nq6i3lVGAM,5906
  rasa/core/channels/vier_cvg.py,sha256=GkrWKu7NRMFtLMyYp-kQ2taWAc_keAwhYrkVPW56iaU,13544
  rasa/core/channels/voice_ready/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/core/channels/voice_ready/audiocodes.py,sha256=luO0e-azKlkwnWZ9bQWBF2DlkNHvEAkIQTb8HuouqGQ,21130
+ rasa/core/channels/voice_ready/audiocodes.py,sha256=eUUL9awt4P49LA5WC2hbsMZsi_qYHd-S3HL1Kpyj2ew,22353
  rasa/core/channels/voice_ready/jambonz.py,sha256=bU2yrO6Gw_JcmFXeFVc8f1DK3ZDDYLQVjBB8SM2JjWc,4783
  rasa/core/channels/voice_ready/jambonz_protocol.py,sha256=E9iwvitSDpVkL7BxbckczF4b0a8lWZt-3zR4Innflow,13116
  rasa/core/channels/voice_ready/twilio_voice.py,sha256=z2pdausxQnXQP9htGh8AL2q9AvcMIx70Y5tErWpssV4,16224
@@ -335,7 +335,7 @@ rasa/core/policies/flows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  rasa/core/policies/flows/flow_exceptions.py,sha256=_FQuN-cerQDM1pivce9bz4zylh5UYkljvYS1gjDukHI,1527
  rasa/core/policies/flows/flow_executor.py,sha256=sT7ZFrm_CKVKBv5SO0M_QE984ZFw8t6trm8dMxCXbv8,25649
  rasa/core/policies/flows/flow_step_result.py,sha256=agjPrD6lahGSe2ViO5peBeoMdI9ngVGRSgtytgxmJmg,1360
- rasa/core/policies/intentless_policy.py,sha256=U4CvnY7T0Gj62_fKXDnaoT8gN8tNt7AtcVaje8EeBwg,36339
+ rasa/core/policies/intentless_policy.py,sha256=TPpnBY5r9ajtDuAhHJtl5uojC6auDr7sd_Nb8tXPIFE,36314
  rasa/core/policies/intentless_prompt_template.jinja2,sha256=KhIL3cruMmkxhrs5oVbqgSvK6ZiN_6TQ_jXrgtEB-ZY,677
  rasa/core/policies/memoization.py,sha256=CX2d3yP7FehSMW92Wi9NYLZei7tBzoT3T6yybu-Nb5s,19377
  rasa/core/policies/policy.py,sha256=5SUnPajSTSf8PzB1-jFbQPtsvR-zLN-xkjeotWOxuJc,27432
@@ -365,7 +365,7 @@ rasa/dialogue_understanding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
  rasa/dialogue_understanding/coexistence/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/dialogue_understanding/coexistence/constants.py,sha256=RpgLKMG4s7AgII0fRV0siS0Zh2QVI0OVRunhgm4q_j4,94
  rasa/dialogue_understanding/coexistence/intent_based_router.py,sha256=JlYBZdScnhflLK__i4bG0-PIkuFv0B7L4yOdnLgYWAY,7609
- rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=0kQ9vrKCCbBhKA13Hk570xJUH_oij3HOsYgQbpvNKOA,11751
+ rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=6OSdje9ZMJuJ7eoPHAuVIQGaVx0qtLg6YdpfUAuGbj0,11752
  rasa/dialogue_understanding/coexistence/router_template.jinja2,sha256=CHWFreN0sv1EbPh-hf5AlCt3zxy2_llX1Pdn9Q11Y18,357
  rasa/dialogue_understanding/commands/__init__.py,sha256=F-pLETYRUjhIkjjDfXGUuPsK_ac1HcLmJkrUUP0RhME,2259
  rasa/dialogue_understanding/commands/can_not_handle_command.py,sha256=fKOj9ScLxuaFO9Iw0p7og_4zMiw2weBdx322rBKlnCI,3519
@@ -400,19 +400,19 @@ rasa/dialogue_understanding/generator/command_parser.py,sha256=wf6FSgqBw5F0legg0
  rasa/dialogue_understanding/generator/constants.py,sha256=PuUckBGUZ-Tu31B0cs8yxN99BDW3PGoExZa-BlIL5v8,1108
  rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
  rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=DavL-37e0tksMWkxvFImoqlsmYeYeSdDN3u7wZI0K-8,17817
- rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=cUsP_3Z5k65r-4iCCJY7I1yuFKkEg1nV1e_Xg6ULBnc,24058
- rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=E5byrCC_6r_GJm_HIosN_Se00NmXmnTCdOzaHMwTu6A,2641
+ rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=uzzGufs2oDBXqz4LKz2429Hr3GvkMIKbsmBrgvEG4TA,23587
+ rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=z7jhIJ3W_5GFH-p15kVoWbigMIoY8fIJjc_j_uX7yxw,2581
  rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
  rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=0rpQonIcwSjcUWCLjJ5DKf1Z5XBJiDoJ6cC7Rj6NtAM,34088
+ rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=LopAxEaY1PRNf28k_2tO1DTnPWVfh7S1qXJo6sSbPyw,32539
  rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=cisxLlPVQXgbWMAz9xSxBvrOz4HO-f0G3CFVjJ2wt-g,10876
  rasa/dialogue_understanding/generator/prompt_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
  rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2,sha256=z-cnFVfIE_kEnY1o52YE2CdCWwgYTv7R3xVxsjXWlnw,3808
  rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2,sha256=4076ARsy0E0iADBX6li19IoM3F4F-2wK3bL6UEOvCdo,3620
  rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=e4daPCfEoyUvwooHmhFPyP-rLpdh9QDGqEccMnPXN-0,22867
+ rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=P732pdylTR_EM7xZIXbSWZuQY2lOQZ7EKkpMDhpHrps,22391
  rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=RWTPdeBfdGUmdFSUzdQejcbJJLhc_815G0g6AabTK04,5100
  rasa/dialogue_understanding/generator/utils.py,sha256=jxtb-AfngN59y2rHynqJDK80xM_yooEvr3aW1MWl6H0,2760
  rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -530,8 +530,9 @@ rasa/graph_components/providers/training_tracker_provider.py,sha256=FaCWHJA69EpM
  rasa/graph_components/validators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/graph_components/validators/default_recipe_validator.py,sha256=iOVoB7zVTKes8EYW110fz8ZvtgoDcCX25GlUsiESS18,24457
  rasa/graph_components/validators/finetuning_validator.py,sha256=VfCGytnweijKBG8bAqYp7zKZB2aRgi2ZI8R0eou5Ev4,12865
- rasa/hooks.py,sha256=xQLqqPpebL04AuKZiYJEZaBJyubTdGetCW7cvmjXg7o,5804
+ rasa/hooks.py,sha256=5ZMrqNz323w56MMY6E8jeZ_YXgRqq8p-yi18S2XOmbo,4061
  rasa/jupyter.py,sha256=TCYVD4QPQIMmfA6ZwDUBOBTAECwCwbU2XOkosodLO9k,1782
+ rasa/keys,sha256=2Stg1fstgJ203cOoW1B2gGMY29fhEnjIfTVxKv_fqPo,101
  rasa/llm_fine_tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/llm_fine_tuning/annotation_module.py,sha256=6wBBjGwONVlikp79xAHp5g3rydEhPM6kP1bw1g-maYk,8578
  rasa/llm_fine_tuning/conversations.py,sha256=QZVaUsfXe5iIE830Bv-_3oo8luhGfHpirvubxzOoEvA,4116
@@ -563,7 +564,6 @@ rasa/model_manager/warm_rasa_process.py,sha256=2vg8gBEUvPrr6C5W-fxtWWSajksrOaT83
  rasa/model_service.py,sha256=XXCaiLj2xq58n05W3R1jmTIv-V8f_7PG30kVpRxf71Y,3727
  rasa/model_testing.py,sha256=eZw7l8Zz3HkH_ZPBurY93HzzudHdoQn8HBnDdZSysAY,14929
  rasa/model_training.py,sha256=1opig8_npw7dLHd8k06ZYUQCrJ61sFIbNHBgvF63yH8,21733
- rasa/monkey_patches.py,sha256=pZTDKQ8GNzeiUWeJ2MneUuremSNVScL7oXeMAEd4o4Y,3687
  rasa/nlu/__init__.py,sha256=D0IYuTK_ZQ_F_9xsy0bXxVCAtU62Fzvp8S7J9tmfI_c,123
  rasa/nlu/classifiers/__init__.py,sha256=Qvrf7_rfiMxm2Vt2fClb56R3QFExf7WPdFdL-AOvgsk,118
  rasa/nlu/classifiers/classifier.py,sha256=9fm1mORuFf1vowYIXmqE9yLRKdSC4nGQW7UqNZQipKY,133
@@ -583,7 +583,7 @@ rasa/nlu/emulators/luis.py,sha256=BaBluhzyHbELy2ji9zMIMYNbGklQEJqwGu-Z1zaRFS8,30
  rasa/nlu/emulators/no_emulator.py,sha256=x8VoSxRm4aec8Fk797ipy78KxyBcBjnQ08xesLhG0nI,335
  rasa/nlu/emulators/wit.py,sha256=6ltOc8S__UzesWPGI6-igxnkUa6ZKoNLhpZU7o2g4R4,1930
  rasa/nlu/extractors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/nlu/extractors/crf_entity_extractor.py,sha256=UT3lfVOLKWDSki5hGrrDxHq-zNjb6oquIgGQq0PoGsQ,27532
+ rasa/nlu/extractors/crf_entity_extractor.py,sha256=Cv2ObNSyGdi-f2tCY0tu28gY12bf5x9mi5d1ncg1m6o,29400
  rasa/nlu/extractors/duckling_entity_extractor.py,sha256=vDW9VgQRgIw8Lc3nYoNJFR58IwSeB0Mt_K1Tz8lg7XQ,7687
  rasa/nlu/extractors/entity_synonyms.py,sha256=9Ums__jzagpkpQbCj5MKr44tkaicaA2zj_4KFe2FgME,7149
  rasa/nlu/extractors/extractor.py,sha256=WDw2a8fSmsHeYflNpZwYwQ2o-ffwUIkWY2B7gSlfuAY,17539
@@ -625,7 +625,7 @@ rasa/nlu/utils/spacy_utils.py,sha256=5EnHR-MVAZhGbg2rq8VpOu7I0tagV3ThRTlM0-WO2Cg
  rasa/plugin.py,sha256=cSmFhSWr5WQyYXdJOWwgH4ra_2kbhoNLZAtnqcsGny4,3071
  rasa/server.py,sha256=eomGM_3SpBxaF_-VfZbkSO_bMk_vI1XLUZjt32f4gcI,59390
  rasa/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/constants.py,sha256=GvkQKt1CPxbdoZs2bFkgNo8GA5xKc6EDW9zZjspcr_0,12290
+ rasa/shared/constants.py,sha256=PBpmxNQM29MoLp1pY7RGQ1I1hPt3N0_r2l_y5KguEnQ,12129
  rasa/shared/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/core/command_payload_reader.py,sha256=puHYsp9xbX0YQm2L1NDBItOFmdzI7AzmfGefgcHiCc0,3871
  rasa/shared/core/constants.py,sha256=gwIZHjQYafHnBlMe9_jUiIPm17hxYG9R1MOCtxeC1Ns,6337
@@ -724,7 +724,7 @@ rasa/shared/providers/_configs/self_hosted_llm_client_config.py,sha256=l2JnypPXF
  rasa/shared/providers/_configs/utils.py,sha256=u2Ram05YwQ7-frm_r8n9rafjZoF8i0qSC7XjYQRuPgo,3732
  rasa/shared/providers/_ssl_verification_utils.py,sha256=vUnP0vocf0GQ0wG8IQpPcCet4c1C9-wQWQNckNWbDBk,4165
  rasa/shared/providers/_utils.py,sha256=EZIrz3ugcI-9PWgC7v0VMUNYondAAOeeRLIE8ZmResw,5886
- rasa/shared/providers/constants.py,sha256=yF9giGO8xWCrW9dzUW-7wX-y6sh7hlbYzHYKFayrF7A,613
+ rasa/shared/providers/constants.py,sha256=hgV8yNGxIbID_2h65OoSfSjIE4UkazrsqRg4SdkPAmI,234
  rasa/shared/providers/embedding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/providers/embedding/_base_litellm_embedding_client.py,sha256=PFavNnD6EVDQiqc9sLnBRV0hebW4iCjIh_dvpwzg4RI,8796
  rasa/shared/providers/embedding/_langchain_embedding_client_adapter.py,sha256=IR2Rb3ReJ9C9sxOoOGRXgtz8STWdMREs_4AeSMKFjl4,2135
@@ -736,15 +736,15 @@ rasa/shared/providers/embedding/huggingface_local_embedding_client.py,sha256=Zo3
  rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=eafDk6IgQtL_kiKgpa6sJs1oATyRi2NT2leUFQsED2s,4551
  rasa/shared/providers/embedding/openai_embedding_client.py,sha256=XNRGE7apo2v3kWRrtgxE-Gq4rvNko3IiXtvgC4krDYE,5429
  rasa/shared/providers/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/providers/llm/_base_litellm_client.py,sha256=DeNRMMf1XOK0yNrdpWt5dOfmtCFKJkU7keEsh0KAc0k,11748
+ rasa/shared/providers/llm/_base_litellm_client.py,sha256=uhVNIQQx8DXfA_baxavCgjvcF31bTjS_JcxHYRNneIM,11415
  rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=ui85vothxR2P_-eLc4nLgbpjnpEKY2BXnIjLxBZoYz8,12504
  rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=xx-o-NX_mtx6AszK--ZRj8n8JyEJuVu1-42dt8AynBM,4083
- rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=kF8yqwxBNjcIYz022yv0gP5RqnJzx6bfG-hcpK5ovKE,8217
- rasa/shared/providers/llm/llm_client.py,sha256=11xgWbjV8brvQN-EZPjZHNofImY8JKlRmrbOD7UaL-o,3651
+ rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=_6vAdPLAVSI_sBJLaXLnE87M-0ip_klfQ78fQ_pyoyI,7947
+ rasa/shared/providers/llm/llm_client.py,sha256=-hTCRsL-A3GCMRHtcyCgcCyra-9OJ8GUC-mURoRXH0k,3242
  rasa/shared/providers/llm/llm_response.py,sha256=8mOpZdmh4-3yM7aOmNO0yEYUmRDErfoP7ZDMUuHr2Cc,3504
  rasa/shared/providers/llm/openai_llm_client.py,sha256=rSdLj29Hl1Wm5G6Uwo77j4WqogK_3QIbTA7fyt63YAg,5013
  rasa/shared/providers/llm/rasa_llm_client.py,sha256=44Tvtnkq4mxDIxtdrGUkwBWAvX1OLaswqmpAsyBH8e8,3504
- rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=85jnA7AO2W4OqV0874N5YBzTafVeYtiRbaRyzyA_lKA,10544
+ rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=X3QyA5nZbQap0tomg0dQozbY39Ry0y-lLnj-EowK6dI,10270
  rasa/shared/providers/mappings.py,sha256=QSD3XWvhYCtBLNpGycN30vEnLULYIaqCsAtmfPfSZ3U,3674
  rasa/shared/providers/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/providers/router/_base_litellm_router_client.py,sha256=JV9lYnhIG_CWMtPB5nofjNdRO5V-Wl0DH-HyPm__eJ0,11003
@@ -781,7 +781,7 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/tracing/config.py,sha256=32X2rqAiHe0e-Iijb5AivjqDs2j03n8xx5mo07NBMI4,12964
  rasa/tracing/constants.py,sha256=-3vlfI9v_D8f-KB5tuiqBHhszu2WofFQOyjKBn28gyg,2889
  rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/tracing/instrumentation/attribute_extractors.py,sha256=-w80ZDIF85aEb2OkYqZ75VssbfCWfC7Yq78i-cuc0TU,29513
+ rasa/tracing/instrumentation/attribute_extractors.py,sha256=hkdnqIn8PkD1ykxGbPMv-TPHbhtLgOoMQGmwcvfhi2c,29471
  rasa/tracing/instrumentation/instrumentation.py,sha256=BPI5OoZFbl90kVJzlKEz-eD8cf-CaX_x1t4V9XBhDKo,53625
  rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=RgixI0FVIzBz19E3onidUpSEwjkAh8paA5_w07PMzFo,4821
  rasa/tracing/instrumentation/metrics.py,sha256=DI_qIS6sz5KYU4QDcPKfnHxKLL_Ma3wV6diH4_vg85c,12051
@@ -822,9 +822,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
  rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
  rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
  rasa/validator.py,sha256=524VlFTYK0B3iXYveVD6BDC3K0j1QfpzJ9O-TAWczmc,83166
- rasa/version.py,sha256=oJfX2ngswlvP2txRFGmPvTAGUXX4Mbt79LWBavzg04A,123
- rasa_pro-3.12.10.dev1.dist-info/METADATA,sha256=YgdLklR9kPyrTMfBShv8heOtWGO8u_yMCIuTV6bv8xk,10664
- rasa_pro-3.12.10.dev1.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
- rasa_pro-3.12.10.dev1.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
- rasa_pro-3.12.10.dev1.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
- rasa_pro-3.12.10.dev1.dist-info/RECORD,,
+ rasa/version.py,sha256=XvXtUVw5FDQnXtExrv4wXMnPazsNF9kg9UQ_UNtd3wQ,118
+ rasa_pro-3.12.11.dist-info/METADATA,sha256=RhI4sfwSLwWa59piZoOXxn9CJg1xHBD1T-FJ2jk0yJE,10616
+ rasa_pro-3.12.11.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+ rasa_pro-3.12.11.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+ rasa_pro-3.12.11.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+ rasa_pro-3.12.11.dist-info/RECORD,,
rasa/monkey_patches.py DELETED
@@ -1,91 +0,0 @@
- import os
- import traceback
- from typing import Any, Optional
-
- from litellm.secret_managers.main import str_to_bool
- from packaging.version import Version
-
-
- def litellm_langfuse_logger_init_fixed(
-     self: Any,  # we should not import LangfuseLogger class before we patch it
-     langfuse_public_key: Optional[str] = None,
-     langfuse_secret: Optional[str] = None,
-     langfuse_host: str = "https://cloud.langfuse.com",
-     flush_interval: int = 1,
- ) -> None:
-     """Monkeypatched version of LangfuseLogger.__init__ from the LiteLLM library.
-
-     This patched version removes a call that fetched the `project_id` from
-     Langfuse Cloud even when it was already set via environment variables.
-     In the original implementation, this call was made *before* initializing
-     the LangfuseClient, which caused the application to freeze for up to 60 seconds.
-
-     By removing this premature call, the monkeypatch avoids the unnecessary network
-     request and prevents the timeout/freeze issue.
-
-     This workaround can be removed once the underlying bug is resolved in LiteLLM:
-     https://github.com/BerriAI/litellm/issues/7732
-     """
-     try:
-         import langfuse
-         from langfuse import Langfuse
-     except Exception as e:
-         raise Exception(
-             f"\033[91mLangfuse not installed, try running 'pip install langfuse' "
-             f"to fix this error: {e}\n{traceback.format_exc()}\033[0m"
-         )
-     # Instance variables
-     self.secret_key = langfuse_secret or os.getenv("LANGFUSE_SECRET_KEY", "")
-     self.public_key = langfuse_public_key or os.getenv("LANGFUSE_PUBLIC_KEY", "")
-
-     self.langfuse_host = langfuse_host or os.getenv(
-         "LANGFUSE_HOST", "https://cloud.langfuse.com"
-     )
-     self.langfuse_host.replace("http://", "https://")
-     if not self.langfuse_host.startswith("https://"):
-         self.langfuse_host = "https://" + self.langfuse_host
-
-     self.langfuse_release = os.getenv("LANGFUSE_RELEASE")
-     self.langfuse_debug = os.getenv("LANGFUSE_DEBUG")
-     self.langfuse_flush_interval = (
-         os.getenv("LANGFUSE_FLUSH_INTERVAL") or flush_interval
-     )
-
-     parameters = {
-         "public_key": self.public_key,
-         "secret_key": self.secret_key,
-         "host": self.langfuse_host,
-         "release": self.langfuse_release,
-         "debug": self.langfuse_debug,
-         "flush_interval": self.langfuse_flush_interval,  # flush interval in seconds
-     }
-
-     if Version(langfuse.version.__version__) >= Version("2.6.0"):
-         parameters["sdk_integration"] = "litellm"
-
-     self.Langfuse = Langfuse(**parameters)
-
-     if os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY") is not None:
-         upstream_langfuse_debug = (
-             str_to_bool(self.upstream_langfuse_debug)
-             if self.upstream_langfuse_debug is not None
-             else None
-         )
-         self.upstream_langfuse_secret_key = os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY")
-         self.upstream_langfuse_public_key = os.getenv("UPSTREAM_LANGFUSE_PUBLIC_KEY")
-         self.upstream_langfuse_host = os.getenv("UPSTREAM_LANGFUSE_HOST")
-         self.upstream_langfuse_release = os.getenv("UPSTREAM_LANGFUSE_RELEASE")
-         self.upstream_langfuse_debug = os.getenv("UPSTREAM_LANGFUSE_DEBUG")
-         self.upstream_langfuse = Langfuse(
-             public_key=self.upstream_langfuse_public_key,
-             secret_key=self.upstream_langfuse_secret_key,
-             host=self.upstream_langfuse_host,
-             release=self.upstream_langfuse_release,
-             debug=(
-                 upstream_langfuse_debug
-                 if upstream_langfuse_debug is not None
-                 else False
-             ),
-         )
-     else:
-         self.upstream_langfuse = None