rasa-pro 3.12.6.dev1__py3-none-any.whl → 3.12.6.dev2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of rasa-pro might be problematic.
- rasa/__init__.py +6 -0
- rasa/core/nlg/contextual_response_rephraser.py +21 -4
- rasa/core/nlg/summarize.py +15 -1
- rasa/core/policies/enterprise_search_policy.py +16 -3
- rasa/core/policies/intentless_policy.py +17 -4
- rasa/core/policies/policy.py +2 -0
- rasa/dialogue_understanding/coexistence/llm_based_router.py +18 -4
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +8 -2
- rasa/dialogue_understanding/generator/llm_command_generator.py +3 -1
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +12 -1
- rasa/hooks.py +55 -0
- rasa/monkey_patches.py +91 -0
- rasa/shared/constants.py +5 -0
- rasa/shared/core/domain.py +3 -12
- rasa/shared/providers/constants.py +9 -0
- rasa/shared/providers/llm/_base_litellm_client.py +14 -4
- rasa/shared/providers/llm/litellm_router_llm_client.py +17 -7
- rasa/shared/providers/llm/llm_client.py +24 -15
- rasa/shared/providers/llm/self_hosted_llm_client.py +10 -2
- rasa/shared/utils/health_check/health_check.py +7 -1
- rasa/tracing/instrumentation/attribute_extractors.py +4 -4
- rasa/tracing/instrumentation/intentless_policy_instrumentation.py +2 -1
- rasa/utils/licensing.py +15 -0
- rasa/version.py +1 -2
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/METADATA +4 -1
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/RECORD +29 -28
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/entry_points.txt +0 -0
rasa/__init__.py
CHANGED
```diff
@@ -5,5 +5,11 @@ from rasa import version
 # define the version before the other imports since these need it
 __version__ = version.__version__
 
+from litellm.integrations.langfuse.langfuse import LangFuseLogger
+
+from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
+
+# Monkey-patch the init method as early as possible before the class is used
+LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore
 
 logging.getLogger(__name__).addHandler(logging.NullHandler())
```
rasa/core/nlg/contextual_response_rephraser.py
CHANGED
```diff
@@ -7,6 +7,10 @@ from rasa import telemetry
 from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator
 from rasa.core.nlg.summarize import summarize_conversation
 from rasa.shared.constants import (
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_METADATA_USER_ID,
+    LANGFUSE_TAGS,
     LLM_CONFIG_KEY,
     MODEL_CONFIG_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
@@ -39,6 +43,7 @@ from rasa.shared.utils.llm import (
     tracker_as_readable_transcript,
 )
 from rasa.utils.endpoints import EndpointConfig
+from rasa.utils.licensing import get_human_readable_licence_owner
 from rasa.utils.log_utils import log_llm
 
 structlogger = structlog.get_logger()
@@ -130,6 +135,7 @@ class ContextualResponseRephraser(
             "contextual_response_rephraser.init",
             ContextualResponseRephraser.__name__,
         )
+        self.user_id = get_human_readable_licence_owner()
 
     @classmethod
     def _add_prompt_and_llm_metadata_to_response(
@@ -199,7 +205,9 @@ class ContextualResponseRephraser(
         return None
 
     @measure_llm_latency
-    async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
+    async def _generate_llm_response(
+        self, prompt: str, sender_id: str
+    ) -> Optional[LLMResponse]:
         """Use LLM to generate a response.
 
         Returns an LLMResponse object containing both the generated text
@@ -207,14 +215,21 @@ class ContextualResponseRephraser(
 
         Args:
             prompt: The prompt to send to the LLM.
+            sender_id: sender_id from the tracker.
 
         Returns:
             An LLMResponse object if successful, otherwise None.
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)
+        metadata = {
+            LANGFUSE_METADATA_USER_ID: self.user_id,
+            LANGFUSE_METADATA_SESSION_ID: sender_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
 
         try:
-            return await llm.acompletion(prompt)
+            return await llm.acompletion(prompt, metadata)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -258,7 +273,9 @@ class ContextualResponseRephraser(
             The history for the prompt.
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)
-        return await summarize_conversation(tracker, llm, max_turns=5)
+        return await summarize_conversation(
+            tracker, llm, max_turns=5, user_id=self.user_id, sender_id=tracker.sender_id
+        )
 
     async def rephrase(
         self,
@@ -315,7 +332,7 @@ class ContextualResponseRephraser(
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
             llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )
-        llm_response = await self._generate_llm_response(prompt)
+        llm_response = await self._generate_llm_response(prompt, tracker.sender_id)
        llm_response = LLMResponse.ensure_llm_response(llm_response)
 
         response = self._add_prompt_and_llm_metadata_to_response(
```
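Taken together, the pattern in this file is: build a metadata dict keyed by the new Langfuse constants, then pass it as the second argument to `acompletion`, which forwards it to LiteLLM so the registered Langfuse callback can attach it to the trace. A minimal, self-contained sketch of that flow, where `FakeLLMClient` is a hypothetical stand-in rather than one of Rasa's real clients:

```python
import asyncio
from typing import Any, Dict, List, Optional, Union

# Values mirror the new constants added to rasa/shared/constants.py below.
LANGFUSE_METADATA_USER_ID = "trace_user_id"
LANGFUSE_METADATA_SESSION_ID = "session_id"
LANGFUSE_CUSTOM_METADATA_DICT = "trace_metadata"
LANGFUSE_TAGS = "tags"


class FakeLLMClient:
    """Hypothetical stand-in for an LLMClient implementation."""

    async def acompletion(
        self,
        messages: Union[List[dict], List[str], str],
        metadata: Optional[Dict[str, Any]] = None,
    ) -> str:
        # A real client forwards `metadata` to litellm.acompletion(...), where
        # the registered Langfuse callback picks it up and tags the trace.
        return f"completion for {messages!r} tagged with {metadata!r}"


async def main() -> None:
    metadata = {
        LANGFUSE_METADATA_USER_ID: "acme-gmbh_dev@acme.test_123",  # licence owner
        LANGFUSE_METADATA_SESSION_ID: "conversation-42",  # tracker.sender_id
        LANGFUSE_CUSTOM_METADATA_DICT: {"component": "ContextualResponseRephraser"},
        LANGFUSE_TAGS: ["ContextualResponseRephraser"],
    }
    print(await FakeLLMClient().acompletion("Rephrase this reply.", metadata))


asyncio.run(main())
```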
rasa/core/nlg/summarize.py
CHANGED
```diff
@@ -4,6 +4,12 @@ import structlog
 from jinja2 import Template
 
 from rasa.core.tracker_store import DialogueStateTracker
+from rasa.shared.constants import (
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_METADATA_USER_ID,
+    LANGFUSE_TAGS,
+)
 from rasa.shared.providers.llm.llm_client import LLMClient
 from rasa.shared.utils.llm import (
     tracker_as_readable_transcript,
@@ -46,6 +52,8 @@ async def summarize_conversation(
     tracker: DialogueStateTracker,
     llm: LLMClient,
     max_turns: Optional[int] = MAX_TURNS_DEFAULT,
+    user_id: Optional[str] = None,
+    sender_id: Optional[str] = None,
 ) -> str:
     """Summarizes the dialogue using the LLM.
 
@@ -58,8 +66,14 @@ async def summarize_conversation(
         The summary of the dialogue.
     """
     prompt = _create_summarization_prompt(tracker, max_turns)
+    metadata = {
+        LANGFUSE_METADATA_USER_ID: user_id or "unknown",
+        LANGFUSE_METADATA_SESSION_ID: sender_id or "",
+        LANGFUSE_CUSTOM_METADATA_DICT: {"component": "summarize_conversation"},
+        LANGFUSE_TAGS: ["summarize_conversation"],
+    }
     try:
-        llm_response = await llm.acompletion(prompt)
+        llm_response = await llm.acompletion(prompt, metadata)
         summarization = llm_response.choices[0].strip()
         structlogger.debug(
             "summarization.success", summarization=summarization, prompt=prompt
```
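`summarize_conversation` stays callable without identity information; the `or`-defaults above guarantee a well-formed metadata dict either way. A small sketch of just that defaulting behavior:

```python
from typing import Any, Dict, Optional


def build_summarization_metadata(
    user_id: Optional[str] = None, sender_id: Optional[str] = None
) -> Dict[str, Any]:
    # Mirrors the defaulting in summarize_conversation: anonymous callers
    # still produce complete metadata for the Langfuse trace.
    return {
        "trace_user_id": user_id or "unknown",
        "session_id": sender_id or "",
        "trace_metadata": {"component": "summarize_conversation"},
        "tags": ["summarize_conversation"],
    }


print(build_summarization_metadata())                        # anonymous caller
print(build_summarization_metadata("owner-id", "sender-1"))  # identified caller
```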
rasa/core/policies/enterprise_search_policy.py
CHANGED
```diff
@@ -46,6 +46,10 @@ from rasa.graph_components.providers.forms_provider import Forms
 from rasa.graph_components.providers.responses_provider import Responses
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_METADATA_USER_ID,
+    LANGFUSE_TAGS,
     MODEL_CONFIG_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
     MODEL_NAME_CONFIG_KEY,
@@ -545,7 +549,9 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
 
         if self.use_llm:
             prompt = self._render_prompt(tracker, documents.results)
-            llm_response = await self._generate_llm_answer(llm, prompt)
+            llm_response = await self._generate_llm_answer(
+                llm, prompt, tracker.sender_id
+            )
             llm_response = LLMResponse.ensure_llm_response(llm_response)
 
             self._add_prompt_and_llm_response_to_latest_message(
@@ -641,19 +647,26 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
 
     @measure_llm_latency
     async def _generate_llm_answer(
-        self, llm: LLMClient, prompt: Text
+        self, llm: LLMClient, prompt: Text, sender_id: str
     ) -> Optional[LLMResponse]:
         """Fetches an LLM completion for the provided prompt.
 
         Args:
             llm: The LLM client used to get the completion.
             prompt: The prompt text to send to the model.
+            sender_id: sender_id from the tracker.
 
         Returns:
             An LLMResponse object, or None if the call fails.
         """
+        metadata = {
+            LANGFUSE_METADATA_USER_ID: self.user_id,
+            LANGFUSE_METADATA_SESSION_ID: sender_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
         try:
-            return await llm.acompletion(prompt)
+            return await llm.acompletion(prompt, metadata)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
```
rasa/core/policies/intentless_policy.py
CHANGED
```diff
@@ -30,6 +30,10 @@ from rasa.graph_components.providers.forms_provider import Forms
 from rasa.graph_components.providers.responses_provider import Responses
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_METADATA_USER_ID,
+    LANGFUSE_TAGS,
     LLM_CONFIG_KEY,
     MODEL_CONFIG_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
@@ -619,6 +623,7 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
         response_examples: List[str],
         conversation_samples: List[str],
         history: str,
+        sender_id: str,
     ) -> Optional[str]:
         """Make the llm call to generate an answer."""
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
@@ -634,11 +639,19 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             log_event="intentless_policy.generate_answer.prompt_rendered",
             prompt=prompt,
         )
-        return await self._generate_llm_answer(llm, prompt)
+        return await self._generate_llm_answer(llm, prompt, sender_id)
 
-    async def _generate_llm_answer(self, llm: LLMClient, prompt: str) -> Optional[str]:
+    async def _generate_llm_answer(
+        self, llm: LLMClient, prompt: str, sender_id: str
+    ) -> Optional[str]:
+        metadata = {
+            LANGFUSE_METADATA_USER_ID: self.user_id,
+            LANGFUSE_METADATA_SESSION_ID: sender_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
         try:
-            llm_response = await llm.acompletion(prompt)
+            llm_response = await llm.acompletion(prompt, metadata)
             return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
@@ -714,7 +727,7 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
                 final_response_examples.append(resp)
 
         llm_response = await self.generate_answer(
-            final_response_examples, conversation_samples, history
+            final_response_examples, conversation_samples, history, tracker.sender_id
        )
         if not llm_response:
             structlogger.debug("intentless_policy.prediction.skip_llm_fail")
```
rasa/core/policies/policy.py
CHANGED
```diff
@@ -39,6 +39,7 @@ from rasa.shared.core.generator import TrackerWithCachedStates
 from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import FileIOException, RasaException
 from rasa.shared.nlu.constants import ACTION_NAME, ACTION_TEXT, ENTITIES, INTENT, TEXT
+from rasa.utils.licensing import get_human_readable_licence_owner
 
 if TYPE_CHECKING:
     from rasa.core.featurizers.tracker_featurizers import (
@@ -172,6 +173,7 @@ class Policy(GraphComponent):
 
         self._model_storage = model_storage
         self._resource = resource
+        self.user_id = get_human_readable_licence_owner()
 
     @classmethod
     def create(
```
rasa/dialogue_understanding/coexistence/llm_based_router.py
CHANGED
```diff
@@ -23,6 +23,10 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_METADATA_USER_ID,
+    LANGFUSE_TAGS,
     MODEL_CONFIG_KEY,
     OPENAI_PROVIDER,
     PROMPT_CONFIG_KEY,
@@ -43,6 +47,7 @@ from rasa.shared.utils.llm import (
     llm_factory,
     resolve_model_client_config,
 )
+from rasa.utils.licensing import get_human_readable_licence_owner
 from rasa.utils.log_utils import log_llm
 
 LLM_BASED_ROUTER_PROMPT_FILE_NAME = "llm_based_router_prompt.jinja2"
@@ -113,6 +118,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         self._model_storage = model_storage
         self._resource = resource
         self.validate_config()
+        self.user_id = get_human_readable_licence_owner()
 
     def validate_config(self) -> None:
         """Validate the config of the router."""
@@ -160,7 +166,6 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         **kwargs: Any,
     ) -> "LLMBasedRouter":
         """Loads trained component (see parent class for full docstring)."""
-
         # Perform health check on the resolved LLM client config
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
@@ -232,7 +237,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
             prompt=prompt,
         )
         # generating answer
-        answer = await self._generate_answer_using_llm(prompt)
+        answer = await self._generate_answer_using_llm(prompt, tracker.sender_id)
         log_llm(
             logger=structlogger,
             log_module="LLMBasedRouter",
@@ -292,7 +297,9 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
 
         return Template(self.prompt_template).render(**inputs)
 
-    async def _generate_answer_using_llm(self, prompt: str) -> Optional[str]:
+    async def _generate_answer_using_llm(
+        self, prompt: str, sender_id: str
+    ) -> Optional[str]:
         """Use LLM to generate a response.
 
         Args:
@@ -303,8 +310,15 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         """
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
 
+        metadata = {
+            LANGFUSE_METADATA_USER_ID: self.user_id,
+            LANGFUSE_METADATA_SESSION_ID: sender_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
+
         try:
-            llm_response = await llm.acompletion(prompt)
+            llm_response = await llm.acompletion(prompt, metadata)
             return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
```
rasa/dialogue_understanding/generator/llm_based_command_generator.py
CHANGED
```diff
@@ -49,6 +49,7 @@ from rasa.shared.utils.llm import (
     llm_factory,
     resolve_model_client_config,
 )
+from rasa.utils.licensing import get_human_readable_licence_owner
 from rasa.utils.log_utils import log_llm
 
 structlogger = structlog.get_logger()
@@ -92,6 +93,8 @@ class LLMBasedCommandGenerator(
         else:
             self.flow_retrieval = None
 
+        self.user_id = get_human_readable_licence_owner()
+
     ### Abstract methods
     @staticmethod
     @abstractmethod
@@ -331,7 +334,9 @@ class LLMBasedCommandGenerator(
 
     @measure_llm_latency
     async def invoke_llm(
-        self, prompt: Union[List[dict], List[str], str]
+        self,
+        prompt: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> Optional[LLMResponse]:
         """Use LLM to generate a response.
 
@@ -344,6 +349,7 @@ class LLMBasedCommandGenerator(
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             An LLMResponse object.
@@ -355,7 +361,7 @@ class LLMBasedCommandGenerator(
             self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
         )
         try:
-            return await llm.acompletion(prompt)
+            return await llm.acompletion(prompt, metadata)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
```
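Note that `metadata` defaults to `None` throughout, so pre-existing callers of `invoke_llm` and the client `completion`/`acompletion` methods keep working unchanged; only metadata-aware call sites pass the extra argument. A sketch of that compatibility property with a hypothetical stand-in function:

```python
import asyncio
from typing import Any, Dict, List, Optional, Union


async def invoke_llm_sketch(
    prompt: Union[List[dict], List[str], str],
    metadata: Optional[Dict[str, Any]] = None,
) -> str:
    # A None metadata is forwarded as-is; LiteLLM treats that the same
    # as omitting the argument entirely.
    return f"prompt={prompt!r} metadata={metadata!r}"


async def main() -> None:
    print(await invoke_llm_sketch("old-style call"))  # no metadata argument
    print(await invoke_llm_sketch("new-style call", {"tags": ["demo"]}))


asyncio.run(main())
```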
rasa/dialogue_understanding/generator/llm_command_generator.py
CHANGED
```diff
@@ -55,7 +55,9 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
         )
 
     async def invoke_llm(
-        self, prompt: Union[List[dict], List[str], str]
+        self,
+        prompt: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> Optional[LLMResponse]:
         try:
             return await super().invoke_llm(prompt)
```
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
CHANGED
```diff
@@ -47,6 +47,10 @@ from rasa.shared.constants import (
     AWS_BEDROCK_PROVIDER,
     AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_METADATA_USER_ID,
+    LANGFUSE_TAGS,
     MAX_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
@@ -362,7 +366,14 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )
 
-        response = await self.invoke_llm(flow_prompt)
+        metadata = {
+            LANGFUSE_METADATA_USER_ID: self.user_id,
+            LANGFUSE_METADATA_SESSION_ID: tracker.sender_id if tracker else "",
+            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
+
+        response = await self.invoke_llm(flow_prompt, metadata)
         llm_response = LLMResponse.ensure_llm_response(response)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
```
rasa/hooks.py
CHANGED
```diff
@@ -1,8 +1,20 @@
 import argparse
 import logging
+import os
 from typing import TYPE_CHECKING, List, Optional, Text, Union
 
+import litellm
 import pluggy
+import structlog
+
+from rasa.shared.providers.constants import (
+    LANGFUSE_CALLBACK_NAME,
+    LANGFUSE_HOST_ENV_VAR,
+    LANGFUSE_PROJECT_ID_ENV_VAR,
+    LANGFUSE_PUBLIC_KEY_ENV_VAR,
+    LANGFUSE_SECRET_KEY_ENV_VAR,
+    RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR,
+)
 
 # IMPORTANT: do not import anything from rasa here - use scoped imports
 # this avoids circular imports, as the hooks are used in different places
@@ -18,6 +30,7 @@ if TYPE_CHECKING:
 
 hookimpl = pluggy.HookimplMarker("rasa")
 logger = logging.getLogger(__name__)
+structlogger = structlog.get_logger()
 
 
 @hookimpl  # type: ignore[misc]
@@ -57,6 +70,8 @@ def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Text]:
     config.configure_tracing(tracer_provider)
     config.configure_metrics(endpoints_file)
 
+    _init_langfuse_integration()
+
     return endpoints_file
 
 
@@ -115,3 +130,43 @@ def after_server_stop() -> None:
 
     if anon_pipeline is not None:
         anon_pipeline.stop()
+
+
+def _is_langfuse_integration_enabled() -> bool:
+    return (
+        os.environ.get(RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR, "false").lower()
+        == "true"
+    )
+
+
+def _init_langfuse_integration() -> None:
+    if not _is_langfuse_integration_enabled():
+        structlogger.info(
+            "hooks._init_langfuse_integration.disabled",
+            event_info="Langfuse integration is disabled.",
+        )
+        return
+
+    if (
+        not os.environ.get(LANGFUSE_HOST_ENV_VAR)
+        or not os.environ.get(LANGFUSE_PROJECT_ID_ENV_VAR)
+        or not os.environ.get(LANGFUSE_PUBLIC_KEY_ENV_VAR)
+        or not os.environ.get(LANGFUSE_SECRET_KEY_ENV_VAR)
+    ):
+        structlogger.warning(
+            "hooks._init_langfuse_integration.missing_langfuse_keys",
+            event_info=(
+                "Langfuse integration is enabled, but some environment variables"
+                "are missing. Please set LANGFUSE_HOST, LANGFUSE_PROJECT_ID, "
+                "LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment "
+                "variables to use Langfuse integration."
+            ),
+        )
+        return
+
+    litellm.success_callback = [LANGFUSE_CALLBACK_NAME]
+    litellm.failure_callback = [LANGFUSE_CALLBACK_NAME]
+    structlogger.info(
+        "hooks.langfuse_callbacks_initialized",
+        event_info="Langfuse integration initialized.",
+    )
```
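From the outside, enabling the integration is purely environment-driven. A minimal sketch of the same gating logic as `_is_langfuse_integration_enabled` and `_init_langfuse_integration`, with placeholder credential values:

```python
import os

import litellm

# Placeholder values for the sketch; real deployments set these externally.
os.environ.setdefault("RASA_LANGFUSE_INTEGRATION_ENABLED", "true")
os.environ.setdefault("LANGFUSE_HOST", "https://cloud.langfuse.com")
os.environ.setdefault("LANGFUSE_PROJECT_ID", "my-project")
os.environ.setdefault("LANGFUSE_PUBLIC_KEY", "pk-lf-...")
os.environ.setdefault("LANGFUSE_SECRET_KEY", "sk-lf-...")

required = (
    "LANGFUSE_HOST",
    "LANGFUSE_PROJECT_ID",
    "LANGFUSE_PUBLIC_KEY",
    "LANGFUSE_SECRET_KEY",
)
enabled = (
    os.environ.get("RASA_LANGFUSE_INTEGRATION_ENABLED", "false").lower() == "true"
)

if enabled and all(os.environ.get(var) for var in required):
    # Same effect as _init_langfuse_integration: from here on, every LiteLLM
    # completion reports success and failure events to the Langfuse callback.
    litellm.success_callback = ["langfuse"]
    litellm.failure_callback = ["langfuse"]
```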
rasa/monkey_patches.py
ADDED
```diff
@@ -0,0 +1,91 @@
+import os
+import traceback
+from typing import Any, Optional
+
+from litellm.secret_managers.main import str_to_bool
+from packaging.version import Version
+
+
+def litellm_langfuse_logger_init_fixed(
+    self: Any,  # we should not import LangfuseLogger class before we patch it
+    langfuse_public_key: Optional[str] = None,
+    langfuse_secret: Optional[str] = None,
+    langfuse_host: str = "https://cloud.langfuse.com",
+    flush_interval: int = 1,
+) -> None:
+    """Monkeypatched version of LangfuseLogger.__init__ from the LiteLLM library.
+
+    This patched version removes a call that fetched the `project_id` from
+    Langfuse Cloud even when it was already set via environment variables.
+    In the original implementation, this call was made *before* initializing
+    the LangfuseClient, which caused the application to freeze for up to 60 seconds.
+
+    By removing this premature call, the monkeypatch avoids the unnecessary network
+    request and prevents the timeout/freeze issue.
+
+    This workaround can be removed once the underlying bug is resolved in LiteLLM:
+    https://github.com/BerriAI/litellm/issues/7732
+    """
+    try:
+        import langfuse
+        from langfuse import Langfuse
+    except Exception as e:
+        raise Exception(
+            f"\033[91mLangfuse not installed, try running 'pip install langfuse' "
+            f"to fix this error: {e}\n{traceback.format_exc()}\033[0m"
+        )
+    # Instance variables
+    self.secret_key = langfuse_secret or os.getenv("LANGFUSE_SECRET_KEY", "")
+    self.public_key = langfuse_public_key or os.getenv("LANGFUSE_PUBLIC_KEY", "")
+
+    self.langfuse_host = langfuse_host or os.getenv(
+        "LANGFUSE_HOST", "https://cloud.langfuse.com"
+    )
+    self.langfuse_host.replace("http://", "https://")
+    if not self.langfuse_host.startswith("https://"):
+        self.langfuse_host = "https://" + self.langfuse_host
+
+    self.langfuse_release = os.getenv("LANGFUSE_RELEASE")
+    self.langfuse_debug = os.getenv("LANGFUSE_DEBUG")
+    self.langfuse_flush_interval = (
+        os.getenv("LANGFUSE_FLUSH_INTERVAL") or flush_interval
+    )
+
+    parameters = {
+        "public_key": self.public_key,
+        "secret_key": self.secret_key,
+        "host": self.langfuse_host,
+        "release": self.langfuse_release,
+        "debug": self.langfuse_debug,
+        "flush_interval": self.langfuse_flush_interval,  # flush interval in seconds
+    }
+
+    if Version(langfuse.version.__version__) >= Version("2.6.0"):
+        parameters["sdk_integration"] = "litellm"
+
+    self.Langfuse = Langfuse(**parameters)
+
+    if os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY") is not None:
+        upstream_langfuse_debug = (
+            str_to_bool(self.upstream_langfuse_debug)
+            if self.upstream_langfuse_debug is not None
+            else None
+        )
+        self.upstream_langfuse_secret_key = os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY")
+        self.upstream_langfuse_public_key = os.getenv("UPSTREAM_LANGFUSE_PUBLIC_KEY")
+        self.upstream_langfuse_host = os.getenv("UPSTREAM_LANGFUSE_HOST")
+        self.upstream_langfuse_release = os.getenv("UPSTREAM_LANGFUSE_RELEASE")
+        self.upstream_langfuse_debug = os.getenv("UPSTREAM_LANGFUSE_DEBUG")
+        self.upstream_langfuse = Langfuse(
+            public_key=self.upstream_langfuse_public_key,
+            secret_key=self.upstream_langfuse_secret_key,
+            host=self.upstream_langfuse_host,
+            release=self.upstream_langfuse_release,
+            debug=(
+                upstream_langfuse_debug
+                if upstream_langfuse_debug is not None
+                else False
+            ),
+        )
+    else:
+        self.upstream_langfuse = None
```
rasa/shared/constants.py
CHANGED
```diff
@@ -338,3 +338,8 @@ ROLE_SYSTEM = "system"
 # Used for key values in ValidateSlotPatternFlowStackFrame
 REFILL_UTTER = "refill_utter"
 REJECTIONS = "rejections"
+
+LANGFUSE_METADATA_USER_ID = "trace_user_id"
+LANGFUSE_METADATA_SESSION_ID = "session_id"
+LANGFUSE_CUSTOM_METADATA_DICT = "trace_metadata"
+LANGFUSE_TAGS = "tags"
```
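These string values line up with the metadata fields LiteLLM's Langfuse callback already understands (`trace_user_id`, `session_id`, `trace_metadata`, `tags`), so a dict built from the constants can be handed to LiteLLM unchanged. A hedged sketch of a direct LiteLLM call using the same keys; the model name is a placeholder, and it assumes the Langfuse callback and provider credentials are configured:

```python
import litellm

response = litellm.completion(
    model="gpt-4o-mini",  # placeholder model
    messages=[{"role": "user", "content": "hello"}],
    metadata={
        "trace_user_id": "licence-owner-id",  # LANGFUSE_METADATA_USER_ID
        "session_id": "conversation-42",  # LANGFUSE_METADATA_SESSION_ID
        "trace_metadata": {"component": "SomeComponent"},  # LANGFUSE_CUSTOM_METADATA_DICT
        "tags": ["SomeComponent"],  # LANGFUSE_TAGS
    },
)
print(response.choices[0].message.content)
```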
rasa/shared/core/domain.py
CHANGED
```diff
@@ -1678,14 +1678,6 @@
         """Write domain to a file."""
         as_yaml = self.as_yaml()
         rasa.shared.utils.io.write_text_file(as_yaml, filename)
-        # run the check again on the written domain to catch any errors
-        # that may have been missed in the user defined domain files
-        structlogger.info(
-            "domain.persist.domain_written_to_file",
-            event_info="The entire domain content has been written to file.",
-            filename=filename,
-        )
-        Domain.is_domain_file(filename)
 
     def as_yaml(self) -> Text:
         """Dump the `Domain` object as a YAML string.
@@ -1980,18 +1972,17 @@
 
         try:
             content = read_yaml_file(filename, expand_env_vars=cls.expand_env_vars)
-        except (RasaException, YamlSyntaxException) as error:
-            structlogger.
+        except (RasaException, YamlSyntaxException):
+            structlogger.warning(
                 "domain.cannot_load_domain_file",
                 file=filename,
-                error=error,
                 event_info=(
                     f"The file {filename} could not be loaded as domain file. "
                     f"You can use https://yamlchecker.com/ to validate "
                     f"the YAML syntax of your file."
                 ),
             )
-
+            return False
 
         return any(key in content for key in ALL_DOMAIN_KEYS)
 
```
rasa/shared/providers/constants.py
CHANGED
```diff
@@ -4,3 +4,12 @@ LITE_LLM_API_KEY_FIELD = "api_key"
 LITE_LLM_API_VERSION_FIELD = "api_version"
 LITE_LLM_MODEL_FIELD = "model"
 LITE_LLM_AZURE_AD_TOKEN = "azure_ad_token"
+
+# Enable or disable Langfuse integration
+RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR = "RASA_LANGFUSE_INTEGRATION_ENABLED"
+# Langfuse configuration
+LANGFUSE_CALLBACK_NAME = "langfuse"
+LANGFUSE_HOST_ENV_VAR = "LANGFUSE_HOST"
+LANGFUSE_PROJECT_ID_ENV_VAR = "LANGFUSE_PROJECT_ID"
+LANGFUSE_PUBLIC_KEY_ENV_VAR = "LANGFUSE_PUBLIC_KEY"
+LANGFUSE_SECRET_KEY_ENV_VAR = "LANGFUSE_SECRET_KEY"
```
rasa/shared/providers/llm/_base_litellm_client.py
CHANGED
```diff
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import logging
 from abc import abstractmethod
-from typing import Any, Dict, List, Union, cast
+from typing import Any, Dict, List, Optional, Union, cast
 
 import structlog
 from litellm import acompletion, completion, validate_environment
@@ -120,7 +120,11 @@ class _BaseLiteLLMClient:
         raise ProviderClientValidationError(event_info)
 
     @suppress_logs(log_level=logging.WARNING)
-    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
         """Synchronously generate completions for given list of messages.
 
         Args:
@@ -132,6 +136,7 @@ class _BaseLiteLLMClient:
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             List of message completions.
@@ -149,7 +154,9 @@ class _BaseLiteLLMClient:
 
     @suppress_logs(log_level=logging.WARNING)
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str]
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
         """Asynchronously generate completions for given list of messages.
 
@@ -162,6 +169,7 @@ class _BaseLiteLLMClient:
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             List of message completions.
@@ -172,7 +180,9 @@ class _BaseLiteLLMClient:
         try:
             formatted_messages = self._get_formatted_messages(messages)
             arguments = resolve_environment_variables(self._completion_fn_args)
-            response = await acompletion(messages=formatted_messages, **arguments)
+            response = await acompletion(
+                messages=formatted_messages, metadata=metadata, **arguments
+            )
             return self._format_response(response)
         except Exception as e:
             message = ""
```
rasa/shared/providers/llm/litellm_router_llm_client.py
CHANGED
```diff
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import logging
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
 
 import structlog
 
@@ -122,9 +122,12 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         raise ProviderClientAPIException(e)
 
     @suppress_logs(log_level=logging.WARNING)
-    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
-        """
-        Synchronously generate completions for given list of messages.
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
+        """Synchronously generate completions for given list of messages.
 
         Method overrides the base class method to call the appropriate
         completion method based on the configuration. If the chat completions
@@ -140,8 +143,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             List of message completions.
+
         Raises:
             ProviderClientAPIException: If the API request fails.
         """
@@ -158,10 +164,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
 
     @suppress_logs(log_level=logging.WARNING)
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str]
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
-        """
-        Asynchronously generate completions for given list of messages.
+        """Asynchronously generate completions for given list of messages.
 
         Method overrides the base class method to call the appropriate
         completion method based on the configuration. If the chat completions
@@ -177,8 +184,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             List of message completions.
+
         Raises:
             ProviderClientAPIException: If the API request fails.
         """
```
rasa/shared/providers/llm/llm_client.py
CHANGED
```diff
@@ -1,21 +1,19 @@
 from __future__ import annotations
 
-from typing import Dict, List, Protocol, Union, runtime_checkable
+from typing import Any, Dict, List, Optional, Protocol, Union, runtime_checkable
 
 from rasa.shared.providers.llm.llm_response import LLMResponse
 
 
 @runtime_checkable
 class LLMClient(Protocol):
-    """
-    Protocol for an LLM client that specifies the interface for interacting
+    """Protocol for an LLM client that specifies the interface for interacting
     with the API.
     """
 
     @classmethod
     def from_config(cls, config: dict) -> LLMClient:
-        """
-        Initializes the llm client with the given configuration.
+        """Initializes the llm client with the given configuration.
 
         This class method should be implemented to parse the given
         configuration and create an instance of an llm client.
@@ -24,17 +22,24 @@ class LLMClient(Protocol):
 
     @property
     def config(self) -> Dict:
-        """
-        Returns the configuration for that the llm client is initialized with.
+        """Returns the configuration for that the llm client is initialized with.
 
         This property should be implemented to return a dictionary containing
         the configuration settings for the llm client.
         """
         ...
 
-    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
-        """
-        Synchronously generate completions for given list of messages.
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
+        """Synchronously generate completions for given list of messages.
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
 
         This method should be implemented to take a list of messages (as
         strings) and return a list of completions (as strings).
@@ -48,16 +53,19 @@ class LLMClient(Protocol):
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             LLMResponse
         """
         ...
 
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str]
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
-        """
-        Asynchronously generate completions for given list of messages.
+        """Asynchronously generate completions for given list of messages.
 
         This method should be implemented to take a list of messages (as
         strings) and return a list of completions (as strings).
@@ -71,14 +79,15 @@ class LLMClient(Protocol):
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             LLMResponse
         """
         ...
 
     def validate_client_setup(self, *args, **kwargs) -> None:  # type: ignore
-        """
-        Perform client setup validation.
+        """Perform client setup validation.
 
         This method should be implemented to validate whether the client can be
         used with the parameters provided through configuration or environment
```
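Since `LLMClient` is a `runtime_checkable` `Protocol`, any object exposing structurally matching `completion`/`acompletion` methods passes an `isinstance` check without subclassing. A trimmed sketch of that property, with a dummy response class standing in for Rasa's `LLMResponse`:

```python
from typing import Any, Dict, List, Optional, Protocol, Union, runtime_checkable


class DummyResponse:
    """Hypothetical stand-in for rasa's LLMResponse."""

    def __init__(self, choices: List[str]) -> None:
        self.choices = choices


@runtime_checkable
class MiniLLMClient(Protocol):
    """Trimmed copy of the LLMClient completion surface."""

    def completion(
        self,
        messages: Union[List[dict], List[str], str],
        metadata: Optional[Dict[str, Any]] = None,
    ) -> DummyResponse: ...

    async def acompletion(
        self,
        messages: Union[List[dict], List[str], str],
        metadata: Optional[Dict[str, Any]] = None,
    ) -> DummyResponse: ...


class EchoClient:
    """Never subclasses MiniLLMClient; matches it structurally."""

    def completion(self, messages, metadata=None):
        return DummyResponse([f"echo: {messages!r}"])

    async def acompletion(self, messages, metadata=None):
        return self.completion(messages, metadata)


# runtime_checkable protocols check method presence (not signatures) at runtime.
assert isinstance(EchoClient(), MiniLLMClient)
print("EchoClient satisfies the protocol")
```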
rasa/shared/providers/llm/self_hosted_llm_client.py
CHANGED
```diff
@@ -237,7 +237,9 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
             raise ProviderClientAPIException(e)
 
     async def acompletion(
-        self, messages: Union[List[dict], List[str], str]
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
         """Asynchronous completion of the model with the given messages.
 
@@ -255,6 +257,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             The completion response.
@@ -263,7 +266,11 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
             return await super().acompletion(messages)
         return await self._atext_completion(messages)
 
-    def completion(self, messages: Union[List[dict], List[str], str]) -> LLMResponse:
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
         """Completion of the model with the given messages.
 
         Method overrides the base class method to call the appropriate
@@ -273,6 +280,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
 
         Args:
             messages: The messages to be used for completion.
+            metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             The completion response.
```
rasa/shared/utils/health_check/health_check.py
CHANGED
```diff
@@ -3,6 +3,7 @@ import sys
 from typing import Any, Dict, Optional
 
 from rasa.shared.constants import (
+    LANGFUSE_CUSTOM_METADATA_DICT,
     LLM_API_HEALTH_CHECK_DEFAULT_VALUE,
     LLM_API_HEALTH_CHECK_ENV_VAR,
     MODELS_CONFIG_KEY,
@@ -198,7 +199,12 @@ def send_test_llm_api_request(
         config=llm_client.config,
     )
     try:
-        llm_client.completion("hello")
+        llm_client.completion(
+            "hello",
+            metadata={
+                LANGFUSE_CUSTOM_METADATA_DICT: {"component": log_source_component}
+            },
+        )
     except Exception as e:
         structlogger.error(
             f"{log_source_function}.send_test_llm_api_request_failed",
```
rasa/tracing/instrumentation/attribute_extractors.py
CHANGED
```diff
@@ -372,6 +372,7 @@ def extract_llm_config(
 def extract_attrs_for_llm_based_command_generator(
     self: "LLMBasedCommandGenerator",
     prompt: str,
+    metadata: Optional[Dict[str, Any]] = None,
 ) -> Dict[str, Any]:
     from rasa.dialogue_understanding.generator.flow_retrieval import (
         DEFAULT_EMBEDDINGS_CONFIG,
@@ -387,8 +388,7 @@ def extract_attrs_for_llm_based_command_generator(
 
 
 def extract_attrs_for_contextual_response_rephraser(
-    self: Any,
-    prompt: str,
+    self: Any, prompt: str, sender_id: str
 ) -> Dict[str, Any]:
     from rasa.core.nlg.contextual_response_rephraser import DEFAULT_LLM_CONFIG
 
@@ -721,7 +721,7 @@ def extract_attrs_for_intentless_policy_find_closest_response(
 
 
 def extract_attrs_for_intentless_policy_generate_llm_answer(
-    self: "IntentlessPolicy", llm: "BaseLLM", prompt: str
+    self: "IntentlessPolicy", llm: "BaseLLM", prompt: str, sender_id: str
 ) -> Dict[str, Any]:
     from rasa.core.policies.intentless_policy import (
         DEFAULT_EMBEDDINGS_CONFIG,
@@ -738,7 +738,7 @@ def extract_attrs_for_intentless_policy_generate_llm_answer(
 
 
 def extract_attrs_for_enterprise_search_generate_llm_answer(
-    self: "EnterpriseSearchPolicy", llm: "BaseLLM", prompt: str
+    self: "EnterpriseSearchPolicy", llm: "BaseLLM", prompt: str, sender_id: str
 ) -> Dict[str, Any]:
     from rasa.core.policies.enterprise_search_policy import (
         DEFAULT_EMBEDDINGS_CONFIG,
```
rasa/tracing/instrumentation/intentless_policy_instrumentation.py
CHANGED
```diff
@@ -121,12 +121,13 @@ def _instrument_generate_answer(
         response_examples: List[str],
         conversation_samples: List[str],
         history: str,
+        sender_id: str,
     ) -> Optional[str]:
         with tracer.start_as_current_span(
             f"{self.__class__.__name__}.{fn.__name__}"
         ) as span:
             llm_response = await fn(
-                self, response_examples, conversation_samples, history
+                self, response_examples, conversation_samples, history, sender_id
             )
             span.set_attributes(
                 {
```
rasa/utils/licensing.py
CHANGED
```diff
@@ -539,3 +539,18 @@ async def _count_conversations_after(
         return 0
 
     return await tracker_store.count_conversations(after_timestamp=after_timestamp)
+
+
+def get_human_readable_licence_owner() -> str:
+    user_id = "unknown"
+
+    try:
+        retrieved_license = retrieve_license_from_env()
+        if retrieved_license:
+            decoded = License.decode(retrieved_license)
+            if decoded:
+                user_id = (
+                    f"{decoded.company or ''}_{decoded.email or ''}_{decoded.jti or ''}"
+                )
+    finally:
+        return user_id
```
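The helper is deliberately non-throwing: whatever happens during license decoding, callers always get a string back, because the `return` sits in the `finally` block. A sketch of that fallback contract with a hypothetical decoder, since the real `License` and `retrieve_license_from_env` live in this module:

```python
from typing import Optional


def get_owner_id(raw_license: Optional[str]) -> str:
    """Mimics get_human_readable_licence_owner's always-return-a-string contract."""
    user_id = "unknown"
    try:
        if raw_license:
            # Stand-in for License.decode(); the real decode yields company/email/jti.
            company, email, jti = raw_license.split(":", 2)
            user_id = f"{company or ''}_{email or ''}_{jti or ''}"
    finally:
        # Returning from finally suppresses any in-flight exception, so decoding
        # errors fall back to "unknown" instead of propagating, as in the real helper.
        return user_id


print(get_owner_id(None))                    # -> unknown
print(get_owner_id("Acme:dev@acme.com:42"))  # -> Acme_dev@acme.com_42
print(get_owner_id("garbled"))               # -> unknown (split fails, finally returns)
```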
rasa/version.py
CHANGED
{rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rasa-pro
-Version: 3.12.6.dev1
+Version: 3.12.6.dev2
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
 Author: Rasa Technologies GmbH
@@ -63,6 +63,7 @@ Requires-Dist: keras (==2.14.0)
 Requires-Dist: langchain (>=0.2.17,<0.3.0)
 Requires-Dist: langchain-community (>=0.2.19,<0.3.0)
 Requires-Dist: langcodes (>=3.5.0,<4.0.0)
+Requires-Dist: langfuse (>=2.60.2,<2.61.0)
 Requires-Dist: litellm (>=1.52.6,<1.53.0)
 Requires-Dist: matplotlib (>=3.7,<3.8)
 Requires-Dist: mattermostwrapper (>=2.2,<2.3)
@@ -116,6 +117,7 @@ Requires-Dist: scikit-learn (>=1.5.1,<1.6.0)
 Requires-Dist: scipy (>=1.13.1,<1.14.0)
 Requires-Dist: sentencepiece[sentencepiece] (>=0.1.99,<0.2.0) ; extra == "transformers" or extra == "full"
 Requires-Dist: sentry-sdk (>=1.14.0,<1.15.0)
+Requires-Dist: setuptools (>=70.0.0,<70.1.0)
 Requires-Dist: sklearn-crfsuite (>=0.3.6,<0.4.0)
 Requires-Dist: skops (>=0.10.0,<0.11.0)
 Requires-Dist: slack-sdk (>=3.27.1,<3.28.0)
@@ -145,6 +147,7 @@ Requires-Dist: typing-utils (>=0.1.0,<0.2.0)
 Requires-Dist: ujson (>=5.8,<6.0)
 Requires-Dist: webexteamssdk (>=1.6.1,<1.7.0)
 Requires-Dist: websockets (>=10.4,<11.0)
+Requires-Dist: wheel (>=0.40.0)
 Project-URL: Documentation, https://rasa.com/docs
 Project-URL: Homepage, https://rasa.com
 Project-URL: Repository, https://github.com/rasahq/rasa
```
{rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.6.dev2.dist-info}/RECORD
CHANGED
```diff
@@ -1,4 +1,4 @@
-rasa/__init__.py,sha256=
+rasa/__init__.py,sha256=1LPdnp38vsouYw0bt_C0Q0mfLeDKAUaeiNdqMZaihCg,495
 rasa/__main__.py,sha256=OmUXcaA9l7KR_eSYCwaCSetuczxjrcN2taNnZ2ZUTbA,6472
 rasa/anonymization/__init__.py,sha256=Z-ZUW2ofZGfI6ysjYIS7U0JL4JSzDNOkHiiXK488Zik,86
 rasa/anonymization/anonymisation_rule_yaml_reader.py,sha256=8u8ZWfbpJuyUagrfth3IGfQXVlVz31esqExfDdasxZM,3171
@@ -318,16 +318,16 @@ rasa/core/lock_store.py,sha256=weupfBiYMz-B_N-LAONCvp-po1uPRdie9imLYn7hFDU,12504
 rasa/core/migrate.py,sha256=h1dOpXxmVmZlbLVGy1yOU_Obp2KzRiOiL0iuEacA0Cg,14618
 rasa/core/nlg/__init__.py,sha256=jZuQAhOUcxO-KqqHGqICHSY3oDeXlUiGr2trQDYfG6o,240
 rasa/core/nlg/callback.py,sha256=0zDQsOa3uV66G3smCVQ9cUdvj-it8tFneIzqShM7NeI,5208
-rasa/core/nlg/contextual_response_rephraser.py,sha256=
+rasa/core/nlg/contextual_response_rephraser.py,sha256=mrLW-TYZRuGvdHtPEEH5YlwxFrAvs-BcfjrtYjgULsI,14075
 rasa/core/nlg/generator.py,sha256=iMTqt0sPRMc55ontZU1svQVPKixDojBXN-cFuOvLMGo,11647
 rasa/core/nlg/interpolator.py,sha256=hEOhqfMXrAqTZiqjg2t6ZfTK6DJQ5IiX4tJIz2b8Fbw,5190
 rasa/core/nlg/response.py,sha256=SecKyoBQjEnZr4t-Gg5fkUpkozwGT2lzswIKgD63Dac,7248
-rasa/core/nlg/summarize.py,sha256=
+rasa/core/nlg/summarize.py,sha256=AtmBj1I7FSAkNIx5ZE5UdxnbvCIn1q8bajx2UPgJjdA,2614
 rasa/core/nlg/translate.py,sha256=ZXRvysqXGdtHBJ7x3YkW6zfmnb9DuEGHCMTL41v-M8M,2112
 rasa/core/persistor.py,sha256=EP8kaGQQbRJKkxw2GCZkjJk-O2n4PgIHXa9F9a5MjVk,20337
 rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/core/policies/ensemble.py,sha256=XoHxU0jcb_io_LBOpjJffylzqtGEB7CH9ivhRyO8pDc,12960
-rasa/core/policies/enterprise_search_policy.py,sha256=
+rasa/core/policies/enterprise_search_policy.py,sha256=CBMWJkY3ueRkdZRGU8WUQtovuZ9wgbLX1yAUBh9aJ-0,36994
 rasa/core/policies/enterprise_search_prompt_template.jinja2,sha256=dCS_seyBGxMQoMsOjjvPp0dd31OSzZCJSZeev1FJK5Q,1187
 rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2,sha256=va9rpP97dN3PKoJZOVfyuISt3cPBlb10Pqyz25RwO_Q,3294
 rasa/core/policies/flow_policy.py,sha256=597G62hrLF_CAMCvu-TPRldFnjMP2XEIkhcIaPWcQAc,7489
@@ -335,10 +335,10 @@ rasa/core/policies/flows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 rasa/core/policies/flows/flow_exceptions.py,sha256=_FQuN-cerQDM1pivce9bz4zylh5UYkljvYS1gjDukHI,1527
 rasa/core/policies/flows/flow_executor.py,sha256=1KJhBovD9INHVF7PQD4N7CH46JFfPrsy7QXnnGAPlNU,26938
 rasa/core/policies/flows/flow_step_result.py,sha256=agjPrD6lahGSe2ViO5peBeoMdI9ngVGRSgtytgxmJmg,1360
-rasa/core/policies/intentless_policy.py,sha256=
+rasa/core/policies/intentless_policy.py,sha256=fs1iITH02Ph44PCHHn8SxK0gmzwQ2KbMrKevBoJbHik,36578
 rasa/core/policies/intentless_prompt_template.jinja2,sha256=KhIL3cruMmkxhrs5oVbqgSvK6ZiN_6TQ_jXrgtEB-ZY,677
 rasa/core/policies/memoization.py,sha256=CX2d3yP7FehSMW92Wi9NYLZei7tBzoT3T6yybu-Nb5s,19377
-rasa/core/policies/policy.py,sha256=
+rasa/core/policies/policy.py,sha256=QrlB_scK7mj8q2QFNfmXxEMiZb_Kh9o-mkPhN0C1NsY,27556
 rasa/core/policies/rule_policy.py,sha256=EItfUn07JIBLRIbriPKDprsvWq_-xzZTGrlTS2erByA,50730
 rasa/core/policies/ted_policy.py,sha256=0RzIuyrtt4PxLcqQ-bfaExkZvU-TnsMbgmDcwh2SakY,87710
 rasa/core/policies/unexpected_intent_policy.py,sha256=ZXvbswf2NDy00kHmBQcyXa1OVYFyc79HQKrFkQ4gCfM,39609
@@ -365,7 +365,7 @@ rasa/dialogue_understanding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 rasa/dialogue_understanding/coexistence/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/coexistence/constants.py,sha256=RpgLKMG4s7AgII0fRV0siS0Zh2QVI0OVRunhgm4q_j4,94
 rasa/dialogue_understanding/coexistence/intent_based_router.py,sha256=JlYBZdScnhflLK__i4bG0-PIkuFv0B7L4yOdnLgYWAY,7609
-rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=
+rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=VwNFrjfw7yI_JFFmDTFa77YsCQyyTwKFfIXg13WyFZM,11930
 rasa/dialogue_understanding/coexistence/router_template.jinja2,sha256=CHWFreN0sv1EbPh-hf5AlCt3zxy2_llX1Pdn9Q11Y18,357
 rasa/dialogue_understanding/commands/__init__.py,sha256=F-pLETYRUjhIkjjDfXGUuPsK_ac1HcLmJkrUUP0RhME,2259
 rasa/dialogue_understanding/commands/can_not_handle_command.py,sha256=fKOj9ScLxuaFO9Iw0p7og_4zMiw2weBdx322rBKlnCI,3519
@@ -401,8 +401,8 @@ rasa/dialogue_understanding/generator/command_parser.py,sha256=wf6FSgqBw5F0legg0
 rasa/dialogue_understanding/generator/constants.py,sha256=PuUckBGUZ-Tu31B0cs8yxN99BDW3PGoExZa-BlIL5v8,1108
 rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
 rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=wlGnMj17-X1-siQmdSvOd7K61sRzBf82MQEL2pqDQMI,17891
-rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=
-rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=
+rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=QWddvGPQYnu-ZrZiuqB976ClKwh4ulVpLXF16IeAWUQ,24215
+rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=E5byrCC_6r_GJm_HIosN_Se00NmXmnTCdOzaHMwTu6A,2641
 rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
 rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
@@ -413,7 +413,7 @@ rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.j
 rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2,sha256=z-cnFVfIE_kEnY1o52YE2CdCWwgYTv7R3xVxsjXWlnw,3808
 rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2,sha256=4076ARsy0E0iADBX6li19IoM3F4F-2wK3bL6UEOvCdo,3620
 rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=
+rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=DHTHKMRSV_tIWyj2lppxNGKgUT1jiESooTtDeVwmAqo,22166
 rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=cvWsl-hYUgZ_tIQmWjMM1RQwsgRTlZ_osfLaUQiBk-U,4543
 rasa/dialogue_understanding/generator/utils.py,sha256=jxtb-AfngN59y2rHynqJDK80xM_yooEvr3aW1MWl6H0,2760
 rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -532,7 +532,7 @@ rasa/graph_components/providers/training_tracker_provider.py,sha256=FaCWHJA69EpM
 rasa/graph_components/validators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/graph_components/validators/default_recipe_validator.py,sha256=iOVoB7zVTKes8EYW110fz8ZvtgoDcCX25GlUsiESS18,24457
 rasa/graph_components/validators/finetuning_validator.py,sha256=VfCGytnweijKBG8bAqYp7zKZB2aRgi2ZI8R0eou5Ev4,12865
-rasa/hooks.py,sha256=
+rasa/hooks.py,sha256=JfdrB00FDQyrULr9eKXJf8Suwa6LUlcAK6n-SUDEDpU,5803
 rasa/jupyter.py,sha256=TCYVD4QPQIMmfA6ZwDUBOBTAECwCwbU2XOkosodLO9k,1782
 rasa/llm_fine_tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/llm_fine_tuning/annotation_module.py,sha256=6wBBjGwONVlikp79xAHp5g3rydEhPM6kP1bw1g-maYk,8578
@@ -565,6 +565,7 @@ rasa/model_manager/warm_rasa_process.py,sha256=2vg8gBEUvPrr6C5W-fxtWWSajksrOaT83
 rasa/model_service.py,sha256=XXCaiLj2xq58n05W3R1jmTIv-V8f_7PG30kVpRxf71Y,3727
 rasa/model_testing.py,sha256=eZw7l8Zz3HkH_ZPBurY93HzzudHdoQn8HBnDdZSysAY,14929
 rasa/model_training.py,sha256=1opig8_npw7dLHd8k06ZYUQCrJ61sFIbNHBgvF63yH8,21733
+rasa/monkey_patches.py,sha256=pZTDKQ8GNzeiUWeJ2MneUuremSNVScL7oXeMAEd4o4Y,3687
 rasa/nlu/__init__.py,sha256=D0IYuTK_ZQ_F_9xsy0bXxVCAtU62Fzvp8S7J9tmfI_c,123
 rasa/nlu/classifiers/__init__.py,sha256=Qvrf7_rfiMxm2Vt2fClb56R3QFExf7WPdFdL-AOvgsk,118
 rasa/nlu/classifiers/classifier.py,sha256=9fm1mORuFf1vowYIXmqE9yLRKdSC4nGQW7UqNZQipKY,133
@@ -626,12 +627,12 @@ rasa/nlu/utils/spacy_utils.py,sha256=5EnHR-MVAZhGbg2rq8VpOu7I0tagV3ThRTlM0-WO2Cg
 rasa/plugin.py,sha256=cSmFhSWr5WQyYXdJOWwgH4ra_2kbhoNLZAtnqcsGny4,3071
 rasa/server.py,sha256=eomGM_3SpBxaF_-VfZbkSO_bMk_vI1XLUZjt32f4gcI,59390
 rasa/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/constants.py,sha256=
+rasa/shared/constants.py,sha256=0vDXxJNkhE6QyRyP3CyL_dGLTn-iJ3UJZYl_rJxcWtE,12234
 rasa/shared/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/core/command_payload_reader.py,sha256=puHYsp9xbX0YQm2L1NDBItOFmdzI7AzmfGefgcHiCc0,3871
 rasa/shared/core/constants.py,sha256=kbaZlfjhJWrRhWhYRBjGCj6TeHF03-wuXcD9FXX1plY,6632
 rasa/shared/core/conversation.py,sha256=0nUhcbQkPDnO3_Rig7oiinrWmPy5fsVQs_U6Fx1hG5c,1384
-rasa/shared/core/domain.py,sha256=
+rasa/shared/core/domain.py,sha256=KccnBgQmyVnWfoCfMh-kfwt7BHuFRKU64-_Sbh5RI20,81175
 rasa/shared/core/events.py,sha256=kTUWSpDepj3kpjjXveYXz3h2XcIQV3Sq8h7MTbx5fMw,86489
 rasa/shared/core/flows/__init__.py,sha256=Z4pBY0qcEbHeOwgmKsyg2Nz4dX9CF67fFCwj2KXSMpg,180
 rasa/shared/core/flows/constants.py,sha256=0HN3k-apOb_fi8E2AJtUxMxro8jwFVyXQpil-tHEzbM,340
@@ -725,7 +726,7 @@ rasa/shared/providers/_configs/self_hosted_llm_client_config.py,sha256=l2JnypPXF
 rasa/shared/providers/_configs/utils.py,sha256=u2Ram05YwQ7-frm_r8n9rafjZoF8i0qSC7XjYQRuPgo,3732
 rasa/shared/providers/_ssl_verification_utils.py,sha256=vUnP0vocf0GQ0wG8IQpPcCet4c1C9-wQWQNckNWbDBk,4165
 rasa/shared/providers/_utils.py,sha256=g1N5xv3P8Nk7NPHECWk6e_C_DNPGKlYgM8OII-FkCRs,3090
-rasa/shared/providers/constants.py,sha256=
+rasa/shared/providers/constants.py,sha256=yF9giGO8xWCrW9dzUW-7wX-y6sh7hlbYzHYKFayrF7A,613
 rasa/shared/providers/embedding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/providers/embedding/_base_litellm_embedding_client.py,sha256=PFavNnD6EVDQiqc9sLnBRV0hebW4iCjIh_dvpwzg4RI,8796
 rasa/shared/providers/embedding/_langchain_embedding_client_adapter.py,sha256=IR2Rb3ReJ9C9sxOoOGRXgtz8STWdMREs_4AeSMKFjl4,2135
@@ -737,15 +738,15 @@ rasa/shared/providers/embedding/huggingface_local_embedding_client.py,sha256=Zo3
 rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=eafDk6IgQtL_kiKgpa6sJs1oATyRi2NT2leUFQsED2s,4551
 rasa/shared/providers/embedding/openai_embedding_client.py,sha256=XNRGE7apo2v3kWRrtgxE-Gq4rvNko3IiXtvgC4krDYE,5429
 rasa/shared/providers/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/providers/llm/_base_litellm_client.py,sha256=
+rasa/shared/providers/llm/_base_litellm_client.py,sha256=ScfqN_Zb6RYDZZiSlc3s79oOA15QAgOI9JDfWj20iSM,11744
 rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=tMHn0i7-HZb01__CuzKvzSbXAq2dE0Oov4U7qIl74no,14989
 rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=xx-o-NX_mtx6AszK--ZRj8n8JyEJuVu1-42dt8AynBM,4083
-rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=
-rasa/shared/providers/llm/llm_client.py,sha256
+rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=kF8yqwxBNjcIYz022yv0gP5RqnJzx6bfG-hcpK5ovKE,8217
```
|
|
745
|
+
rasa/shared/providers/llm/llm_client.py,sha256=11xgWbjV8brvQN-EZPjZHNofImY8JKlRmrbOD7UaL-o,3651
|
|
745
746
|
rasa/shared/providers/llm/llm_response.py,sha256=8mOpZdmh4-3yM7aOmNO0yEYUmRDErfoP7ZDMUuHr2Cc,3504
|
|
746
747
|
rasa/shared/providers/llm/openai_llm_client.py,sha256=rSdLj29Hl1Wm5G6Uwo77j4WqogK_3QIbTA7fyt63YAg,5013
|
|
747
748
|
rasa/shared/providers/llm/rasa_llm_client.py,sha256=44Tvtnkq4mxDIxtdrGUkwBWAvX1OLaswqmpAsyBH8e8,3504
|
|
748
|
-
rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=
|
|
749
|
+
rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=85jnA7AO2W4OqV0874N5YBzTafVeYtiRbaRyzyA_lKA,10544
|
|
749
750
|
rasa/shared/providers/mappings.py,sha256=QSD3XWvhYCtBLNpGycN30vEnLULYIaqCsAtmfPfSZ3U,3674
|
|
750
751
|
rasa/shared/providers/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
751
752
|
rasa/shared/providers/router/_base_litellm_router_client.py,sha256=I6ucWfG7dUz_aWR8biDZI90cvd1uOKT8L76uayN3vJs,8592
|
|
@@ -756,7 +757,7 @@ rasa/shared/utils/common.py,sha256=bNYo9B1l14VEN9pqQxAYzJ92zCl6iMUfsG8zlOU1qI4,1
|
|
|
756
757
|
rasa/shared/utils/constants.py,sha256=CkcwE1mmaAo_1QDydUX5WO8x_-stFGZ17dPAGP1t_k4,262
|
|
757
758
|
rasa/shared/utils/health_check/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
758
759
|
rasa/shared/utils/health_check/embeddings_health_check_mixin.py,sha256=ASOzDtI3i6HlRLzee8pafejlTkUesOhY6FZb5-wAZMI,1034
|
|
759
|
-
rasa/shared/utils/health_check/health_check.py,sha256=
|
|
760
|
+
rasa/shared/utils/health_check/health_check.py,sha256=0fhxJdjqIlkKdp5gqdoVsGpg8vl6TOgCy9Le0GMyJak,9888
|
|
760
761
|
rasa/shared/utils/health_check/llm_health_check_mixin.py,sha256=ANP5Q68TRX8p4wWkRCAISsWBV1iYYeGnqWILnR1NawE,957
|
|
761
762
|
rasa/shared/utils/io.py,sha256=AhuECoXGO367NvWRCBu99utEtTQnyxWVJyKOOpLePpg,15917
|
|
762
763
|
rasa/shared/utils/llm.py,sha256=yoS5yd0jwj_wN-IaJhpxOoiG8JAvIGbYhzOoDbo0gAU,28959
|
|
@@ -782,9 +783,9 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
|
782
783
|
rasa/tracing/config.py,sha256=32X2rqAiHe0e-Iijb5AivjqDs2j03n8xx5mo07NBMI4,12964
|
|
783
784
|
rasa/tracing/constants.py,sha256=-3vlfI9v_D8f-KB5tuiqBHhszu2WofFQOyjKBn28gyg,2889
|
|
784
785
|
rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
785
|
-
rasa/tracing/instrumentation/attribute_extractors.py,sha256=
|
|
786
|
+
rasa/tracing/instrumentation/attribute_extractors.py,sha256=Rh7SSARmRSeZPHiLOIvzwlaeJs-RDffpgsJOBP2J5Pk,29561
|
|
786
787
|
rasa/tracing/instrumentation/instrumentation.py,sha256=BPI5OoZFbl90kVJzlKEz-eD8cf-CaX_x1t4V9XBhDKo,53625
|
|
787
|
-
rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=
|
|
788
|
+
rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=c_C6m3oAxQbLWc0AbYecRqRzWTc4ACvIUkf3hEJdUUo,4860
|
|
788
789
|
rasa/tracing/instrumentation/metrics.py,sha256=DI_qIS6sz5KYU4QDcPKfnHxKLL_Ma3wV6diH4_vg85c,12051
|
|
789
790
|
rasa/tracing/metric_instrument_provider.py,sha256=9J9a-a4lmBe20PuTHa_HwKX7O8kEAQdY5ajPLDCQkwE,12174
|
|
790
791
|
rasa/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -795,7 +796,7 @@ rasa/utils/converter.py,sha256=H4LHpoAK7MXMmvNZG_uSn0gbccCJvHtsA2-6Zya4u6M,1656
|
|
|
795
796
|
rasa/utils/endpoints.py,sha256=htalZ5AXvXxNlVeTUgk3LJ-OKzt-dr5GTgRQTyC-0-0,10073
|
|
796
797
|
rasa/utils/io.py,sha256=LIAdQQqUPA-V_mdpgeQzPDzA4rmsdZLyVKc8j_0Z70Y,7161
|
|
797
798
|
rasa/utils/json_utils.py,sha256=SKtJzzsIRCAgNEQiBvWDDm9euMRBgJ-TyvCi2tXHH1w,1689
|
|
798
|
-
rasa/utils/licensing.py,sha256=
|
|
799
|
+
rasa/utils/licensing.py,sha256=PpFmrfuPFEFhvB67A_rJVbk31RdmqT5Yxam-eVm30Zo,20910
|
|
799
800
|
rasa/utils/log_utils.py,sha256=5YaXS5yehwoNn48zihmqODwZEMQqzzuZ_cO0cfCOCe0,6631
|
|
800
801
|
rasa/utils/mapper.py,sha256=CZiD3fu7-W-OJgoB1R8JaOg-Hq13TK20D-zGVNgbF18,7726
|
|
801
802
|
rasa/utils/ml_utils.py,sha256=y4Czr9GdRBj-a2npXU8ED2qC9bzw5olRyqQEmu5BB8k,4185
|
|
@@ -823,9 +824,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
|
|
|
823
824
|
rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
|
|
824
825
|
rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
|
|
825
826
|
rasa/validator.py,sha256=tAFzUKVbCPRPx0LjCUKY0zSCaX2hgINuaMfK123FCyc,88716
|
|
826
|
-
rasa/version.py,sha256=
|
|
827
|
-
rasa_pro-3.12.6.
|
|
828
|
-
rasa_pro-3.12.6.
|
|
829
|
-
rasa_pro-3.12.6.
|
|
830
|
-
rasa_pro-3.12.6.
|
|
831
|
-
rasa_pro-3.12.6.
|
|
827
|
+
rasa/version.py,sha256=VTTo56s7d7AAaqOlkzoP9TX2R5EVQvlrayFJK1vUSK8,122
|
|
828
|
+
rasa_pro-3.12.6.dev2.dist-info/METADATA,sha256=Yn6dT0MY9OmZ7kUk3zue2k7pIKwIlXJDk_CBEhvBWLc,10663
|
|
829
|
+
rasa_pro-3.12.6.dev2.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
|
|
830
|
+
rasa_pro-3.12.6.dev2.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
|
|
831
|
+
rasa_pro-3.12.6.dev2.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
|
|
832
|
+
rasa_pro-3.12.6.dev2.dist-info/RECORD,,
|
|
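Each row in the RECORD diff above has the form path,sha256=<digest>,<size>: the digest is the file's SHA-256 hash in urlsafe base64 with the trailing "=" padding stripped, and the size is in bytes. The closing rasa_pro-3.12.6.dev2.dist-info/RECORD,, row is empty by design, since RECORD cannot list its own hash. A minimal sketch of the digest computation (the helper name record_hash is ours, for illustration only):

import base64
import hashlib

def record_hash(data: bytes) -> str:
    """Return a SHA-256 digest in the unpadded urlsafe-base64 form used by RECORD."""
    digest = hashlib.sha256(data).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# All empty files share one digest, which is why every empty __init__.py
# above carries the same hash and a size of 0.
assert record_hash(b"") == "47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"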
File without changes
File without changes
File without changes
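A listing like the RECORD diff above can be reproduced locally, since a wheel is an ordinary zip archive whose RECORD can be read and compared line by line. A minimal sketch, assuming both wheels have been downloaded under their published filenames:

import zipfile

def read_record(wheel_path: str) -> set[str]:
    """Return the set of RECORD lines from a wheel's dist-info directory."""
    with zipfile.ZipFile(wheel_path) as wheel:
        record_name = next(
            name for name in wheel.namelist() if name.endswith(".dist-info/RECORD")
        )
        return set(wheel.read(record_name).decode("utf-8").splitlines())

old = read_record("rasa_pro-3.12.6.dev1-py3-none-any.whl")
new = read_record("rasa_pro-3.12.6.dev2-py3-none-any.whl")

# Lines unique to either side correspond to the -/+ rows above: files that
# were added (e.g. rasa/monkey_patches.py), renamed, or re-hashed.
for line in sorted(old - new):
    print("-", line)
for line in sorted(new - old):
    print("+", line)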