rasa-pro 3.12.8__py3-none-any.whl → 3.12.10.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rasa/__init__.py +6 -0
- rasa/core/channels/studio_chat.py +25 -9
- rasa/core/policies/intentless_policy.py +3 -1
- rasa/dialogue_understanding/coexistence/llm_based_router.py +0 -1
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +15 -4
- rasa/dialogue_understanding/generator/llm_command_generator.py +3 -1
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +44 -4
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +14 -1
- rasa/hooks.py +55 -0
- rasa/monkey_patches.py +91 -0
- rasa/shared/constants.py +5 -0
- rasa/shared/core/flows/flow.py +121 -125
- rasa/shared/providers/constants.py +9 -0
- rasa/shared/providers/llm/_base_litellm_client.py +14 -4
- rasa/shared/providers/llm/litellm_router_llm_client.py +17 -7
- rasa/shared/providers/llm/llm_client.py +24 -15
- rasa/shared/providers/llm/self_hosted_llm_client.py +10 -2
- rasa/tracing/instrumentation/attribute_extractors.py +2 -2
- rasa/version.py +1 -1
- {rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/METADATA +4 -3
- {rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/RECORD +24 -23
- {rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/entry_points.txt +0 -0
rasa/__init__.py
CHANGED
@@ -5,5 +5,11 @@ from rasa import version
 # define the version before the other imports since these need it
 __version__ = version.__version__

+from litellm.integrations.langfuse.langfuse import LangFuseLogger
+
+from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
+
+# Monkey-patch the init method as early as possible before the class is used
+LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore

 logging.getLogger(__name__).addHandler(logging.NullHandler())
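The assignment above is ordinary class-attribute monkey-patching. A minimal standalone sketch of the same pattern (hypothetical classes, not Rasa code): the replacement must be assigned before any instance is created, because `__init__` is looked up on the class at call time.

```python
class Greeter:
    def __init__(self) -> None:
        self.greeting = "hello"


def patched_init(self) -> None:
    # Replacement initializer; runs instead of Greeter.__init__.
    self.greeting = "hi"


# Same mechanism as the LangFuseLogger patch above.
Greeter.__init__ = patched_init

assert Greeter().greeting == "hi"
```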
rasa/core/channels/studio_chat.py
CHANGED

@@ -16,6 +16,7 @@ import structlog
 from sanic import Sanic

 from rasa.core.channels.socketio import SocketBlueprint, SocketIOInput
+from rasa.core.exceptions import AgentNotReady
 from rasa.hooks import hookimpl
 from rasa.plugin import plugin_manager
 from rasa.shared.core.constants import ACTION_LISTEN_NAME
@@ -149,8 +150,15 @@ class StudioChatInput(SocketIOInput):
         """
         await on_new_message(message)

-        if not self.agent:
+        if not self.agent or not self.agent.is_ready():
             structlogger.error("studio_chat.on_message_proxy.agent_not_initialized")
+            await self.emit_error(
+                "The Rasa Pro model could not be loaded. "
+                "Please check the training and deployment logs "
+                "for more information.",
+                message.sender_id,
+                AgentNotReady("The Rasa Pro model could not be loaded."),
+            )
             return

         tracker = await self.agent.tracker_store.retrieve(message.sender_id)

@@ -160,6 +168,17 @@ class StudioChatInput(SocketIOInput):

         await self.on_tracker_updated(tracker)

+    async def emit_error(self, message: str, room: str, e: Exception) -> None:
+        await self.emit(
+            "error",
+            {
+                "message": message,
+                "error": str(e),
+                "exception": str(type(e).__name__),
+            },
+            room=room,
+        )
+
     async def handle_tracker_update(self, sid: str, data: Dict) -> None:
         from rasa.shared.core.trackers import DialogueStateTracker

@@ -200,15 +219,12 @@ class StudioChatInput(SocketIOInput):
                 error=e,
                 sender_id=data["sender_id"],
             )
-            await self.emit(
-                "error",
-                {
-                    "error": str(e),
-                    "exception": str(type(e).__name__),
-                },
-                room=sid,
+            await self.emit_error(
+                "An error occurred while updating the conversation.",
+                data["sender_id"],
+                e,
             )
+
         if not tracker:
             # in case the tracker couldn't be updated, we retrieve the prior
             # version and use that to populate the update
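The new `emit_error` helper standardizes the payload of the Socket.IO "error" event. A minimal sketch of a client consuming that event with the python-socketio client library; the server URL is a placeholder, and any namespace or path configuration of the Studio chat channel is out of scope here.

```python
import asyncio

import socketio  # python-socketio client


async def main() -> None:
    sio = socketio.AsyncClient()

    @sio.on("error")
    async def on_error(data: dict) -> None:
        # Payload shape mirrors emit_error: message, error, exception.
        print(data["message"], data["error"], data["exception"])

    await sio.connect("http://localhost:5005")  # assumed server address
    await sio.wait()


asyncio.run(main())
```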
rasa/core/policies/intentless_policy.py
CHANGED

@@ -715,7 +715,9 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             final_response_examples.append(resp)

         llm_response = await self.generate_answer(
-            final_response_examples,
+            final_response_examples,
+            conversation_samples,
+            history,
         )
         if not llm_response:
             structlogger.debug("intentless_policy.prediction.skip_llm_fail")
rasa/dialogue_understanding/coexistence/llm_based_router.py
CHANGED

@@ -166,7 +166,6 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         **kwargs: Any,
     ) -> "LLMBasedRouter":
         """Loads trained component (see parent class for full docstring)."""
-
         # Perform health check on the resolved LLM client config
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
rasa/dialogue_understanding/generator/llm_based_command_generator.py
CHANGED

@@ -1,6 +1,8 @@
 from abc import ABC, abstractmethod
+from asyncio import Lock
 from functools import lru_cache
 from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
+from uuid import UUID, uuid4

 import structlog
 from jinja2 import Environment, Template, select_autoescape

@@ -89,6 +91,9 @@ class LLMBasedCommandGenerator(
         else:
             self.flow_retrieval = None

+        self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
+        self._lock = Lock()
+
     ### Abstract methods
     @staticmethod
     @abstractmethod

@@ -225,8 +230,7 @@ class LLMBasedCommandGenerator(

     @lru_cache
     def compile_template(self, template: str) -> Template:
-        """
-        Compile the prompt template and register custom filters.
+        """Compile the prompt template and register custom filters.

         Compiling the template is an expensive operation,
         so we cache the result.
         """

@@ -328,7 +332,9 @@ class LLMBasedCommandGenerator(

     @measure_llm_latency
     async def invoke_llm(
-        self,
+        self,
+        prompt: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> Optional[LLMResponse]:
         """Use LLM to generate a response.

@@ -341,6 +347,7 @@ class LLMBasedCommandGenerator(
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.

         Returns:
             An LLMResponse object.

@@ -352,7 +359,7 @@ class LLMBasedCommandGenerator(
             self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
         )
         try:
-            return await llm.acompletion(prompt)
+            return await llm.acompletion(prompt, metadata)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here

@@ -655,3 +662,7 @@ class LLMBasedCommandGenerator(
     def get_default_llm_config() -> Dict[str, Any]:
         """Get the default LLM config for the command generator."""
         return DEFAULT_LLM_CONFIG
+
+    async def _get_or_create_session_id(self, sender_id: str) -> UUID:
+        async with self._lock:
+            return self.sender_id_to_session_id_mapping.setdefault(sender_id, uuid4())
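The `_get_or_create_session_id` helper gives each conversation a stable UUID. A self-contained sketch of the same pattern: `dict.setdefault` under an `asyncio.Lock` guarantees that concurrent callers for the same sender observe a single id.

```python
import asyncio
from uuid import UUID, uuid4


class SessionIds:
    def __init__(self) -> None:
        self._mapping: dict[str, UUID] = {}
        self._lock = asyncio.Lock()

    async def get_or_create(self, sender_id: str) -> UUID:
        # setdefault inserts a fresh UUID only on first access.
        async with self._lock:
            return self._mapping.setdefault(sender_id, uuid4())


async def demo() -> None:
    ids = SessionIds()
    a, b = await asyncio.gather(ids.get_or_create("u1"), ids.get_or_create("u1"))
    assert a == b  # one stable session id per sender


asyncio.run(demo())
```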
rasa/dialogue_understanding/generator/llm_command_generator.py
CHANGED

@@ -55,7 +55,9 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
         )

     async def invoke_llm(
-        self,
+        self,
+        prompt: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> Optional[LLMResponse]:
         try:
             return await super().invoke_llm(prompt)
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py
CHANGED

@@ -42,6 +42,9 @@ from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_TAGS,
     RASA_PATTERN_CANNOT_HANDLE_NOT_SUPPORTED,
     ROUTE_TO_CALM_SLOT,
 )

@@ -107,7 +110,7 @@ structlogger = structlog.get_logger()
 )
 @deprecated(
     reason=(
-        "The MultiStepLLMCommandGenerator is …
+        "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
         "Rasa `4.0.0`."
     )
 )

@@ -492,7 +495,20 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-        response = await self.invoke_llm(prompt)
+        if tracker:
+            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+        else:
+            session_id = "unknown"
+        metadata = {
+            LANGFUSE_METADATA_SESSION_ID: session_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {
+                "component": self.__class__.__name__,
+                "function": "_predict_commands_for_active_flow",
+            },
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
+
+        response = await self.invoke_llm(prompt, metadata)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:

@@ -546,8 +562,20 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             ".prompt_rendered",
             prompt=prompt,
         )
+        if tracker:
+            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+        else:
+            session_id = "unknown"
+        metadata = {
+            LANGFUSE_METADATA_SESSION_ID: session_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {
+                "component": self.__class__.__name__,
+                "function": "_predict_commands_for_handling_flows",
+            },
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }

-        response = await self.invoke_llm(prompt)
+        response = await self.invoke_llm(prompt, metadata)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:

@@ -636,8 +664,20 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             flow=newly_started_flow.id,
             prompt=prompt,
         )
+        if tracker:
+            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+        else:
+            session_id = "unknown"
+        metadata = {
+            LANGFUSE_METADATA_SESSION_ID: session_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {
+                "component": self.__class__.__name__,
+                "function": "_predict_commands_for_newly_started_flow",
+            },
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }

-        response = await self.invoke_llm(prompt)
+        response = await self.invoke_llm(prompt, metadata)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
CHANGED

@@ -47,6 +47,9 @@ from rasa.shared.constants import (
     AWS_BEDROCK_PROVIDER,
     AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
+    LANGFUSE_CUSTOM_METADATA_DICT,
+    LANGFUSE_METADATA_SESSION_ID,
+    LANGFUSE_TAGS,
     MAX_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,

@@ -366,7 +369,17 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )

-        response = await self.invoke_llm(flow_prompt)
+        if tracker:
+            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
+        else:
+            session_id = "unknown"
+        metadata = {
+            LANGFUSE_METADATA_SESSION_ID: session_id,
+            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
+            LANGFUSE_TAGS: [self.__class__.__name__],
+        }
+
+        response = await self.invoke_llm(flow_prompt, metadata)
         llm_response = LLMResponse.ensure_llm_response(response)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
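For reference, the metadata payload assembled before each `invoke_llm` call above looks like this when built standalone. This is a sketch using the constants added to rasa/shared/constants.py in this release (shown later in this diff); the session-id value and component name are illustrative.

```python
from rasa.shared.constants import (
    LANGFUSE_CUSTOM_METADATA_DICT,
    LANGFUSE_METADATA_SESSION_ID,
    LANGFUSE_TAGS,
)

# The keys resolve to "session_id", "trace_metadata" and "tags", which the
# langfuse logging callback in LiteLLM picks up from the call's metadata.
metadata = {
    LANGFUSE_METADATA_SESSION_ID: "3f0c9a52-example",  # per-sender UUID; "unknown" without a tracker
    LANGFUSE_CUSTOM_METADATA_DICT: {"component": "CompactLLMCommandGenerator"},
    LANGFUSE_TAGS: ["CompactLLMCommandGenerator"],
}
```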
rasa/hooks.py
CHANGED
@@ -1,8 +1,20 @@
 import argparse
 import logging
+import os
 from typing import TYPE_CHECKING, List, Optional, Text, Union

+import litellm
 import pluggy
+import structlog
+
+from rasa.shared.providers.constants import (
+    LANGFUSE_CALLBACK_NAME,
+    LANGFUSE_HOST_ENV_VAR,
+    LANGFUSE_PROJECT_ID_ENV_VAR,
+    LANGFUSE_PUBLIC_KEY_ENV_VAR,
+    LANGFUSE_SECRET_KEY_ENV_VAR,
+    RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR,
+)

 # IMPORTANT: do not import anything from rasa here - use scoped imports
 # this avoids circular imports, as the hooks are used in different places

@@ -18,6 +30,7 @@ if TYPE_CHECKING:

 hookimpl = pluggy.HookimplMarker("rasa")
 logger = logging.getLogger(__name__)
+structlogger = structlog.get_logger()


 @hookimpl  # type: ignore[misc]

@@ -57,6 +70,8 @@ def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Tex
     config.configure_tracing(tracer_provider)
     config.configure_metrics(endpoints_file)

+    _init_langfuse_integration()
+
     return endpoints_file


@@ -115,3 +130,43 @@ def after_server_stop() -> None:

     if anon_pipeline is not None:
         anon_pipeline.stop()
+
+
+def _is_langfuse_integration_enabled() -> bool:
+    return (
+        os.environ.get(RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR, "false").lower()
+        == "true"
+    )
+
+
+def _init_langfuse_integration() -> None:
+    if not _is_langfuse_integration_enabled():
+        structlogger.info(
+            "hooks._init_langfuse_integration.disabled",
+            event_info="Langfuse integration is disabled.",
+        )
+        return
+
+    if (
+        not os.environ.get(LANGFUSE_HOST_ENV_VAR)
+        or not os.environ.get(LANGFUSE_PROJECT_ID_ENV_VAR)
+        or not os.environ.get(LANGFUSE_PUBLIC_KEY_ENV_VAR)
+        or not os.environ.get(LANGFUSE_SECRET_KEY_ENV_VAR)
+    ):
+        structlogger.warning(
+            "hooks._init_langfuse_integration.missing_langfuse_keys",
+            event_info=(
+                "Langfuse integration is enabled, but some environment variables "
+                "are missing. Please set LANGFUSE_HOST, LANGFUSE_PROJECT_ID, "
+                "LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment "
+                "variables to use Langfuse integration."
+            ),
+        )
+        return
+
+    litellm.success_callback = [LANGFUSE_CALLBACK_NAME]
+    litellm.failure_callback = [LANGFUSE_CALLBACK_NAME]
+    structlogger.info(
+        "hooks.langfuse_callbacks_initialized",
+        event_info="Langfuse integration initialized.",
+    )
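The hook wires Langfuse in through LiteLLM's callback mechanism. A minimal sketch of what an operator would set before starting the server, using the environment variable names from rasa/shared/providers/constants.py in this diff; the key values are placeholders.

```python
import os

# Opt in to the integration; all four Langfuse variables must also be set,
# otherwise _init_langfuse_integration logs a warning and returns early.
os.environ["RASA_LANGFUSE_INTEGRATION_ENABLED"] = "true"
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"
os.environ["LANGFUSE_PROJECT_ID"] = "my-project"  # placeholder
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."   # placeholder
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."   # placeholder

# With those set, the hook's effect is equivalent to registering the
# built-in "langfuse" callback on LiteLLM:
import litellm

litellm.success_callback = ["langfuse"]
litellm.failure_callback = ["langfuse"]
```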
rasa/monkey_patches.py
ADDED
@@ -0,0 +1,91 @@
+import os
+import traceback
+from typing import Any, Optional
+
+from litellm.secret_managers.main import str_to_bool
+from packaging.version import Version
+
+
+def litellm_langfuse_logger_init_fixed(
+    self: Any,  # we should not import LangfuseLogger class before we patch it
+    langfuse_public_key: Optional[str] = None,
+    langfuse_secret: Optional[str] = None,
+    langfuse_host: str = "https://cloud.langfuse.com",
+    flush_interval: int = 1,
+) -> None:
+    """Monkeypatched version of LangfuseLogger.__init__ from the LiteLLM library.
+
+    This patched version removes a call that fetched the `project_id` from
+    Langfuse Cloud even when it was already set via environment variables.
+    In the original implementation, this call was made *before* initializing
+    the LangfuseClient, which caused the application to freeze for up to 60 seconds.
+
+    By removing this premature call, the monkeypatch avoids the unnecessary network
+    request and prevents the timeout/freeze issue.
+
+    This workaround can be removed once the underlying bug is resolved in LiteLLM:
+    https://github.com/BerriAI/litellm/issues/7732
+    """
+    try:
+        import langfuse
+        from langfuse import Langfuse
+    except Exception as e:
+        raise Exception(
+            f"\033[91mLangfuse not installed, try running 'pip install langfuse' "
+            f"to fix this error: {e}\n{traceback.format_exc()}\033[0m"
+        )
+    # Instance variables
+    self.secret_key = langfuse_secret or os.getenv("LANGFUSE_SECRET_KEY", "")
+    self.public_key = langfuse_public_key or os.getenv("LANGFUSE_PUBLIC_KEY", "")
+
+    self.langfuse_host = langfuse_host or os.getenv(
+        "LANGFUSE_HOST", "https://cloud.langfuse.com"
+    )
+    self.langfuse_host.replace("http://", "https://")
+    if not self.langfuse_host.startswith("https://"):
+        self.langfuse_host = "https://" + self.langfuse_host
+
+    self.langfuse_release = os.getenv("LANGFUSE_RELEASE")
+    self.langfuse_debug = os.getenv("LANGFUSE_DEBUG")
+    self.langfuse_flush_interval = (
+        os.getenv("LANGFUSE_FLUSH_INTERVAL") or flush_interval
+    )
+
+    parameters = {
+        "public_key": self.public_key,
+        "secret_key": self.secret_key,
+        "host": self.langfuse_host,
+        "release": self.langfuse_release,
+        "debug": self.langfuse_debug,
+        "flush_interval": self.langfuse_flush_interval,  # flush interval in seconds
+    }
+
+    if Version(langfuse.version.__version__) >= Version("2.6.0"):
+        parameters["sdk_integration"] = "litellm"
+
+    self.Langfuse = Langfuse(**parameters)
+
+    if os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY") is not None:
+        upstream_langfuse_debug = (
+            str_to_bool(self.upstream_langfuse_debug)
+            if self.upstream_langfuse_debug is not None
+            else None
+        )
+        self.upstream_langfuse_secret_key = os.getenv("UPSTREAM_LANGFUSE_SECRET_KEY")
+        self.upstream_langfuse_public_key = os.getenv("UPSTREAM_LANGFUSE_PUBLIC_KEY")
+        self.upstream_langfuse_host = os.getenv("UPSTREAM_LANGFUSE_HOST")
+        self.upstream_langfuse_release = os.getenv("UPSTREAM_LANGFUSE_RELEASE")
+        self.upstream_langfuse_debug = os.getenv("UPSTREAM_LANGFUSE_DEBUG")
+        self.upstream_langfuse = Langfuse(
+            public_key=self.upstream_langfuse_public_key,
+            secret_key=self.upstream_langfuse_secret_key,
+            host=self.upstream_langfuse_host,
+            release=self.upstream_langfuse_release,
+            debug=(
+                upstream_langfuse_debug
+                if upstream_langfuse_debug is not None
+                else False
+            ),
+        )
+    else:
+        self.upstream_langfuse = None
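A quick sketch for verifying that the patch took effect: importing `rasa` triggers the assignment shown in rasa/__init__.py earlier in this diff, after which the class attribute should point at the fixed function rather than LiteLLM's own initializer.

```python
import rasa  # noqa: F401  (importing rasa applies the monkey patch)

from litellm.integrations.langfuse.langfuse import LangFuseLogger
from rasa.monkey_patches import litellm_langfuse_logger_init_fixed

# Identity check: the patched function has replaced the original __init__.
assert LangFuseLogger.__init__ is litellm_langfuse_logger_init_fixed
```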
rasa/shared/constants.py
CHANGED
@@ -339,3 +339,8 @@ ROLE_SYSTEM = "system"
 # Used for key values in ValidateSlotPatternFlowStackFrame
 REFILL_UTTER = "refill_utter"
 REJECTIONS = "rejections"
+
+LANGFUSE_METADATA_USER_ID = "trace_user_id"
+LANGFUSE_METADATA_SESSION_ID = "session_id"
+LANGFUSE_CUSTOM_METADATA_DICT = "trace_metadata"
+LANGFUSE_TAGS = "tags"
rasa/shared/core/flows/flow.py
CHANGED
@@ -38,6 +38,7 @@ from rasa.shared.core.flows.flow_step_sequence import FlowStepSequence
 from rasa.shared.core.flows.nlu_trigger import NLUTriggers
 from rasa.shared.core.flows.steps import (
     ActionFlowStep,
+    CallFlowStep,
     CollectInformationFlowStep,
     EndFlowStep,
     StartFlowStep,

@@ -466,161 +467,156 @@ class Flow:
         and a set of visited step IDs to prevent revisiting steps.
         It calls `go_over_steps` to recursively explore and fill the paths list.
         """
+        all_paths = FlowPathsList(self.id, paths=[])
+        start_step: FlowStep = self.first_step_in_flow()
         current_path: FlowPath = FlowPath(flow=self.id, nodes=[])
+        visited_step_ids: Set[str] = set()

-        self._go_over_steps(
-        if not flow_paths_list.is_path_part_of_list(current_path):
-            flow_paths_list.paths.append(copy.deepcopy(current_path))
+        self._go_over_steps(start_step, current_path, all_paths, visited_step_ids)

         structlogger.debug(
             "shared.core.flows.flow.extract_all_paths",
             comment="Extraction complete",
-            number_of_paths=len(
+            number_of_paths=len(all_paths.paths),
             flow_name=self.name,
         )
-        return
+        return all_paths

     def _go_over_steps(
         self,
+        current_step: FlowStep,
         current_path: FlowPath,
+        all_paths: FlowPathsList,
+        visited_step_ids: Set[str],
     ) -> None:
         """Processes the flow steps recursively.

-        Either following direct step IDs or handling conditions, and adds complete
-        paths to the collected_paths.
-
         Args:
+            current_step: The current step being processed.
             current_path: The current path being constructed.
+            all_paths: The list where completed paths are added.
+            visited_step_ids: A set of steps that have been visited to avoid cycles.

         Returns:
-            None: This function modifies
+            None: This function modifies all_paths in place by appending new paths
             as they are found.
         """
-        # We only create new path nodes for ActionFlowStep and
-        # CollectInformationFlowStep because these are externally visible
-        # changes in the assistant's behaviour (trackable in the e2e tests).
-        # For other flow steps, we only follow their links.
-        # We decided to ignore calls to other flows in our coverage analysis.
-        if not isinstance(step, (CollectInformationFlowStep, ActionFlowStep)):
-            self._handle_links(
-                step.next.links,
-                current_path,
-                completed_paths,
-                step_ids_visited,
-            )
-            continue
-
-        # 2. Check if already visited this custom step id
-        # in order to keep track of loops
-        if step.custom_id is not None and step.custom_id in step_ids_visited:
-            if not completed_paths.is_path_part_of_list(current_path):
-                completed_paths.paths.append(copy.deepcopy(current_path))
-            return  # Stop traversing this path if we've revisited a step
-        elif step.custom_id is not None:
-            step_ids_visited.add(step.custom_id)
-
-        # 3. Append step info to the path
-        current_path.nodes.append(
-            PathNode(
-                flow=current_path.flow,
-                step_id=step.id,
-                lines=step.metadata["line_numbers"],
-            )
+        # Check if the step is relevant for testable_paths extraction.
+        # We only create new path nodes for ActionFlowStep, CallFlowStep and
+        # CollectInformationFlowStep because these are externally visible
+        # changes in the assistant's behaviour (trackable in the e2e tests).
+        # For other flow steps, we only follow their links.
+        # We decided to ignore calls to other flows in our coverage analysis.
+        should_add_node = isinstance(
+            current_step, (CollectInformationFlowStep, ActionFlowStep, CallFlowStep)
+        )
+        if should_add_node:
+            # Add current step to the current path that is being constructed.
+            current_path.nodes.append(
+                PathNode(
+                    flow=current_path.flow,
+                    step_id=current_step.id,
+                    lines=current_step.metadata["line_numbers"],
+                )
             )

+        if current_step.id in visited_step_ids or self.is_end_of_path(current_step):
+            # Found a cycle, or reached an end step, do not proceed further.
+            all_paths.paths.append(copy.deepcopy(current_path))
+            # Remove the last node from the path if it was added.
+            if should_add_node:
+                current_path.nodes.pop()
+            return
+
+        # Mark current step as visited in this path.
+        visited_step_ids.add(current_step.id)
+
+        # Iterate over all links of the current step.
+        for link in current_step.next.links:
+            self._handle_link(
+                current_path,
+                all_paths,
+                visited_step_ids,
+                link,
+            )
+
+        # Backtrack the current step and remove it from the path.
+        visited_step_ids.remove(current_step.id)

+        # Remove the last node from the path if it was added.
+        if should_add_node:
+            current_path.nodes.pop()
+
+    def _handle_link(
         self,
+        current_path: FlowPath,
+        all_paths: FlowPathsList,
+        visited_step_ids: Set[str],
+        link: FlowStepLink,
     ) -> None:
-        """
-        Potentially recursively calling itself to handle conditional paths and
-        branching.
+        """Handles the next step in a flow.

         Args:
-            to avoid loops.
+            current_path: The current path being constructed.
+            all_paths: The list where completed paths are added.
+            visited_step_ids: A set of steps that have been visited to avoid cycles.
+            link: The link to be followed.

         Returns:
-            None:
-            as they are
+            None: This function modifies all_paths in place by appending new paths
+            as they are found.
         """
-        if isinstance(link, StaticFlowStepLink):
-            # Find this id in the flow steps and restart from there
-            for i, step in enumerate(steps):
-                if step.id == link.target_step_id:
-
-        # If conditions
-        elif isinstance(link, (IfFlowStepLink, ElseFlowStepLink)):
-            # Handling conditional paths
-            target_steps: Union[str, List[FlowStep]]
-            if isinstance(link.target_reference, FlowStepSequence):
-                target_steps = link.target_reference.child_steps
-            else:
-                target_steps = link.target_reference
-
+        # StaticFlowStepLink is a direct link to the next step.
+        if isinstance(link, StaticFlowStepLink):
+            # Find the step by its id and continue the path.
+            if step := self._get_step_by_step_id(link.target_step_id):
                 self._go_over_steps(
-                    steps[i:],
-                    copy.deepcopy(path),
-                    collected_paths,
-                    copy.deepcopy(step_ids_visited),
+                    step,
+                    current_path,
+                    all_paths,
+                    visited_step_ids,
                 )
+            return
+        # IfFlowStepLink and ElseFlowStepLink are conditional links.
+        elif isinstance(link, (IfFlowStepLink, ElseFlowStepLink)):
+            if isinstance(link.target_reference, FlowStepSequence):
+                # If the target is a FlowStepSequence, we need to go over all
+                # child steps of the sequence.
+                for child_step in link.target_reference.child_steps:
+                    self._go_over_steps(
+                        child_step,
+                        current_path,
+                        all_paths,
+                        visited_step_ids,
+                    )
+                return
+            else:
+                # Find the step by its id and continue the path.
+                if step := self._get_step_by_step_id(link.target_reference):
+                    self._go_over_steps(
+                        step,
+                        current_path,
+                        all_paths,
+                        visited_step_ids,
+                    )
+                return
+
+    def is_end_of_path(self, step: FlowStep) -> bool:
+        """Check if there is no path available from the current step."""
+        if (
+            len(step.next.links) == 1
+            and isinstance(step.next.links[0], StaticFlowStepLink)
+            and step.next.links[0].target == END_STEP
+        ):
+            return True
+        return False
+
+    def _get_step_by_step_id(
+        self,
+        step_id: Optional[str],
+    ) -> Optional[FlowStep]:
+        """Get a step by its id from a list of steps."""
+        for step in self.steps:
+            if step.id == step_id:
+                return step
+        return None
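The rewritten `_go_over_steps`/`_handle_link` pair implements depth-first path enumeration with backtracking: mark a node visited on the way down, unmark it on the way back, and snapshot the path at each leaf or detected cycle. A generic standalone sketch of that technique on an illustrative graph:

```python
import copy


def enumerate_paths(graph: dict, node: str, path: list, visited: set, out: list) -> None:
    path.append(node)
    if node in visited or not graph.get(node):
        # Cycle or leaf reached: record a snapshot of the current path.
        out.append(copy.deepcopy(path))
        path.pop()
        return
    visited.add(node)
    for nxt in graph[node]:
        enumerate_paths(graph, nxt, path, visited, out)
    visited.remove(node)  # backtrack so other branches may revisit this node
    path.pop()


paths: list = []
enumerate_paths({"a": ["b", "c"], "b": ["a"], "c": []}, "a", [], set(), paths)
assert paths == [["a", "b", "a"], ["a", "c"]]
```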
rasa/shared/providers/constants.py
CHANGED

@@ -4,3 +4,12 @@ LITE_LLM_API_KEY_FIELD = "api_key"
 LITE_LLM_API_VERSION_FIELD = "api_version"
 LITE_LLM_MODEL_FIELD = "model"
 LITE_LLM_AZURE_AD_TOKEN = "azure_ad_token"
+
+# Enable or disable Langfuse integration
+RASA_LANGFUSE_INTEGRATION_ENABLED_ENV_VAR = "RASA_LANGFUSE_INTEGRATION_ENABLED"
+# Langfuse configuration
+LANGFUSE_CALLBACK_NAME = "langfuse"
+LANGFUSE_HOST_ENV_VAR = "LANGFUSE_HOST"
+LANGFUSE_PROJECT_ID_ENV_VAR = "LANGFUSE_PROJECT_ID"
+LANGFUSE_PUBLIC_KEY_ENV_VAR = "LANGFUSE_PUBLIC_KEY"
+LANGFUSE_SECRET_KEY_ENV_VAR = "LANGFUSE_SECRET_KEY"
rasa/shared/providers/llm/_base_litellm_client.py
CHANGED

@@ -2,7 +2,7 @@ from __future__ import annotations

 import logging
 from abc import abstractmethod
-from typing import Any, Dict, List, Union, cast
+from typing import Any, Dict, List, Optional, Union, cast

 import structlog
 from litellm import acompletion, completion, validate_environment

@@ -123,7 +123,11 @@ class _BaseLiteLLMClient:
         raise ProviderClientValidationError(event_info)

     @suppress_logs(log_level=logging.WARNING)
-    def completion(
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
         """Synchronously generate completions for given list of messages.

         Args:

@@ -135,6 +139,7 @@ class _BaseLiteLLMClient:
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.

         Returns:
             List of message completions.

@@ -152,7 +157,9 @@ class _BaseLiteLLMClient:

     @suppress_logs(log_level=logging.WARNING)
     async def acompletion(
-        self,
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
         """Asynchronously generate completions for given list of messages.

@@ -165,6 +172,7 @@ class _BaseLiteLLMClient:
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.

         Returns:
             List of message completions.

@@ -175,7 +183,9 @@ class _BaseLiteLLMClient:
         try:
             formatted_messages = self._get_formatted_messages(messages)
             arguments = resolve_environment_variables(self._completion_fn_args)
-            response = await acompletion(
+            response = await acompletion(
+                messages=formatted_messages, metadata=metadata, **arguments
+            )
             return self._format_response(response)
         except Exception as e:
             message = ""
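The base client now forwards the metadata straight into LiteLLM. A sketch of the underlying passthrough: LiteLLM's completion APIs accept a `metadata` kwarg that logging callbacks (such as the langfuse callback registered by the hook above) can read. The model name is a placeholder, and provider credentials must be present in the environment.

```python
import asyncio

import litellm


async def main() -> None:
    response = await litellm.acompletion(
        model="gpt-4o-mini",  # placeholder model
        messages=[{"role": "user", "content": "hello"}],
        # Read by logging callbacks, not sent to the model itself.
        metadata={"session_id": "demo-session", "tags": ["example"]},
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```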
rasa/shared/providers/llm/litellm_router_llm_client.py
CHANGED

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import logging
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List, Optional, Union

 import structlog

@@ -122,9 +122,12 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         raise ProviderClientAPIException(e)

     @suppress_logs(log_level=logging.WARNING)
-    def completion(
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
+        """Synchronously generate completions for given list of messages.

         Method overrides the base class method to call the appropriate
         completion method based on the configuration. If the chat completions

@@ -140,8 +143,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             List of message completions.
+
         Raises:
             ProviderClientAPIException: If the API request fails.
         """

@@ -158,10 +164,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):

     @suppress_logs(log_level=logging.WARNING)
     async def acompletion(
-        self,
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
-        """
-        Asynchronously generate completions for given list of messages.
+        """Asynchronously generate completions for given list of messages.

         Method overrides the base class method to call the appropriate
         completion method based on the configuration. If the chat completions

@@ -177,8 +184,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             List of message completions.
+
         Raises:
             ProviderClientAPIException: If the API request fails.
         """
rasa/shared/providers/llm/llm_client.py
CHANGED

@@ -1,21 +1,19 @@
 from __future__ import annotations

-from typing import Dict, List, Protocol, Union, runtime_checkable
+from typing import Any, Dict, List, Optional, Protocol, Union, runtime_checkable

 from rasa.shared.providers.llm.llm_response import LLMResponse


 @runtime_checkable
 class LLMClient(Protocol):
-    """
-    Protocol for an LLM client that specifies the interface for interacting
+    """Protocol for an LLM client that specifies the interface for interacting
     with the API.
     """

     @classmethod
     def from_config(cls, config: dict) -> LLMClient:
-        """
-        Initializes the llm client with the given configuration.
+        """Initializes the llm client with the given configuration.

         This class method should be implemented to parse the given
         configuration and create an instance of an llm client.

@@ -24,17 +22,24 @@ class LLMClient(Protocol):

     @property
     def config(self) -> Dict:
-        """
-        Returns the configuration for that the llm client is initialized with.
+        """Returns the configuration for that the llm client is initialized with.

         This property should be implemented to return a dictionary containing
         the configuration settings for the llm client.
         """
         ...

-    def completion(
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
+        """Synchronously generate completions for given list of messages.
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:

         This method should be implemented to take a list of messages (as
         strings) and return a list of completions (as strings).

@@ -48,16 +53,19 @@ class LLMClient(Protocol):
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             LLMResponse
         """
         ...

     async def acompletion(
-        self,
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
-        """
-        Asynchronously generate completions for given list of messages.
+        """Asynchronously generate completions for given list of messages.

         This method should be implemented to take a list of messages (as
         strings) and return a list of completions (as strings).

@@ -71,14 +79,15 @@ class LLMClient(Protocol):
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.
+
         Returns:
             LLMResponse
         """
         ...

     def validate_client_setup(self, *args, **kwargs) -> None:  # type: ignore
-        """
-        Perform client setup validation.
+        """Perform client setup validation.

         This method should be implemented to validate whether the client can be
         used with the parameters provided through configuration or environment

rasa/shared/providers/llm/self_hosted_llm_client.py
CHANGED

@@ -237,7 +237,9 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
         raise ProviderClientAPIException(e)

     async def acompletion(
-        self,
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
     ) -> LLMResponse:
         """Asynchronous completion of the model with the given messages.

@@ -255,6 +257,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
             - a list of messages. Each message is a string and will be formatted
             as a user message.
             - a single message as a string which will be formatted as user message.
+            metadata: Optional metadata to be passed to the LLM call.

         Returns:
             The completion response.

@@ -263,7 +266,11 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
             return await super().acompletion(messages)
         return await self._atext_completion(messages)

-    def completion(
+    def completion(
+        self,
+        messages: Union[List[dict], List[str], str],
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> LLMResponse:
         """Completion of the model with the given messages.

         Method overrides the base class method to call the appropriate

@@ -273,6 +280,7 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):

         Args:
             messages: The messages to be used for completion.
+            metadata: Optional metadata to be passed to the LLM call.

         Returns:
             The completion response.

rasa/tracing/instrumentation/attribute_extractors.py
CHANGED

@@ -372,6 +372,7 @@ def extract_llm_config(
 def extract_attrs_for_llm_based_command_generator(
     self: "LLMBasedCommandGenerator",
     prompt: str,
+    metadata: Optional[Dict[str, Any]] = None,
 ) -> Dict[str, Any]:
     from rasa.dialogue_understanding.generator.flow_retrieval import (
         DEFAULT_EMBEDDINGS_CONFIG,

@@ -387,8 +388,7 @@ def extract_attrs_for_llm_based_command_generator(


 def extract_attrs_for_contextual_response_rephraser(
-    self: Any,
-    prompt: str,
+    self: Any, prompt: str
 ) -> Dict[str, Any]:
     from rasa.core.nlg.contextual_response_rephraser import DEFAULT_LLM_CONFIG
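Stepping back to the `LLMClient` protocol above: because it is decorated with `@runtime_checkable`, conformance is structural and checked only by method presence, not by signatures. A small sketch with illustrative class names:

```python
from typing import Protocol, runtime_checkable


@runtime_checkable
class Client(Protocol):
    def completion(self, messages, metadata=None): ...


class MyClient:
    # No inheritance needed; matching attribute names are enough.
    def completion(self, messages, metadata=None):
        return f"echo: {messages}"


assert isinstance(MyClient(), Client)  # structural check, methods only
```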
rasa/version.py
CHANGED
-__version__ = "3.12.8"
+__version__ = "3.12.10.dev1"

{rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rasa-pro
-Version: 3.12.8
+Version: 3.12.10.dev1
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
 Author: Rasa Technologies GmbH

@@ -63,12 +63,13 @@ Requires-Dist: keras (==2.14.0)
 Requires-Dist: langchain (>=0.2.17,<0.3.0)
 Requires-Dist: langchain-community (>=0.2.19,<0.3.0)
 Requires-Dist: langcodes (>=3.5.0,<4.0.0)
-Requires-Dist: …
+Requires-Dist: langfuse (>=2.60.2,<2.61.0)
+Requires-Dist: litellm (>=1.68.0,<1.69.0)
 Requires-Dist: matplotlib (>=3.7,<3.8)
 Requires-Dist: mattermostwrapper (>=2.2,<2.3)
 Requires-Dist: networkx (>=3.1,<3.2)
 Requires-Dist: numpy (>=1.26.4,<1.27.0)
-Requires-Dist: openai (>=1.…
+Requires-Dist: openai (>=1.68.2,<1.69.0)
 Requires-Dist: openpyxl (>=3.1.5,<4.0.0)
 Requires-Dist: opentelemetry-api (>=1.16.0,<1.17.0)
 Requires-Dist: opentelemetry-exporter-jaeger (>=1.16.0,<1.17.0)
{rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-rasa/__init__.py,sha256=…
+rasa/__init__.py,sha256=1LPdnp38vsouYw0bt_C0Q0mfLeDKAUaeiNdqMZaihCg,495
 rasa/__main__.py,sha256=OmUXcaA9l7KR_eSYCwaCSetuczxjrcN2taNnZ2ZUTbA,6472
 rasa/anonymization/__init__.py,sha256=Z-ZUW2ofZGfI6ysjYIS7U0JL4JSzDNOkHiiXK488Zik,86
 rasa/anonymization/anonymisation_rule_yaml_reader.py,sha256=8u8ZWfbpJuyUagrfth3IGfQXVlVz31esqExfDdasxZM,3171

@@ -263,7 +263,7 @@ rasa/core/channels/rest.py,sha256=ShKGmooXphhcDnHyV8TiQhDhj2r7hxTKNQ57FwFfyUA,72
 rasa/core/channels/rocketchat.py,sha256=hajaH6549CjEYFM5jSapw1DQKBPKTXbn7cVSuZzknmI,5999
 rasa/core/channels/slack.py,sha256=jVsTTUu9wUjukPoIsAhbee9o0QFUMCNlQHbR8LTcMBc,24406
 rasa/core/channels/socketio.py,sha256=Q7Gts30Ulwj90pQQxaUk4NykzagXErXgbHYwOjTmDig,10842
-rasa/core/channels/studio_chat.py,sha256=…
+rasa/core/channels/studio_chat.py,sha256=KUhR0Irst8pJ7zGMoeZuKquAUOYVB45i75wlVsbDqPU,9218
 rasa/core/channels/telegram.py,sha256=TKVknsk3U9tYeY1a8bzlhqkltWmZfGSOvrcmwa9qozc,12499
 rasa/core/channels/twilio.py,sha256=2BTQpyx0b0yPpc0A2BHYfxLPgodrLGLs8nq6i3lVGAM,5906
 rasa/core/channels/vier_cvg.py,sha256=GkrWKu7NRMFtLMyYp-kQ2taWAc_keAwhYrkVPW56iaU,13544

@@ -335,7 +335,7 @@ rasa/core/policies/flows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 rasa/core/policies/flows/flow_exceptions.py,sha256=_FQuN-cerQDM1pivce9bz4zylh5UYkljvYS1gjDukHI,1527
 rasa/core/policies/flows/flow_executor.py,sha256=sT7ZFrm_CKVKBv5SO0M_QE984ZFw8t6trm8dMxCXbv8,25649
 rasa/core/policies/flows/flow_step_result.py,sha256=agjPrD6lahGSe2ViO5peBeoMdI9ngVGRSgtytgxmJmg,1360
-rasa/core/policies/intentless_policy.py,sha256=…
+rasa/core/policies/intentless_policy.py,sha256=U4CvnY7T0Gj62_fKXDnaoT8gN8tNt7AtcVaje8EeBwg,36339
 rasa/core/policies/intentless_prompt_template.jinja2,sha256=KhIL3cruMmkxhrs5oVbqgSvK6ZiN_6TQ_jXrgtEB-ZY,677
 rasa/core/policies/memoization.py,sha256=CX2d3yP7FehSMW92Wi9NYLZei7tBzoT3T6yybu-Nb5s,19377
 rasa/core/policies/policy.py,sha256=5SUnPajSTSf8PzB1-jFbQPtsvR-zLN-xkjeotWOxuJc,27432

@@ -365,7 +365,7 @@ rasa/dialogue_understanding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 rasa/dialogue_understanding/coexistence/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/coexistence/constants.py,sha256=RpgLKMG4s7AgII0fRV0siS0Zh2QVI0OVRunhgm4q_j4,94
 rasa/dialogue_understanding/coexistence/intent_based_router.py,sha256=JlYBZdScnhflLK__i4bG0-PIkuFv0B7L4yOdnLgYWAY,7609
-rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=…
+rasa/dialogue_understanding/coexistence/llm_based_router.py,sha256=0kQ9vrKCCbBhKA13Hk570xJUH_oij3HOsYgQbpvNKOA,11751
 rasa/dialogue_understanding/coexistence/router_template.jinja2,sha256=CHWFreN0sv1EbPh-hf5AlCt3zxy2_llX1Pdn9Q11Y18,357
 rasa/dialogue_understanding/commands/__init__.py,sha256=F-pLETYRUjhIkjjDfXGUuPsK_ac1HcLmJkrUUP0RhME,2259
 rasa/dialogue_understanding/commands/can_not_handle_command.py,sha256=fKOj9ScLxuaFO9Iw0p7og_4zMiw2weBdx322rBKlnCI,3519

@@ -400,19 +400,19 @@ rasa/dialogue_understanding/generator/command_parser.py,sha256=wf6FSgqBw5F0legg0
 rasa/dialogue_understanding/generator/constants.py,sha256=PuUckBGUZ-Tu31B0cs8yxN99BDW3PGoExZa-BlIL5v8,1108
 rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
 rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=DavL-37e0tksMWkxvFImoqlsmYeYeSdDN3u7wZI0K-8,17817
-rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=…
-rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=…
+rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=cUsP_3Z5k65r-4iCCJY7I1yuFKkEg1nV1e_Xg6ULBnc,24058
+rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=E5byrCC_6r_GJm_HIosN_Se00NmXmnTCdOzaHMwTu6A,2641
 rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
 rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
-rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=…
+rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=0rpQonIcwSjcUWCLjJ5DKf1Z5XBJiDoJ6cC7Rj6NtAM,34088
 rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=cisxLlPVQXgbWMAz9xSxBvrOz4HO-f0G3CFVjJ2wt-g,10876
 rasa/dialogue_understanding/generator/prompt_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
 rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2,sha256=z-cnFVfIE_kEnY1o52YE2CdCWwgYTv7R3xVxsjXWlnw,3808
 rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2,sha256=4076ARsy0E0iADBX6li19IoM3F4F-2wK3bL6UEOvCdo,3620
 rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=…
+rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py,sha256=e4daPCfEoyUvwooHmhFPyP-rLpdh9QDGqEccMnPXN-0,22867
 rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=RWTPdeBfdGUmdFSUzdQejcbJJLhc_815G0g6AabTK04,5100
 rasa/dialogue_understanding/generator/utils.py,sha256=jxtb-AfngN59y2rHynqJDK80xM_yooEvr3aW1MWl6H0,2760
 rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -530,7 +530,7 @@ rasa/graph_components/providers/training_tracker_provider.py,sha256=FaCWHJA69EpM
 rasa/graph_components/validators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/graph_components/validators/default_recipe_validator.py,sha256=iOVoB7zVTKes8EYW110fz8ZvtgoDcCX25GlUsiESS18,24457
 rasa/graph_components/validators/finetuning_validator.py,sha256=VfCGytnweijKBG8bAqYp7zKZB2aRgi2ZI8R0eou5Ev4,12865
-rasa/hooks.py,sha256=…
+rasa/hooks.py,sha256=xQLqqPpebL04AuKZiYJEZaBJyubTdGetCW7cvmjXg7o,5804
 rasa/jupyter.py,sha256=TCYVD4QPQIMmfA6ZwDUBOBTAECwCwbU2XOkosodLO9k,1782
 rasa/llm_fine_tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/llm_fine_tuning/annotation_module.py,sha256=6wBBjGwONVlikp79xAHp5g3rydEhPM6kP1bw1g-maYk,8578

@@ -563,6 +563,7 @@ rasa/model_manager/warm_rasa_process.py,sha256=2vg8gBEUvPrr6C5W-fxtWWSajksrOaT83
 rasa/model_service.py,sha256=XXCaiLj2xq58n05W3R1jmTIv-V8f_7PG30kVpRxf71Y,3727
 rasa/model_testing.py,sha256=eZw7l8Zz3HkH_ZPBurY93HzzudHdoQn8HBnDdZSysAY,14929
 rasa/model_training.py,sha256=1opig8_npw7dLHd8k06ZYUQCrJ61sFIbNHBgvF63yH8,21733
+rasa/monkey_patches.py,sha256=pZTDKQ8GNzeiUWeJ2MneUuremSNVScL7oXeMAEd4o4Y,3687
 rasa/nlu/__init__.py,sha256=D0IYuTK_ZQ_F_9xsy0bXxVCAtU62Fzvp8S7J9tmfI_c,123
 rasa/nlu/classifiers/__init__.py,sha256=Qvrf7_rfiMxm2Vt2fClb56R3QFExf7WPdFdL-AOvgsk,118
 rasa/nlu/classifiers/classifier.py,sha256=9fm1mORuFf1vowYIXmqE9yLRKdSC4nGQW7UqNZQipKY,133

@@ -624,7 +625,7 @@ rasa/nlu/utils/spacy_utils.py,sha256=5EnHR-MVAZhGbg2rq8VpOu7I0tagV3ThRTlM0-WO2Cg
 rasa/plugin.py,sha256=cSmFhSWr5WQyYXdJOWwgH4ra_2kbhoNLZAtnqcsGny4,3071
 rasa/server.py,sha256=eomGM_3SpBxaF_-VfZbkSO_bMk_vI1XLUZjt32f4gcI,59390
 rasa/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/constants.py,sha256=…
+rasa/shared/constants.py,sha256=GvkQKt1CPxbdoZs2bFkgNo8GA5xKc6EDW9zZjspcr_0,12290
 rasa/shared/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/core/command_payload_reader.py,sha256=puHYsp9xbX0YQm2L1NDBItOFmdzI7AzmfGefgcHiCc0,3871
 rasa/shared/core/constants.py,sha256=gwIZHjQYafHnBlMe9_jUiIPm17hxYG9R1MOCtxeC1Ns,6337

@@ -633,7 +634,7 @@ rasa/shared/core/domain.py,sha256=piJu4Kr2exC9ehC3e2oNaxPxXkeIhOYoQJQQOuzMw18,81
 rasa/shared/core/events.py,sha256=kTUWSpDepj3kpjjXveYXz3h2XcIQV3Sq8h7MTbx5fMw,86489
 rasa/shared/core/flows/__init__.py,sha256=Z4pBY0qcEbHeOwgmKsyg2Nz4dX9CF67fFCwj2KXSMpg,180
 rasa/shared/core/flows/constants.py,sha256=0HN3k-apOb_fi8E2AJtUxMxro8jwFVyXQpil-tHEzbM,340
-rasa/shared/core/flows/flow.py,sha256=…
+rasa/shared/core/flows/flow.py,sha256=nW7EvfWBS9eK24GIodDUwwKY8nTaJtY6bp5Xz_IHZAc,23107
 rasa/shared/core/flows/flow_path.py,sha256=xstwahZBU5cfMY46mREA4NoOGlKLBRAqeP_mJ3UZqOI,2283
 rasa/shared/core/flows/flow_step.py,sha256=ZvjXz1Fs5FR1_BlGBitOEYRnLhzk-bBYv1CC2Oi6iWQ,4537
 rasa/shared/core/flows/flow_step_links.py,sha256=U9c4MFASieJGp_-XMhR0hrxFQISCJAF4TQ0wEy4IjB0,10530

@@ -723,7 +724,7 @@ rasa/shared/providers/_configs/self_hosted_llm_client_config.py,sha256=l2JnypPXF
 rasa/shared/providers/_configs/utils.py,sha256=u2Ram05YwQ7-frm_r8n9rafjZoF8i0qSC7XjYQRuPgo,3732
 rasa/shared/providers/_ssl_verification_utils.py,sha256=vUnP0vocf0GQ0wG8IQpPcCet4c1C9-wQWQNckNWbDBk,4165
 rasa/shared/providers/_utils.py,sha256=EZIrz3ugcI-9PWgC7v0VMUNYondAAOeeRLIE8ZmResw,5886
-rasa/shared/providers/constants.py,sha256=…
+rasa/shared/providers/constants.py,sha256=yF9giGO8xWCrW9dzUW-7wX-y6sh7hlbYzHYKFayrF7A,613
 rasa/shared/providers/embedding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/providers/embedding/_base_litellm_embedding_client.py,sha256=PFavNnD6EVDQiqc9sLnBRV0hebW4iCjIh_dvpwzg4RI,8796
 rasa/shared/providers/embedding/_langchain_embedding_client_adapter.py,sha256=IR2Rb3ReJ9C9sxOoOGRXgtz8STWdMREs_4AeSMKFjl4,2135

@@ -735,15 +736,15 @@ rasa/shared/providers/embedding/huggingface_local_embedding_client.py,sha256=Zo3
 rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=eafDk6IgQtL_kiKgpa6sJs1oATyRi2NT2leUFQsED2s,4551
 rasa/shared/providers/embedding/openai_embedding_client.py,sha256=XNRGE7apo2v3kWRrtgxE-Gq4rvNko3IiXtvgC4krDYE,5429
 rasa/shared/providers/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/providers/llm/_base_litellm_client.py,sha256=…
+rasa/shared/providers/llm/_base_litellm_client.py,sha256=DeNRMMf1XOK0yNrdpWt5dOfmtCFKJkU7keEsh0KAc0k,11748
 rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=ui85vothxR2P_-eLc4nLgbpjnpEKY2BXnIjLxBZoYz8,12504
 rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=xx-o-NX_mtx6AszK--ZRj8n8JyEJuVu1-42dt8AynBM,4083
-rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=…
-rasa/shared/providers/llm/llm_client.py,sha256=…
+rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=kF8yqwxBNjcIYz022yv0gP5RqnJzx6bfG-hcpK5ovKE,8217
+rasa/shared/providers/llm/llm_client.py,sha256=11xgWbjV8brvQN-EZPjZHNofImY8JKlRmrbOD7UaL-o,3651
 rasa/shared/providers/llm/llm_response.py,sha256=8mOpZdmh4-3yM7aOmNO0yEYUmRDErfoP7ZDMUuHr2Cc,3504
 rasa/shared/providers/llm/openai_llm_client.py,sha256=rSdLj29Hl1Wm5G6Uwo77j4WqogK_3QIbTA7fyt63YAg,5013
 rasa/shared/providers/llm/rasa_llm_client.py,sha256=44Tvtnkq4mxDIxtdrGUkwBWAvX1OLaswqmpAsyBH8e8,3504
-rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=…
+rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=85jnA7AO2W4OqV0874N5YBzTafVeYtiRbaRyzyA_lKA,10544
 rasa/shared/providers/mappings.py,sha256=QSD3XWvhYCtBLNpGycN30vEnLULYIaqCsAtmfPfSZ3U,3674
 rasa/shared/providers/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/providers/router/_base_litellm_router_client.py,sha256=JV9lYnhIG_CWMtPB5nofjNdRO5V-Wl0DH-HyPm__eJ0,11003

@@ -780,7 +781,7 @@ rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/tracing/config.py,sha256=32X2rqAiHe0e-Iijb5AivjqDs2j03n8xx5mo07NBMI4,12964
 rasa/tracing/constants.py,sha256=-3vlfI9v_D8f-KB5tuiqBHhszu2WofFQOyjKBn28gyg,2889
 rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/tracing/instrumentation/attribute_extractors.py,sha256=…
+rasa/tracing/instrumentation/attribute_extractors.py,sha256=-w80ZDIF85aEb2OkYqZ75VssbfCWfC7Yq78i-cuc0TU,29513
 rasa/tracing/instrumentation/instrumentation.py,sha256=BPI5OoZFbl90kVJzlKEz-eD8cf-CaX_x1t4V9XBhDKo,53625
 rasa/tracing/instrumentation/intentless_policy_instrumentation.py,sha256=RgixI0FVIzBz19E3onidUpSEwjkAh8paA5_w07PMzFo,4821
 rasa/tracing/instrumentation/metrics.py,sha256=DI_qIS6sz5KYU4QDcPKfnHxKLL_Ma3wV6diH4_vg85c,12051

@@ -821,9 +822,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
 rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
 rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
 rasa/validator.py,sha256=524VlFTYK0B3iXYveVD6BDC3K0j1QfpzJ9O-TAWczmc,83166
-rasa/version.py,sha256=…
-rasa_pro-3.12.8.dist-info/METADATA,sha256=…
-rasa_pro-3.12.8.dist-info/NOTICE,sha256=…
-rasa_pro-3.12.8.dist-info/WHEEL,sha256=…
-rasa_pro-3.12.8.dist-info/entry_points.txt,sha256=…
+rasa/version.py,sha256=oJfX2ngswlvP2txRFGmPvTAGUXX4Mbt79LWBavzg04A,123
+rasa_pro-3.12.10.dev1.dist-info/METADATA,sha256=YgdLklR9kPyrTMfBShv8heOtWGO8u_yMCIuTV6bv8xk,10664
+rasa_pro-3.12.10.dev1.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+rasa_pro-3.12.10.dev1.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+rasa_pro-3.12.10.dev1.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+rasa_pro-3.12.10.dev1.dist-info/RECORD,,

{rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/NOTICE: file without changes
{rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/WHEEL: file without changes
{rasa_pro-3.12.8.dist-info → rasa_pro-3.12.10.dev1.dist-info}/entry_points.txt: file without changes