rasa-pro 3.11.3a1.dev2__py3-none-any.whl → 3.11.3a1.dev4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rasa/core/actions/action.py +0 -2
- rasa/core/channels/socketio.py +36 -1
- rasa/core/nlg/contextual_response_rephraser.py +9 -68
- rasa/core/policies/enterprise_search_policy.py +12 -80
- rasa/dialogue_understanding/generator/command_generator.py +10 -18
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +5 -7
- rasa/dialogue_understanding/generator/llm_command_generator.py +1 -2
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +15 -30
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +6 -9
- rasa/shared/core/events.py +8 -8
- rasa/shared/nlu/constants.py +0 -1
- rasa/shared/providers/llm/llm_response.py +1 -30
- rasa/shared/utils/schemas/events.py +1 -1
- rasa/version.py +1 -1
- {rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/METADATA +1 -1
- {rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/RECORD +19 -19
- {rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/NOTICE +0 -0
- {rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/WHEEL +0 -0
- {rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/entry_points.txt +0 -0
rasa/core/actions/action.py
CHANGED
@@ -93,7 +93,6 @@ from rasa.shared.nlu.constants import (
     INTENT_NAME_KEY,
     INTENT_RANKING_KEY,
 )
-from rasa.shared.nlu.constants import PROMPTS
 from rasa.shared.utils.io import raise_warning
 from rasa.shared.utils.schemas.events import EVENTS_SCHEMA
 from rasa.utils.endpoints import ClientResponseError, EndpointConfig
@@ -268,7 +267,6 @@ def create_bot_utterance(message: Dict[Text, Any]) -> BotUttered:
         "attachment": message.pop("attachment", None) or message.get("image", None),
         "image": message.pop("image", None),
         "custom": message.pop("custom", None),
-        PROMPTS: message.pop(PROMPTS, None),
     },
     metadata=message,
 )
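Net effect of the removal above: prompt data is no longer lifted out of the channel message into the BotUttered event. A minimal sketch (keys limited to those visible in the hunk; input values hypothetical):

# Hypothetical incoming message; "prompts" mirrors the constant removed above.
message = {"custom": None, "prompts": {"ContextualResponseRephraser": []}}
data = {
    "attachment": message.pop("attachment", None) or message.get("image", None),
    "image": message.pop("image", None),
    "custom": message.pop("custom", None),
    # dev2 additionally popped message["prompts"] into this dict; dev4 does
    # not, so "prompts" now stays behind in `message` (i.e. in the metadata).
}
metadata = message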
rasa/core/channels/socketio.py
CHANGED
@@ -1,3 +1,4 @@
+import asyncio
 import inspect
 import logging
 import uuid
@@ -6,6 +7,7 @@ from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional, Tex

 import rasa.core.channels.channel
 from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage
+from rasa.shared.core.trackers import EventVerbosity
 import rasa.shared.utils.io
 from sanic import Blueprint, response, Sanic
 from sanic.request import Request
@@ -188,6 +190,8 @@ class SocketIOInput(InputChannel):
         metadata_key: Optional[Text] = "metadata",
     ):
         """Creates a ``SocketIOInput`` object."""
+        from rasa.core.agent import Agent
+
         self.bot_message_evt = bot_message_evt
         self.session_persistence = session_persistence
         self.user_message_evt = user_message_evt
@@ -199,6 +203,8 @@ class SocketIOInput(InputChannel):
         self.jwt_key = jwt_key
         self.jwt_algorithm = jwt_method

+        self.agent: Optional[Agent] = None
+
     def get_output_channel(self) -> Optional["OutputChannel"]:
         """Creates socket.io output channel object."""
         if self.sio is None:
@@ -212,7 +218,7 @@ class SocketIOInput(InputChannel):
             return None
         return SocketIOOutput(self.sio, self.bot_message_evt)

-    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str)
+    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str):
         if self.sio:
             await self.sio.emit("tracker", tracker_dump, room=sender_id)

@@ -227,6 +233,11 @@ class SocketIOInput(InputChannel):
             sio, self.socketio_path, "socketio_webhook", __name__
         )

+        @socketio_webhook.listener("after_server_start")  # type: ignore[misc]
+        async def after_server_start(app: Sanic, _: asyncio.AbstractEventLoop) -> None:
+            """Prints a message after the server has started with inspect URL."""
+            self.agent = app.ctx.agent
+
         # make sio object static to use in get_output_channel
         self.sio = sio

@@ -272,6 +283,30 @@ class SocketIOInput(InputChannel):
             await sio.emit("session_confirm", data["session_id"], room=sid)
             logger.debug(f"User {sid} connected to socketIO endpoint.")

+        @sio.on("tracker", namespace=self.namespace)
+        async def handle_tracker(sid: Text, data: Dict) -> None:
+            from rasa.shared.core.trackers import DialogueStateTracker
+
+            if self.agent is None:
+                raise ValueError("Agent is not initialized")
+
+            async with self.agent.lock_store.lock(data["sender_id"]):
+                tracker = DialogueStateTracker.from_dict(
+                    data["sender_id"], data["events"], self.agent.domain.slots
+                )
+
+                # will override an existing tracker with the same id!
+                await self.agent.tracker_store.save(tracker)
+
+                # TODO: rather figure out how to trigger the on_tracker_updated
+                # of the development inspector channel
+                state = tracker.current_state(EventVerbosity.AFTER_RESTART)
+                await sio.emit(
+                    "tracker",
+                    json.dumps(state),
+                    room=sid,
+                )
+
         @sio.on(self.user_message_evt, namespace=self.namespace)
         async def handle_message(sid: Text, data: Dict) -> None:
             output_channel = SocketIOOutput(sio, self.bot_message_evt)
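The new "tracker" handler lets a connected client replace a conversation's tracker wholesale and receive the recomputed state back on the same event. A hypothetical client-side sketch (assumes the python-socketio package, a bot on localhost:5005 with the default /socket.io path and namespace; the event name and payload keys are taken from the handler above):

import socketio

sio = socketio.Client()

@sio.on("tracker")
def on_tracker(state):
    # the server echoes the serialized tracker state after saving it
    print("server-side tracker state:", state)

sio.connect("http://localhost:5005", socketio_path="/socket.io")
# overwrite the stored tracker for this sender with an empty event list
sio.emit("tracker", {"sender_id": "user-1", "events": []})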
rasa/core/nlg/contextual_response_rephraser.py
CHANGED

@@ -2,11 +2,9 @@ from typing import Any, Dict, Optional, Text

 import structlog
 from jinja2 import Template
-
 from rasa import telemetry
 from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator
 from rasa.core.nlg.summarize import summarize_conversation
-from rasa.dialogue_understanding.utils import record_commands_and_prompts
 from rasa.shared.constants import (
     LLM_CONFIG_KEY,
     MODEL_CONFIG_KEY,
@@ -20,12 +18,6 @@ from rasa.shared.constants import (
 from rasa.shared.core.domain import KEY_RESPONSES_TEXT, Domain
 from rasa.shared.core.events import BotUttered, UserUttered
 from rasa.shared.core.trackers import DialogueStateTracker
-from rasa.shared.nlu.constants import (
-    PROMPTS,
-    KEY_USER_PROMPT,
-    KEY_LLM_RESPONSE_METADATA,
-)
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     DEFAULT_OPENAI_GENERATE_MODEL_NAME,
@@ -132,44 +124,6 @@ class ContextualResponseRephraser(
             ContextualResponseRephraser.__name__,
         )

-    @classmethod
-    def _add_prompt_and_llm_metadata_to_response(
-        cls,
-        response: Dict[str, Any],
-        prompt_name: str,
-        user_prompt: str,
-        llm_response: Optional["LLMResponse"] = None,
-    ) -> Dict[str, Any]:
-        """Stores the prompt and LLMResponse metadata to response.
-
-        Args:
-            response: The response to add the prompt and LLMResponse metadata to.
-            prompt_name: A name identifying prompt usage.
-            user_prompt: The user prompt that was sent to the LLM.
-            llm_response: The response object from the LLM (None if no response).
-        """
-        if not record_commands_and_prompts:
-            return response
-
-        prompt_data: Dict[Text, Any] = {
-            KEY_USER_PROMPT: user_prompt,
-        }
-
-        if llm_response is not None:
-            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
-
-        prompt_tuple = (prompt_name, prompt_data)
-
-        component_name = cls.__name__
-        existing_prompts = response.get(PROMPTS, {})
-        if component_name in existing_prompts:
-            existing_prompts[component_name].append(prompt_tuple)
-        else:
-            existing_prompts[component_name] = [prompt_tuple]
-
-        response[PROMPTS] = existing_prompts
-        return response
-
     def _last_message_if_human(self, tracker: DialogueStateTracker) -> Optional[str]:
         """Returns the latest message from the tracker.

@@ -188,24 +142,20 @@ class ContextualResponseRephraser(
                 return None
             return None

-    async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
-        """
-        Use LLM to generate a response, returning an LLMResponse object
-        containing both the generated text (choices) and metadata.
+    async def _generate_llm_response(self, prompt: str) -> Optional[str]:
+        """Use LLM to generate a response.

         Args:
-            prompt:
+            prompt: the prompt to send to the LLM

         Returns:
-
+            generated text
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)

         try:
-
-
-            return LLMResponse.from_dict(response_dict)
-
+            llm_response = await llm.acompletion(prompt)
+            return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -305,20 +255,11 @@ class ContextualResponseRephraser(
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
             llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )
-
-
-
-            response=response,
-            prompt_name="rephrase_prompt",
-            user_prompt=prompt,
-            llm_response=llm_response,
-        )
-
-        if llm_response is None or not llm_response.choices:
-            # If the LLM fails to generate a response, return the original response.
+        if not (updated_text := await self._generate_llm_response(prompt)):
+            # If the LLM fails to generate a response, we
+            # return the original response.
             return response

-        updated_text = llm_response.choices[0]
         structlogger.debug(
             "nlg.rewrite.complete",
             response_text=response_text,
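The revert gives _generate_llm_response a plain-text contract again. A standalone sketch of that contract (the FakeLLM stand-in is invented; the real client is whatever llm_factory returns, exposing acompletion with a list of string choices, per the hunk above):

import asyncio
from typing import List, Optional


class _Result:
    def __init__(self, choices: List[str]) -> None:
        self.choices = choices


class FakeLLM:
    """Invented stand-in; only models the attribute shape used in the diff."""

    async def acompletion(self, prompt: str) -> _Result:
        return _Result([f"rephrased: {prompt}"])


async def generate_text(llm: FakeLLM, prompt: str) -> Optional[str]:
    try:
        result = await llm.acompletion(prompt)
        return result.choices[0]  # first candidate completion, as plain text
    except Exception:
        return None  # the caller keeps the unrephrased template response


print(asyncio.run(generate_text(FakeLLM(), "hello")))  # rephrased: hello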
rasa/core/policies/enterprise_search_policy.py
CHANGED

@@ -2,7 +2,6 @@ import importlib.resources
 import json
 import re
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text
-
 import dotenv
 import structlog
 from jinja2 import Template
@@ -38,7 +37,6 @@ from rasa.dialogue_understanding.stack.frames import (
     SearchStackFrame,
 )
 from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame
-from rasa.dialogue_understanding.utils import record_commands_and_prompts
 from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
@@ -65,17 +63,11 @@ from rasa.shared.core.events import Event, UserUttered, BotUttered
 from rasa.shared.core.generator import TrackerWithCachedStates
 from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
 from rasa.shared.exceptions import RasaException, FileIOException
-from rasa.shared.nlu.constants import (
-    PROMPTS,
-    KEY_USER_PROMPT,
-    KEY_LLM_RESPONSE_METADATA,
-)
 from rasa.shared.nlu.training_data.training_data import TrainingData
 from rasa.shared.providers.embedding._langchain_embedding_client_adapter import (
     _LangchainEmbeddingClientAdapter,
 )
 from rasa.shared.providers.llm.llm_client import LLMClient
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.cli import print_error_and_exit
 from rasa.shared.utils.health_check.embeddings_health_check_mixin import (
     EmbeddingsHealthCheckMixin,
@@ -280,45 +272,6 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
         # Wrap the embedding client in the adapter
         return _LangchainEmbeddingClientAdapter(client)

-    @classmethod
-    def _store_prompt_and_llm_response_in_tracker(
-        cls,
-        tracker: DialogueStateTracker,
-        prompt_name: str,
-        user_prompt: str,
-        llm_response: Optional[LLMResponse] = None,
-    ) -> None:
-        """Stores the prompt and LLMResponse metadata in the tracker.
-
-        Args:
-            tracker: The DialogueStateTracker containing the current conversation state.
-            prompt_name: A name identifying prompt usage.
-            user_prompt: The user prompt that was sent to the LLM.
-            llm_response: The response object from the LLM (None if no response).
-        """
-        if not record_commands_and_prompts:
-            return
-
-        if not tracker.latest_message:
-            return
-
-        parse_data = tracker.latest_message.parse_data
-        if parse_data is not None and PROMPTS not in parse_data:
-            parse_data[PROMPTS] = {}  # type: ignore[literal-required]
-
-        component_name = cls.__name__
-        existing_prompts = parse_data[PROMPTS].get(component_name, [])  # type: ignore[literal-required]
-
-        prompt_data: Dict[Text, Any] = {
-            KEY_USER_PROMPT: user_prompt,
-        }
-        if llm_response is not None:
-            prompt_data[KEY_LLM_RESPONSE_METADATA] = llm_response.to_dict()
-
-        prompt_tuple = (prompt_name, prompt_data)
-        existing_prompts.append(prompt_tuple)
-        parse_data[PROMPTS][component_name] = existing_prompts  # type: ignore[literal-required]
-
     def train(  # type: ignore[override]
         self,
         training_trackers: List[TrackerWithCachedStates],
@@ -545,26 +498,13 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po

         if self.use_llm:
             prompt = self._render_prompt(tracker, documents.results)
-
-
-            self._store_prompt_and_llm_response_in_tracker(
-                tracker=tracker,
-                prompt_name="enterprise_search_prompt",
-                user_prompt=prompt,
-                llm_response=llm_response,
-            )
+            llm_answer = await self._generate_llm_answer(llm, prompt)

-            if llm_response is None or not llm_response.choices:
-
-                response = None
-            else:
-                llm_answer = llm_response.choices[0]
+            if self.citation_enabled:
+                llm_answer = self.post_process_citations(llm_answer)

-
-
-
-                logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
-                response = llm_answer
+            logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
+            response = llm_answer
         else:
             response = documents.results[0].metadata.get("answer", None)
         if not response:
@@ -576,6 +516,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy.predict_action_probabilities.no_llm",
                 search_results=documents,
             )
+
         if response is None:
             return self._create_prediction_internal_error(domain, tracker)

@@ -640,21 +581,10 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po

     async def _generate_llm_answer(
         self, llm: LLMClient, prompt: Text
-    ) -> Optional[LLMResponse]:
-        """Fetches an LLM completion for the provided prompt.
-
-        Args:
-            llm: The LLM client used to get the completion.
-            prompt: The prompt text to send to the model.
-
-        Returns:
-            An LLMResponse object, or None if the call fails.
-        """
+    ) -> Optional[Text]:
         try:
-
-
-            return LLMResponse.from_dict(response_dict)
-
+            llm_response = await llm.acompletion(prompt)
+            llm_answer = llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -662,7 +592,9 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy._generate_llm_answer.llm_error",
                 error=e,
             )
-
+            llm_answer = None
+
+        return llm_answer

     def _create_prediction(
         self,
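One behavior visible in the restored flow above: citation post-processing runs before the answer is checked for None, so it can receive None after an LLM error. A condensed, hedged restatement of that ordering (the helper name finalize_answer is invented for this sketch):

from typing import Callable, Optional


def finalize_answer(
    llm_answer: Optional[str],
    citation_enabled: bool,
    post_process_citations: Callable[[Optional[str]], Optional[str]],
) -> Optional[str]:
    # Mirrors the hunk above: the citation step is reached even when the LLM
    # call failed and llm_answer is None; only afterwards does a None response
    # trigger the internal-error prediction.
    if citation_enabled:
        llm_answer = post_process_citations(llm_answer)
    return llm_answer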
rasa/dialogue_understanding/generator/command_generator.py
CHANGED

@@ -26,10 +26,8 @@ from rasa.shared.nlu.constants import (
     PROMPTS,
     KEY_USER_PROMPT,
     KEY_SYSTEM_PROMPT,
-    KEY_LLM_RESPONSE_METADATA,
 )
 from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.llm import DEFAULT_MAX_USER_INPUT_CHARACTERS

 structlogger = structlog.get_logger()
@@ -401,7 +399,6 @@ class CommandGenerator:
         prompt_name: str,
         user_prompt: str,
         system_prompt: Optional[str] = None,
-        llm_response: Optional[LLMResponse] = None,
     ) -> None:
         """Add prompt to the message parse data.

@@ -414,16 +411,14 @@ class CommandGenerator:
                 "fill_slots_prompt",
                 {
                     "user_prompt": <prompt content>",
-                    "system_prompt": <prompt content>"
-                    "llm_response_metadata": <metadata dict from LLMResponse>
+                    "system_prompt": <prompt content>"
                 }
             ),
             (
                 "handle_flows_prompt",
                 {
                     "user_prompt": <prompt content>",
-                    "system_prompt": <prompt content>"
-                    "llm_response_metadata": <metadata dict from LLMResponse>
+                    "system_prompt": <prompt content>"
                 }
             ),
         ],
@@ -432,8 +427,7 @@ class CommandGenerator:
                 "prompt_template",
                 {
                     "user_prompt": <prompt content>",
-                    "system_prompt": <prompt content>"
-                    "llm_response_metadata": <metadata dict from LLMResponse>
+                    "system_prompt": <prompt content>"
                 }
             ),
         ]
@@ -446,15 +440,13 @@ class CommandGenerator:
         if not record_commands_and_prompts:
             return

-
-
-
-
-
-
-
-        prompt_tuple = (prompt_name, prompt_data)
+        prompt_tuple = (
+            prompt_name,
+            {
+                KEY_USER_PROMPT: user_prompt,
+                **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
+            },
+        )

         if message.get(PROMPTS) is not None:
             prompts = message.get(PROMPTS)
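After this change, the recorded prompt tuples carry only the prompts themselves. An illustrative sketch of the resulting parse-data layout (component and prompt names taken from the surrounding docstring; values hypothetical):

prompts = {
    "SingleStepLLMCommandGenerator": [
        (
            "fill_slots_prompt",
            {
                "user_prompt": "...rendered user prompt...",
                # "system_prompt" is included only when one was supplied;
                # "llm_response_metadata" is gone in dev4
            },
        ),
    ],
}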
rasa/dialogue_understanding/generator/llm_based_command_generator.py
CHANGED

@@ -32,7 +32,6 @@ from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import FLOWS_IN_PROMPT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.nlu.training_data.training_data import TrainingData
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     allowed_values_for_slot,
@@ -305,23 +304,22 @@ class LLMBasedCommandGenerator(
         )
         return filtered_flows

-    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
+    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
         """Use LLM to generate a response.

         Args:
             prompt: The prompt to send to the LLM.

         Returns:
-
+            The generated text.

         Raises:
-            ProviderClientAPIException
+            ProviderClientAPIException if an error during API call.
         """
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
         try:
-
-
-            return LLMResponse.from_dict(response_dict)
+            llm_response = await llm.acompletion(prompt)
+            return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
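Callers of the restored invoke_llm get plain text back, with ProviderClientAPIException escaping on API failures. A hedged usage sketch (generate_commands_text and its fallback are invented for illustration; the exception import path is taken from the hunk above):

from typing import Optional, Text

from rasa.shared.exceptions import ProviderClientAPIException


async def generate_commands_text(generator, prompt: Text) -> Optional[Text]:
    try:
        # first completion as plain text; None still signals "no output"
        return await generator.invoke_llm(prompt)
    except ProviderClientAPIException:
        return None  # hypothetical fallback for this sketch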
rasa/dialogue_understanding/generator/llm_command_generator.py
CHANGED

@@ -10,7 +10,6 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.exceptions import ProviderClientAPIException
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import raise_deprecation_warning

 structlogger = structlog.get_logger()
@@ -54,7 +53,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
             **kwargs,
         )

-    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
+    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
         try:
             return await super().invoke_llm(prompt)
         except ProviderClientAPIException:
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py
CHANGED

@@ -535,11 +535,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )

-
-        actions = None
-        if llm_response and llm_response.choices:
-            actions = llm_response.choices[0]
-
+        actions = await self.invoke_llm(prompt)
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_active_flow"
@@ -551,11 +547,10 @@

         if commands:
             self._add_prompt_to_message_parse_data(
-                message
-
-
-
-                llm_response=llm_response,
+                message,
+                MultiStepLLMCommandGenerator.__name__,
+                "fill_slots_for_active_flow_prompt",
+                prompt,
             )

         return commands
@@ -589,11 +584,7 @@
             prompt=prompt,
         )

-
-        actions = None
-        if llm_response and llm_response.choices:
-            actions = llm_response.choices[0]
-
+        actions = await self.invoke_llm(prompt)
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_handling_flows"
@@ -607,11 +598,10 @@

         if commands:
             self._add_prompt_to_message_parse_data(
-                message
-
-
-
-                llm_response=llm_response,
+                message,
+                MultiStepLLMCommandGenerator.__name__,
+                "handle_flows_prompt",
+                prompt,
             )

         return commands
@@ -678,11 +668,7 @@
             prompt=prompt,
         )

-
-        actions = None
-        if llm_response and llm_response.choices:
-            actions = llm_response.choices[0]
-
+        actions = await self.invoke_llm(prompt)
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_newly_started_flow"
@@ -709,11 +695,10 @@

         if commands:
             self._add_prompt_to_message_parse_data(
-                message
-
-
-
-                llm_response=llm_response,
+                message,
+                MultiStepLLMCommandGenerator.__name__,
+                "fill_slots_for_new_flow_prompt",
+                prompt,
             )

         return commands
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py
CHANGED

@@ -264,15 +264,13 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )

-
+        action_list = await self.invoke_llm(flow_prompt)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
         # might return 'None' to indicate a failure to generate actions.
-        if llm_response is None or not llm_response.choices:
+        if action_list is None:
             return [ErrorCommand()]

-        action_list = llm_response.choices[0]
-
         log_llm(
             logger=structlogger,
             log_module="SingleStepLLMCommandGenerator",
@@ -287,11 +285,10 @@
             message, SingleStepLLMCommandGenerator.__name__, commands
         )
         self._add_prompt_to_message_parse_data(
-            message
-
-
-
-            llm_response=llm_response,
+            message,
+            SingleStepLLMCommandGenerator.__name__,
+            "command_generator_prompt",
+            flow_prompt,
         )

         return commands
rasa/shared/core/events.py
CHANGED
@@ -2,10 +2,14 @@ import abc
 import copy
 import json
 import logging
+import structlog
 import re
+from abc import ABC
+
+import jsonpickle
 import time
 import uuid
-from abc import ABC
+from dateutil import parser
 from datetime import datetime
 from typing import (
     List,
@@ -20,14 +24,11 @@ from typing import (
     Tuple,
     TypeVar,
 )
-from typing import Union
-
-import jsonpickle
-import structlog
-from dateutil import parser

 import rasa.shared.utils.common
 import rasa.shared.utils.io
+from typing import Union
+
 from rasa.shared.constants import DOCS_URL_TRAINING_DATA
 from rasa.shared.core.constants import (
     LOOP_NAME,
@@ -61,7 +62,7 @@ from rasa.shared.nlu.constants import (
     ENTITY_ATTRIBUTE_END,
     FULL_RETRIEVAL_INTENT_NAME_KEY,
 )
-
+

 if TYPE_CHECKING:
     from typing_extensions import TypedDict
@@ -97,7 +98,6 @@ if TYPE_CHECKING:
         ENTITIES: List[EntityPrediction],
         "message_id": Optional[Text],
         "metadata": Dict,
-        PROMPTS: Dict,
     },
     total=False,
 )
rasa/shared/nlu/constants.py
CHANGED
@@ -6,7 +6,6 @@ PREDICTED_COMMANDS = "predicted_commands"
 PROMPTS = "prompts"
 KEY_USER_PROMPT = "user_prompt"
 KEY_SYSTEM_PROMPT = "system_prompt"
-KEY_LLM_RESPONSE_METADATA = "llm_response_metadata"
 LLM_COMMANDS = "llm_commands"  # needed for fine-tuning
 LLM_PROMPT = "llm_prompt"  # needed for fine-tuning
 FLOWS_FROM_SEMANTIC_SEARCH = "flows_from_semantic_search"
rasa/shared/providers/llm/llm_response.py
CHANGED

@@ -1,5 +1,5 @@
 from dataclasses import dataclass, field, asdict
-from typing import Any, Dict, List, Optional, Text
+from typing import Dict, List, Optional


 @dataclass
@@ -16,18 +16,6 @@ class LLMUsage:
     def __post_init__(self) -> None:
         self.total_tokens = self.prompt_tokens + self.completion_tokens

-    @classmethod
-    def from_dict(cls, data: Dict[Text, Any]) -> "LLMUsage":
-        """
-        Creates an LLMUsage object from a dictionary.
-        If any keys are missing, they will default to zero
-        or whatever default you prefer.
-        """
-        return cls(
-            prompt_tokens=data.get("prompt_tokens"),
-            completion_tokens=data.get("completion_tokens"),
-        )
-
     def to_dict(self) -> dict:
         """Converts the LLMUsage dataclass instance into a dictionary."""
         return asdict(self)
@@ -54,23 +42,6 @@ class LLMResponse:
     """Optional dictionary for storing additional information related to the
     completion that may not be covered by other fields."""

-    @classmethod
-    def from_dict(cls, data: Dict[Text, Any]) -> "LLMResponse":
-        """
-        Creates an LLMResponse from a dictionary.
-        """
-        usage_data = data.get("usage")
-        usage_obj = LLMUsage.from_dict(usage_data) if usage_data else None
-
-        return cls(
-            id=data["id"],
-            choices=data["choices"],
-            created=data["created"],
-            model=data.get("model"),
-            usage=usage_obj,
-            additional_info=data.get("additional_info"),
-        )
-
     def to_dict(self) -> dict:
         """Converts the LLMResponse dataclass instance into a dictionary."""
         result = asdict(self)
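With both from_dict constructors deleted, the dataclasses only serialize outward in dev4. A sketch of the surviving surface (constructor fields inferred from the removed code; the concrete values are hypothetical):

from rasa.shared.providers.llm.llm_response import LLMResponse

response = LLMResponse(id="cmpl-1", choices=["hello"], created=1700000000)
payload = response.to_dict()        # one-way serialization still works
# LLMResponse.from_dict(payload)    # removed in dev4; would raise AttributeError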
rasa/shared/utils/schemas/events.py
CHANGED

@@ -161,7 +161,7 @@ FLOW_CANCELLED = {
     }
 }
 DIALOGUE_STACK_UPDATED = {
-    "properties": {"event": {"const": "stack"}, "update": {"type": "
+    "properties": {"event": {"const": "stack"}, "update": {"type": "array"}}
 }
 ROUTING_SESSION_ENDED = {"properties": {"event": {"const": "routing_session_ended"}}}
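The repaired schema now constrains "update" to a JSON array. A minimal validation sketch (assumes the jsonschema package; the schema fragment is copied from the hunk above):

from jsonschema import validate

DIALOGUE_STACK_UPDATED = {
    "properties": {"event": {"const": "stack"}, "update": {"type": "array"}}
}

event = {"event": "stack", "update": []}  # any JSON array passes
validate(instance=event, schema=DIALOGUE_STACK_UPDATED)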
rasa/version.py
CHANGED

-__version__ = "3.11.3a1.dev2"
+__version__ = "3.11.3a1.dev4"

{rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: rasa-pro
-Version: 3.11.3a1.dev2
+Version: 3.11.3a1.dev4
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Home-page: https://rasa.com
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
{rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/RECORD
CHANGED

@@ -92,7 +92,7 @@ rasa/cli/x.py,sha256=C7dLtYXAkD-uj7hNj7Pz5YbOupp2yRcMjQbsEVqXUJ8,6825
 rasa/constants.py,sha256=YrrBiJUc0cL5Xrsap6IioNbQ6dKaqDiueqHmMIYkpF0,1348
 rasa/core/__init__.py,sha256=DYHLve7F1yQBVOZTA63efVIwLiULMuihOfdpzw1j0os,457
 rasa/core/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/core/actions/action.py,sha256=
+rasa/core/actions/action.py,sha256=3tXb_DAKEzguq5zDuV1j04Fd5uYvwQckc4GR_EoDVYE,45286
 rasa/core/actions/action_clean_stack.py,sha256=xUP-2ipPsPAnAiwP17c-ezmHPSrV4JSUZr-eSgPQwIs,2279
 rasa/core/actions/action_exceptions.py,sha256=hghzXYN6VeHC-O_O7WiPesCNV86ZTkHgG90ZnQcbai8,724
 rasa/core/actions/action_hangup.py,sha256=wpXunkGC71krAYZD3BbqzlHLZxNg1mIviwWz0j9Go-c,994
@@ -254,7 +254,7 @@ rasa/core/channels/rasa_chat.py,sha256=XGZ7QLyQHhB-m7EjetDNEBSjAa2mEFqU-e-FuS9z3
 rasa/core/channels/rest.py,sha256=YDBnbdrlvaYL7Efy3cm2LbbSm7cBAFDhmcypojHXbog,7227
 rasa/core/channels/rocketchat.py,sha256=HWOMxXLuwadYEYIMMP-z6RqAJzMGZDLklpgqLOipXF0,5998
 rasa/core/channels/slack.py,sha256=3b8OZQ_gih5XBwhQ1q4BbBUC1SCAPaO9AoJEn2NaoQE,24405
-rasa/core/channels/socketio.py,sha256=
+rasa/core/channels/socketio.py,sha256=g8IfIFjcAVC1MZve2N8IKXVPpEQzpFaCQ4q3pECS0j4,13334
 rasa/core/channels/telegram.py,sha256=5BrNECFM3qe9XjNpDb8Q9fbqCT5aKr5L6IH21W8sum8,10651
 rasa/core/channels/twilio.py,sha256=GsdjfplZdBj0fRB60bSggPF1DXFZ_x18V_dlcDy5VFs,5943
 rasa/core/channels/vier_cvg.py,sha256=PfvSluQqgJbP0JzZPFUvum3z7H55JPPeobcD-z5zCkw,13544
@@ -307,7 +307,7 @@ rasa/core/lock_store.py,sha256=fgdufUYXHEiTcD7NCCqgDAQRRtt7jrKafENHqFKOyi0,12504
 rasa/core/migrate.py,sha256=XNeYdiRytBmBNubOQ8KZOT_wR1o9aOpHHfBU9PCB2eg,14626
 rasa/core/nlg/__init__.py,sha256=0eQOZ0fB35b18oVhRFczcH30jJHgO8WXFhnbXGOxJek,240
 rasa/core/nlg/callback.py,sha256=rFkDe7CSAETASRefpERUT6-DHWPs0UXhx8x4tZ1QE0M,5238
-rasa/core/nlg/contextual_response_rephraser.py,sha256=
+rasa/core/nlg/contextual_response_rephraser.py,sha256=RqYig6NFnaXcW5vkAUSb54XWoBkeVWm2WYDCsafthBY,11055
 rasa/core/nlg/generator.py,sha256=YZ_rh--MeyzA6oXRqr_Ng-jcmPgbCmWMJJrquPmo__8,8436
 rasa/core/nlg/interpolator.py,sha256=Dc-J2Vf6vPPUbwIgZQm3AJDGvMaFTsh9Citd4CYuA9U,5189
 rasa/core/nlg/response.py,sha256=aHpy9BgjO7ub6v-sVPiQqutUA_7-UD1l3DJGVeQyp4k,5888
@@ -315,7 +315,7 @@ rasa/core/nlg/summarize.py,sha256=JO6VCfM_RnU0QX8Us42YkNOxC0ESKV1xcVH_sCW27ZU,21
 rasa/core/persistor.py,sha256=0BZvrA1xObxVtADWLVapj4NOmvqIEen1LKoMOdtZ63s,20337
 rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/core/policies/ensemble.py,sha256=AjNOEy2Iubbe-LdKaoFUXG8ch6yPrg3bTvcTcAPmeOs,12959
-rasa/core/policies/enterprise_search_policy.py,sha256
+rasa/core/policies/enterprise_search_policy.py,sha256=nG1vgZO5woxvXCZWayYXQzZkmxPemfsL0c62QkZcgcI,34126
 rasa/core/policies/enterprise_search_prompt_template.jinja2,sha256=dCS_seyBGxMQoMsOjjvPp0dd31OSzZCJSZeev1FJK5Q,1187
 rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2,sha256=vRQBs3q13UmvRRgqA8-DmRtM7tqZP2ngwMVJ4gy7lE0,3302
 rasa/core/policies/flow_policy.py,sha256=wGb1l_59cGM9ZaexSIK5uXFi618739oNfLOxx2FC0_Y,7490
@@ -379,20 +379,20 @@ rasa/dialogue_understanding/commands/start_flow_command.py,sha256=a0Yk8xpBpFgC3H
 rasa/dialogue_understanding/commands/user_silence_command.py,sha256=QtqsMU5mrbUp5dla2yGSpxXfIfi_h6Eu72mTDZQ_aTU,1724
 rasa/dialogue_understanding/commands/utils.py,sha256=OiyLFGEsrfFSIJcvBY6lTIIXqDY9OxaikVGtcl4Kokk,1911
 rasa/dialogue_understanding/generator/__init__.py,sha256=Ykeb2wQ1DuiUWAWO0hLIPSTK1_Ktiq9DZXF6D3ugN78,764
-rasa/dialogue_understanding/generator/command_generator.py,sha256=
+rasa/dialogue_understanding/generator/command_generator.py,sha256=Egdy-g46BGBw-iP-dKBM3sca-X-2SyBQL5NPyKTiHWw,15974
 rasa/dialogue_understanding/generator/constants.py,sha256=9Nwjo2Qobioetr9SyyQxsGvEPSbKCVS5ZX1GGJtbA0E,716
 rasa/dialogue_understanding/generator/flow_document_template.jinja2,sha256=f4H6vVd-_nX_RtutMh1xD3ZQE_J2OyuPHAtiltfiAPY,253
 rasa/dialogue_understanding/generator/flow_retrieval.py,sha256=MkwUgQA9xRlAQUdWF2cBEX2tW2PQhBsq2Jsy2vmqWY4,17891
-rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=
-rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=
+rasa/dialogue_understanding/generator/llm_based_command_generator.py,sha256=hzHUUMPmIZaLZkFRBgVK42l2nTUn04H4W8GpBBF1XIs,17554
+rasa/dialogue_understanding/generator/llm_command_generator.py,sha256=yQ8aAMsTKGSARroJq0TfKVLe3ShYl8K8oklDk_KGies,2459
 rasa/dialogue_understanding/generator/multi_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/multi_step/fill_slots_prompt.jinja2,sha256=Y0m673tAML3cFPaLM-urMXDsBYUUcXIw9YUpkAhGUuA,2933
 rasa/dialogue_understanding/generator/multi_step/handle_flows_prompt.jinja2,sha256=8l93_QBKBYnqLICVdiTu5ejZDE8F36BU8-qwba0px44,1927
-rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=
+rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py,sha256=zw1N0UyEOzYfgm3sFP8ptZ92fSLszwiACM4Vqwt8lIo,33527
 rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=pzd1q-syU_QuqTRcfd_GsXyOJaxfApqh_LsOKuEN46g,9332
 rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
-rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=
+rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=hhFnxzc8lji7UZsFaVK-GTkyJ-34jaN-IhWcebDJhBI,18493
 rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/dialogue_understanding/patterns/cancel.py,sha256=IQ4GVHNnNCqwKRLlAqBtLsgolcbPPnHsHdb3aOAFhEs,3868
 rasa/dialogue_understanding/patterns/cannot_handle.py,sha256=pg0zJHl-hDBnl6y9IyxZzW57yuMdfD8xI8eiK6EVrG8,1406
@@ -591,7 +591,7 @@ rasa/shared/core/command_payload_reader.py,sha256=Vhiop9LWFawaEruRifBBrVmoEJ-fj1
 rasa/shared/core/constants.py,sha256=WNFzABG-eiVREBL6aDZAmcNDiSmuSbvWuxXIMoX2Iv8,5704
 rasa/shared/core/conversation.py,sha256=tw1fD2XB3gOdQjDI8hHo5TAAmE2JYNogQGWe3rE929w,1385
 rasa/shared/core/domain.py,sha256=SsRLbLIEZ-coPTEwr-XxU_O-X-0mR466YLvXJJOAEpc,81247
-rasa/shared/core/events.py,sha256=
+rasa/shared/core/events.py,sha256=6yuOrZs8hZaR0FV1nC58l1u6qE4fegwrvL5nH1w7xY4,83719
 rasa/shared/core/flows/__init__.py,sha256=HszhIvEARpmyxABFc1MKYvj8oy04WiZW1xmCdToakbs,181
 rasa/shared/core/flows/flow.py,sha256=XzF9RUxLNyiGndnpvECV4pMczzc6g7UtgwokyXAoaTY,21496
 rasa/shared/core/flows/flow_path.py,sha256=xstwahZBU5cfMY46mREA4NoOGlKLBRAqeP_mJ3UZqOI,2283
@@ -643,7 +643,7 @@ rasa/shared/importers/rasa.py,sha256=877EU8qPZSMBk5VAVAAUhfsh6vatRJrYOqWz1YGR6p8
 rasa/shared/importers/remote_importer.py,sha256=fKLQskaCVPpD5cCMQ9sR71cZZlSIP-SSv3J3o2kra2w,7696
 rasa/shared/importers/utils.py,sha256=Gi3BM5RUr-9nX_Ujf-g-tt19_bKPizmQIi6eAflDAmo,1289
 rasa/shared/nlu/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rasa/shared/nlu/constants.py,sha256=
+rasa/shared/nlu/constants.py,sha256=KUYpaGAjwBwdUV8TZupei-xWAcb8RmaqhXNF8SMVwqU,1773
 rasa/shared/nlu/interpreter.py,sha256=eCNJp61nQYTGVf4aJi8SCWb46jxZY6-C1M1LFxMyQTM,188
 rasa/shared/nlu/training_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/nlu/training_data/entities_parser.py,sha256=fC-VIso07so6E9b6KrQXOBC-ZUGCQGvnMvzVwiAO1GQ,6729
@@ -695,7 +695,7 @@ rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=A6sg2bvulNczuzu1J0V7
 rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=1oiUIXr_U5ldyBQZ8cnrV3P7Qw9kMw1yvaVg6mjKkHU,3940
 rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=llko2DfOpiLMpHxnW26I1Hb1wTn7VmZ_yu43GRXhqwQ,6815
 rasa/shared/providers/llm/llm_client.py,sha256=6-gMsEJqquhUPGXzNiq_ybM_McLWxAJ_QhbmWcLnb_Q,2358
-rasa/shared/providers/llm/llm_response.py,sha256=
+rasa/shared/providers/llm/llm_response.py,sha256=Ltmc8yk9cAqtK8QgwfZZywudM5ZQsT4y_AKAQ3q05hA,1490
 rasa/shared/providers/llm/openai_llm_client.py,sha256=uDdcugBcO3sfxbduc00eqaZdrJP0VFX5dkBd2Dem47M,4844
 rasa/shared/providers/llm/rasa_llm_client.py,sha256=SpgWn3uHHEezIcyvMfi468zRLw_W8VF6sIs-VIhElPc,3357
 rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=98FaF0-lYnytC46ulhrCAQjUKy9TI0U2QILml__UCzc,9170
@@ -717,7 +717,7 @@ rasa/shared/utils/pykwalify_extensions.py,sha256=4W8gde8C6QpGCY_t9IEmaZSgjMuie1x
 rasa/shared/utils/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rasa/shared/utils/schemas/config.yml,sha256=czxSADw9hOIZdhvFP8pVUQo810hs9_C8ZGfCPx17taM,27
 rasa/shared/utils/schemas/domain.yml,sha256=b2k4ZYSV-QL3hGjDaRg8rfoqaTh4hbhDc_hBlMB8cuI,3409
-rasa/shared/utils/schemas/events.py,sha256=
+rasa/shared/utils/schemas/events.py,sha256=9sg_w4VeFMksyl-uscUht1TErf1gfKR56agyYSvl2c4,6912
 rasa/shared/utils/schemas/model_config.yml,sha256=OravyVWalSwjiXYRarRzg0tiRnUFHe1q4-5Wj1TEeFk,811
 rasa/shared/utils/schemas/stories.yml,sha256=DV3wAFnv1leD7kV-FH-GQihF1QX5oKHc8Eb24mxjizc,4737
 rasa/shared/utils/yaml.py,sha256=HpG4whRyFMEJ39YEMd-X1HBJL6C2cAwvPlMGzqq74z0,37638
@@ -776,9 +776,9 @@ rasa/utils/train_utils.py,sha256=f1NWpp5y6al0dzoQyyio4hc4Nf73DRoRSHDzEK6-C4E,212
 rasa/utils/url_tools.py,sha256=JQcHL2aLqLHu82k7_d9imUoETCm2bmlHaDpOJ-dKqBc,1218
 rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
 rasa/validator.py,sha256=wl5IKiyDmk6FlDcGO2Js-H-gHPeqVqUJ6hB4fgN0xjI,66796
-rasa/version.py,sha256=
-rasa_pro-3.11.3a1.
-rasa_pro-3.11.3a1.
-rasa_pro-3.11.3a1.
-rasa_pro-3.11.3a1.
-rasa_pro-3.11.3a1.
+rasa/version.py,sha256=GA1IzBOw2Is9Gh8r2ooq5d5oRHY4Cr5V2c_YGCN_jaI,124
+rasa_pro-3.11.3a1.dev4.dist-info/METADATA,sha256=UoBAUZRnGQGLH0j91R1nxARNIGeoD7wlzdqqMuPmb_U,10798
+rasa_pro-3.11.3a1.dev4.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+rasa_pro-3.11.3a1.dev4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+rasa_pro-3.11.3a1.dev4.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+rasa_pro-3.11.3a1.dev4.dist-info/RECORD,,
{rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/NOTICE
File without changes

{rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/WHEEL
File without changes

{rasa_pro-3.11.3a1.dev2.dist-info → rasa_pro-3.11.3a1.dev4.dist-info}/entry_points.txt
File without changes