rasa-pro 3.12.0rc1__py3-none-any.whl → 3.12.0rc3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.

This release of rasa-pro has been flagged as potentially problematic.
Files changed (70)
  1. README.md +10 -13
  2. rasa/cli/dialogue_understanding_test.py +5 -8
  3. rasa/cli/llm_fine_tuning.py +47 -12
  4. rasa/cli/project_templates/calm/domain/list_contacts.yml +1 -2
  5. rasa/cli/project_templates/calm/domain/remove_contact.yml +1 -2
  6. rasa/cli/project_templates/calm/domain/shared.yml +1 -4
  7. rasa/core/actions/action_handle_digressions.py +35 -13
  8. rasa/core/channels/voice_stream/asr/asr_event.py +5 -0
  9. rasa/core/channels/voice_stream/audiocodes.py +19 -6
  10. rasa/core/channels/voice_stream/call_state.py +3 -9
  11. rasa/core/channels/voice_stream/genesys.py +40 -55
  12. rasa/core/channels/voice_stream/voice_channel.py +61 -39
  13. rasa/core/policies/flows/flow_executor.py +7 -2
  14. rasa/core/processor.py +0 -1
  15. rasa/core/tracker_store.py +123 -34
  16. rasa/dialogue_understanding/commands/can_not_handle_command.py +1 -1
  17. rasa/dialogue_understanding/commands/cancel_flow_command.py +1 -1
  18. rasa/dialogue_understanding/commands/change_flow_command.py +1 -1
  19. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +1 -1
  20. rasa/dialogue_understanding/commands/clarify_command.py +1 -1
  21. rasa/dialogue_understanding/commands/command_syntax_manager.py +1 -1
  22. rasa/dialogue_understanding/commands/handle_digressions_command.py +1 -7
  23. rasa/dialogue_understanding/commands/human_handoff_command.py +1 -1
  24. rasa/dialogue_understanding/commands/knowledge_answer_command.py +1 -1
  25. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +1 -1
  26. rasa/dialogue_understanding/commands/set_slot_command.py +2 -1
  27. rasa/dialogue_understanding/commands/skip_question_command.py +1 -1
  28. rasa/dialogue_understanding/commands/start_flow_command.py +3 -1
  29. rasa/dialogue_understanding/commands/utils.py +2 -32
  30. rasa/dialogue_understanding/generator/command_parser.py +41 -0
  31. rasa/dialogue_understanding/generator/constants.py +7 -2
  32. rasa/dialogue_understanding/generator/llm_based_command_generator.py +9 -2
  33. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +1 -1
  34. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +29 -48
  35. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_fallback_other_models_template.jinja2 +57 -0
  36. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +23 -50
  37. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +141 -27
  38. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +32 -18
  39. rasa/dialogue_understanding/processor/command_processor.py +43 -23
  40. rasa/dialogue_understanding/stack/utils.py +49 -6
  41. rasa/dialogue_understanding_test/du_test_case.py +30 -10
  42. rasa/dialogue_understanding_test/du_test_result.py +1 -1
  43. rasa/e2e_test/assertions.py +6 -8
  44. rasa/e2e_test/llm_judge_prompts/answer_relevance_prompt_template.jinja2 +5 -1
  45. rasa/e2e_test/llm_judge_prompts/groundedness_prompt_template.jinja2 +4 -0
  46. rasa/engine/language.py +67 -25
  47. rasa/llm_fine_tuning/conversations.py +3 -31
  48. rasa/llm_fine_tuning/llm_data_preparation_module.py +5 -3
  49. rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +18 -13
  50. rasa/llm_fine_tuning/paraphrasing_module.py +6 -2
  51. rasa/llm_fine_tuning/train_test_split_module.py +27 -27
  52. rasa/llm_fine_tuning/utils.py +7 -0
  53. rasa/shared/constants.py +4 -0
  54. rasa/shared/core/domain.py +2 -0
  55. rasa/shared/core/slots.py +6 -0
  56. rasa/shared/providers/_configs/azure_entra_id_config.py +8 -8
  57. rasa/shared/providers/llm/litellm_router_llm_client.py +1 -0
  58. rasa/shared/providers/llm/openai_llm_client.py +2 -2
  59. rasa/shared/providers/router/_base_litellm_router_client.py +38 -7
  60. rasa/shared/utils/llm.py +69 -10
  61. rasa/telemetry.py +13 -3
  62. rasa/tracing/instrumentation/attribute_extractors.py +2 -5
  63. rasa/validator.py +2 -2
  64. rasa/version.py +1 -1
  65. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/METADATA +12 -14
  66. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/RECORD +69 -68
  67. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +0 -68
  68. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/NOTICE +0 -0
  69. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/WHEEL +0 -0
  70. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/entry_points.txt +0 -0
rasa/shared/providers/router/_base_litellm_router_client.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import os
+from copy import deepcopy
 from typing import Any, Dict, List
 
 import structlog
@@ -18,6 +19,7 @@ from rasa.shared.constants import (
     USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY,
 )
 from rasa.shared.exceptions import ProviderClientValidationError
+from rasa.shared.providers._configs.azure_entra_id_config import AzureEntraIDOAuthConfig
 from rasa.shared.providers._configs.litellm_router_client_config import (
     LiteLLMRouterClientConfig,
 )
@@ -61,12 +63,8 @@ class _BaseLiteLLMRouterClient:
         self._extra_parameters = kwargs or {}
         self.additional_client_setup()
         try:
-            resolved_model_configurations = (
-                self._resolve_env_vars_in_model_configurations()
-            )
-            self._router_client = Router(
-                model_list=resolved_model_configurations, **router_settings
-            )
+            # We instantiate a router client here to validate the configuration.
+            self._router_client = self._create_router_client()
         except Exception as e:
             event_info = "Cannot instantiate a router client."
             structlogger.error(
@@ -145,6 +143,14 @@
     @property
     def router_client(self) -> Router:
         """Returns the instantiated LiteLLM Router client."""
+        # In case OAuth is used, azure_ad_token_provider does not work as
+        # expected due to a bug in LiteLLM. To work around this, we create
+        # a new client every time we need to make a call, which ensures
+        # that the token is always fresh.
+        # GitHub issue for LiteLLM: https://github.com/BerriAI/litellm/issues/4417
+        if self._has_oauth():
+            return self._create_router_client()
         return self._router_client
 
     @property
@@ -175,11 +181,36 @@
             **self._litellm_extra_parameters,
         }
 
+    def _create_router_client(self) -> Router:
+        resolved_model_configurations = self._resolve_env_vars_in_model_configurations()
+        return Router(model_list=resolved_model_configurations, **self.router_settings)
+
+    def _has_oauth(self) -> bool:
+        for model_configuration in self.model_configurations:
+            if model_configuration.get("litellm_params", {}).get("oauth", None):
+                return True
+        return False
+
     def _resolve_env_vars_in_model_configurations(self) -> List:
         model_configuration_with_resolved_keys = []
         for model_configuration in self.model_configurations:
             resolved_model_configuration = resolve_environment_variables(
-                model_configuration
+                deepcopy(model_configuration)
             )
+
+            if not isinstance(resolved_model_configuration, dict):
+                continue
+
+            lite_llm_params = resolved_model_configuration.get("litellm_params", {})
+            if lite_llm_params.get("oauth", None):
+                oauth_config_dict = lite_llm_params.pop("oauth")
+                oauth_config = AzureEntraIDOAuthConfig.from_dict(oauth_config_dict)
+                credential = oauth_config.create_azure_credential()
+                # token_provider = get_bearer_token_provider(
+                #     credential, *oauth_config.scopes
+                # )
+                resolved_model_configuration["litellm_params"]["azure_ad_token"] = (
+                    credential.get_token(*oauth_config.scopes).token
+                )
             model_configuration_with_resolved_keys.append(resolved_model_configuration)
         return model_configuration_with_resolved_keys
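
The net effect of the OAuth changes above: when a model configuration's `litellm_params` contain an `oauth` block, the client resolves a short-lived Entra ID bearer token itself (via `AzureEntraIDOAuthConfig`) and rebuilds the `Router` on every access, instead of relying on LiteLLM's broken `azure_ad_token_provider`. A minimal sketch of the token-resolution step, assuming the `azure-identity` package; the helper name and scope below are illustrative stand-ins, not rasa-pro's actual API:

```python
# Sketch of the oauth -> azure_ad_token resolution step, assuming the
# azure-identity package is installed. `SCOPES` and
# `resolve_azure_ad_token` are illustrative, not rasa-pro's API.
from copy import deepcopy
from typing import Any, Dict

from azure.identity import DefaultAzureCredential

SCOPES = ["https://cognitiveservices.azure.com/.default"]


def resolve_azure_ad_token(model_configuration: Dict[str, Any]) -> Dict[str, Any]:
    """Swap an `oauth` block for a freshly minted `azure_ad_token`."""
    config = deepcopy(model_configuration)  # never mutate the stored config
    litellm_params = config.get("litellm_params", {})
    if litellm_params.pop("oauth", None) is not None:
        credential = DefaultAzureCredential()
        # Tokens expire, which is why the router above is rebuilt per call.
        litellm_params["azure_ad_token"] = credential.get_token(*SCOPES).token
    return config
```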
rasa/shared/utils/llm.py CHANGED
@@ -667,35 +667,94 @@ def get_prompt_template(
     """
     try:
         if jinja_file_path is not None:
-            return rasa.shared.utils.io.read_file(jinja_file_path)
+            prompt_template = rasa.shared.utils.io.read_file(jinja_file_path)
+            structlogger.info(
+                "utils.llm.get_prompt_template.custom_prompt_template_read_successfull",
+                event_info=(
+                    f"Custom prompt template read successfully from "
+                    f"`{jinja_file_path}`."
+                ),
+                prompt_file_path=jinja_file_path,
+            )
+            return prompt_template
     except (FileIOException, FileNotFoundException):
         structlogger.warning(
-            "Failed to read custom prompt template. Using default template instead.",
-            jinja_file_path=jinja_file_path,
+            "utils.llm.get_prompt_template.failed_to_read_custom_prompt_template",
+            event_info=(
+                "Failed to read custom prompt template. Using default template instead."
+            ),
         )
     return default_prompt_template
 
 
 def get_default_prompt_template_based_on_model(
-    config: Dict[str, Any],
+    llm_config: Dict[str, Any],
     model_prompt_mapping: Dict[str, Any],
+    default_prompt_path: str,
     fallback_prompt_path: str,
 ) -> Text:
     """Returns the default prompt template based on the model name.
 
     Args:
-        config: The model config.
+        llm_config: The model config.
         model_prompt_mapping: The mapping of model name to prompt template.
-        fallback_prompt_path: The fallback prompt path.
+        default_prompt_path: The default prompt path of the component.
+        fallback_prompt_path: The fallback prompt path for all other models
+            that do not have a mapping in the model_prompt_mapping.
 
     Returns:
         The default prompt template.
     """
-    provider = config.get(PROVIDER_CONFIG_KEY)
-    model = config.get(MODEL_CONFIG_KEY, "")
+    _llm_config = deepcopy(llm_config)
+    if MODELS_CONFIG_KEY in _llm_config:
+        _llm_config = _llm_config[MODELS_CONFIG_KEY][0]
+    provider = _llm_config.get(PROVIDER_CONFIG_KEY)
+    model = _llm_config.get(MODEL_CONFIG_KEY)
+    if not model:
+        # If the model is not defined, we default to the default prompt template.
+        structlogger.info(
+            "utils.llm.get_default_prompt_template_based_on_model.using_default_prompt_template",
+            event_info=(
+                f"Model not defined in the config. Default prompt template read from"
+                f" `{default_prompt_path}`."
+            ),
+            default_prompt_path=default_prompt_path,
+        )
+        return importlib.resources.read_text(
+            DEFAULT_PROMPT_PACKAGE_NAME, default_prompt_path
+        )
+
     model_name = model if provider and provider in model else f"{provider}/{model}"
-    prompt_file_path = model_prompt_mapping.get(model_name, fallback_prompt_path)
-    return importlib.resources.read_text(DEFAULT_PROMPT_PACKAGE_NAME, prompt_file_path)
+    if prompt_file_path := model_prompt_mapping.get(model_name):
+        # If the model is found in the mapping, we use the model-specific
+        # prompt template.
+        structlogger.info(
+            "utils.llm.get_default_prompt_template_based_on_model.using_model_specific_prompt_template",
+            event_info=(
+                f"Using model-specific default prompt template. Default prompt "
+                f"template read from `{prompt_file_path}`."
+            ),
+            default_prompt_path=prompt_file_path,
+            model_name=model_name,
+        )
+        return importlib.resources.read_text(
+            DEFAULT_PROMPT_PACKAGE_NAME, prompt_file_path
+        )
+
+    # If the model is not found in the mapping, we default to the fallback
+    # prompt template.
+    structlogger.info(
+        "utils.llm.get_default_prompt_template_based_on_model.using_fallback_prompt_template",
+        event_info=(
+            f"Model not found in the model prompt mapping. Fallback prompt template "
+            f"read from `{fallback_prompt_path}`."
+        ),
+        fallback_prompt_path=fallback_prompt_path,
+        model_name=model_name,
+    )
+    return importlib.resources.read_text(
+        DEFAULT_PROMPT_PACKAGE_NAME, fallback_prompt_path
+    )
 
 
 def allowed_values_for_slot(slot: Slot) -> Union[str, None]:
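
The rewritten `get_default_prompt_template_based_on_model` is effectively a three-way lookup: no model configured → the component's default template; model found in the mapping → its model-specific template; anything else → the fallback template. A condensed sketch of that decision follows; the mapping keys and paths are assumptions inferred from the template filenames in this release, not the shipped constant:

```python
# Condensed sketch of the three-way prompt-template lookup.
# Mapping keys and template paths are illustrative assumptions.
from typing import Any, Dict, Optional

MODEL_PROMPT_MAPPING: Dict[str, str] = {
    "openai/gpt-4o-2024-11-20": (
        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
    ),
    "anthropic/claude-3-5-sonnet-20240620": (
        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
    ),
}


def pick_prompt_path(
    llm_config: Dict[str, Any], default_path: str, fallback_path: str
) -> str:
    provider: Optional[str] = llm_config.get("provider")
    model: Optional[str] = llm_config.get("model")
    if not model:
        # No model configured: keep the component's default template.
        return default_path
    # Normalize to "provider/model" unless the provider is already embedded.
    model_name = model if provider and provider in model else f"{provider}/{model}"
    # Known model: use its tuned template; anything else: the fallback.
    return MODEL_PROMPT_MAPPING.get(model_name, fallback_path)


# e.g. pick_prompt_path({"provider": "openai", "model": "gpt-4o-2024-11-20"},
#      "default.jinja2", "fallback.jinja2") returns the GPT-4o template path.
```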
rasa/telemetry.py CHANGED
@@ -15,7 +15,7 @@ from collections import defaultdict
 from datetime import datetime
 from functools import wraps
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Text, Tuple
+from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Type, cast
 
 import importlib_resources
 import requests
@@ -1126,12 +1126,12 @@ def _get_llm_command_generator_config(config: Dict[str, Any]) -> Optional[Dict]:
     """
     from rasa.dialogue_understanding.generator import (
         CompactLLMCommandGenerator,
+        LLMBasedCommandGenerator,
         LLMCommandGenerator,
         MultiStepLLMCommandGenerator,
         SingleStepLLMCommandGenerator,
     )
     from rasa.dialogue_understanding.generator.constants import (
-        DEFAULT_LLM_CONFIG,
         FLOW_RETRIEVAL_KEY,
         LLM_CONFIG_KEY,
     )
@@ -1162,6 +1162,12 @@ def _get_llm_command_generator_config(config: Dict[str, Any]) -> Optional[Dict]:
 
     def extract_llm_command_generator_llm_client_settings(component: Dict) -> Dict:
         """Extracts settings related to LLM command generator."""
+        component_class_lookup = {
+            LLMCommandGenerator.__name__: LLMCommandGenerator,
+            SingleStepLLMCommandGenerator.__name__: SingleStepLLMCommandGenerator,
+            MultiStepLLMCommandGenerator.__name__: MultiStepLLMCommandGenerator,
+            CompactLLMCommandGenerator.__name__: CompactLLMCommandGenerator,
+        }
         llm_config = component.get(LLM_CONFIG_KEY, {})
         # Config at this stage is not yet resolved, so read from `model_group`
         llm_model_group_id = llm_config.get(MODEL_GROUP_CONFIG_KEY)
@@ -1169,7 +1175,11 @@ def _get_llm_command_generator_config(config: Dict[str, Any]) -> Optional[Dict]:
             MODEL_NAME_CONFIG_KEY
         )
         if llm_model_group_id is None and llm_model_name is None:
-            llm_model_name = DEFAULT_LLM_CONFIG[MODEL_CONFIG_KEY]
+            component_clz = cast(
+                Type[LLMBasedCommandGenerator],
+                component_class_lookup[component["name"]],
+            )
+            llm_model_name = component_clz.get_default_llm_config()[MODEL_CONFIG_KEY]
 
         custom_prompt_used = (
             PROMPT_CONFIG_KEY in component or PROMPT_TEMPLATE_CONFIG_KEY in component
rasa/tracing/instrumentation/attribute_extractors.py CHANGED
@@ -58,9 +58,7 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.core.training_data.structures import StoryGraph
 from rasa.shared.importers.importer import TrainingDataImporter
 from rasa.shared.nlu.constants import INTENT_NAME_KEY, SET_SLOT_COMMAND
-from rasa.shared.utils.llm import (
-    combine_custom_and_default_config,
-)
+from rasa.shared.utils.llm import combine_custom_and_default_config
 from rasa.tracing.constants import (
     PROMPT_TOKEN_LENGTH_ATTRIBUTE_NAME,
     REQUEST_BODY_SIZE_IN_BYTES_ATTRIBUTE_NAME,
@@ -375,14 +373,13 @@ def extract_attrs_for_llm_based_command_generator(
     self: "LLMBasedCommandGenerator",
     prompt: str,
 ) -> Dict[str, Any]:
-    from rasa.dialogue_understanding.generator.constants import DEFAULT_LLM_CONFIG
     from rasa.dialogue_understanding.generator.flow_retrieval import (
         DEFAULT_EMBEDDINGS_CONFIG,
     )
 
     attributes = extract_llm_config(
         self,
-        default_llm_config=DEFAULT_LLM_CONFIG,
+        default_llm_config=self.get_default_llm_config(),
         default_embeddings_config=DEFAULT_EMBEDDINGS_CONFIG,
     )
rasa/validator.py CHANGED
@@ -1429,7 +1429,7 @@ class Validator:
             event_info=(
                 f"{count} response{' is' if count == 1 else 's are'} "
                 f"missing translations for some languages. "
-                "Run 'rasa data validate language' for details."
+                "Run 'rasa data validate translations' for details."
             ),
         )
         if flow_warnings:
@@ -1440,7 +1440,7 @@ class Validator:
             event_info=(
                 f"{count} flow{' is' if count == 1 else 's are'} "
                 f"missing translations for some languages. "
-                "Run 'rasa data validate language' for details."
+                "Run 'rasa data validate translations' for details."
             ),
         )
         else:
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.12.0rc1"
+__version__ = "3.12.0rc3"
{rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: rasa-pro
-Version: 3.12.0rc1
+Version: 3.12.0rc3
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Home-page: https://rasa.com
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
@@ -64,6 +64,7 @@ Requires-Dist: jsonschema (>=4.22)
 Requires-Dist: keras (==2.14.0)
 Requires-Dist: langchain (>=0.2.0,<0.3.0)
 Requires-Dist: langchain-community (>=0.2.0,<0.3.0)
+Requires-Dist: langcodes (>=3.5.0,<4.0.0)
 Requires-Dist: litellm (>=1.52.6,<1.53.0)
 Requires-Dist: matplotlib (>=3.7,<3.8)
 Requires-Dist: mattermostwrapper (>=2.2,<2.3)
@@ -157,15 +158,14 @@ Description-Content-Type: text/markdown
 
 <div align="center">
 
-[![Build Status](https://github.com/RasaHQ/rasa-private/workflows/Continuous%20Integration/badge.svg)](https://github.com/RasaHQ/rasa-private/actions)
 [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=RasaHQ_rasa&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=RasaHQ_rasa)
-[![Documentation Status](https://img.shields.io/badge/docs-stable-brightgreen.svg)](https://rasa.com/docs/rasa-pro/)
+[![Documentation Status](https://img.shields.io/badge/docs-stable-brightgreen.svg)](https://rasa.com/docs/docs/pro/intro)
+![Python version support](https://img.shields.io/pypi/pyversions/rasa-pro)
 
 </div>
 
 <hr />
 
-
 Rasa Pro is a framework for building scalable, dynamic conversational AI assistants that integrate large language models (LLMs) to enable more contextually aware and agentic interactions. Whether you’re new to conversational AI or an experienced developer, Rasa Pro offers enhanced flexibility, control, and performance for mission-critical applications.
 
 Building on the foundation of Rasa Open Source, Rasa Pro adds advanced features like CALM (Conversational AI with Language Models) and Dialogue Understanding (DU), which enable developers to shift from traditional intent-driven systems to LLM-based agents. This allows for more robust, responsive interactions that adhere strictly to business logic, while reducing risks like prompt injection and minimizing hallucinations.
@@ -178,20 +178,18 @@ Building on the foundation of Rasa Open Source, Rasa Pro adds advanced features
 - **Robustness and Control:** Maintain strict adherence to business logic, preventing unwanted behaviors like prompt injection and hallucinations, leading to more reliable responses and secure interactions.
 - **Built-in Security:** Safeguard sensitive data, control access, and ensure secure deployment, essential for production environments that demand high levels of security and compliance.
 
+A [free developer license](https://rasa.com/docs/pro/intro/#who-rasa-pro-is-for) is available so you can explore and get to know Rasa Pro. It allows you to take your assistant live in production in a limited capacity. A paid license is required for larger-scale production use, but all code is visible and can be customized as needed.
 
+To get started right now, you can
 
-A [free developer license](https://rasa.com/docs/rasa-pro/developer-edition/) is available so you can explore and get to know Rasa Pro. For small production deployments, the Extended Developer License allows you to take your assistant live in a limited capacity. A paid license is required for larger-scale production use, but all code is visible and can be customized as needed.
-
-To get started right now, you can
-
-`pip install rasa-pro`
+`pip install rasa-pro`
 
-Check out our
+Check out our
 
-- [Rasa-pro Quickstart](https://rasa.com/docs/rasa-pro/installation/quickstart/),
-- [Conversational AI with Language Models (CALM) conceptual rundown](https://rasa.com/docs/rasa-pro/calm/),
-- [Rasa Pro / CALM tutorial](https://rasa.com/docs/rasa-pro/tutorial), and
-- [Rasa pro changelog](https://rasa.com/docs/rasa/rasa-pro-changelog/)
+- [Rasa-pro Quickstart](https://rasa.com/docs/learn/quickstart/pro),
+- [Conversational AI with Language Models (CALM) conceptual rundown](https://rasa.com/docs/learn/concepts/calm),
+- [Rasa Pro / CALM tutorial](https://rasa.com/docs/pro/tutorial), and
+- [Rasa pro changelog](https://rasa.com/docs/reference/changelogs/rasa-pro-changelog)
 
 for more. Also feel free to reach out to us on the [Rasa forum](https://forum.rasa.com/).