rasa-pro 3.11.0a4.dev3__py3-none-any.whl → 3.11.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro might be problematic.

Files changed (184)
  1. rasa/__main__.py +22 -12
  2. rasa/api.py +1 -1
  3. rasa/cli/arguments/default_arguments.py +1 -2
  4. rasa/cli/arguments/shell.py +5 -1
  5. rasa/cli/e2e_test.py +1 -1
  6. rasa/cli/evaluate.py +8 -8
  7. rasa/cli/inspect.py +6 -4
  8. rasa/cli/llm_fine_tuning.py +1 -1
  9. rasa/cli/project_templates/calm/config.yml +5 -7
  10. rasa/cli/project_templates/calm/endpoints.yml +8 -0
  11. rasa/cli/project_templates/tutorial/config.yml +8 -5
  12. rasa/cli/project_templates/tutorial/data/flows.yml +1 -1
  13. rasa/cli/project_templates/tutorial/data/patterns.yml +5 -0
  14. rasa/cli/project_templates/tutorial/domain.yml +14 -0
  15. rasa/cli/project_templates/tutorial/endpoints.yml +7 -7
  16. rasa/cli/run.py +1 -1
  17. rasa/cli/scaffold.py +4 -2
  18. rasa/cli/studio/studio.py +18 -8
  19. rasa/cli/utils.py +5 -0
  20. rasa/cli/x.py +8 -8
  21. rasa/constants.py +1 -1
  22. rasa/core/actions/action_repeat_bot_messages.py +17 -0
  23. rasa/core/channels/channel.py +20 -0
  24. rasa/core/channels/inspector/dist/assets/{arc-6852c607.js → arc-bc141fb2.js} +1 -1
  25. rasa/core/channels/inspector/dist/assets/{c4Diagram-d0fbc5ce-acc952b2.js → c4Diagram-d0fbc5ce-be2db283.js} +1 -1
  26. rasa/core/channels/inspector/dist/assets/{classDiagram-936ed81e-848a7597.js → classDiagram-936ed81e-55366915.js} +1 -1
  27. rasa/core/channels/inspector/dist/assets/{classDiagram-v2-c3cb15f1-a73d3e68.js → classDiagram-v2-c3cb15f1-bb529518.js} +1 -1
  28. rasa/core/channels/inspector/dist/assets/{createText-62fc7601-e5ee049d.js → createText-62fc7601-b0ec81d6.js} +1 -1
  29. rasa/core/channels/inspector/dist/assets/{edges-f2ad444c-771e517e.js → edges-f2ad444c-6166330c.js} +1 -1
  30. rasa/core/channels/inspector/dist/assets/{erDiagram-9d236eb7-aa347178.js → erDiagram-9d236eb7-5ccc6a8e.js} +1 -1
  31. rasa/core/channels/inspector/dist/assets/{flowDb-1972c806-651fc57d.js → flowDb-1972c806-fca3bfe4.js} +1 -1
  32. rasa/core/channels/inspector/dist/assets/{flowDiagram-7ea5b25a-ca67804f.js → flowDiagram-7ea5b25a-4739080f.js} +1 -1
  33. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-855bc5b3-736177bf.js +1 -0
  34. rasa/core/channels/inspector/dist/assets/{flowchart-elk-definition-abe16c3d-2dbc568d.js → flowchart-elk-definition-abe16c3d-7c1b0e0f.js} +1 -1
  35. rasa/core/channels/inspector/dist/assets/{ganttDiagram-9b5ea136-25a65bd8.js → ganttDiagram-9b5ea136-772fd050.js} +1 -1
  36. rasa/core/channels/inspector/dist/assets/{gitGraphDiagram-99d0ae7c-fdc7378d.js → gitGraphDiagram-99d0ae7c-8eae1dc9.js} +1 -1
  37. rasa/core/channels/inspector/dist/assets/{index-2c4b9a3b-6f1fd606.js → index-2c4b9a3b-f55afcdf.js} +1 -1
  38. rasa/core/channels/inspector/dist/assets/{index-efdd30c1.js → index-e7cef9de.js} +68 -68
  39. rasa/core/channels/inspector/dist/assets/{infoDiagram-736b4530-cb1a041a.js → infoDiagram-736b4530-124d4a14.js} +1 -1
  40. rasa/core/channels/inspector/dist/assets/{journeyDiagram-df861f2b-14609879.js → journeyDiagram-df861f2b-7c4fae44.js} +1 -1
  41. rasa/core/channels/inspector/dist/assets/{layout-2490f52b.js → layout-b9885fb6.js} +1 -1
  42. rasa/core/channels/inspector/dist/assets/{line-40186f1f.js → line-7c59abb6.js} +1 -1
  43. rasa/core/channels/inspector/dist/assets/{linear-08814e93.js → linear-4776f780.js} +1 -1
  44. rasa/core/channels/inspector/dist/assets/{mindmap-definition-beec6740-1a534584.js → mindmap-definition-beec6740-2332c46c.js} +1 -1
  45. rasa/core/channels/inspector/dist/assets/{pieDiagram-dbbf0591-72397b61.js → pieDiagram-dbbf0591-8fb39303.js} +1 -1
  46. rasa/core/channels/inspector/dist/assets/{quadrantDiagram-4d7f4fd6-3bb0b6a3.js → quadrantDiagram-4d7f4fd6-3c7180a2.js} +1 -1
  47. rasa/core/channels/inspector/dist/assets/{requirementDiagram-6fc4c22a-57334f61.js → requirementDiagram-6fc4c22a-e910bcb8.js} +1 -1
  48. rasa/core/channels/inspector/dist/assets/{sankeyDiagram-8f13d901-111e1297.js → sankeyDiagram-8f13d901-ead16c89.js} +1 -1
  49. rasa/core/channels/inspector/dist/assets/{sequenceDiagram-b655622a-10bcfe62.js → sequenceDiagram-b655622a-29a02a19.js} +1 -1
  50. rasa/core/channels/inspector/dist/assets/{stateDiagram-59f0c015-acaf7513.js → stateDiagram-59f0c015-042b3137.js} +1 -1
  51. rasa/core/channels/inspector/dist/assets/{stateDiagram-v2-2b26beab-3ec2a235.js → stateDiagram-v2-2b26beab-2178c0f3.js} +1 -1
  52. rasa/core/channels/inspector/dist/assets/{styles-080da4f6-62730289.js → styles-080da4f6-23ffa4fc.js} +1 -1
  53. rasa/core/channels/inspector/dist/assets/{styles-3dcbcfbf-5284ee76.js → styles-3dcbcfbf-94f59763.js} +1 -1
  54. rasa/core/channels/inspector/dist/assets/{styles-9c745c82-642435e3.js → styles-9c745c82-78a6bebc.js} +1 -1
  55. rasa/core/channels/inspector/dist/assets/{svgDrawCommon-4835440b-b250a350.js → svgDrawCommon-4835440b-eae2a6f6.js} +1 -1
  56. rasa/core/channels/inspector/dist/assets/{timeline-definition-5b62e21b-c2b147ed.js → timeline-definition-5b62e21b-5c968d92.js} +1 -1
  57. rasa/core/channels/inspector/dist/assets/{xychartDiagram-2b33534f-f92cfea9.js → xychartDiagram-2b33534f-fd3db0d5.js} +1 -1
  58. rasa/core/channels/inspector/dist/index.html +1 -1
  59. rasa/core/channels/inspector/src/App.tsx +1 -1
  60. rasa/core/channels/inspector/src/helpers/audiostream.ts +77 -16
  61. rasa/core/channels/socketio.py +2 -1
  62. rasa/core/channels/telegram.py +1 -1
  63. rasa/core/channels/twilio.py +1 -1
  64. rasa/core/channels/voice_ready/audiocodes.py +12 -0
  65. rasa/core/channels/voice_ready/jambonz.py +15 -4
  66. rasa/core/channels/voice_ready/twilio_voice.py +6 -21
  67. rasa/core/channels/voice_stream/asr/asr_event.py +5 -0
  68. rasa/core/channels/voice_stream/asr/azure.py +122 -0
  69. rasa/core/channels/voice_stream/asr/deepgram.py +16 -6
  70. rasa/core/channels/voice_stream/audio_bytes.py +1 -0
  71. rasa/core/channels/voice_stream/browser_audio.py +31 -8
  72. rasa/core/channels/voice_stream/call_state.py +23 -0
  73. rasa/core/channels/voice_stream/tts/azure.py +6 -2
  74. rasa/core/channels/voice_stream/tts/cartesia.py +10 -6
  75. rasa/core/channels/voice_stream/tts/tts_engine.py +1 -0
  76. rasa/core/channels/voice_stream/twilio_media_streams.py +27 -18
  77. rasa/core/channels/voice_stream/util.py +4 -4
  78. rasa/core/channels/voice_stream/voice_channel.py +189 -39
  79. rasa/core/featurizers/single_state_featurizer.py +22 -1
  80. rasa/core/featurizers/tracker_featurizers.py +115 -18
  81. rasa/core/nlg/contextual_response_rephraser.py +32 -30
  82. rasa/core/persistor.py +86 -39
  83. rasa/core/policies/enterprise_search_policy.py +119 -60
  84. rasa/core/policies/flows/flow_executor.py +7 -4
  85. rasa/core/policies/intentless_policy.py +78 -22
  86. rasa/core/policies/ted_policy.py +58 -33
  87. rasa/core/policies/unexpected_intent_policy.py +15 -7
  88. rasa/core/processor.py +25 -0
  89. rasa/core/training/interactive.py +34 -35
  90. rasa/core/utils.py +8 -3
  91. rasa/dialogue_understanding/coexistence/llm_based_router.py +39 -12
  92. rasa/dialogue_understanding/commands/change_flow_command.py +6 -0
  93. rasa/dialogue_understanding/commands/user_silence_command.py +59 -0
  94. rasa/dialogue_understanding/commands/utils.py +5 -0
  95. rasa/dialogue_understanding/generator/constants.py +2 -0
  96. rasa/dialogue_understanding/generator/flow_retrieval.py +49 -4
  97. rasa/dialogue_understanding/generator/llm_based_command_generator.py +37 -23
  98. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +57 -10
  99. rasa/dialogue_understanding/generator/nlu_command_adapter.py +19 -1
  100. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +71 -11
  101. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +39 -0
  102. rasa/dialogue_understanding/patterns/user_silence.py +37 -0
  103. rasa/dialogue_understanding/processor/command_processor.py +21 -1
  104. rasa/e2e_test/e2e_test_case.py +85 -6
  105. rasa/e2e_test/e2e_test_runner.py +4 -2
  106. rasa/e2e_test/utils/io.py +1 -1
  107. rasa/engine/validation.py +316 -10
  108. rasa/model_manager/config.py +15 -3
  109. rasa/model_manager/model_api.py +15 -7
  110. rasa/model_manager/runner_service.py +8 -6
  111. rasa/model_manager/socket_bridge.py +6 -3
  112. rasa/model_manager/trainer_service.py +7 -5
  113. rasa/model_manager/utils.py +28 -7
  114. rasa/model_service.py +9 -2
  115. rasa/model_training.py +2 -0
  116. rasa/nlu/classifiers/diet_classifier.py +38 -25
  117. rasa/nlu/classifiers/logistic_regression_classifier.py +22 -9
  118. rasa/nlu/classifiers/sklearn_intent_classifier.py +37 -16
  119. rasa/nlu/extractors/crf_entity_extractor.py +93 -50
  120. rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py +45 -16
  121. rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py +52 -17
  122. rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py +5 -3
  123. rasa/nlu/tokenizers/whitespace_tokenizer.py +3 -14
  124. rasa/server.py +3 -1
  125. rasa/shared/constants.py +36 -3
  126. rasa/shared/core/constants.py +7 -0
  127. rasa/shared/core/domain.py +26 -0
  128. rasa/shared/core/flows/flow.py +5 -0
  129. rasa/shared/core/flows/flows_list.py +5 -1
  130. rasa/shared/core/flows/flows_yaml_schema.json +10 -0
  131. rasa/shared/core/flows/utils.py +39 -0
  132. rasa/shared/core/flows/validation.py +96 -0
  133. rasa/shared/core/slots.py +5 -0
  134. rasa/shared/nlu/training_data/features.py +120 -2
  135. rasa/shared/providers/_configs/azure_openai_client_config.py +5 -3
  136. rasa/shared/providers/_configs/litellm_router_client_config.py +200 -0
  137. rasa/shared/providers/_configs/model_group_config.py +167 -0
  138. rasa/shared/providers/_configs/openai_client_config.py +1 -1
  139. rasa/shared/providers/_configs/rasa_llm_client_config.py +73 -0
  140. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +1 -0
  141. rasa/shared/providers/_configs/utils.py +16 -0
  142. rasa/shared/providers/embedding/_base_litellm_embedding_client.py +18 -29
  143. rasa/shared/providers/embedding/azure_openai_embedding_client.py +54 -21
  144. rasa/shared/providers/embedding/litellm_router_embedding_client.py +135 -0
  145. rasa/shared/providers/llm/_base_litellm_client.py +37 -31
  146. rasa/shared/providers/llm/azure_openai_llm_client.py +50 -29
  147. rasa/shared/providers/llm/litellm_router_llm_client.py +127 -0
  148. rasa/shared/providers/llm/rasa_llm_client.py +112 -0
  149. rasa/shared/providers/llm/self_hosted_llm_client.py +1 -1
  150. rasa/shared/providers/mappings.py +19 -0
  151. rasa/shared/providers/router/__init__.py +0 -0
  152. rasa/shared/providers/router/_base_litellm_router_client.py +149 -0
  153. rasa/shared/providers/router/router_client.py +73 -0
  154. rasa/shared/utils/common.py +8 -0
  155. rasa/shared/utils/health_check/__init__.py +0 -0
  156. rasa/shared/utils/health_check/embeddings_health_check_mixin.py +31 -0
  157. rasa/shared/utils/health_check/health_check.py +256 -0
  158. rasa/shared/utils/health_check/llm_health_check_mixin.py +31 -0
  159. rasa/shared/utils/io.py +28 -6
  160. rasa/shared/utils/llm.py +353 -46
  161. rasa/shared/utils/yaml.py +111 -73
  162. rasa/studio/auth.py +3 -5
  163. rasa/studio/config.py +13 -4
  164. rasa/studio/constants.py +1 -0
  165. rasa/studio/data_handler.py +10 -3
  166. rasa/studio/upload.py +81 -26
  167. rasa/telemetry.py +92 -17
  168. rasa/tracing/config.py +2 -0
  169. rasa/tracing/instrumentation/attribute_extractors.py +94 -17
  170. rasa/tracing/instrumentation/instrumentation.py +121 -0
  171. rasa/utils/common.py +5 -0
  172. rasa/utils/io.py +7 -81
  173. rasa/utils/log_utils.py +9 -2
  174. rasa/utils/sanic_error_handler.py +32 -0
  175. rasa/utils/tensorflow/feature_array.py +366 -0
  176. rasa/utils/tensorflow/model_data.py +2 -193
  177. rasa/validator.py +70 -0
  178. rasa/version.py +1 -1
  179. {rasa_pro-3.11.0a4.dev3.dist-info → rasa_pro-3.11.0rc2.dist-info}/METADATA +11 -10
  180. {rasa_pro-3.11.0a4.dev3.dist-info → rasa_pro-3.11.0rc2.dist-info}/RECORD +183 -163
  181. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-855bc5b3-587d82d8.js +0 -1
  182. {rasa_pro-3.11.0a4.dev3.dist-info → rasa_pro-3.11.0rc2.dist-info}/NOTICE +0 -0
  183. {rasa_pro-3.11.0a4.dev3.dist-info → rasa_pro-3.11.0rc2.dist-info}/WHEEL +0 -0
  184. {rasa_pro-3.11.0a4.dev3.dist-info → rasa_pro-3.11.0rc2.dist-info}/entry_points.txt +0 -0
rasa/telemetry.py CHANGED
@@ -32,7 +32,13 @@ from rasa.constants import (
     CONFIG_TELEMETRY_ENABLED,
     CONFIG_TELEMETRY_ID,
 )
-from rasa.shared.constants import PROMPT_CONFIG_KEY, PROMPT_TEMPLATE_CONFIG_KEY
+from rasa.shared.constants import (
+    PROMPT_CONFIG_KEY,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+    MODEL_GROUP_CONFIG_KEY,
+    LLM_API_HEALTH_CHECK_ENV_VAR,
+    LLM_API_HEALTH_CHECK_DEFAULT_VALUE,
+)
 from rasa.engine.storage.local_model_storage import LocalModelStorage
 from rasa.shared.constants import DOCS_URL_TELEMETRY, UTTER_ASK_PREFIX
 from rasa.shared.core.flows import Flow
@@ -106,6 +112,7 @@ TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT = "Interactive Learning Started"
 TELEMETRY_SERVER_STARTED_EVENT = "Server Started"
 TELEMETRY_PROJECT_CREATED_EVENT = "Project Created"
 TELEMETRY_SHELL_STARTED_EVENT = "Shell Started"
+TELEMETRY_INSPECT_STARTED_EVENT = "Inspect Started"
 TELEMETRY_VISUALIZATION_STARTED_EVENT = "Story Visualization Started"
 TELEMETRY_TEST_CORE_EVENT = "Model Core Tested"
 TELEMETRY_TEST_NLU_EVENT = "Model NLU Tested"
@@ -157,6 +164,7 @@ NUM_LINK_STEPS = "num_link_steps"
 NUM_CALL_STEPS = "num_call_steps"
 NUM_SHARED_SLOTS_BETWEEN_FLOWS = "num_shared_slots_between_flows"
 LLM_COMMAND_GENERATOR_MODEL_NAME = "llm_command_generator_model_name"
+LLM_COMMAND_GENERATOR_MODEL_GROUP_ID = "llm_command_generator_model_group_id"
 LLM_COMMAND_GENERATOR_CUSTOM_PROMPT_USED = "llm_command_generator_custom_prompt_used"
 MULTI_STEP_LLM_COMMAND_GENERATOR_HANDLE_FLOWS_PROMPT_USED = (
     "multi_step_llm_command_generator_custom_handle_flows_prompt_used"
@@ -166,6 +174,7 @@ MULTI_STEP_LLM_COMMAND_GENERATOR_FILL_SLOTS_PROMPT_USED = (
 )
 FLOW_RETRIEVAL_ENABLED = "flow_retrieval_enabled"
 FLOW_RETRIEVAL_EMBEDDING_MODEL_NAME = "flow_retrieval_embedding_model_name"
+FLOW_RETRIEVAL_EMBEDDING_MODEL_GROUP_ID = "flow_retrieval_embedding_model_group_id"
 TRACING_BACKEND = "tracing_backend"
 METRICS_BACKEND = "metrics_backend"
 VERSION = "version"
@@ -960,6 +969,13 @@ def track_model_training(
         "policies": config.get("policies"),
         "train_schema": config.get("train_schema"),
         "predict_schema": config.get("predict_schema"),
+        "model_groups": rasa.core.utils.AvailableEndpoints.get_instance().model_groups,
+        "api_health_check_enabled": (
+            os.getenv(
+                LLM_API_HEALTH_CHECK_ENV_VAR, LLM_API_HEALTH_CHECK_DEFAULT_VALUE
+            ).lower()
+            == "true"
+        ),
         "num_intent_examples": len(nlu_data.intent_examples),
         "num_entity_examples": len(nlu_data.entity_examples),
         "num_actions": len(domain.action_names_or_texts),
@@ -1114,46 +1130,76 @@ def _get_llm_command_generator_config(config: Dict[str, Any]) -> Optional[Dict]:
                 return component
         return None

-    def extract_settings(component: Dict) -> Dict:
-        """Extracts the settings from the command generator component."""
+    def extract_llm_command_generator_llm_client_settings(component: Dict) -> Dict:
+        """Extracts settings related to LLM command generator."""
         llm_config = component.get(LLM_CONFIG_KEY, {})
-        llm_model_name = (
-            llm_config.get(MODEL_CONFIG_KEY)
-            or llm_config.get(MODEL_NAME_CONFIG_KEY)
-            or DEFAULT_LLM_CONFIG[MODEL_CONFIG_KEY]
+        llm_model_group_id = llm_config.get(MODEL_GROUP_CONFIG_KEY)
+        llm_model_name = llm_config.get(MODEL_CONFIG_KEY) or llm_config.get(
+            MODEL_NAME_CONFIG_KEY
         )
+        if llm_model_group_id is None and llm_model_name is None:
+            llm_model_name = DEFAULT_LLM_CONFIG[MODEL_CONFIG_KEY]
+
+        custom_prompt_used = (
+            PROMPT_CONFIG_KEY in component or PROMPT_TEMPLATE_CONFIG_KEY in component
+        )
+        return {
+            LLM_COMMAND_GENERATOR_MODEL_NAME: llm_model_name,
+            LLM_COMMAND_GENERATOR_MODEL_GROUP_ID: llm_model_group_id,
+            LLM_COMMAND_GENERATOR_CUSTOM_PROMPT_USED: custom_prompt_used,
+        }
+
+    def extract_multistep_command_generator_prompt_settings(component: Dict) -> Dict:
+        """Extracts settings related to multistep command generator."""
+        prompt_templates = component.get("prompt_templates", {})
+        handle_flows_prompt_used = HANDLE_FLOWS_KEY in prompt_templates
+        fill_slots_prompt_used = FILL_SLOTS_KEY in prompt_templates
+        return {
+            MULTI_STEP_LLM_COMMAND_GENERATOR_HANDLE_FLOWS_PROMPT_USED: handle_flows_prompt_used,  # noqa: E501
+            MULTI_STEP_LLM_COMMAND_GENERATOR_FILL_SLOTS_PROMPT_USED: fill_slots_prompt_used,  # noqa: E501
+        }
+
+    def extract_flow_retrieval_settings(component: Dict) -> Dict:
+        """Extracts settings related to flow retrieval."""
         flow_retrieval_config = component.get(FLOW_RETRIEVAL_KEY, {})
         flow_retrieval_enabled = flow_retrieval_config.get("active", True)
-        flow_retrieval_embeddings_config = flow_retrieval_config.get(
+        embeddings_config = flow_retrieval_config.get(
             EMBEDDINGS_CONFIG_KEY, DEFAULT_EMBEDDINGS_CONFIG
         )
         flow_retrieval_embedding_model_name = (
             (
-                flow_retrieval_embeddings_config.get(MODEL_NAME_CONFIG_KEY)
-                or flow_retrieval_embeddings_config.get(MODEL_CONFIG_KEY)
+                embeddings_config.get(MODEL_NAME_CONFIG_KEY)
+                or embeddings_config.get(MODEL_CONFIG_KEY)
             )
             if flow_retrieval_enabled
             else None
         )
+        flow_retrieval_embedding_model_group_id = embeddings_config.get(
+            MODEL_GROUP_CONFIG_KEY
+        )
         return {
-            LLM_COMMAND_GENERATOR_MODEL_NAME: llm_model_name,
-            LLM_COMMAND_GENERATOR_CUSTOM_PROMPT_USED: PROMPT_CONFIG_KEY in component
-            or PROMPT_TEMPLATE_CONFIG_KEY in component,
-            MULTI_STEP_LLM_COMMAND_GENERATOR_HANDLE_FLOWS_PROMPT_USED: HANDLE_FLOWS_KEY
-            in component.get("prompt_templates", {}),
-            MULTI_STEP_LLM_COMMAND_GENERATOR_FILL_SLOTS_PROMPT_USED: FILL_SLOTS_KEY
-            in component.get("prompt_templates", {}),
             FLOW_RETRIEVAL_ENABLED: flow_retrieval_enabled,
             FLOW_RETRIEVAL_EMBEDDING_MODEL_NAME: flow_retrieval_embedding_model_name,
+            FLOW_RETRIEVAL_EMBEDDING_MODEL_GROUP_ID: flow_retrieval_embedding_model_group_id,  # noqa: E501
         }

+    def extract_settings(component: Dict) -> Dict:
+        """Extracts the settings from the command generator component."""
+        settings = {}
+        settings.update(extract_llm_command_generator_llm_client_settings(component))
+        settings.update(extract_multistep_command_generator_prompt_settings(component))
+        settings.update(extract_flow_retrieval_settings(component))
+        return settings
+
     command_generator_config = {
         LLM_COMMAND_GENERATOR_MODEL_NAME: None,
+        LLM_COMMAND_GENERATOR_MODEL_GROUP_ID: None,
         LLM_COMMAND_GENERATOR_CUSTOM_PROMPT_USED: None,
         MULTI_STEP_LLM_COMMAND_GENERATOR_HANDLE_FLOWS_PROMPT_USED: None,
         MULTI_STEP_LLM_COMMAND_GENERATOR_FILL_SLOTS_PROMPT_USED: None,
         FLOW_RETRIEVAL_ENABLED: None,
         FLOW_RETRIEVAL_EMBEDDING_MODEL_NAME: None,
+        FLOW_RETRIEVAL_EMBEDDING_MODEL_GROUP_ID: None,
     }

     pipeline = config.get("pipeline", [])
@@ -1333,6 +1379,17 @@ def track_shell_started(model_type: Text) -> None:
     _track(TELEMETRY_SHELL_STARTED_EVENT, {"type": model_type})


+@ensure_telemetry_enabled
+def track_inspect_started(model_type: Text) -> None:
+    """Track when a user starts a bot using rasa inspect.
+
+    Args:
+        channel: Channel name `socketio` (used for chat assistants)
+            or `browser_audio` (used for voice).
+    """
+    _track(TELEMETRY_INSPECT_STARTED_EVENT, {"type": model_type})
+
+
 @ensure_telemetry_enabled
 def track_visualization() -> None:
     """Track when a user runs the visualization."""
@@ -1553,6 +1610,7 @@ def track_response_rephrase(
     custom_prompt_template: Optional[str],
     llm_type: Optional[str],
     llm_model: Optional[str],
+    llm_model_group_id: Optional[str],
 ) -> None:
     """Track when a user rephrases a response."""
     _track(
@@ -1562,6 +1620,7 @@
             "custom_prompt_template": custom_prompt_template,
             "llm_type": llm_type,
             "llm_model": llm_model,
+            "llm_model_group_id": llm_model_group_id,
         },
     )

@@ -1576,8 +1635,10 @@ def track_intentless_policy_train() -> None:
 def track_intentless_policy_train_completed(
     embeddings_type: Optional[str],
     embeddings_model: Optional[str],
+    embeddings_model_group_id: Optional[str],
     llm_type: Optional[str],
     llm_model: Optional[str],
+    llm_model_group_id: Optional[str],
 ) -> None:
     """Track when a user trains a policy."""
     _track(
@@ -1585,8 +1646,10 @@
         {
             "embeddings_type": embeddings_type,
             "embeddings_model": embeddings_model,
+            "embeddings_model_group_id": embeddings_model_group_id,
             "llm_type": llm_type,
             "llm_model": llm_model,
+            "llm_model_group_id": llm_model_group_id,
         },
     )

@@ -1595,8 +1658,10 @@ def track_intentless_policy_train_completed(
 def track_intentless_policy_predict(
     embeddings_type: Optional[str],
     embeddings_model: Optional[str],
+    embeddings_model_group_id: Optional[str],
     llm_type: Optional[str],
     llm_model: Optional[str],
+    llm_model_group_id: Optional[str],
     score: float,
 ) -> None:
     """Track when a user trains a policy."""
@@ -1605,8 +1670,10 @@
         {
             "embeddings_type": embeddings_type,
             "embeddings_model": embeddings_model,
+            "embeddings_model_group_id": embeddings_model_group_id,
             "llm_type": llm_type,
             "llm_model": llm_model,
+            "llm_model_group_id": llm_model_group_id,
             "score": score,
         },
     )
@@ -1696,8 +1763,10 @@ def track_enterprise_search_policy_train_completed(
     vector_store_type: Optional[str],
     embeddings_type: Optional[str],
     embeddings_model: Optional[str],
+    embeddings_model_group_id: Optional[str],
     llm_type: Optional[str],
     llm_model: Optional[str],
+    llm_model_group_id: Optional[str],
     citation_enabled: Optional[bool],
 ) -> None:
     """Track when a user completes training Enterprise Search policy."""
@@ -1707,8 +1776,10 @@
             "vector_store_type": vector_store_type,
             "embeddings_type": embeddings_type,
             "embeddings_model": embeddings_model,
+            "embeddings_model_group_id": embeddings_model_group_id,
             "llm_type": llm_type,
             "llm_model": llm_model,
+            "llm_model_group_id": llm_model_group_id,
             "citation_enabled": citation_enabled,
         },
     )
@@ -1719,8 +1790,10 @@ def track_enterprise_search_policy_predict(
     vector_store_type: Optional[str],
     embeddings_type: Optional[str],
     embeddings_model: Optional[str],
+    embeddings_model_group_id: Optional[str],
     llm_type: Optional[str],
     llm_model: Optional[str],
+    llm_model_group_id: Optional[str],
     citation_enabled: Optional[bool],
 ) -> None:
     """Track when a user predicts the next action using Enterprise Search policy."""
@@ -1730,8 +1803,10 @@
             "vector_store_type": vector_store_type,
             "embeddings_type": embeddings_type,
             "embeddings_model": embeddings_model,
+            "embeddings_model_group_id": embeddings_model_group_id,
             "llm_type": llm_type,
             "llm_model": llm_model,
+            "llm_model_group_id": llm_model_group_id,
             "citation_enabled": citation_enabled,
         },
     )
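
Note on the telemetry change above: the old monolithic extract_settings is split into three per-concern helpers whose results are merged, and a model group id is now reported alongside (or instead of) a concrete model name. A minimal, self-contained sketch of that composition follows; the config keys ("llm", "model", "model_name", "model_group") and the default model value are illustrative stand-ins, and only the reported telemetry key names are taken from the diff.

# Sketch only: mirrors the split of extract_settings into per-concern helpers.
# The config keys and DEFAULT_MODEL below are assumptions, not rasa constants.
from typing import Any, Dict

DEFAULT_MODEL = "gpt-4"  # placeholder default

def extract_llm_client_settings(component: Dict[str, Any]) -> Dict[str, Any]:
    llm_config = component.get("llm", {})
    model_group_id = llm_config.get("model_group")
    model_name = llm_config.get("model") or llm_config.get("model_name")
    # fall back to the default only when neither a model nor a model group is set
    if model_group_id is None and model_name is None:
        model_name = DEFAULT_MODEL
    return {
        "llm_command_generator_model_name": model_name,
        "llm_command_generator_model_group_id": model_group_id,
        "llm_command_generator_custom_prompt_used": (
            "prompt" in component or "prompt_template" in component
        ),
    }

def extract_settings(component: Dict[str, Any]) -> Dict[str, Any]:
    settings: Dict[str, Any] = {}
    settings.update(extract_llm_client_settings(component))
    # the flow-retrieval and multistep-prompt helpers are merged the same way
    return settings

# A component that only references a model group reports no model name:
print(extract_settings({"llm": {"model_group": "gpt-4-group"}}))
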
rasa/tracing/config.py CHANGED
@@ -33,6 +33,7 @@ from rasa.dialogue_understanding.generator import (
     SingleStepLLMCommandGenerator,
     MultiStepLLMCommandGenerator,
 )
+from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
 from rasa.dialogue_understanding.generator.nlu_command_adapter import NLUCommandAdapter
 from rasa.engine.graph import GraphNode
 from rasa.engine.training.graph_trainer import GraphTrainer
@@ -111,6 +112,7 @@ def configure_tracing(tracer_provider: Optional[TracerProvider]) -> None:
         single_step_llm_command_generator_class=SingleStepLLMCommandGenerator,
         multi_step_llm_command_generator_class=MultiStepLLMCommandGenerator,
         custom_action_executor_subclasses=custom_action_executor_subclasses,
+        flow_retrieval_class=FlowRetrieval,
     )

rasa/tracing/instrumentation/attribute_extractors.py CHANGED
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text, Tuple, Union

 import tiktoken
 from numpy import ndarray
+from rasa.dialogue_understanding.generator.constants import FLOW_RETRIEVAL_KEY
 from rasa_sdk.grpc_py import action_webhook_pb2

 from rasa.core.actions.action import DirectCustomActionExecutor
@@ -19,6 +20,7 @@ from rasa.core.processor import MessageProcessor
 from rasa.core.tracker_store import TrackerStore
 from rasa.dialogue_understanding.commands import Command
 from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
+from rasa.dialogue_understanding.generator import LLMBasedCommandGenerator
 from rasa.engine.graph import ExecutionContext, GraphModelConfiguration, GraphNode
 from rasa.engine.training.graph_trainer import GraphTrainer
 from rasa.shared.constants import (
@@ -27,6 +29,8 @@ from rasa.shared.constants import (
     PROVIDER_CONFIG_KEY,
     TIMEOUT_CONFIG_KEY,
     DEPLOYMENT_CONFIG_KEY,
+    MODEL_GROUP_ID_CONFIG_KEY,
+    LLM_CONFIG_KEY,
 )
 from rasa.shared.core.constants import REQUESTED_SLOT
 from rasa.shared.core.domain import Domain
@@ -50,10 +54,7 @@ if TYPE_CHECKING:
     from rasa.core.policies.enterprise_search_policy import EnterpriseSearchPolicy
     from rasa.core.policies.intentless_policy import IntentlessPolicy
     from rasa.core.policies.policy import PolicyPrediction
-    from rasa.dialogue_understanding.generator import (
-        CommandGenerator,
-        LLMBasedCommandGenerator,
-    )
+    from rasa.dialogue_understanding.generator import CommandGenerator

 # This file contains all attribute extractors for tracing instrumentation.
 # These are functions that are applied to the arguments of the wrapped function to be
@@ -300,22 +301,49 @@ def extract_attrs_for_command(
     }


-def extract_llm_config(self: Any, default_llm_config: Dict[str, Any]) -> Dict[str, Any]:
+def extract_llm_config(
+    self: Any,
+    default_llm_config: Dict[str, Any],
+    default_embeddings_config: Dict[str, Any],
+) -> Dict[str, Any]:
     if isinstance(self, ContextualResponseRephraser):
-        config = self.nlg_endpoint.kwargs
+        # ContextualResponseRephraser is not a graph component, so it's
+        # not having a full config.
+        config = {"llm": self.llm_config}
     else:
         config = self.config

     llm_property = combine_custom_and_default_config(
-        config.get("llm"), default_llm_config
+        config.get(LLM_CONFIG_KEY), default_llm_config
     )

+    if isinstance(self, LLMBasedCommandGenerator):
+        flow_retrieval_config = config.get(FLOW_RETRIEVAL_KEY, {}) or {}
+        embeddings_property = combine_custom_and_default_config(
+            flow_retrieval_config.get(EMBEDDINGS_CONFIG_KEY),
+            default_embeddings_config,
+        )
+    else:
+        embeddings_property = combine_custom_and_default_config(
+            config.get(EMBEDDINGS_CONFIG_KEY), default_embeddings_config
+        )
+
     attributes = {
         "class_name": self.__class__.__name__,
+        # llm client attributes
         "llm_model": str(llm_property.get(MODEL_CONFIG_KEY)),
         "llm_type": str(llm_property.get(PROVIDER_CONFIG_KEY)),
-        "embeddings": json.dumps(config.get(EMBEDDINGS_CONFIG_KEY, {})),
+        "llm_model_group_id": str(llm_property.get(MODEL_GROUP_ID_CONFIG_KEY)),
         "llm_temperature": str(llm_property.get("temperature")),
+        "llm_request_timeout": str(llm_property.get(TIMEOUT_CONFIG_KEY)),
+        # embedding client attributes
+        "embeddings_model": str(embeddings_property.get(MODEL_CONFIG_KEY)),
+        "embeddings_type": str(embeddings_property.get(PROVIDER_CONFIG_KEY)),
+        "embeddings_model_group_id": str(
+            embeddings_property.get(MODEL_GROUP_ID_CONFIG_KEY)
+        ),
+        # TODO: Keeping this to avoid potential breaking changes
+        "embeddings": json.dumps(embeddings_property, sort_keys=True),
         "request_timeout": str(llm_property.get(TIMEOUT_CONFIG_KEY)),
     }

@@ -329,11 +357,16 @@ def extract_attrs_for_llm_based_command_generator(
     self: "LLMBasedCommandGenerator",
     prompt: str,
 ) -> Dict[str, Any]:
-    from rasa.dialogue_understanding.generator.constants import (
-        DEFAULT_LLM_CONFIG,
+    from rasa.dialogue_understanding.generator.constants import DEFAULT_LLM_CONFIG
+    from rasa.dialogue_understanding.generator.flow_retrieval import (
+        DEFAULT_EMBEDDINGS_CONFIG,
     )

-    attributes = extract_llm_config(self, default_llm_config=DEFAULT_LLM_CONFIG)
+    attributes = extract_llm_config(
+        self,
+        default_llm_config=DEFAULT_LLM_CONFIG,
+        default_embeddings_config=DEFAULT_EMBEDDINGS_CONFIG,
+    )

     return extend_attributes_with_prompt_tokens_length(self, attributes, prompt)

@@ -344,7 +377,12 @@ def extract_attrs_for_contextual_response_rephraser(
 ) -> Dict[str, Any]:
     from rasa.core.nlg.contextual_response_rephraser import DEFAULT_LLM_CONFIG

-    attributes = extract_llm_config(self, default_llm_config=DEFAULT_LLM_CONFIG)
+    attributes = extract_llm_config(
+        self,
+        default_llm_config=DEFAULT_LLM_CONFIG,
+        # rephraser is not using embeddings
+        default_embeddings_config={},
+    )

     return extend_attributes_with_prompt_tokens_length(self, attributes, prompt)

@@ -355,7 +393,12 @@ def extract_attrs_for_create_history(
 ) -> Dict[str, Any]:
     from rasa.core.nlg.contextual_response_rephraser import DEFAULT_LLM_CONFIG

-    return extract_llm_config(self, default_llm_config=DEFAULT_LLM_CONFIG)
+    return extract_llm_config(
+        self,
+        default_llm_config=DEFAULT_LLM_CONFIG,
+        # rephraser is not using embeddings
+        default_embeddings_config={},
+    )


 def extract_attrs_for_generate(
@@ -371,6 +414,26 @@ def extract_attrs_for_generate(
     }


+def extract_attrs_for_performing_health_check(
+    custom_config: Optional[Dict[str, Any]],
+    default_config: Dict[str, Any],
+    log_source_method: str,
+    log_source_component: str,
+) -> Dict[str, Any]:
+    from rasa.shared.utils.health_check.health_check import is_api_health_check_enabled
+
+    attrs = {
+        "api_health_check_enabled": is_api_health_check_enabled(),
+        "health_check_trigger_component": log_source_component,
+        "health_check_trigger_method": log_source_method,
+    }
+    if is_api_health_check_enabled():
+        attrs["config"] = json.dumps(
+            combine_custom_and_default_config(custom_config, default_config)
+        )
+    return attrs
+
+
 def extract_attrs_for_execute_commands(
     tracker: DialogueStateTracker,
     all_flows: FlowsList,
@@ -580,9 +643,16 @@ def extract_attrs_for_intentless_policy_find_closest_response(
 def extract_attrs_for_intentless_policy_generate_llm_answer(
     self: "IntentlessPolicy", llm: "BaseLLM", prompt: str
 ) -> Dict[str, Any]:
-    from rasa.core.policies.intentless_policy import DEFAULT_LLM_CONFIG
+    from rasa.core.policies.intentless_policy import (
+        DEFAULT_LLM_CONFIG,
+        DEFAULT_EMBEDDINGS_CONFIG,
+    )

-    attributes = extract_llm_config(self, default_llm_config=DEFAULT_LLM_CONFIG)
+    attributes = extract_llm_config(
+        self,
+        default_llm_config=DEFAULT_LLM_CONFIG,
+        default_embeddings_config=DEFAULT_EMBEDDINGS_CONFIG,
+    )

     return extend_attributes_with_prompt_tokens_length(self, attributes, prompt)

@@ -590,9 +660,16 @@ def extract_attrs_for_intentless_policy_generate_llm_answer(
 def extract_attrs_for_enterprise_search_generate_llm_answer(
     self: "EnterpriseSearchPolicy", llm: "BaseLLM", prompt: str
 ) -> Dict[str, Any]:
-    from rasa.core.policies.enterprise_search_policy import DEFAULT_LLM_CONFIG
+    from rasa.core.policies.enterprise_search_policy import (
+        DEFAULT_LLM_CONFIG,
+        DEFAULT_EMBEDDINGS_CONFIG,
+    )

-    attributes = extract_llm_config(self, default_llm_config=DEFAULT_LLM_CONFIG)
+    attributes = extract_llm_config(
+        self,
+        default_llm_config=DEFAULT_LLM_CONFIG,
+        default_embeddings_config=DEFAULT_EMBEDDINGS_CONFIG,
+    )

     return extend_attributes_with_prompt_tokens_length(self, attributes, prompt)
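
The new extract_attrs_for_performing_health_check above only serializes the merged config when the API health check is enabled. A rough standalone sketch of that behaviour, assuming combine_custom_and_default_config simply lets custom keys override the defaults and substituting a placeholder environment variable for LLM_API_HEALTH_CHECK_ENV_VAR:

import json
import os
from typing import Any, Dict, Optional

def combine_custom_and_default_config(
    custom: Optional[Dict[str, Any]], default: Dict[str, Any]
) -> Dict[str, Any]:
    # assumed merge semantics: custom keys win over defaults
    return {**default, **(custom or {})}

def health_check_attrs(
    custom_config: Optional[Dict[str, Any]],
    default_config: Dict[str, Any],
    log_source_method: str,
    log_source_component: str,
) -> Dict[str, Any]:
    # stand-in for is_api_health_check_enabled(); the real env var name comes
    # from rasa.shared.constants.LLM_API_HEALTH_CHECK_ENV_VAR
    enabled = os.getenv("LLM_API_HEALTH_CHECK", "false").lower() == "true"
    attrs: Dict[str, Any] = {
        "api_health_check_enabled": enabled,
        "health_check_trigger_component": log_source_component,
        "health_check_trigger_method": log_source_method,
    }
    if enabled:
        attrs["config"] = json.dumps(
            combine_custom_and_default_config(custom_config, default_config)
        )
    return attrs

print(
    health_check_attrs(
        {"model": "gpt-4"}, {"provider": "openai"}, "train", "SingleStepLLMCommandGenerator"
    )
)
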
rasa/tracing/instrumentation/instrumentation.py CHANGED
@@ -45,6 +45,7 @@ from rasa.dialogue_understanding.generator import (
     MultiStepLLMCommandGenerator,
     SingleStepLLMCommandGenerator,
 )
+from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
 from rasa.dialogue_understanding.generator.nlu_command_adapter import NLUCommandAdapter
 from rasa.engine.graph import GraphNode
 from rasa.engine.training.graph_trainer import GraphTrainer
@@ -283,6 +284,7 @@ SingleStepLLMCommandGeneratorType = TypeVar(
 MultiStepLLMCommandGeneratorType = TypeVar(
     "MultiStepLLMCommandGeneratorType", bound=MultiStepLLMCommandGenerator
 )
+FlowRetrievalType = TypeVar("FlowRetrievalType", bound=FlowRetrieval)
 CommandType = TypeVar("CommandType", bound=Command)
 PolicyType = TypeVar("PolicyType", bound=Policy)
 InformationRetrievalType = TypeVar(
@@ -317,6 +319,7 @@ def instrument(
     custom_action_executor_subclasses: Optional[
         List[Type[CustomActionExecutor]]
     ] = None,
+    flow_retrieval_class: Optional[Type[FlowRetrievalType]] = None,
 ) -> None:
     """Substitute methods to be traced by their traced counterparts.

@@ -445,6 +448,12 @@ def instrument(
             "_check_commands_against_startable_flows",
             attribute_extractors.extract_attrs_for_check_commands_against_startable_flows,
         )
+        _instrument_perform_health_check_method_for_component(
+            tracer_provider.get_tracer(llm_command_generator_class.__module__),
+            llm_command_generator_class,
+            "perform_llm_health_check",
+            attribute_extractors.extract_attrs_for_performing_health_check,
+        )
         mark_class_as_instrumented(llm_command_generator_class)

     if (
@@ -468,6 +477,14 @@ def instrument(
             "_check_commands_against_startable_flows",
             attribute_extractors.extract_attrs_for_check_commands_against_startable_flows,
         )
+        _instrument_perform_health_check_method_for_component(
+            tracer_provider.get_tracer(
+                single_step_llm_command_generator_class.__module__
+            ),
+            single_step_llm_command_generator_class,
+            "perform_llm_health_check",
+            attribute_extractors.extract_attrs_for_performing_health_check,
+        )
         mark_class_as_instrumented(single_step_llm_command_generator_class)

     if multi_step_llm_command_generator_class is not None and not class_is_instrumented(
@@ -488,8 +505,36 @@ def instrument(
             ),
             multi_step_llm_command_generator_class,
         )
+        _instrument_perform_health_check_method_for_component(
+            tracer_provider.get_tracer(
+                multi_step_llm_command_generator_class.__module__
+            ),
+            multi_step_llm_command_generator_class,
+            "perform_llm_health_check",
+            attribute_extractors.extract_attrs_for_performing_health_check,
+        )
         mark_class_as_instrumented(multi_step_llm_command_generator_class)

+    if (
+        any(
+            llm_based_command_generator_class is not None
+            for llm_based_command_generator_class in (
+                llm_command_generator_class,
+                single_step_llm_command_generator_class,
+                multi_step_llm_command_generator_class,
+            )
+        )
+        and flow_retrieval_class is not None
+        and not class_is_instrumented(flow_retrieval_class)
+    ):
+        _instrument_perform_health_check_method_for_component(
+            tracer_provider.get_tracer(flow_retrieval_class.__module__),
+            flow_retrieval_class,
+            "perform_embeddings_health_check",
+            attribute_extractors.extract_attrs_for_performing_health_check,
+        )
+        mark_class_as_instrumented(flow_retrieval_class)
+
     if command_subclasses:
         for command_subclass in command_subclasses:
             if command_subclass is not None and not class_is_instrumented(
@@ -524,6 +569,12 @@ def instrument(
             "generate",
             attribute_extractors.extract_attrs_for_generate,
         )
+        _instrument_perform_health_check_method_for_component(
+            tracer_provider.get_tracer(contextual_response_rephraser_class.__module__),
+            contextual_response_rephraser_class,
+            "perform_llm_health_check",
+            attribute_extractors.extract_attrs_for_performing_health_check,
+        )
         mark_class_as_instrumented(contextual_response_rephraser_class)

     if not module_is_instrumented(COMMAND_PROCESSOR_MODULE_NAME):
@@ -755,6 +806,18 @@ def _instrument_enterprise_search_policy(
         "_generate_llm_answer",
         attribute_extractors.extract_attrs_for_enterprise_search_generate_llm_answer,
     )
+    _instrument_perform_health_check_method_for_component(
+        tracer_provider.get_tracer(policy_class.__module__),
+        policy_class,
+        "perform_embeddings_health_check",
+        attribute_extractors.extract_attrs_for_performing_health_check,
+    )
+    _instrument_perform_health_check_method_for_component(
+        tracer_provider.get_tracer(policy_class.__module__),
+        policy_class,
+        "perform_llm_health_check",
+        attribute_extractors.extract_attrs_for_performing_health_check,
+    )


 def _instrument_intentless_policy(
@@ -787,6 +850,18 @@ def _instrument_intentless_policy(
         "_generate_llm_answer",
         attribute_extractors.extract_attrs_for_intentless_policy_generate_llm_answer,
     )
+    _instrument_perform_health_check_method_for_component(
+        tracer_provider.get_tracer(policy_class.__module__),
+        policy_class,
+        "perform_embeddings_health_check",
+        attribute_extractors.extract_attrs_for_performing_health_check,
+    )
+    _instrument_perform_health_check_method_for_component(
+        tracer_provider.get_tracer(policy_class.__module__),
+        policy_class,
+        "perform_llm_health_check",
+        attribute_extractors.extract_attrs_for_performing_health_check,
+    )


 def _instrument_processor(
@@ -1139,6 +1214,52 @@ def _instrument_grpc_custom_action_executor(
     logger.debug(f"Instrumented '{grpc_custom_action_executor_class.__name__}.run.")


+def _instrument_perform_health_check_method_for_component(
+    tracer: Tracer,
+    instrumented_class: Type,
+    method_name: Text,
+    attr_extractor: Optional[Callable] = None,
+    return_value_attr_extractor: Optional[Callable] = None,
+) -> None:
+    def tracing_perform_health_check_for_component(
+        fn: Callable[..., S],
+    ) -> Callable[..., S]:
+        @functools.wraps(fn)
+        def wrapper(*args: Any, **kwargs: Any) -> S:
+            # Check the first argument to adjust for self/cls depending on how
+            # the static method from LLMHealthCheckMixin / EmbeddingsLLMHealthCheckMixin
+            # is called.
+            if args and isinstance(
+                args[0], (instrumented_class, type(instrumented_class))
+            ):
+                # The first argument is self/cls; align args to match the signature
+                args = args[1:]
+
+            span_name = f"{instrumented_class.__name__}.{fn.__name__}"
+            extracted_attrs = attr_extractor(*args, **kwargs) if attr_extractor else {}
+
+            with tracer.start_as_current_span(span_name) as span:
+                result = fn(*args, **kwargs)
+
+                # Extract attributes from the return value, if an extractor is provided
+                return_value_attributes = (
+                    return_value_attr_extractor(result, *args, **kwargs)
+                    if return_value_attr_extractor
+                    else {}
+                )
+
+                span.set_attributes({**extracted_attrs, **return_value_attributes})
+                return result
+
+        return wrapper
+
+    method_to_trace = getattr(instrumented_class, method_name)
+    traced_method = tracing_perform_health_check_for_component(method_to_trace)
+    setattr(instrumented_class, method_name, traced_method)
+
+    logger.debug(f"Instrumented '{instrumented_class.__name__}.{method_name}'.")
+
+
 def _mangled_instrumented_boolean_attribute_name(instrumented_class: Type) -> Text:
     # see https://peps.python.org/pep-0008/#method-names-and-instance-variables
     # and https://stackoverflow.com/a/50401073
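
The self/cls check inside _instrument_perform_health_check_method_for_component is needed because setattr() replaces what was originally a static method (from the health-check mixins) with a plain function, so calls through an instance start binding that instance as the first positional argument. A toy reproduction of the effect, independent of the rasa classes:

import functools

class Component:
    @staticmethod
    def perform_health_check(config: dict) -> str:
        return f"checked {config}"

def instrument(cls: type, method_name: str) -> None:
    original = getattr(cls, method_name)

    @functools.wraps(original)
    def wrapper(*args, **kwargs):
        # Drop self/cls when the call came through an instance or the class,
        # mirroring the alignment done in the wrapper above.
        if args and isinstance(args[0], (cls, type(cls))):
            args = args[1:]
        print(f"span: {cls.__name__}.{original.__name__}")
        return original(*args, **kwargs)

    # Replacing the staticmethod with a plain function changes call binding.
    setattr(cls, method_name, wrapper)

instrument(Component, "perform_health_check")
print(Component.perform_health_check({"model": "gpt-4"}))    # class-level call
print(Component().perform_health_check({"model": "gpt-4"}))  # instance call binds self
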
rasa/utils/common.py CHANGED
@@ -90,6 +90,11 @@ EXPECTED_WARNINGS: List[Tuple[Type[Warning], str]] = [
     # Ignore Keras DeprecationWarning since it requires that we
     # upgrade tensorflow-macos to 2.13.0 version.
     (DeprecationWarning, "invalid escape sequence*"),
+    # Ignore importlib open_text and read_text warnings for now
+    (
+        DeprecationWarning,
+        "https://importlib-resources.readthedocs.io/en/latest/using.html#migrating-from-legacy",
+    ),
 ]

 PYTHON_LOGGING_SCHEMA_DOCS = (
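
For context, each EXPECTED_WARNINGS entry pairs a warning category with a message pattern. The code that consumes this list is not part of the diff, but such pairs are typically applied along these lines (illustrative only):

import re
import warnings

EXPECTED_WARNINGS = [
    (
        DeprecationWarning,
        "https://importlib-resources.readthedocs.io/en/latest/using.html#migrating-from-legacy",
    ),
]

for category, message_pattern in EXPECTED_WARNINGS:
    # filterwarnings matches its regex against the start of the warning text,
    # so allow the pattern to appear anywhere in the message
    warnings.filterwarnings(
        "ignore", message=f".*{re.escape(message_pattern)}", category=category
    )
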