langwatch-scenario 0.7.3.tar.gz → 0.7.8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (255)
  1. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/PKG-INFO +14 -12
  2. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/README.md +12 -10
  3. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/langwatch_scenario.egg-info/PKG-INFO +14 -12
  4. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/langwatch_scenario.egg-info/SOURCES.txt +6 -1
  5. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/langwatch_scenario.egg-info/requires.txt +1 -1
  6. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/pyproject.toml +4 -11
  7. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/__init__.py +1 -1
  8. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_error_messages.py +2 -2
  9. langwatch_scenario-0.7.8/scenario/_events/event_alert_message_logger.py +95 -0
  10. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_events/event_bus.py +90 -30
  11. langwatch_scenario-0.7.8/scenario/_events/event_reporter.py +98 -0
  12. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/README.md +27 -17
  13. langwatch_scenario-0.7.8/scenario/config/__init__.py +43 -0
  14. langwatch_scenario-0.7.8/scenario/config/langwatch.py +51 -0
  15. langwatch_scenario-0.7.8/scenario/config/model.py +42 -0
  16. langwatch_scenario-0.7.3/scenario/config.py → langwatch_scenario-0.7.8/scenario/config/scenario.py +5 -34
  17. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/judge_agent.py +13 -2
  18. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/user_simulator_agent.py +18 -6
  19. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/tests/test_event_reporter.py +5 -3
  20. langwatch_scenario-0.7.8/tests/test_model_config.py +37 -0
  21. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/tests/test_scenario_event_bus.py +28 -14
  22. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/tests/test_scenario_executor_events.py +46 -17
  23. langwatch_scenario-0.7.3/scenario/_events/event_reporter.py +0 -83
  24. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/langwatch_scenario.egg-info/dependency_links.txt +0 -0
  25. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/langwatch_scenario.egg-info/entry_points.txt +0 -0
  26. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/langwatch_scenario.egg-info/top_level.txt +0 -0
  27. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_events/__init__.py +0 -0
  28. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_events/events.py +0 -0
  29. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_events/messages.py +0 -0
  30. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_events/utils.py +0 -0
  31. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/__init__.py +0 -0
  32. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/__init__.py +0 -0
  33. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/__init__.py +0 -0
  34. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_annotations_id.py +0 -0
  35. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_prompts_by_id.py +0 -0
  36. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_scenario_events.py +0 -0
  37. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations.py +0 -0
  38. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_id.py +0 -0
  39. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_trace_id.py +0 -0
  40. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_dataset_by_slug_or_id.py +0 -0
  41. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts.py +0 -0
  42. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id.py +0 -0
  43. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id_versions.py +0 -0
  44. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_trace_id.py +0 -0
  45. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/patch_api_annotations_id.py +0 -0
  46. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_annotations_trace_id.py +0 -0
  47. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_dataset_by_slug_entries.py +0 -0
  48. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts.py +0 -0
  49. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts_by_id_versions.py +0 -0
  50. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_scenario_events.py +0 -0
  51. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_share.py +0 -0
  52. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_unshare.py +0 -0
  53. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/put_api_prompts_by_id.py +0 -0
  54. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/__init__.py +0 -0
  55. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/post_api_trace_search.py +0 -0
  56. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/client.py +0 -0
  57. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/errors.py +0 -0
  58. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/__init__.py +0 -0
  59. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/annotation.py +0 -0
  60. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries.py +0 -0
  61. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries_entries_item.py +0 -0
  62. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_annotations_id_response_200.py +0 -0
  63. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_200.py +0 -0
  64. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400.py +0 -0
  65. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400_error.py +0 -0
  66. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401.py +0 -0
  67. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401_error.py +0 -0
  68. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_404.py +0 -0
  69. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_500.py +0 -0
  70. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_200.py +0 -0
  71. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_400.py +0 -0
  72. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_401.py +0 -0
  73. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_500.py +0 -0
  74. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/error.py +0 -0
  75. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation.py +0 -0
  76. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation_timestamps.py +0 -0
  77. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200.py +0 -0
  78. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item.py +0 -0
  79. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item_entry.py +0 -0
  80. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_400.py +0 -0
  81. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_401.py +0 -0
  82. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_404.py +0 -0
  83. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_422.py +0 -0
  84. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_500.py +0 -0
  85. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200.py +0 -0
  86. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item.py +0 -0
  87. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item_role.py +0 -0
  88. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0.py +0 -0
  89. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema.py +0 -0
  90. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema_schema.py +0 -0
  91. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_type.py +0 -0
  92. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400.py +0 -0
  93. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400_error.py +0 -0
  94. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401.py +0 -0
  95. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401_error.py +0 -0
  96. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_404.py +0 -0
  97. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_500.py +0 -0
  98. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200.py +0 -0
  99. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data.py +0 -0
  100. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +0 -0
  101. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +0 -0
  102. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +0 -0
  103. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +0 -0
  104. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +0 -0
  105. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +0 -0
  106. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item.py +0 -0
  107. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +0 -0
  108. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +0 -0
  109. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +0 -0
  110. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +0 -0
  111. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +0 -0
  112. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400.py +0 -0
  113. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400_error.py +0 -0
  114. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401.py +0 -0
  115. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401_error.py +0 -0
  116. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_404.py +0 -0
  117. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_500.py +0 -0
  118. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item.py +0 -0
  119. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item.py +0 -0
  120. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item_role.py +0 -0
  121. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0.py +0 -0
  122. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema.py +0 -0
  123. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema_schema.py +0 -0
  124. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_type.py +0 -0
  125. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400.py +0 -0
  126. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400_error.py +0 -0
  127. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401.py +0 -0
  128. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401_error.py +0 -0
  129. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_500.py +0 -0
  130. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200.py +0 -0
  131. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_error_type_0.py +0 -0
  132. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item.py +0 -0
  133. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_error.py +0 -0
  134. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_timestamps.py +0 -0
  135. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_input.py +0 -0
  136. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metadata.py +0 -0
  137. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metrics.py +0 -0
  138. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_output.py +0 -0
  139. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item.py +0 -0
  140. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_error_type_0.py +0 -0
  141. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input.py +0 -0
  142. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input_value_item.py +0 -0
  143. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_metrics.py +0 -0
  144. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output.py +0 -0
  145. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output_value_item.py +0 -0
  146. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_params.py +0 -0
  147. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_timestamps.py +0 -0
  148. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_timestamps.py +0 -0
  149. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/input_.py +0 -0
  150. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metadata.py +0 -0
  151. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metrics.py +0 -0
  152. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/output.py +0 -0
  153. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/pagination.py +0 -0
  154. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_body.py +0 -0
  155. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_response_200.py +0 -0
  156. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_annotations_trace_id_body.py +0 -0
  157. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_body.py +0 -0
  158. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body.py +0 -0
  159. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data.py +0 -0
  160. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations.py +0 -0
  161. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item.py +0 -0
  162. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item_type.py +0 -0
  163. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_rows_item.py +0 -0
  164. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item.py +0 -0
  165. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item_type.py +0 -0
  166. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item.py +0 -0
  167. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item_role.py +0 -0
  168. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item.py +0 -0
  169. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_json_schema.py +0 -0
  170. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_type.py +0 -0
  171. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_prompting_technique.py +0 -0
  172. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200.py +0 -0
  173. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data.py +0 -0
  174. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +0 -0
  175. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +0 -0
  176. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +0 -0
  177. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +0 -0
  178. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +0 -0
  179. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +0 -0
  180. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item.py +0 -0
  181. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +0 -0
  182. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +0 -0
  183. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +0 -0
  184. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +0 -0
  185. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +0 -0
  186. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400.py +0 -0
  187. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400_error.py +0 -0
  188. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401.py +0 -0
  189. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401_error.py +0 -0
  190. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_404.py +0 -0
  191. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_500.py +0 -0
  192. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200.py +0 -0
  193. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item.py +0 -0
  194. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item_role.py +0 -0
  195. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0.py +0 -0
  196. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema.py +0 -0
  197. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema_schema.py +0 -0
  198. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_type.py +0 -0
  199. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400.py +0 -0
  200. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400_error.py +0 -0
  201. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401.py +0 -0
  202. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401_error.py +0 -0
  203. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_500.py +0 -0
  204. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0.py +0 -0
  205. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0_metadata.py +0 -0
  206. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1.py +0 -0
  207. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0.py +0 -0
  208. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0_verdict.py +0 -0
  209. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_status.py +0 -0
  210. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2.py +0 -0
  211. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_0.py +0 -0
  212. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_1.py +0 -0
  213. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2.py +0 -0
  214. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item.py +0 -0
  215. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item_function.py +0 -0
  216. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_3.py +0 -0
  217. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_4.py +0 -0
  218. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_201.py +0 -0
  219. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_400.py +0 -0
  220. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_401.py +0 -0
  221. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_500.py +0 -0
  222. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_share_response_200.py +0 -0
  223. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_unshare_response_200.py +0 -0
  224. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_body.py +0 -0
  225. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_200.py +0 -0
  226. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400.py +0 -0
  227. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400_error.py +0 -0
  228. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401.py +0 -0
  229. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401_error.py +0 -0
  230. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_404.py +0 -0
  231. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_500.py +0 -0
  232. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request.py +0 -0
  233. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request_filters.py +0 -0
  234. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_response.py +0 -0
  235. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/timestamps.py +0 -0
  236. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/trace.py +0 -0
  237. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/py.typed +0 -0
  238. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/lang_watch_api_client/types.py +0 -0
  239. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_generated/langwatch_api_client/pyproject.toml +0 -0
  240. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_utils/__init__.py +0 -0
  241. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_utils/ids.py +0 -0
  242. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_utils/message_conversion.py +0 -0
  243. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/_utils/utils.py +0 -0
  244. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/agent_adapter.py +0 -0
  245. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/cache.py +0 -0
  246. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/pytest_plugin.py +0 -0
  247. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/scenario_executor.py +0 -0
  248. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/scenario_state.py +0 -0
  249. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/script.py +0 -0
  250. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/scenario/types.py +0 -0
  251. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/setup.cfg +0 -0
  252. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/setup.py +0 -0
  253. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/tests/test_scenario.py +0 -0
  254. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/tests/test_scenario_agent.py +0 -0
  255. {langwatch_scenario-0.7.3 → langwatch_scenario-0.7.8}/tests/test_scenario_executor.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langwatch-scenario
3
- Version: 0.7.3
3
+ Version: 0.7.8
4
4
  Summary: The end-to-end agent testing library
5
5
  Author-email: LangWatch Team <support@langwatch.ai>
6
6
  License: MIT
@@ -30,12 +30,12 @@ Requires-Dist: pksuid>=1.1.2
30
30
  Requires-Dist: httpx>=0.27.0
31
31
  Requires-Dist: rx>=3.2.0
32
32
  Requires-Dist: python-dateutil>=2.9.0.post0
33
+ Requires-Dist: pydantic-settings>=2.9.1
33
34
  Provides-Extra: dev
34
35
  Requires-Dist: black; extra == "dev"
35
36
  Requires-Dist: isort; extra == "dev"
36
37
  Requires-Dist: pytest-cov; extra == "dev"
37
38
  Requires-Dist: pre-commit; extra == "dev"
38
- Requires-Dist: commitizen; extra == "dev"
39
39
  Requires-Dist: pyright; extra == "dev"
40
40
  Requires-Dist: pydantic-ai; extra == "dev"
41
41
  Requires-Dist: function-schema; extra == "dev"
@@ -88,7 +88,7 @@ result = await scenario.run(
88
88
  # Define the agents that will play this simulation
89
89
  agents=[
90
90
  WeatherAgent(),
91
- scenario.UserSimulatorAgent(model="openai/gpt-4.1-mini"),
91
+ scenario.UserSimulatorAgent(model="openai/gpt-4.1"),
92
92
  ],
93
93
 
94
94
  # (Optional) Control the simulation
@@ -159,7 +159,7 @@ import pytest
159
159
  import scenario
160
160
  import litellm
161
161
 
162
- scenario.configure(default_model="openai/gpt-4.1-mini")
162
+ scenario.configure(default_model="openai/gpt-4.1")
163
163
 
164
164
 
165
165
  @pytest.mark.agent_test
@@ -189,6 +189,7 @@ async def test_vegetarian_recipe_agent():
189
189
  ]
190
190
  ),
191
191
  ],
192
+ set_id="python-examples",
192
193
  )
193
194
 
194
195
  # Assert for pytest to know whether the test passed
@@ -202,7 +203,7 @@ import litellm
202
203
  @scenario.cache()
203
204
  def vegetarian_recipe_agent(messages) -> scenario.AgentReturnTypes:
204
205
  response = litellm.completion(
205
- model="openai/gpt-4.1-mini",
206
+ model="openai/gpt-4.1",
206
207
  messages=[
207
208
  {
208
209
  "role": "system",
@@ -227,17 +228,17 @@ def vegetarian_recipe_agent(messages) -> scenario.AgentReturnTypes:
227
228
  Save it as `tests/vegetarian-recipe-agent.test.ts`:
228
229
 
229
230
  ```typescript
231
+ import scenario, { type AgentAdapter, AgentRole } from "@langwatch/scenario";
230
232
  import { openai } from "@ai-sdk/openai";
231
- import * as scenario from "@langwatch/scenario";
232
233
  import { generateText } from "ai";
233
234
  import { describe, it, expect } from "vitest";
234
235
 
235
236
  describe("Vegetarian Recipe Agent", () => {
236
- const agent: scenario.AgentAdapter = {
237
- role: scenario.AgentRole.AGENT,
237
+ const agent: AgentAdapter = {
238
+ role: AgentRole.AGENT,
238
239
  call: async (input) => {
239
240
  const response = await generateText({
240
- model: openai("gpt-4.1-mini"),
241
+ model: openai("gpt-4.1"),
241
242
  messages: [
242
243
  {
243
244
  role: "system",
@@ -258,7 +259,7 @@ describe("Vegetarian Recipe Agent", () => {
258
259
  agent,
259
260
  scenario.userSimulatorAgent(),
260
261
  scenario.judgeAgent({
261
- model: openai("gpt-4.1-mini"),
262
+ model: openai("gpt-4.1"),
262
263
  criteria: [
263
264
  "Agent should not ask more than two follow-up questions",
264
265
  "Agent should generate a recipe",
@@ -268,6 +269,7 @@ describe("Vegetarian Recipe Agent", () => {
268
269
  ],
269
270
  }),
270
271
  ],
272
+ setId: "javascript-examples",
271
273
  });
272
274
  expect(result.success).toBe(true);
273
275
  });
@@ -417,7 +419,7 @@ You can enable debug mode by setting the `debug` field to `True` in the `Scenari
417
419
  Debug mode allows you to see the messages in slow motion step by step, and intervene with your own inputs to debug your agent from the middle of the conversation.
418
420
 
419
421
  ```python
420
- scenario.configure(default_model="openai/gpt-4.1-mini", debug=True)
422
+ scenario.configure(default_model="openai/gpt-4.1", debug=True)
421
423
  ```
422
424
 
423
425
  or
@@ -431,7 +433,7 @@ pytest -s tests/test_vegetarian_recipe_agent.py --debug
431
433
  Each time the scenario runs, the testing agent might choose a different input to start. This is good to make sure it covers the variance of real users as well; however, we understand that its non-deterministic nature might make it less repeatable, costly, and harder to debug. To solve for this, you can use the `cache_key` field in the `Scenario.configure` method or in the specific scenario you are running; this will make the testing agent give the same input given the same scenario:
432
434
 
433
435
  ```python
434
- scenario.configure(default_model="openai/gpt-4.1-mini", cache_key="42")
436
+ scenario.configure(default_model="openai/gpt-4.1", cache_key="42")
435
437
  ```
436
438
 
437
439
  To bust the cache, you can simply pass a different `cache_key`, disable it, or delete the cache files located at `~/.scenario/cache`.
@@ -44,7 +44,7 @@ result = await scenario.run(
44
44
  # Define the agents that will play this simulation
45
45
  agents=[
46
46
  WeatherAgent(),
47
- scenario.UserSimulatorAgent(model="openai/gpt-4.1-mini"),
47
+ scenario.UserSimulatorAgent(model="openai/gpt-4.1"),
48
48
  ],
49
49
 
50
50
  # (Optional) Control the simulation
@@ -115,7 +115,7 @@ import pytest
115
115
  import scenario
116
116
  import litellm
117
117
 
118
- scenario.configure(default_model="openai/gpt-4.1-mini")
118
+ scenario.configure(default_model="openai/gpt-4.1")
119
119
 
120
120
 
121
121
  @pytest.mark.agent_test
@@ -145,6 +145,7 @@ async def test_vegetarian_recipe_agent():
145
145
  ]
146
146
  ),
147
147
  ],
148
+ set_id="python-examples",
148
149
  )
149
150
 
150
151
  # Assert for pytest to know whether the test passed
@@ -158,7 +159,7 @@ import litellm
158
159
  @scenario.cache()
159
160
  def vegetarian_recipe_agent(messages) -> scenario.AgentReturnTypes:
160
161
  response = litellm.completion(
161
- model="openai/gpt-4.1-mini",
162
+ model="openai/gpt-4.1",
162
163
  messages=[
163
164
  {
164
165
  "role": "system",
@@ -183,17 +184,17 @@ def vegetarian_recipe_agent(messages) -> scenario.AgentReturnTypes:
183
184
  Save it as `tests/vegetarian-recipe-agent.test.ts`:
184
185
 
185
186
  ```typescript
187
+ import scenario, { type AgentAdapter, AgentRole } from "@langwatch/scenario";
186
188
  import { openai } from "@ai-sdk/openai";
187
- import * as scenario from "@langwatch/scenario";
188
189
  import { generateText } from "ai";
189
190
  import { describe, it, expect } from "vitest";
190
191
 
191
192
  describe("Vegetarian Recipe Agent", () => {
192
- const agent: scenario.AgentAdapter = {
193
- role: scenario.AgentRole.AGENT,
193
+ const agent: AgentAdapter = {
194
+ role: AgentRole.AGENT,
194
195
  call: async (input) => {
195
196
  const response = await generateText({
196
- model: openai("gpt-4.1-mini"),
197
+ model: openai("gpt-4.1"),
197
198
  messages: [
198
199
  {
199
200
  role: "system",
@@ -214,7 +215,7 @@ describe("Vegetarian Recipe Agent", () => {
214
215
  agent,
215
216
  scenario.userSimulatorAgent(),
216
217
  scenario.judgeAgent({
217
- model: openai("gpt-4.1-mini"),
218
+ model: openai("gpt-4.1"),
218
219
  criteria: [
219
220
  "Agent should not ask more than two follow-up questions",
220
221
  "Agent should generate a recipe",
@@ -224,6 +225,7 @@ describe("Vegetarian Recipe Agent", () => {
224
225
  ],
225
226
  }),
226
227
  ],
228
+ setId: "javascript-examples",
227
229
  });
228
230
  expect(result.success).toBe(true);
229
231
  });
@@ -373,7 +375,7 @@ You can enable debug mode by setting the `debug` field to `True` in the `Scenari
373
375
  Debug mode allows you to see the messages in slow motion step by step, and intervene with your own inputs to debug your agent from the middle of the conversation.
374
376
 
375
377
  ```python
376
- scenario.configure(default_model="openai/gpt-4.1-mini", debug=True)
378
+ scenario.configure(default_model="openai/gpt-4.1", debug=True)
377
379
  ```
378
380
 
379
381
  or
@@ -387,7 +389,7 @@ pytest -s tests/test_vegetarian_recipe_agent.py --debug
387
389
  Each time the scenario runs, the testing agent might choose a different input to start. This is good to make sure it covers the variance of real users as well; however, we understand that its non-deterministic nature might make it less repeatable, costly, and harder to debug. To solve for this, you can use the `cache_key` field in the `Scenario.configure` method or in the specific scenario you are running; this will make the testing agent give the same input given the same scenario:
388
390
 
389
391
  ```python
390
- scenario.configure(default_model="openai/gpt-4.1-mini", cache_key="42")
392
+ scenario.configure(default_model="openai/gpt-4.1", cache_key="42")
391
393
  ```
392
394
 
393
395
  To bust the cache, you can simply pass a different `cache_key`, disable it, or delete the cache files located at `~/.scenario/cache`.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langwatch-scenario
3
- Version: 0.7.3
3
+ Version: 0.7.8
4
4
  Summary: The end-to-end agent testing library
5
5
  Author-email: LangWatch Team <support@langwatch.ai>
6
6
  License: MIT
@@ -30,12 +30,12 @@ Requires-Dist: pksuid>=1.1.2
30
30
  Requires-Dist: httpx>=0.27.0
31
31
  Requires-Dist: rx>=3.2.0
32
32
  Requires-Dist: python-dateutil>=2.9.0.post0
33
+ Requires-Dist: pydantic-settings>=2.9.1
33
34
  Provides-Extra: dev
34
35
  Requires-Dist: black; extra == "dev"
35
36
  Requires-Dist: isort; extra == "dev"
36
37
  Requires-Dist: pytest-cov; extra == "dev"
37
38
  Requires-Dist: pre-commit; extra == "dev"
38
- Requires-Dist: commitizen; extra == "dev"
39
39
  Requires-Dist: pyright; extra == "dev"
40
40
  Requires-Dist: pydantic-ai; extra == "dev"
41
41
  Requires-Dist: function-schema; extra == "dev"
@@ -88,7 +88,7 @@ result = await scenario.run(
88
88
  # Define the agents that will play this simulation
89
89
  agents=[
90
90
  WeatherAgent(),
91
- scenario.UserSimulatorAgent(model="openai/gpt-4.1-mini"),
91
+ scenario.UserSimulatorAgent(model="openai/gpt-4.1"),
92
92
  ],
93
93
 
94
94
  # (Optional) Control the simulation
@@ -159,7 +159,7 @@ import pytest
159
159
  import scenario
160
160
  import litellm
161
161
 
162
- scenario.configure(default_model="openai/gpt-4.1-mini")
162
+ scenario.configure(default_model="openai/gpt-4.1")
163
163
 
164
164
 
165
165
  @pytest.mark.agent_test
@@ -189,6 +189,7 @@ async def test_vegetarian_recipe_agent():
189
189
  ]
190
190
  ),
191
191
  ],
192
+ set_id="python-examples",
192
193
  )
193
194
 
194
195
  # Assert for pytest to know whether the test passed
@@ -202,7 +203,7 @@ import litellm
202
203
  @scenario.cache()
203
204
  def vegetarian_recipe_agent(messages) -> scenario.AgentReturnTypes:
204
205
  response = litellm.completion(
205
- model="openai/gpt-4.1-mini",
206
+ model="openai/gpt-4.1",
206
207
  messages=[
207
208
  {
208
209
  "role": "system",
@@ -227,17 +228,17 @@ def vegetarian_recipe_agent(messages) -> scenario.AgentReturnTypes:
227
228
  Save it as `tests/vegetarian-recipe-agent.test.ts`:
228
229
 
229
230
  ```typescript
231
+ import scenario, { type AgentAdapter, AgentRole } from "@langwatch/scenario";
230
232
  import { openai } from "@ai-sdk/openai";
231
- import * as scenario from "@langwatch/scenario";
232
233
  import { generateText } from "ai";
233
234
  import { describe, it, expect } from "vitest";
234
235
 
235
236
  describe("Vegetarian Recipe Agent", () => {
236
- const agent: scenario.AgentAdapter = {
237
- role: scenario.AgentRole.AGENT,
237
+ const agent: AgentAdapter = {
238
+ role: AgentRole.AGENT,
238
239
  call: async (input) => {
239
240
  const response = await generateText({
240
- model: openai("gpt-4.1-mini"),
241
+ model: openai("gpt-4.1"),
241
242
  messages: [
242
243
  {
243
244
  role: "system",
@@ -258,7 +259,7 @@ describe("Vegetarian Recipe Agent", () => {
258
259
  agent,
259
260
  scenario.userSimulatorAgent(),
260
261
  scenario.judgeAgent({
261
- model: openai("gpt-4.1-mini"),
262
+ model: openai("gpt-4.1"),
262
263
  criteria: [
263
264
  "Agent should not ask more than two follow-up questions",
264
265
  "Agent should generate a recipe",
@@ -268,6 +269,7 @@ describe("Vegetarian Recipe Agent", () => {
268
269
  ],
269
270
  }),
270
271
  ],
272
+ setId: "javascript-examples",
271
273
  });
272
274
  expect(result.success).toBe(true);
273
275
  });
@@ -417,7 +419,7 @@ You can enable debug mode by setting the `debug` field to `True` in the `Scenari
417
419
  Debug mode allows you to see the messages in slow motion step by step, and intervene with your own inputs to debug your agent from the middle of the conversation.
418
420
 
419
421
  ```python
420
- scenario.configure(default_model="openai/gpt-4.1-mini", debug=True)
422
+ scenario.configure(default_model="openai/gpt-4.1", debug=True)
421
423
  ```
422
424
 
423
425
  or
@@ -431,7 +433,7 @@ pytest -s tests/test_vegetarian_recipe_agent.py --debug
431
433
  Each time the scenario runs, the testing agent might choose a different input to start. This is good to make sure it covers the variance of real users as well; however, we understand that its non-deterministic nature might make it less repeatable, costly, and harder to debug. To solve for this, you can use the `cache_key` field in the `Scenario.configure` method or in the specific scenario you are running; this will make the testing agent give the same input given the same scenario:
432
434
 
433
435
  ```python
434
- scenario.configure(default_model="openai/gpt-4.1-mini", cache_key="42")
436
+ scenario.configure(default_model="openai/gpt-4.1", cache_key="42")
435
437
  ```
436
438
 
437
439
  To bust the cache, you can simply pass a different `cache_key`, disable it, or delete the cache files located at `~/.scenario/cache`.
@@ -11,7 +11,6 @@ scenario/__init__.py
11
11
  scenario/_error_messages.py
12
12
  scenario/agent_adapter.py
13
13
  scenario/cache.py
14
- scenario/config.py
15
14
  scenario/judge_agent.py
16
15
  scenario/pytest_plugin.py
17
16
  scenario/scenario_executor.py
@@ -20,6 +19,7 @@ scenario/script.py
20
19
  scenario/types.py
21
20
  scenario/user_simulator_agent.py
22
21
  scenario/_events/__init__.py
22
+ scenario/_events/event_alert_message_logger.py
23
23
  scenario/_events/event_bus.py
24
24
  scenario/_events/event_reporter.py
25
25
  scenario/_events/events.py
@@ -239,7 +239,12 @@ scenario/_utils/__init__.py
239
239
  scenario/_utils/ids.py
240
240
  scenario/_utils/message_conversion.py
241
241
  scenario/_utils/utils.py
242
+ scenario/config/__init__.py
243
+ scenario/config/langwatch.py
244
+ scenario/config/model.py
245
+ scenario/config/scenario.py
242
246
  tests/test_event_reporter.py
247
+ tests/test_model_config.py
243
248
  tests/test_scenario.py
244
249
  tests/test_scenario_agent.py
245
250
  tests/test_scenario_event_bus.py
@@ -12,13 +12,13 @@ pksuid>=1.1.2
12
12
  httpx>=0.27.0
13
13
  rx>=3.2.0
14
14
  python-dateutil>=2.9.0.post0
15
+ pydantic-settings>=2.9.1
15
16
 
16
17
  [dev]
17
18
  black
18
19
  isort
19
20
  pytest-cov
20
21
  pre-commit
21
- commitizen
22
22
  pyright
23
23
  pydantic-ai
24
24
  function-schema
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "langwatch-scenario"
7
- version = "0.7.3"
7
+ version = "0.7.8"
8
8
  description = "The end-to-end agent testing library"
9
9
  readme = "README.md"
10
10
  authors = [{ name = "LangWatch Team", email = "support@langwatch.ai" }]
@@ -35,6 +35,7 @@ dependencies = [
35
35
  "httpx>=0.27.0",
36
36
  "rx>=3.2.0",
37
37
  "python-dateutil>=2.9.0.post0",
38
+ "pydantic-settings>=2.9.1",
38
39
  ]
39
40
 
40
41
  [project.optional-dependencies]
@@ -43,7 +44,6 @@ dev = [
43
44
  "isort",
44
45
  "pytest-cov",
45
46
  "pre-commit",
46
- "commitizen",
47
47
  "pyright",
48
48
  "pydantic-ai",
49
49
  "function-schema",
@@ -67,7 +67,7 @@ markers = ["agent_test: marks tests as agent scenario tests"]
67
67
 
68
68
  [dependency-groups]
69
69
  dev = [
70
- "commitizen>=4.8.3",
70
+
71
71
  "function-schema>=0.4.5",
72
72
  "pre-commit>=4.2.0",
73
73
  "pydantic-ai>=0.0.52",
@@ -77,11 +77,4 @@ dev = [
77
77
  "respx>=0.22.0",
78
78
  ]
79
79
 
80
- [tool.commitizen]
81
- name = "cz_conventional_commits"
82
- version = "0.7.0"
83
- tag_format = "python/v$version"
84
- legacy_tag_formats = ["v$version"]
85
- version_files = ["pyproject.toml:version"]
86
- bump_message = "bump: version $current_version → $new_version"
87
- major_version_zero = true
80
+
@@ -25,7 +25,7 @@ Basic Usage:
25
25
  import scenario
26
26
 
27
27
  # Configure global settings
28
- scenario.configure(default_model="openai/gpt-4.1-mini")
28
+ scenario.configure(default_model="openai/gpt-4.1")
29
29
 
30
30
  # Create your agent adapter
31
31
  class MyAgent(scenario.AgentAdapter):
@@ -8,12 +8,12 @@ def agent_not_configured_error_message(class_name: str):
8
8
 
9
9
  {termcolor.colored("->", "cyan")} {class_name} was initialized without a model, please set the model when defining the testing agent, for example:
10
10
 
11
- {class_name}(model="openai/gpt-4.1-mini")
11
+ {class_name}(model="openai/gpt-4.1")
12
12
  {termcolor.colored("^" * (29 + len(class_name)), "green")}
13
13
 
14
14
  {termcolor.colored("->", "cyan")} Alternatively, you can set the default model globally, for example:
15
15
 
16
- scenario.configure(default_model="openai/gpt-4.1-mini")
16
+ scenario.configure(default_model="openai/gpt-4.1")
17
17
  {termcolor.colored("^" * 55, "green")}
18
18
  """
19
19
 
@@ -0,0 +1,95 @@
1
+ import os
2
+ from typing import Set
3
+ from .._utils.ids import get_batch_run_id
4
+
5
+
6
class EventAlertMessageLogger:
    """
    Handles console output of alert messages for scenario events.

    Single responsibility: Display user-friendly messages about event reporting status
    and simulation watching instructions.
    """

    # Batch run IDs that have already been greeted. Class-level so the greeting
    # is printed at most once per batch run, even across multiple instances.
    _shown_batch_ids: Set[str] = set()

    def handle_greeting(self) -> None:
        """
        Shows a fancy greeting message about simulation reporting status.

        Only shows once per batch run (tracked in `_shown_batch_ids`) to avoid
        spamming the console when many scenarios run in the same batch.
        """
        if self._is_greeting_disabled():
            return

        batch_run_id = get_batch_run_id()

        if batch_run_id in EventAlertMessageLogger._shown_batch_ids:
            return

        EventAlertMessageLogger._shown_batch_ids.add(batch_run_id)
        self._display_greeting(batch_run_id)

    def handle_watch_message(self, set_url: str) -> None:
        """
        Shows a fancy message about how to watch the simulation.

        Called when a run started event is received with a session ID.

        Args:
            set_url: Base URL of the scenario set in the LangWatch app.
        """
        if self._is_greeting_disabled():
            return

        self._display_watch_message(set_url)

    def _is_greeting_disabled(self) -> bool:
        """Check if greeting messages are disabled via environment variable."""
        return bool(os.getenv("SCENARIO_DISABLE_SIMULATION_REPORT_INFO"))

    @staticmethod
    def _print_banner_header(title: str, separator: str) -> None:
        """Print the shared banner header: blank line, rule, title, rule."""
        print(f"\n{separator}")
        print(title)
        print(f"{separator}")

    def _display_greeting(self, batch_run_id: str) -> None:
        """
        Display the greeting message with simulation reporting status.

        The banner header, batch-run-id lines, and mute instructions are shared
        by both branches; only the middle section depends on whether
        LANGWATCH_API_KEY is set.
        """
        separator = "─" * 60
        self._print_banner_header("🚀 LangWatch Simulation Reporting", separator)

        if not os.getenv("LANGWATCH_API_KEY"):
            print("➡️ API key not configured")
            print("   Simulations will only output final results")
            print("")
            print("💡 To visualize conversations in real time:")
            print("   • Set LANGWATCH_API_KEY environment variable")
            # NOTE(review): "scenario.config.js" looks copied from the JS SDK;
            # confirm the Python-equivalent configuration hint.
            print("   • Or configure apiKey in scenario.config.js")
            print("")
        else:
            endpoint = os.getenv("LANGWATCH_ENDPOINT", "https://app.langwatch.ai")
            api_key = os.getenv("LANGWATCH_API_KEY", "")

            print("✅ Simulation reporting enabled")
            print(f"   Endpoint: {endpoint}")
            # Inside this branch LANGWATCH_API_KEY is always set, so this
            # normally reads "Configured"; kept for parity with the JS SDK.
            print(f"   API Key: {'Configured' if api_key else 'Not configured'}")
            print("")

        print(f"📦 Batch Run ID: {batch_run_id}")
        print("")
        print("🔇 To disable these messages:")
        print("   • Set SCENARIO_DISABLE_SIMULATION_REPORT_INFO=true")
        print(f"{separator}\n")

    def _display_watch_message(self, set_url: str) -> None:
        """Display the watch message with URLs for viewing the simulation."""
        separator = "─" * 60
        batch_url = f"{set_url}/{get_batch_run_id()}"

        self._print_banner_header("👀 Watch Your Simulation Live", separator)
        print("🌐 Open in your browser:")
        print(f"   Scenario Set: {set_url}")
        print(f"   Batch Run: {batch_url}")
        print("")
        print(f"{separator}\n")