langwatch-scenario 0.6.0__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langwatch_scenario-0.6.0.dist-info → langwatch_scenario-0.7.1.dist-info}/METADATA +143 -41
- langwatch_scenario-0.7.1.dist-info/RECORD +237 -0
- scenario/__init__.py +1 -4
- scenario/{events → _events}/__init__.py +9 -11
- scenario/_events/event_bus.py +185 -0
- scenario/{events → _events}/event_reporter.py +1 -1
- scenario/{events → _events}/events.py +20 -27
- scenario/_events/messages.py +58 -0
- scenario/{events → _events}/utils.py +43 -32
- scenario/_generated/langwatch_api_client/README.md +139 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/__init__.py +13 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/__init__.py +1 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/__init__.py +1 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_annotations_id.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_prompts_by_id.py +218 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_scenario_events.py +183 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations.py +136 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_id.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_trace_id.py +160 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_dataset_by_slug_or_id.py +229 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts.py +188 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id.py +218 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id_versions.py +218 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_trace_id.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/patch_api_annotations_id.py +178 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_annotations_trace_id.py +178 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_dataset_by_slug_entries.py +108 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts.py +187 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts_by_id_versions.py +241 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_scenario_events.py +229 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_share.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_unshare.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/put_api_prompts_by_id.py +241 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/__init__.py +1 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/post_api_trace_search.py +168 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/client.py +268 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/errors.py +16 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/__init__.py +455 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/annotation.py +131 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries.py +74 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries_entries_item.py +44 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_annotations_id_response_200.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_200.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_404.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_200.py +81 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_400.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_401.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/error.py +67 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation.py +164 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation_timestamps.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200.py +75 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item.py +109 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item_entry.py +44 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_400.py +78 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_401.py +78 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_404.py +78 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_422.py +67 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_500.py +78 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200.py +172 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item.py +69 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item_role.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0.py +81 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema_schema.py +44 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_type.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_404.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data.py +204 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +101 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +79 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +18 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +71 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +16 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item.py +71 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +98 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +11 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_404.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item.py +172 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item.py +69 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item_role.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0.py +81 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema_schema.py +44 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_type.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200.py +249 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_error_type_0.py +79 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item.py +152 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_error.py +79 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_timestamps.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_input.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metadata.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metrics.py +95 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_output.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item.py +271 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_error_type_0.py +79 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input.py +90 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input_value_item.py +69 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_metrics.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output.py +89 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output_value_item.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_params.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_timestamps.py +95 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_timestamps.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/input_.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metadata.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metrics.py +115 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/output.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/pagination.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_body.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_response_200.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_annotations_trace_id_body.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_body.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body.py +147 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data.py +207 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations.py +106 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item.py +79 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item_type.py +18 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_rows_item.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item.py +71 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item_type.py +16 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item.py +71 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item_role.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item.py +98 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_json_schema.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_type.py +11 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_prompting_technique.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200.py +155 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data.py +206 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +101 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +79 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +18 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +71 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +16 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item.py +71 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +98 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +11 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_404.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200.py +172 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item.py +69 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item_role.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0.py +81 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema_schema.py +44 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_type.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0.py +127 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0_metadata.py +68 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1.py +164 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0.py +98 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0_verdict.py +10 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_status.py +13 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2.py +245 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_0.py +88 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_1.py +88 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2.py +120 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item.py +87 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item_function.py +67 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_3.py +88 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_4.py +85 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_201.py +81 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_400.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_401.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_share_response_200.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_unshare_response_200.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_body.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_200.py +75 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401.py +61 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401_error.py +8 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_404.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_500.py +59 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request.py +133 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request_filters.py +51 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_response.py +93 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/timestamps.py +77 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/models/trace.py +225 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/py.typed +1 -0
- scenario/_generated/langwatch_api_client/lang_watch_api_client/types.py +46 -0
- scenario/_generated/langwatch_api_client/pyproject.toml +27 -0
- scenario/_utils/__init__.py +1 -1
- scenario/_utils/message_conversion.py +2 -2
- scenario/judge_agent.py +6 -1
- scenario/pytest_plugin.py +4 -4
- scenario/scenario_executor.py +196 -223
- scenario/types.py +5 -2
- langwatch_scenario-0.6.0.dist-info/RECORD +0 -27
- scenario/events/event_bus.py +0 -175
- scenario/events/messages.py +0 -84
- {langwatch_scenario-0.6.0.dist-info → langwatch_scenario-0.7.1.dist-info}/WHEEL +0 -0
- {langwatch_scenario-0.6.0.dist-info → langwatch_scenario-0.7.1.dist-info}/entry_points.txt +0 -0
- {langwatch_scenario-0.6.0.dist-info → langwatch_scenario-0.7.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,185 @@
|
|
1
|
+
from rx.core.observable.observable import Observable
|
2
|
+
from typing import Optional, Any
|
3
|
+
from .events import ScenarioEvent
|
4
|
+
from .event_reporter import EventReporter
|
5
|
+
|
6
|
+
import asyncio
|
7
|
+
import queue
|
8
|
+
import threading
|
9
|
+
import logging
|
10
|
+
|
11
|
+
class ScenarioEventBus:
|
12
|
+
"""
|
13
|
+
Subscribes to scenario event streams and handles HTTP posting using a dedicated worker thread.
|
14
|
+
|
15
|
+
The EventBus acts as an observer of scenario events, automatically
|
16
|
+
posting them to external APIs. It uses a queue-based threading model
|
17
|
+
where events are processed by a dedicated worker thread.
|
18
|
+
|
19
|
+
Key design principles:
|
20
|
+
- Single worker thread handles all HTTP posting (simplifies concurrency)
|
21
|
+
- Thread created lazily when first event arrives
|
22
|
+
- Thread terminates when queue empty and stream completed
|
23
|
+
- Non-daemon thread ensures all events posted before program exit
|
24
|
+
|
25
|
+
Attributes:
|
26
|
+
_event_reporter: EventReporter instance for HTTP posting of events
|
27
|
+
_max_retries: Maximum number of retry attempts for failed event processing
|
28
|
+
_event_queue: Thread-safe queue for passing events to worker thread
|
29
|
+
_completed: Whether the event stream has completed
|
30
|
+
_subscription: RxPY subscription to the event stream
|
31
|
+
_worker_thread: Dedicated thread for processing events
|
32
|
+
"""
|
33
|
+
|
34
|
+
def __init__(
    self, event_reporter: Optional[EventReporter] = None, max_retries: int = 3
):
    """
    Create the event bus.

    Args:
        event_reporter: Optional EventReporter used for HTTP posting of
            events; a default instance is constructed when none is given.
        max_retries: Upper bound on attempts when posting an event fails
            (exponential backoff between attempts). Defaults to 3.
    """
    # HTTP reporter and retry policy.
    self._event_reporter: EventReporter = event_reporter or EventReporter()
    self._max_retries = max_retries

    # Per-class logger so output stays attributable to the event bus.
    self.logger = logging.getLogger(__name__)

    # Worker-thread plumbing: events flow through a thread-safe queue into
    # a lazily created, non-daemon worker thread; the Event below signals
    # the worker to shut down.
    self._event_queue: queue.Queue[ScenarioEvent] = queue.Queue()
    self._completed = False
    self._subscription: Optional[Any] = None
    self._worker_thread: Optional[threading.Thread] = None
    self._shutdown_event = threading.Event()  # Signal worker to shutdown
|
58
|
+
|
59
|
+
def _get_or_create_worker(self) -> None:
    """Start the worker thread on demand, replacing it if it has died."""
    worker = self._worker_thread
    if worker is not None and worker.is_alive():
        # A live worker already exists; nothing to do.
        return
    self.logger.debug("Creating new worker thread")
    worker = threading.Thread(
        target=self._worker_loop, daemon=False, name="ScenarioEventBus-Worker"
    )
    self._worker_thread = worker
    worker.start()
    self.logger.debug("Worker thread started")
|
70
|
+
|
71
|
+
def _worker_loop(self) -> None:
    """Drain the event queue until shutdown is signalled or the stream ends."""
    self.logger.debug("Worker thread loop started")
    # The wait doubles as the shutdown check and as a 100ms pacing interval;
    # it returns True only when the shutdown event is set.
    while not self._shutdown_event.wait(timeout=0.1):
        try:
            try:
                pending = self._event_queue.get(timeout=0.1)
            except queue.Empty:
                # Exit once the stream has completed and nothing is queued.
                if self._completed:
                    self.logger.debug("Stream completed and no more events, worker thread exiting")
                    break
                continue
            self.logger.debug(f"Worker picked up event: {pending.type_} ({pending.scenario_run_id})")
            self._process_event_sync(pending)
            self._event_queue.task_done()
        except Exception as e:
            self.logger.error(f"Worker thread error: {e}")
    else:
        # Loop condition went false: explicit shutdown was requested.
        self.logger.debug("Worker thread received shutdown signal")
    self.logger.debug("Worker thread loop ended")
|
96
|
+
|
97
|
+
def _process_event_sync(self, event: ScenarioEvent) -> None:
|
98
|
+
"""
|
99
|
+
Process event synchronously in worker thread with retry logic.
|
100
|
+
"""
|
101
|
+
self.logger.debug(f"Processing HTTP post for {event.type_} ({event.scenario_run_id})")
|
102
|
+
|
103
|
+
try:
|
104
|
+
# Convert async to sync using asyncio.run - this blocks until HTTP completes
|
105
|
+
success = asyncio.run(self._process_event_with_retry(event))
|
106
|
+
if not success:
|
107
|
+
self.logger.warning(f"Failed to process event {event.type_} after {self._max_retries} attempts")
|
108
|
+
else:
|
109
|
+
self.logger.debug(f"Successfully posted {event.type_} ({event.scenario_run_id})")
|
110
|
+
except Exception as e:
|
111
|
+
self.logger.error(f"Error processing event {event.type_}: {e}")
|
112
|
+
|
113
|
+
async def _process_event_with_retry(self, event: ScenarioEvent, attempt: int = 1) -> bool:
|
114
|
+
"""
|
115
|
+
Process a single event with retry logic (now runs in worker thread context).
|
116
|
+
"""
|
117
|
+
try:
|
118
|
+
if self._event_reporter:
|
119
|
+
await self._event_reporter.post_event(event)
|
120
|
+
return True
|
121
|
+
except Exception as e:
|
122
|
+
if attempt >= self._max_retries:
|
123
|
+
return False
|
124
|
+
print(f"Error processing event (attempt {attempt}/{self._max_retries}): {e}")
|
125
|
+
await asyncio.sleep(0.1 * (2 ** (attempt - 1))) # Exponential backoff
|
126
|
+
return await self._process_event_with_retry(event, attempt + 1)
|
127
|
+
|
128
|
+
def subscribe_to_events(self, event_stream: Observable) -> None:
|
129
|
+
"""
|
130
|
+
Subscribe to any observable stream of scenario events.
|
131
|
+
Events are queued for processing by the dedicated worker thread.
|
132
|
+
"""
|
133
|
+
if self._subscription is not None:
|
134
|
+
self.logger.debug("Already subscribed to event stream")
|
135
|
+
return
|
136
|
+
|
137
|
+
def handle_event(event: ScenarioEvent) -> None:
|
138
|
+
self.logger.debug(f"Event received, queuing: {event.type_} ({event.scenario_run_id})")
|
139
|
+
self._get_or_create_worker()
|
140
|
+
self._event_queue.put(event)
|
141
|
+
self.logger.debug(f"Event queued: {event.type_} ({event.scenario_run_id})")
|
142
|
+
|
143
|
+
self.logger.info("Subscribing to event stream")
|
144
|
+
self._subscription = event_stream.subscribe(
|
145
|
+
handle_event,
|
146
|
+
lambda e: self.logger.error(f"Error in event stream: {e}"),
|
147
|
+
lambda: self._set_completed()
|
148
|
+
)
|
149
|
+
|
150
|
+
def _set_completed(self):
|
151
|
+
"""Helper to set completed state with logging"""
|
152
|
+
self.logger.debug("Event stream completed")
|
153
|
+
self._completed = True
|
154
|
+
|
155
|
+
def drain(self) -> None:
|
156
|
+
"""
|
157
|
+
Waits for all queued events to complete processing.
|
158
|
+
|
159
|
+
This method blocks until all events in the queue have been processed.
|
160
|
+
Since _process_event_sync() uses asyncio.run(), HTTP requests complete
|
161
|
+
before task_done() is called, so join() ensures everything is finished.
|
162
|
+
"""
|
163
|
+
self.logger.debug("Drain started - waiting for queue to empty")
|
164
|
+
|
165
|
+
# Wait for all events to be processed - this is sufficient!
|
166
|
+
self._event_queue.join()
|
167
|
+
self.logger.debug("Event queue drained")
|
168
|
+
|
169
|
+
# Signal worker to shutdown and wait for it
|
170
|
+
self._shutdown_event.set()
|
171
|
+
if self._worker_thread and self._worker_thread.is_alive():
|
172
|
+
self.logger.debug("Waiting for worker thread to shutdown...")
|
173
|
+
self._worker_thread.join(timeout=5.0)
|
174
|
+
if self._worker_thread.is_alive():
|
175
|
+
self.logger.warning("Worker thread did not shutdown within timeout")
|
176
|
+
else:
|
177
|
+
self.logger.debug("Worker thread shutdown complete")
|
178
|
+
|
179
|
+
self.logger.info("Drain completed")
|
180
|
+
|
181
|
+
def is_completed(self) -> bool:
|
182
|
+
"""
|
183
|
+
Returns whether all events have been processed.
|
184
|
+
"""
|
185
|
+
return self._completed and self._event_queue.empty()
|
@@ -35,7 +35,7 @@ class EventReporter:
|
|
35
35
|
def __init__(self, endpoint: Optional[str] = None, api_key: Optional[str] = None):
|
36
36
|
self.endpoint = endpoint or os.getenv("LANGWATCH_ENDPOINT")
|
37
37
|
self.api_key = api_key or os.getenv("LANGWATCH_API_KEY", "")
|
38
|
-
self.logger = logging.getLogger(
|
38
|
+
self.logger = logging.getLogger(__name__)
|
39
39
|
|
40
40
|
async def post_event(self, event: ScenarioEvent):
|
41
41
|
"""
|
@@ -8,39 +8,32 @@ the backend, and provides a single import location for event models.
|
|
8
8
|
If you need to add custom logic or helpers, you can extend or wrap these models here.
|
9
9
|
"""
|
10
10
|
|
11
|
-
from typing import Union, Any, Optional
|
12
|
-
from scenario.
|
11
|
+
from typing import Union, Any, Optional, TypeAlias
|
12
|
+
from scenario._generated.langwatch_api_client.lang_watch_api_client.models import (
|
13
13
|
PostApiScenarioEventsBodyType0,
|
14
|
-
PostApiScenarioEventsBodyType0Metadata
|
14
|
+
PostApiScenarioEventsBodyType0Metadata,
|
15
15
|
PostApiScenarioEventsBodyType1,
|
16
|
-
PostApiScenarioEventsBodyType1ResultsType0
|
17
|
-
PostApiScenarioEventsBodyType1ResultsType0Verdict
|
18
|
-
PostApiScenarioEventsBodyType1Status
|
16
|
+
PostApiScenarioEventsBodyType1ResultsType0,
|
17
|
+
PostApiScenarioEventsBodyType1ResultsType0Verdict,
|
18
|
+
PostApiScenarioEventsBodyType1Status,
|
19
19
|
PostApiScenarioEventsBodyType2,
|
20
|
-
# Message types for the snapshot event
|
21
|
-
PostApiScenarioEventsBodyType2MessagesItemType0,
|
22
|
-
PostApiScenarioEventsBodyType2MessagesItemType1,
|
23
|
-
PostApiScenarioEventsBodyType2MessagesItemType2,
|
24
|
-
PostApiScenarioEventsBodyType2MessagesItemType3,
|
25
|
-
PostApiScenarioEventsBodyType2MessagesItemType4,
|
26
20
|
)
|
21
|
+
from .messages import MessageType
|
22
|
+
|
23
|
+
# Create alias for cleaner naming
|
24
|
+
ScenarioRunStartedEventMetadata: TypeAlias = PostApiScenarioEventsBodyType0Metadata
|
25
|
+
ScenarioRunFinishedEventResults: TypeAlias = PostApiScenarioEventsBodyType1ResultsType0
|
26
|
+
ScenarioRunFinishedEventVerdict: TypeAlias = PostApiScenarioEventsBodyType1ResultsType0Verdict
|
27
|
+
ScenarioRunFinishedEventStatus: TypeAlias = PostApiScenarioEventsBodyType1Status
|
27
28
|
|
28
|
-
# Type alias for message types
|
29
|
-
MessageType = Union[
|
30
|
-
PostApiScenarioEventsBodyType2MessagesItemType0,
|
31
|
-
PostApiScenarioEventsBodyType2MessagesItemType1,
|
32
|
-
PostApiScenarioEventsBodyType2MessagesItemType2,
|
33
|
-
PostApiScenarioEventsBodyType2MessagesItemType3,
|
34
|
-
PostApiScenarioEventsBodyType2MessagesItemType4,
|
35
|
-
]
|
36
29
|
|
37
30
|
class ScenarioRunStartedEvent(PostApiScenarioEventsBodyType0):
|
38
31
|
"""
|
39
32
|
Event published when a scenario run begins execution.
|
40
|
-
|
33
|
+
|
41
34
|
Automatically sets type_ to "SCENARIO_RUN_STARTED" and includes metadata
|
42
35
|
about the scenario (name, description, etc.).
|
43
|
-
|
36
|
+
|
44
37
|
Args:
|
45
38
|
batch_run_id (str): Unique identifier for the batch of scenario runs
|
46
39
|
scenario_id (str): Unique identifier for the scenario definition
|
@@ -74,10 +67,10 @@ class ScenarioRunStartedEvent(PostApiScenarioEventsBodyType0):
|
|
74
67
|
class ScenarioRunFinishedEvent(PostApiScenarioEventsBodyType1):
|
75
68
|
"""
|
76
69
|
Event published when a scenario run completes execution.
|
77
|
-
|
70
|
+
|
78
71
|
Automatically sets type_ to "SCENARIO_RUN_FINISHED" and includes results
|
79
72
|
with verdict (PASS/FAIL/SUCCESS) and reasoning.
|
80
|
-
|
73
|
+
|
81
74
|
Args:
|
82
75
|
batch_run_id (str): Unique identifier for the batch of scenario runs
|
83
76
|
scenario_id (str): Unique identifier for the scenario definition
|
@@ -114,10 +107,10 @@ class ScenarioRunFinishedEvent(PostApiScenarioEventsBodyType1):
|
|
114
107
|
class ScenarioMessageSnapshotEvent(PostApiScenarioEventsBodyType2):
|
115
108
|
"""
|
116
109
|
Event published to capture intermediate state during scenario execution.
|
117
|
-
|
110
|
+
|
118
111
|
Automatically sets type_ to "SCENARIO_MESSAGE_SNAPSHOT" and allows tracking
|
119
112
|
of messages, context, or other runtime data during scenario processing.
|
120
|
-
|
113
|
+
|
121
114
|
Args:
|
122
115
|
batch_run_id (str): Unique identifier for the batch of scenario runs
|
123
116
|
scenario_id (str): Unique identifier for the scenario definition
|
@@ -151,7 +144,7 @@ class ScenarioMessageSnapshotEvent(PostApiScenarioEventsBodyType2):
|
|
151
144
|
# Union type for all supported event types
|
152
145
|
ScenarioEvent = Union[
|
153
146
|
ScenarioRunStartedEvent,
|
154
|
-
ScenarioRunFinishedEvent,
|
147
|
+
ScenarioRunFinishedEvent,
|
155
148
|
ScenarioMessageSnapshotEvent
|
156
149
|
]
|
157
150
|
|
@@ -0,0 +1,58 @@
|
|
1
|
+
"""
|
2
|
+
Exports message models from the generated LangWatch API client,
|
3
|
+
renaming the auto-generated types to clean, meaningful names.
|
4
|
+
|
5
|
+
This ensures all message types are always in sync with the OpenAPI spec and
|
6
|
+
the backend, and provides a single import location for message models.
|
7
|
+
|
8
|
+
If you need to add custom logic or helpers, you can extend or wrap these models here.
|
9
|
+
"""
|
10
|
+
|
11
|
+
from typing import Union, TypeAlias
|
12
|
+
from scenario._generated.langwatch_api_client.lang_watch_api_client.models import (
|
13
|
+
PostApiScenarioEventsBodyType2MessagesItemType0,
|
14
|
+
PostApiScenarioEventsBodyType2MessagesItemType1,
|
15
|
+
PostApiScenarioEventsBodyType2MessagesItemType2,
|
16
|
+
PostApiScenarioEventsBodyType2MessagesItemType3,
|
17
|
+
PostApiScenarioEventsBodyType2MessagesItemType4,
|
18
|
+
PostApiScenarioEventsBodyType2MessagesItemType2ToolCallsItem,
|
19
|
+
PostApiScenarioEventsBodyType2MessagesItemType2ToolCallsItemFunction,
|
20
|
+
)
|
21
|
+
|
22
|
+
# Create aliases for cleaner naming
|
23
|
+
DeveloperMessage: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType0
|
24
|
+
SystemMessage: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType1
|
25
|
+
AssistantMessage: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType2
|
26
|
+
UserMessage: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType3
|
27
|
+
ToolMessage: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType4
|
28
|
+
ToolCall: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType2ToolCallsItem
|
29
|
+
FunctionCall: TypeAlias = PostApiScenarioEventsBodyType2MessagesItemType2ToolCallsItemFunction
|
30
|
+
|
31
|
+
# Union type for all supported message types
|
32
|
+
MessageType = Union[
|
33
|
+
DeveloperMessage,
|
34
|
+
SystemMessage,
|
35
|
+
AssistantMessage,
|
36
|
+
UserMessage,
|
37
|
+
ToolMessage,
|
38
|
+
]
|
39
|
+
|
40
|
+
__all__ = [
|
41
|
+
"MessageType",
|
42
|
+
"DeveloperMessage",
|
43
|
+
"SystemMessage",
|
44
|
+
"AssistantMessage",
|
45
|
+
"UserMessage",
|
46
|
+
"ToolMessage",
|
47
|
+
"ToolCall",
|
48
|
+
"FunctionCall",
|
49
|
+
|
50
|
+
# API client models -- Required for PDocs
|
51
|
+
"PostApiScenarioEventsBodyType2MessagesItemType0",
|
52
|
+
"PostApiScenarioEventsBodyType2MessagesItemType1",
|
53
|
+
"PostApiScenarioEventsBodyType2MessagesItemType2",
|
54
|
+
"PostApiScenarioEventsBodyType2MessagesItemType3",
|
55
|
+
"PostApiScenarioEventsBodyType2MessagesItemType4",
|
56
|
+
"PostApiScenarioEventsBodyType2MessagesItemType2ToolCallsItem",
|
57
|
+
"PostApiScenarioEventsBodyType2MessagesItemType2ToolCallsItemFunction",
|
58
|
+
]
|
@@ -1,86 +1,97 @@
|
|
1
|
+
import warnings
|
1
2
|
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
|
2
|
-
from .
|
3
|
-
from
|
4
|
-
|
3
|
+
from .events import MessageType
|
4
|
+
from .messages import (
|
5
|
+
SystemMessage,
|
6
|
+
AssistantMessage,
|
7
|
+
UserMessage,
|
8
|
+
ToolMessage,
|
9
|
+
ToolCall,
|
10
|
+
FunctionCall,
|
11
|
+
)
|
12
|
+
from typing import List
|
5
13
|
import uuid
|
6
14
|
|
7
|
-
|
8
|
-
Message = Union[UserMessage, AssistantMessage, SystemMessage, ToolMessage]
|
9
|
-
|
10
|
-
def convert_messages_to_ag_ui_messages(messages: list[ChatCompletionMessageParam]) -> list[Message]:
|
15
|
+
def convert_messages_to_api_client_messages(messages: list[ChatCompletionMessageParam]) -> list[MessageType]:
|
11
16
|
"""
|
12
|
-
Converts OpenAI ChatCompletionMessageParam messages to
|
13
|
-
|
14
|
-
This function transforms messages from OpenAI's format to the
|
15
|
-
|
16
|
-
|
17
|
+
Converts OpenAI ChatCompletionMessageParam messages to API client Message format.
|
18
|
+
|
19
|
+
This function transforms messages from OpenAI's format to the API client format
|
20
|
+
that matches the expected schema for ScenarioMessageSnapshotEvent.
|
21
|
+
|
17
22
|
Args:
|
18
23
|
messages: List of OpenAI ChatCompletionMessageParam messages
|
19
|
-
|
24
|
+
|
20
25
|
Returns:
|
21
|
-
List of
|
22
|
-
|
26
|
+
List of API client Message objects
|
27
|
+
|
23
28
|
Raises:
|
24
29
|
ValueError: If message role is not supported or message format is invalid
|
25
30
|
"""
|
26
31
|
|
27
|
-
converted_messages: list[
|
28
|
-
|
32
|
+
converted_messages: list[MessageType] = []
|
33
|
+
|
29
34
|
for i, message in enumerate(messages):
|
30
35
|
# Generate unique ID for each message
|
31
36
|
message_id = message.get("id") or str(uuid.uuid4())
|
32
|
-
|
37
|
+
|
33
38
|
role = message.get("role")
|
34
39
|
content = message.get("content")
|
35
|
-
|
40
|
+
|
36
41
|
if role == "user":
|
37
42
|
if not content:
|
38
43
|
raise ValueError(f"User message at index {i} missing required content")
|
39
44
|
converted_messages.append(UserMessage(
|
40
45
|
id=message_id,
|
46
|
+
role="user",
|
41
47
|
content=str(content)
|
42
48
|
))
|
43
49
|
elif role == "assistant":
|
44
50
|
# Handle tool calls if present
|
45
51
|
tool_calls = message.get("tool_calls")
|
46
|
-
|
47
|
-
|
52
|
+
api_tool_calls: List[ToolCall] = []
|
53
|
+
|
48
54
|
if tool_calls:
|
49
|
-
ag_ui_tool_calls = []
|
50
55
|
for tool_call in tool_calls:
|
51
|
-
|
56
|
+
api_tool_calls.append(ToolCall(
|
52
57
|
id=tool_call.get("id", str(uuid.uuid4())),
|
58
|
+
type_="function",
|
53
59
|
function=FunctionCall(
|
54
|
-
name=tool_call["function"]
|
55
|
-
arguments=tool_call["function"]
|
60
|
+
name=tool_call["function"].get("name", "unknown"),
|
61
|
+
arguments=tool_call["function"].get("arguments", "{}")
|
56
62
|
)
|
57
63
|
))
|
58
|
-
|
64
|
+
|
59
65
|
converted_messages.append(AssistantMessage(
|
60
66
|
id=message_id,
|
61
|
-
|
62
|
-
|
67
|
+
role="assistant",
|
68
|
+
content=str(content),
|
69
|
+
tool_calls=api_tool_calls
|
63
70
|
))
|
64
71
|
elif role == "system":
|
65
72
|
if not content:
|
66
73
|
raise ValueError(f"System message at index {i} missing required content")
|
67
74
|
converted_messages.append(SystemMessage(
|
68
75
|
id=message_id,
|
76
|
+
role="system",
|
69
77
|
content=str(content)
|
70
78
|
))
|
71
79
|
elif role == "tool":
|
72
80
|
tool_call_id = message.get("tool_call_id")
|
73
81
|
if not tool_call_id:
|
74
|
-
|
82
|
+
warnings.warn(f"Tool message at index {i} missing required tool_call_id, skipping tool message")
|
83
|
+
continue
|
75
84
|
if not content:
|
76
|
-
|
77
|
-
|
85
|
+
warnings.warn(f"Tool message at index {i} missing required content, skipping tool message")
|
86
|
+
continue
|
87
|
+
|
78
88
|
converted_messages.append(ToolMessage(
|
79
89
|
id=message_id,
|
90
|
+
role="tool",
|
80
91
|
content=str(content),
|
81
92
|
tool_call_id=tool_call_id
|
82
93
|
))
|
83
94
|
else:
|
84
95
|
raise ValueError(f"Unsupported message role '{role}' at index {i}")
|
85
|
-
|
96
|
+
|
86
97
|
return converted_messages
|
@@ -0,0 +1,139 @@
|
|
1
|
+
# lang-watch-api-client
|
2
|
+
**⚠️ AUTO-GENERATED CODE - DO NOT EDIT MANUALLY ⚠️**
|
3
|
+
|
4
|
+
This is an auto-generated client library for accessing LangWatch API, created using `openapi-python-client`.
|
5
|
+
|
6
|
+
## Regeneration
|
7
|
+
To regenerate this client:
|
8
|
+
```bash
|
9
|
+
make generate-openapi-client
|
10
|
+
```
|
11
|
+
|
12
|
+
## Source
|
13
|
+
Generated from: `../langwatch-saas/langwatch/langwatch/src/app/api/openapiLangWatch.json`
|
14
|
+
|
15
|
+
---
|
16
|
+
|
17
|
+
A client library for accessing LangWatch API
|
18
|
+
|
19
|
+
## Usage
|
20
|
+
First, create a client:
|
21
|
+
|
22
|
+
```python
|
23
|
+
from lang_watch_api_client import Client
|
24
|
+
|
25
|
+
client = Client(base_url="https://api.langwatch.ai")
|
26
|
+
```
|
27
|
+
|
28
|
+
If the endpoints you're going to hit require authentication, use `AuthenticatedClient` instead:
|
29
|
+
|
30
|
+
```python
|
31
|
+
from lang_watch_api_client import AuthenticatedClient
|
32
|
+
|
33
|
+
client = AuthenticatedClient(base_url="https://api.langwatch.ai", token="SuperSecretToken")
|
34
|
+
```
|
35
|
+
|
36
|
+
Now call your endpoint and use your models:
|
37
|
+
|
38
|
+
```python
|
39
|
+
from lang_watch_api_client.models import MyDataModel
|
40
|
+
from lang_watch_api_client.api.my_tag import get_my_data_model
|
41
|
+
from lang_watch_api_client.types import Response
|
42
|
+
|
43
|
+
with client as client:
|
44
|
+
my_data: MyDataModel = get_my_data_model.sync(client=client)
|
45
|
+
# or if you need more info (e.g. status_code)
|
46
|
+
response: Response[MyDataModel] = get_my_data_model.sync_detailed(client=client)
|
47
|
+
```
|
48
|
+
|
49
|
+
Or do the same thing with an async version:
|
50
|
+
|
51
|
+
```python
|
52
|
+
from lang_watch_api_client.models import MyDataModel
|
53
|
+
from lang_watch_api_client.api.my_tag import get_my_data_model
|
54
|
+
from lang_watch_api_client.types import Response
|
55
|
+
|
56
|
+
async with client as client:
|
57
|
+
my_data: MyDataModel = await get_my_data_model.asyncio(client=client)
|
58
|
+
response: Response[MyDataModel] = await get_my_data_model.asyncio_detailed(client=client)
|
59
|
+
```
|
60
|
+
|
61
|
+
By default, when you're calling an HTTPS API it will attempt to verify that SSL is working correctly. Using certificate verification is highly recommended most of the time, but sometimes you may need to authenticate to a server (especially an internal server) using a custom certificate bundle.
|
62
|
+
|
63
|
+
```python
|
64
|
+
client = AuthenticatedClient(
|
65
|
+
base_url="https://internal_api.langwatch.ai",
|
66
|
+
token="SuperSecretToken",
|
67
|
+
verify_ssl="/path/to/certificate_bundle.pem",
|
68
|
+
)
|
69
|
+
```
|
70
|
+
|
71
|
+
You can also disable certificate validation altogether, but beware that **this is a security risk**.
|
72
|
+
|
73
|
+
```python
|
74
|
+
client = AuthenticatedClient(
|
75
|
+
base_url="https://internal_api.langwatch.ai",
|
76
|
+
token="SuperSecretToken",
|
77
|
+
verify_ssl=False
|
78
|
+
)
|
79
|
+
```
|
80
|
+
|
81
|
+
Things to know:
|
82
|
+
1. Every path/method combo becomes a Python module with four functions:
|
83
|
+
1. `sync`: Blocking request that returns parsed data (if successful) or `None`
|
84
|
+
1. `sync_detailed`: Blocking request that always returns a `Request`, optionally with `parsed` set if the request was successful.
|
85
|
+
1. `asyncio`: Like `sync` but async instead of blocking
|
86
|
+
1. `asyncio_detailed`: Like `sync_detailed` but async instead of blocking
|
87
|
+
|
88
|
+
1. All path/query params, and bodies become method arguments.
|
89
|
+
1. If your endpoint had any tags on it, the first tag will be used as a module name for the function (my_tag above)
|
90
|
+
1. Any endpoint which did not have a tag will be in `lang_watch_api_client.api.default`
|
91
|
+
|
92
|
+
## Advanced customizations
|
93
|
+
|
94
|
+
There are more settings on the generated `Client` class which let you control more runtime behavior, check out the docstring on that class for more info. You can also customize the underlying `httpx.Client` or `httpx.AsyncClient` (depending on your use-case):
|
95
|
+
|
96
|
+
```python
|
97
|
+
from lang_watch_api_client import Client
|
98
|
+
|
99
|
+
def log_request(request):
|
100
|
+
print(f"Request event hook: {request.method} {request.url} - Waiting for response")
|
101
|
+
|
102
|
+
def log_response(response):
|
103
|
+
request = response.request
|
104
|
+
print(f"Response event hook: {request.method} {request.url} - Status {response.status_code}")
|
105
|
+
|
106
|
+
client = Client(
|
107
|
+
base_url="https://api.langwatch.ai",
|
108
|
+
httpx_args={"event_hooks": {"request": [log_request], "response": [log_response]}},
|
109
|
+
)
|
110
|
+
|
111
|
+
# Or get the underlying httpx client to modify directly with client.get_httpx_client() or client.get_async_httpx_client()
|
112
|
+
```
|
113
|
+
|
114
|
+
You can even set the httpx client directly, but beware that this will override any existing settings (e.g., base_url):
|
115
|
+
|
116
|
+
```python
|
117
|
+
import httpx
|
118
|
+
from lang_watch_api_client import Client
|
119
|
+
|
120
|
+
client = Client(
|
121
|
+
base_url="https://api.langwatch.ai",
|
122
|
+
)
|
123
|
+
# Note that base_url needs to be re-set, as would any shared cookies, headers, etc.
|
124
|
+
client.set_httpx_client(httpx.Client(base_url="https://api.langwatch.ai", proxies="http://localhost:8030"))
|
125
|
+
```
|
126
|
+
|
127
|
+
## Building / publishing this package
|
128
|
+
This project uses [Poetry](https://python-poetry.org/) to manage dependencies and packaging. Here are the basics:
|
129
|
+
1. Update the metadata in pyproject.toml (e.g. authors, version)
|
130
|
+
1. If you're using a private repository, configure it with Poetry
|
131
|
+
1. `poetry config repositories.<your-repository-name> <url-to-your-repository>`
|
132
|
+
1. `poetry config http-basic.<your-repository-name> <username> <password>`
|
133
|
+
1. Publish the client with `poetry publish --build -r <your-repository-name>` or, if for public PyPI, just `poetry publish --build`
|
134
|
+
|
135
|
+
If you want to install this client into another project without publishing it (e.g. for development) then:
|
136
|
+
1. If that project **is using Poetry**, you can simply do `poetry add <path-to-this-client>` from that project
|
137
|
+
1. If that project is not using Poetry:
|
138
|
+
1. Build a wheel with `poetry build -f wheel`
|
139
|
+
1. Install that wheel from the other project `pip install <path-to-wheel>`
|
@@ -0,0 +1,13 @@
|
|
1
|
+
# AUTO-GENERATED CODE - DO NOT EDIT MANUALLY
|
2
|
+
# Generated by openapi-python-client from ../../langwatch-saas/langwatch/langwatch/src/app/api/openapiLangWatch.json
|
3
|
+
#
|
4
|
+
# To regenerate: make generate-openapi-client
|
5
|
+
|
6
|
+
"""A client library for accessing LangWatch API"""
|
7
|
+
|
8
|
+
from .client import AuthenticatedClient, Client
|
9
|
+
|
10
|
+
__all__ = (
|
11
|
+
"AuthenticatedClient",
|
12
|
+
"Client",
|
13
|
+
)
|
@@ -0,0 +1 @@
|
|
1
|
+
"""Contains methods for accessing the API"""
|
@@ -0,0 +1 @@
|
|
1
|
+
"""Contains endpoint functions for accessing the API"""
|