langwatch-scenario 0.4.0__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langwatch_scenario-0.4.0.dist-info → langwatch_scenario-0.6.0.dist-info}/METADATA +93 -71
- langwatch_scenario-0.6.0.dist-info/RECORD +27 -0
- scenario/__init__.py +11 -114
- scenario/_utils/__init__.py +32 -0
- scenario/_utils/ids.py +58 -0
- scenario/_utils/message_conversion.py +103 -0
- scenario/{utils.py → _utils/utils.py} +21 -110
- scenario/agent_adapter.py +8 -4
- scenario/cache.py +4 -3
- scenario/config.py +7 -5
- scenario/events/__init__.py +66 -0
- scenario/events/event_bus.py +175 -0
- scenario/events/event_reporter.py +83 -0
- scenario/events/events.py +169 -0
- scenario/events/messages.py +84 -0
- scenario/events/utils.py +86 -0
- scenario/judge_agent.py +7 -28
- scenario/pytest_plugin.py +2 -47
- scenario/scenario_executor.py +268 -84
- scenario/scenario_state.py +6 -6
- scenario/script.py +9 -9
- scenario/types.py +10 -6
- scenario/user_simulator_agent.py +4 -11
- langwatch_scenario-0.4.0.dist-info/RECORD +0 -18
- {langwatch_scenario-0.4.0.dist-info → langwatch_scenario-0.6.0.dist-info}/WHEEL +0 -0
- {langwatch_scenario-0.4.0.dist-info → langwatch_scenario-0.6.0.dist-info}/entry_points.txt +0 -0
- {langwatch_scenario-0.4.0.dist-info → langwatch_scenario-0.6.0.dist-info}/top_level.txt +0 -0
- /scenario/{error_messages.py → _error_messages.py} +0 -0
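The rename of scenario/utils.py into the private scenario/_utils/ package is the change most likely to affect code that imported these helpers directly. A minimal sketch of the updated import paths, assuming code that previously reached into the old internal module (the top-level scenario re-exports are not shown in this diff and may differ):

# 0.4.0 (old, internal paths)
# from scenario.utils import SerializableWithStringFallback
# from scenario.utils import convert_agent_return_types_to_openai_messages

# 0.6.0 (new, internal paths)
from scenario._utils.utils import SerializableWithStringFallback
from scenario._utils.message_conversion import convert_agent_return_types_to_openai_messages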
scenario/_utils/message_conversion.py
ADDED
@@ -0,0 +1,103 @@
+"""
+Message conversion utilities for scenario execution.
+
+This module provides functions for converting between different message formats
+used in scenario execution, particularly for normalizing agent return types
+to OpenAI-compatible message formats.
+"""
+
+import json
+from typing import Any, List, Literal, TypeVar, cast
+from pydantic import BaseModel
+from openai.types.chat import ChatCompletionMessageParam
+
+from scenario.types import AgentReturnTypes, ScenarioResult
+from .utils import SerializableAndPydanticEncoder
+
+T = TypeVar("T")
+
+
+def convert_agent_return_types_to_openai_messages(
+    agent_response: AgentReturnTypes, role: Literal["user", "assistant"]
+) -> List[ChatCompletionMessageParam]:
+    """
+    Convert various agent return types to standardized OpenAI message format.
+
+    This function normalizes different return types from agent adapters into
+    a consistent list of OpenAI-compatible messages that can be used throughout
+    the scenario execution pipeline.
+
+    Args:
+        agent_response: Response from an agent adapter call
+        role: The role to assign to string responses ("user" or "assistant")
+
+    Returns:
+        List of OpenAI-compatible messages
+
+    Raises:
+        ValueError: If agent_response is a ScenarioResult (which should be handled separately)
+
+    Example:
+        ```
+        # String response
+        messages = convert_agent_return_types_to_openai_messages("Hello", "assistant")
+        # Result: [{"role": "assistant", "content": "Hello"}]
+
+        # Dict response
+        response = {"role": "assistant", "content": "Hi", "tool_calls": [...]}
+        messages = convert_agent_return_types_to_openai_messages(response, "assistant")
+        # Result: [{"role": "assistant", "content": "Hi", "tool_calls": [...]}]
+
+        # List response
+        responses = [
+            {"role": "assistant", "content": "Thinking..."},
+            {"role": "assistant", "content": "Here's the answer"}
+        ]
+        messages = convert_agent_return_types_to_openai_messages(responses, "assistant")
+        # Result: Same list, validated and normalized
+        ```
+    """
+    if isinstance(agent_response, ScenarioResult):
+        raise ValueError(
+            "Unexpectedly tried to convert a ScenarioResult to openai messages",
+            agent_response.__repr__(),
+        )
+
+    def convert_maybe_object_to_openai_message(
+        obj: Any,
+    ) -> ChatCompletionMessageParam:
+        if isinstance(obj, dict):
+            return cast(ChatCompletionMessageParam, obj)
+        elif isinstance(obj, BaseModel):
+            return cast(
+                ChatCompletionMessageParam,
+                obj.model_dump(
+                    exclude_unset=True,
+                    exclude_none=True,
+                    exclude_defaults=True,
+                    warnings=False,
+                ),
+            )
+        else:
+            raise ValueError(f"Unexpected agent response type: {type(obj).__name__}")
+
+    def ensure_dict(
+        obj: T,
+    ) -> T:
+        return json.loads(json.dumps(obj, cls=SerializableAndPydanticEncoder))
+
+    if isinstance(agent_response, str):
+        return [
+            (
+                {"role": "user", "content": agent_response}
+                if role == "user"
+                else {"role": "assistant", "content": agent_response}
+            )
+        ]
+    elif isinstance(agent_response, list):
+        return [
+            ensure_dict(convert_maybe_object_to_openai_message(message))
+            for message in agent_response
+        ]
+    else:
+        return [ensure_dict(convert_maybe_object_to_openai_message(agent_response))]
scenario/{utils.py → _utils/utils.py}
RENAMED
@@ -11,13 +11,10 @@ import sys
 from typing import (
     Any,
     Iterator,
-    List,
-    Literal,
     Optional,
     Union,
     TypeVar,
     Awaitable,
-    cast,
 )
 from pydantic import BaseModel
 import copy
@@ -33,8 +30,8 @@ from rich.console import Console
 from rich.text import Text
 from rich.errors import LiveError
 
-from scenario.
-from scenario.types import
+from scenario._error_messages import message_return_error_message
+from scenario.types import ScenarioResult
 
 T = TypeVar("T")
 
@@ -48,7 +45,7 @@ class SerializableAndPydanticEncoder(json.JSONEncoder):
     Used for caching and logging scenarios that contain complex objects.
 
     Example:
-        ```
+        ```
         data = {
             "model": SomeBaseModel(field="value"),
             "iterator": iter([1, 2, 3])
@@ -56,7 +53,7 @@ class SerializableAndPydanticEncoder(json.JSONEncoder):
         json.dumps(data, cls=SerializableAndPydanticEncoder)
         ```
     """
-    def default(self, o):
+    def default(self, o: Any) -> Any:
         if isinstance(o, BaseModel):
             return o.model_dump(exclude_unset=True)
         if isinstance(o, Iterator):
@@ -73,26 +70,26 @@ class SerializableWithStringFallback(SerializableAndPydanticEncoder):
     that logging and caching operations never fail due to serialization issues.
 
     Example:
-        ```
+        ```
         # This will work even with complex non-serializable objects
         data = {"function": lambda x: x, "complex_object": SomeComplexClass()}
         json.dumps(data, cls=SerializableWithStringFallback)
         # Result: {"function": "<function <lambda> at 0x...>", "complex_object": "..."}
         ```
     """
-    def default(self, o):
+    def default(self, o: Any) -> Any:
         try:
             return super().default(o)
         except:
             return str(o)
 
 
-def safe_list_at(list, index, default=None):
+def safe_list_at(list_obj: list, index: int, default: Any = None) -> Any:
     """
     Safely get an item from a list by index with a default fallback.
 
     Args:
-
+        list_obj: The list to access
         index: The index to retrieve
         default: Value to return if index is out of bounds
 
@@ -100,7 +97,7 @@ def safe_list_at(list, index, default=None):
         The item at the index, or the default value if index is invalid
 
     Example:
-        ```
+        ```
         items = ["a", "b", "c"]
         print(safe_list_at(items, 1))  # "b"
         print(safe_list_at(items, 10))  # None
@@ -108,12 +105,12 @@ def safe_list_at(list, index, default=None):
         ```
     """
     try:
-        return
+        return list_obj[index]
     except:
         return default
 
 
-def safe_attr_or_key(obj, attr_or_key, default=None):
+def safe_attr_or_key(obj: Any, attr_or_key: str, default: Any = None) -> Any:
     """
     Safely get an attribute or dictionary key from an object.
 
@@ -129,7 +126,7 @@ def safe_attr_or_key(obj, attr_or_key, default=None):
         The attribute/key value, or the default if not found
 
     Example:
-        ```
+        ```
         class MyClass:
             attr = "value"
 
@@ -141,10 +138,10 @@ def safe_attr_or_key(obj, attr_or_key, default=None):
         print(safe_attr_or_key(obj, "missing"))  # None
         ```
     """
-    return getattr(obj, attr_or_key, obj
+    return getattr(obj, attr_or_key, getattr(obj, 'get', lambda x, default=None: default)(attr_or_key, default))
 
 
-def title_case(string):
+def title_case(string: str) -> str:
     """
     Convert snake_case string to Title Case.
 
@@ -155,7 +152,7 @@ def title_case(string):
         String converted to Title Case
 
     Example:
-        ```
+        ```
         print(title_case("user_simulator_agent"))  # "User Simulator Agent"
         print(title_case("api_key"))  # "Api Key"
         ```
@@ -178,7 +175,7 @@ def print_openai_messages(
         messages: List of OpenAI-compatible messages to print
 
     Example:
-        ```
+        ```
         messages = [
             {"role": "user", "content": "Hello"},
             {"role": "assistant", "content": "Hi there!"},
@@ -226,7 +223,7 @@ def print_openai_messages(
     )
 
 
-def _take_maybe_json_first_lines(string, max_lines=5):
+def _take_maybe_json_first_lines(string: str, max_lines: int = 5) -> str:
     """
     Truncate string content and format JSON if possible.
 
@@ -268,14 +265,14 @@ class TextFirstSpinner(Spinner):
         color: Color for the descriptive text
         **kwargs: Additional arguments passed to the base Spinner class
     """
-    def __init__(self, name, text: str, color: str, **kwargs):
+    def __init__(self, name: str, text: str, color: str, **kwargs: Any) -> None:
         super().__init__(
             name, "", style="bold white", **kwargs
         )  # Initialize with empty text
         self.text_before = text
         self.color = color
 
-    def render(self, time):
+    def render(self, time: float) -> Text:
         # Get the original spinner frame
         spinner_frame = super().render(time)
         # Create a composite with text first, then spinner
@@ -299,7 +296,7 @@ def show_spinner(
         enabled: Whether to show the spinner (respects verbose settings)
 
     Example:
-        ```
+        ```
         with show_spinner("Calling agent...", color="blue", enabled=True):
             response = await agent.call(input_data)
 
@@ -345,7 +342,7 @@ def check_valid_return_type(return_value: Any, class_name: str) -> None:
         ValueError: If the return value is not in a supported format
 
     Example:
-        ```
+        ```
         # Valid return values
         check_valid_return_type("Hello world", "MyAgent")  # OK
         check_valid_return_type({"role": "assistant", "content": "Hi"}, "MyAgent")  # OK
@@ -383,92 +380,6 @@ def check_valid_return_type(return_value: Any, class_name: str) -> None:
     )
 
 
-def convert_agent_return_types_to_openai_messages(
-    agent_response: AgentReturnTypes, role: Literal["user", "assistant"]
-) -> List[ChatCompletionMessageParam]:
-    """
-    Convert various agent return types to standardized OpenAI message format.
-
-    This function normalizes different return types from agent adapters into
-    a consistent list of OpenAI-compatible messages that can be used throughout
-    the scenario execution pipeline.
-
-    Args:
-        agent_response: Response from an agent adapter call
-        role: The role to assign to string responses ("user" or "assistant")
-
-    Returns:
-        List of OpenAI-compatible messages
-
-    Raises:
-        ValueError: If agent_response is a ScenarioResult (which should be handled separately)
-
-    Example:
-        ```python
-        # String response
-        messages = convert_agent_return_types_to_openai_messages("Hello", "assistant")
-        # Result: [{"role": "assistant", "content": "Hello"}]
-
-        # Dict response
-        response = {"role": "assistant", "content": "Hi", "tool_calls": [...]}
-        messages = convert_agent_return_types_to_openai_messages(response, "assistant")
-        # Result: [{"role": "assistant", "content": "Hi", "tool_calls": [...]}]
-
-        # List response
-        responses = [
-            {"role": "assistant", "content": "Thinking..."},
-            {"role": "assistant", "content": "Here's the answer"}
-        ]
-        messages = convert_agent_return_types_to_openai_messages(responses, "assistant")
-        # Result: Same list, validated and normalized
-        ```
-    """
-    if isinstance(agent_response, ScenarioResult):
-        raise ValueError(
-            "Unexpectedly tried to convert a ScenarioResult to openai messages",
-            agent_response.__repr__(),
-        )
-
-    def convert_maybe_object_to_openai_message(
-        obj: Any,
-    ) -> ChatCompletionMessageParam:
-        if isinstance(obj, dict):
-            return cast(ChatCompletionMessageParam, obj)
-        elif isinstance(obj, BaseModel):
-            return cast(
-                ChatCompletionMessageParam,
-                obj.model_dump(
-                    exclude_unset=True,
-                    exclude_none=True,
-                    exclude_defaults=True,
-                    warnings=False,
-                ),
-            )
-        else:
-            raise ValueError(f"Unexpected agent response type: {type(obj).__name__}")
-
-    def ensure_dict(
-        obj: T,
-    ) -> T:
-        return json.loads(json.dumps(obj, cls=SerializableAndPydanticEncoder))
-
-    if isinstance(agent_response, str):
-        return [
-            (
-                {"role": "user", "content": agent_response}
-                if role == "user"
-                else {"role": "assistant", "content": agent_response}
-            )
-        ]
-    elif isinstance(agent_response, list):
-        return [
-            ensure_dict(convert_maybe_object_to_openai_message(message))
-            for message in agent_response
-        ]
-    else:
-        return [ensure_dict(convert_maybe_object_to_openai_message(agent_response))]
-
-
 def reverse_roles(
     messages: list[ChatCompletionMessageParam],
 ) -> list[ChatCompletionMessageParam]:
scenario/agent_adapter.py
CHANGED
@@ -26,9 +26,9 @@ class AgentAdapter(ABC):
         role: The role this agent plays in scenarios (USER, AGENT, or JUDGE)
 
     Example:
-        ```
+        ```
         import scenario
-        from
+        from my_agent import MyCustomAgent
 
         class MyAgentAdapter(scenario.AgentAdapter):
             def __init__(self):
@@ -66,6 +66,7 @@ class AgentAdapter(ABC):
     - For stateful agents, use input.thread_id to maintain conversation context
     - For stateless agents, use input.messages for the full conversation history
     """
+
     role: ClassVar[AgentRole] = AgentRole.AGENT
 
     @abstractmethod
@@ -82,13 +83,17 @@ class AgentAdapter(ABC):
 
         Returns:
             AgentReturnTypes: The agent's response, which can be:
+
             - str: Simple text response
+
             - ChatCompletionMessageParam: Single OpenAI-format message
+
             - List[ChatCompletionMessageParam]: Multiple messages for complex responses
+
             - ScenarioResult: Direct test result (typically only used by judge agents)
 
         Example:
-            ```
+            ```
             async def call(self, input: AgentInput) -> AgentReturnTypes:
                 # Simple string response
                 user_msg = input.last_new_user_message_str()
@@ -98,7 +103,6 @@ class AgentAdapter(ABC):
                 return {
                     "role": "assistant",
                     "content": "Let me help you with that...",
-                    "tool_calls": [...]  # If your agent uses tools
                 }
 
                 # Or multiple messages for complex interactions
scenario/cache.py
CHANGED
@@ -18,7 +18,7 @@ import json
 
 import wrapt
 from scenario.types import AgentInput
-from scenario.utils import SerializableWithStringFallback
+from scenario._utils.utils import SerializableWithStringFallback
 
 if TYPE_CHECKING:
     from scenario.scenario_executor import ScenarioExecutor
@@ -39,7 +39,7 @@ def get_cache() -> Memory:
         Memory instance configured with the appropriate cache directory
 
     Example:
-        ```
+        ```
         # Default cache location: ~/.scenario/cache
         cache = get_cache()
 
@@ -75,7 +75,7 @@ def scenario_cache(ignore=[]):
         Decorator function that can be applied to any function or method
 
     Example:
-        ```
+        ```
         import scenario
 
         class MyAgent:
@@ -105,6 +105,7 @@ def scenario_cache(ignore=[]):
     - AgentInput objects are specially handled to exclude thread_id from caching
     - Both sync and async functions are supported
     """
+
    @wrapt.decorator
    def wrapper(wrapped: Callable, instance=None, args=[], kwargs={}):
        scenario: "ScenarioExecutor" = context_scenario.get()
scenario/config.py
CHANGED
@@ -9,6 +9,7 @@ and debugging options.
 from typing import Optional, Union, ClassVar
 from pydantic import BaseModel
 
+
 class ModelConfig(BaseModel):
     """
     Configuration for LLM model settings.
@@ -23,7 +24,7 @@ class ModelConfig(BaseModel):
         max_tokens: Maximum number of tokens to generate in responses
 
     Example:
-        ```
+        ```
         model_config = ModelConfig(
             model="openai/gpt-4.1-mini",
             api_key="your-api-key",
@@ -32,6 +33,7 @@ class ModelConfig(BaseModel):
         )
         ```
     """
+
     model: str
     api_key: Optional[str] = None
     temperature: float = 0.0
@@ -54,7 +56,7 @@ class ScenarioConfig(BaseModel):
         debug: Whether to enable debug mode with step-by-step interaction
 
     Example:
-        ```
+        ```
         # Configure globally for all scenarios
         scenario.configure(
             default_model="openai/gpt-4.1-mini",
@@ -106,7 +108,7 @@ class ScenarioConfig(BaseModel):
             debug: Enable debug mode for step-by-step execution with user intervention
 
         Example:
-            ```
+            ```
             import scenario
 
             # Set up default configuration
@@ -151,7 +153,7 @@ class ScenarioConfig(BaseModel):
             A new ScenarioConfig instance with merged values
 
         Example:
-            ```
+            ```
             base_config = ScenarioConfig(max_turns=10, verbose=True)
             override_config = ScenarioConfig(max_turns=20)
 
@@ -174,7 +176,7 @@ class ScenarioConfig(BaseModel):
             Dictionary of configuration key-value pairs, excluding None values
 
         Example:
-            ```
+            ```
             config = ScenarioConfig(max_turns=15, verbose=True)
             items = config.items()
             # Result: {"max_turns": 15, "verbose": True}
scenario/events/__init__.py
ADDED
@@ -0,0 +1,66 @@
+"""
+Scenario events module for handling event publishing, processing, and reporting.
+
+This module provides event models, an event bus for processing, and utilities
+for converting between different message formats.
+"""
+
+# Core event types and models
+from .events import (
+    ScenarioEvent,
+    ScenarioRunStartedEvent,
+    ScenarioRunStartedEventMetadata,
+    ScenarioRunFinishedEvent,
+    ScenarioRunFinishedEventResults,
+    ScenarioRunFinishedEventVerdict,
+    ScenarioRunFinishedEventStatus,
+    ScenarioMessageSnapshotEvent,
+    MessageType,
+)
+
+# Event processing infrastructure
+from .event_bus import ScenarioEventBus
+from .event_reporter import EventReporter
+
+# Message utilities and types
+from .messages import (
+    Message,
+    UserMessage,
+    AssistantMessage,
+    SystemMessage,
+    ToolMessage,
+    ToolCall,
+    FunctionCall,
+)
+
+# Utility functions
+from .utils import convert_messages_to_ag_ui_messages
+
+__all__ = [
+    # Event types
+    "ScenarioEvent",
+    "ScenarioRunStartedEvent",
+    "ScenarioRunStartedEventMetadata",
+    "ScenarioRunFinishedEvent",
+    "ScenarioRunFinishedEventResults",
+    "ScenarioRunFinishedEventVerdict",
+    "ScenarioRunFinishedEventStatus",
+    "ScenarioMessageSnapshotEvent",
+    "MessageType",
+
+    # Event processing
+    "ScenarioEventBus",
+    "EventReporter",
+
+    # Messages
+    "Message",
+    "UserMessage",
+    "AssistantMessage",
+    "SystemMessage",
+    "ToolMessage",
+    "ToolCall",
+    "FunctionCall",
+
+    # Utils
+    "convert_messages_to_ag_ui_messages",
+]