langwatch-scenario 0.7.10__py3-none-any.whl → 0.7.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/METADATA +1 -1
- {langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/RECORD +10 -10
- scenario/_events/event_alert_message_logger.py +1 -1
- scenario/config/model.py +25 -1
- scenario/config/scenario.py +1 -1
- scenario/judge_agent.py +22 -2
- scenario/user_simulator_agent.py +22 -2
- {langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/WHEEL +0 -0
- {langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/entry_points.txt +0 -0
- {langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/top_level.txt +0 -0
{langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/RECORD
CHANGED
@@ -2,16 +2,16 @@ scenario/__init__.py,sha256=4WO8TjY8Lc0NhYL7b9LvaB1xCBqwUkLuI0uIA6PQP6c,4223
 scenario/_error_messages.py,sha256=QVFSbhzsVNGz2GOBOaoQFW6w6AOyZCWLTt0ySWPfnGw,3882
 scenario/agent_adapter.py,sha256=PoY2KQqYuqzIIb3-nhIU-MPXwHJc1vmwdweMy7ut-hk,4255
 scenario/cache.py,sha256=J6s6Sia_Ce6TrnsInlhfxm6SF8tygo3sH-_cQCRX1WA,6213
-scenario/judge_agent.py,sha256=
+scenario/judge_agent.py,sha256=PiOgrHHPnwvCbIzEFEFdUH4NI9waLnvNfHNZwMSpkrU,17640
 scenario/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scenario/pytest_plugin.py,sha256=wRCuGD9uwrrLt2fY15zK6mnmY9W_dO_m0WalPJYE5II,11491
 scenario/scenario_executor.py,sha256=v41UgSHebosXf95FfYIeVUm6s4IbMP_U58FdGoZ_kZU,35653
 scenario/scenario_state.py,sha256=R8PhPHW3obYo3DCjBH5XDdZ6bp4uol7wCXO8K2Tz30I,7101
 scenario/script.py,sha256=A0N5pP0l4FFn1xdKc78U_wkwWhEWH3EFeU_LRDtNyEI,12241
 scenario/types.py,sha256=CRSCHUplXEXhj6EYQsncwJBzbd2128YTGlFxlk-rrG8,11193
-scenario/user_simulator_agent.py,sha256=
+scenario/user_simulator_agent.py,sha256=nBo_i2UKX_Vn_sR1mZIJHrYewhhJfE6tAxkijxW8cmY,10499
 scenario/_events/__init__.py,sha256=4cj6H9zuXzvWhT2P2JNdjWzeF1PUepTjqIDw85Vid9s,1500
-scenario/_events/event_alert_message_logger.py,sha256=
+scenario/_events/event_alert_message_logger.py,sha256=4zf9DV69ZkjHdLCyj7mgXdzpMy21YZLwXaVd8EB6AwY,2995
 scenario/_events/event_bus.py,sha256=IsKNsClF1JFYj728EcxX1hw_KbfDkfJq3Y2Kv4h94n4,9871
 scenario/_events/event_reporter.py,sha256=-6NNbBMy_FYr1O-1FuZ6eIUnLuI8NGRMUr0pybLJrCI,3873
 scenario/_events/events.py,sha256=UtEGY-_1B0LrwpgsNKgrvJBZhRtxuj3K_i6ZBfF7E4Q,6387
@@ -233,10 +233,10 @@ scenario/_utils/message_conversion.py,sha256=AWHn31E7J0mz9sBXWruVVAgtsrJz1R_xEf-
 scenario/_utils/utils.py,sha256=msQgUWaLh3U9jIIHmxkEbOaklga63AF0KJzsaKa_mZc,14008
 scenario/config/__init__.py,sha256=b2X_bqkIrd7jZY9dRrXk2wOqoPe87Nl_SRGuZhlolxA,1123
 scenario/config/langwatch.py,sha256=ijWchFbUsLbQooAZmwyTw4rxfRLQseZ1GoVSiPPbzpw,1677
-scenario/config/model.py,sha256=
-scenario/config/scenario.py,sha256=
-langwatch_scenario-0.7.
-langwatch_scenario-0.7.
-langwatch_scenario-0.7.
-langwatch_scenario-0.7.
-langwatch_scenario-0.7.
+scenario/config/model.py,sha256=VQtPkG-O1gVjtMIKbMOrbcnySv8pqODokC_VOaKVS4g,2054
+scenario/config/scenario.py,sha256=tUnzFBtuhPimU4EyaXm9T6KlpfntGtQaH5teW2F9oGk,5426
+langwatch_scenario-0.7.12.dist-info/METADATA,sha256=bnDVB7CRXwJxTmW5Rhu8lZ_CoqEVB881wIK5U0LD-kI,20065
+langwatch_scenario-0.7.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+langwatch_scenario-0.7.12.dist-info/entry_points.txt,sha256=WlEnJ_gku0i18bIa3DSuGqXRX-QDQLe_s0YmRzK45TI,45
+langwatch_scenario-0.7.12.dist-info/top_level.txt,sha256=45Mn28aedJsetnBMB5xSmrJ-yo701QLH89Zlz4r1clE,9
+langwatch_scenario-0.7.12.dist-info/RECORD,,
scenario/_events/event_alert_message_logger.py
CHANGED
@@ -77,7 +77,7 @@ class EventAlertMessageLogger:
         print(f"Follow it live: {batch_url}")
         print(f"{separator}\n")

-        config = ScenarioConfig.default_config
+        config = ScenarioConfig.default_config or ScenarioConfig()
         if config and not config.headless:
             # Open the URL in the default browser (cross-platform)
             try:
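The only behavioural change here is the `or ScenarioConfig()` fallback: when no global config has been set, the logger now checks the class defaults instead of a `None` value, so the headless check can still run. A minimal sketch of the pattern, using hypothetical stand-in names rather than the package's real classes:

```python
# Minimal sketch of the fallback pattern, with hypothetical stand-in names.
from typing import Optional


class Config:
    default_config: Optional["Config"] = None  # set once by a configure() call
    headless: bool = False


def maybe_open_browser(url: str) -> None:
    # Old behaviour: with no global config, `config` was None and the branch
    # below was skipped, so the URL was never opened.
    config = Config.default_config or Config()
    if config and not config.headless:
        print(f"Would open {url} in the default browser")


maybe_open_browser("https://example.invalid/simulation-batch")
```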
scenario/config/model.py
CHANGED
@@ -6,7 +6,7 @@ user simulator and judge agents in the Scenario framework.
 """

 from typing import Optional
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict


 class ModelConfig(BaseModel):
@@ -16,6 +16,9 @@ class ModelConfig(BaseModel):
     This class encapsulates all the parameters needed to configure an LLM model
     for use with user simulator and judge agents in the Scenario framework.

+    The ModelConfig accepts any additional parameters that litellm supports,
+    including headers, timeout, client, and other provider-specific options.
+
     Attributes:
         model: The model identifier (e.g., "openai/gpt-4.1", "anthropic/claude-3-sonnet")
         api_base: Optional base URL where the model is hosted
@@ -25,6 +28,7 @@ class ModelConfig(BaseModel):

     Example:
         ```
+        # Basic configuration
         model_config = ModelConfig(
             model="openai/gpt-4.1",
             api_base="https://api.openai.com/v1",
@@ -32,9 +36,29 @@ class ModelConfig(BaseModel):
             temperature=0.1,
             max_tokens=1000
         )
+
+        # With custom headers and timeout
+        model_config = ModelConfig(
+            model="openai/gpt-4",
+            headers={"X-Custom-Header": "value"},
+            timeout=60,
+            num_retries=3
+        )
+
+        # With custom OpenAI client
+        from openai import OpenAI
+        model_config = ModelConfig(
+            model="openai/gpt-4",
+            client=OpenAI(
+                base_url="https://custom.com",
+                default_headers={"X-Auth": "token"}
+            )
+        )
         ```
     """

+    model_config = ConfigDict(extra="allow")
+
     model: str
     api_base: Optional[str] = None
     api_key: Optional[str] = None
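The key line is `model_config = ConfigDict(extra="allow")`: pydantic keeps any undeclared keyword arguments on the instance instead of rejecting them, and they survive `model_dump()`, which is how the agents can forward them to litellm later. A self-contained sketch of that behaviour, using a stand-alone pydantic model rather than the package's own ModelConfig:

```python
# Sketch of what ConfigDict(extra="allow") buys here, with a hypothetical
# stand-in model instead of the package's ModelConfig.
from typing import Optional
from pydantic import BaseModel, ConfigDict


class LLMModelConfig(BaseModel):
    model_config = ConfigDict(extra="allow")  # keep unknown fields instead of raising

    model: str
    api_base: Optional[str] = None
    api_key: Optional[str] = None


cfg = LLMModelConfig(
    model="openai/gpt-4.1",
    headers={"X-Custom-Header": "value"},  # not a declared field, kept as an extra
    timeout=60,
)

# Extras are included in the dump, which is how agents can later forward them
# to the underlying completion call.
print(cfg.model_dump(exclude_none=True))
# {'model': 'openai/gpt-4.1', 'headers': {'X-Custom-Header': 'value'}, 'timeout': 60}
```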
scenario/config/scenario.py
CHANGED
@@ -65,7 +65,7 @@ class ScenarioConfig(BaseModel):
     @classmethod
     def configure(
         cls,
-        default_model: Optional[str] = None,
+        default_model: Optional[Union[str, ModelConfig]] = None,
         max_turns: Optional[int] = None,
         verbose: Optional[Union[bool, int]] = None,
         cache_key: Optional[str] = None,
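Widening `default_model` to `Union[str, ModelConfig]` means the global default can now carry the litellm extras described above. A hedged usage sketch; the import paths are inferred from the file layout in this diff and the package may re-export these names elsewhere:

```python
# Hedged usage sketch: configuring the global default model as a ModelConfig
# instead of a plain string. Import paths are inferred from this diff.
from scenario.config.model import ModelConfig
from scenario.config.scenario import ScenarioConfig

# Previously only a model-name string was accepted here.
ScenarioConfig.configure(default_model="openai/gpt-4.1")

# Now a full ModelConfig can be passed, including litellm extras such as
# headers and timeout that the judge and user-simulator agents pick up.
ScenarioConfig.configure(
    default_model=ModelConfig(
        model="openai/gpt-4.1",
        temperature=0.1,
        headers={"X-Custom-Header": "value"},
        timeout=60,
    )
)
```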
scenario/judge_agent.py
CHANGED
@@ -105,6 +105,7 @@ class JudgeAgent(AgentAdapter):
     max_tokens: Optional[int]
     criteria: List[str]
     system_prompt: Optional[str]
+    _extra_params: dict

     def __init__(
         self,
@@ -116,6 +117,7 @@ class JudgeAgent(AgentAdapter):
         temperature: float = 0.0,
         max_tokens: Optional[int] = None,
         system_prompt: Optional[str] = None,
+        **extra_params,
     ):
         """
         Initialize a judge agent with evaluation criteria.
@@ -159,8 +161,12 @@
                 system_prompt="You are a senior software engineer reviewing code for production use."
             )
             ```
+
+        Note:
+            Advanced usage: Additional parameters can be passed as keyword arguments
+            (e.g., headers, timeout, client) for specialized configurations. These are
+            experimental and may not be supported in future versions.
         """
-        # Override the default system prompt for the judge agent
         self.criteria = criteria or []
         self.api_base = api_base
         self.api_key = api_key
@@ -191,9 +197,22 @@
             self.max_tokens = (
                 max_tokens or ScenarioConfig.default_config.default_model.max_tokens
             )
+            # Extract extra params from ModelConfig
+            config_dict = ScenarioConfig.default_config.default_model.model_dump(
+                exclude_none=True
+            )
+            config_dict.pop("model", None)
+            config_dict.pop("api_base", None)
+            config_dict.pop("api_key", None)
+            config_dict.pop("temperature", None)
+            config_dict.pop("max_tokens", None)
+            # Merge: config extras < agent extra_params
+            self._extra_params = {**config_dict, **extra_params}
+        else:
+            self._extra_params = extra_params

         if not hasattr(self, "model"):
-            raise Exception(agent_not_configured_error_message("
+            raise Exception(agent_not_configured_error_message("JudgeAgent"))

     @scenario_cache()
     async def call(
@@ -370,6 +389,7 @@ if you don't have enough information to make a verdict, say inconclusive with ma
                     if (is_last_message or enforce_judgment) and has_criteria
                     else "required"
                 ),
+                **self._extra_params,
             ),
         )

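For callers, the net effect is that per-agent litellm options can be passed straight to `JudgeAgent`, and they override any extras carried by the global ModelConfig (the `{**config_dict, **extra_params}` merge above). A hedged sketch; the `model` and `criteria` keyword arguments are assumed from the attribute list in this diff, and the extra keywords are illustrative litellm options:

```python
# Hedged usage sketch of the new **extra_params pass-through. The `model` and
# `criteria` keyword arguments are assumed from the attributes shown in the
# diff; the extra keywords are illustrative.
from scenario.judge_agent import JudgeAgent

judge = JudgeAgent(
    model="openai/gpt-4.1",
    criteria=[
        "Agent explains the refund policy accurately",
        "Agent does not promise actions it cannot take",
    ],
    # Everything below lands in **extra_params and is expanded into the
    # litellm completion call, overriding extras from the global ModelConfig.
    headers={"X-Custom-Header": "value"},
    timeout=60,
    num_retries=3,
)
```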
scenario/user_simulator_agent.py
CHANGED
@@ -87,6 +87,7 @@ class UserSimulatorAgent(AgentAdapter):
     temperature: float
     max_tokens: Optional[int]
     system_prompt: Optional[str]
+    _extra_params: dict

     def __init__(
         self,
@@ -97,6 +98,7 @@ class UserSimulatorAgent(AgentAdapter):
         temperature: float = 0.0,
         max_tokens: Optional[int] = None,
         system_prompt: Optional[str] = None,
+        **extra_params,
     ):
         """
         Initialize a user simulator agent.
@@ -133,8 +135,12 @@ class UserSimulatorAgent(AgentAdapter):
                 '''
             )
             ```
+
+        Note:
+            Advanced usage: Additional parameters can be passed as keyword arguments
+            (e.g., headers, timeout, client) for specialized configurations. These are
+            experimental and may not be supported in future versions.
         """
-        # Override the default system prompt for the user simulator agent
         self.api_base = api_base
         self.api_key = api_key
         self.temperature = temperature
@@ -164,9 +170,22 @@ class UserSimulatorAgent(AgentAdapter):
             self.max_tokens = (
                 max_tokens or ScenarioConfig.default_config.default_model.max_tokens
             )
+            # Extract extra params from ModelConfig
+            config_dict = ScenarioConfig.default_config.default_model.model_dump(
+                exclude_none=True
+            )
+            config_dict.pop("model", None)
+            config_dict.pop("api_base", None)
+            config_dict.pop("api_key", None)
+            config_dict.pop("temperature", None)
+            config_dict.pop("max_tokens", None)
+            # Merge: config extras < agent extra_params
+            self._extra_params = {**config_dict, **extra_params}
+        else:
+            self._extra_params = extra_params

         if not hasattr(self, "model"):
-            raise Exception(agent_not_configured_error_message("
+            raise Exception(agent_not_configured_error_message("UserSimulatorAgent"))

     @scenario_cache()
     async def call(
@@ -237,6 +256,7 @@ Your goal (assistant) is to interact with the Agent Under Test (user) as if you
                 api_base=self.api_base,
                 max_tokens=self.max_tokens,
                 tools=[],
+                **self._extra_params,
             ),
         )

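Both agents apply the same merge rule for these extras: the global ModelConfig is dumped, its core fields are stripped, and per-agent keyword arguments win on conflicts. A minimal self-contained sketch of that rule, using plain dicts in place of the real ModelConfig:

```python
# Minimal sketch of the merge rule both agents apply, using plain dicts in
# place of the real ModelConfig. Field names mirror the diff above.
CORE_FIELDS = ("model", "api_base", "api_key", "temperature", "max_tokens")


def merge_extra_params(default_model_dump: dict, agent_extra_params: dict) -> dict:
    # Drop unset values and the core model fields; keep only the litellm extras.
    extras = {k: v for k, v in default_model_dump.items() if v is not None}
    for field in CORE_FIELDS:
        extras.pop(field, None)
    # Agent-level keyword arguments take precedence over config-level extras.
    return {**extras, **agent_extra_params}


merged = merge_extra_params(
    {"model": "openai/gpt-4.1", "timeout": 60, "headers": {"X-Auth": "token"}},
    {"timeout": 120},
)
print(merged)  # {'timeout': 120, 'headers': {'X-Auth': 'token'}}
```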
{langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/WHEEL
RENAMED
File without changes
{langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/entry_points.txt
RENAMED
File without changes
{langwatch_scenario-0.7.10.dist-info → langwatch_scenario-0.7.12.dist-info}/top_level.txt
RENAMED
File without changes