langwatch-scenario 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scenario/testing_agent.py DELETED
@@ -1,262 +0,0 @@
- """
- TestingAgent module: defines the testing agent that interacts with the agent under test.
- """
-
- import json
- import logging
- import re
- from typing import TYPE_CHECKING, Dict, List, Any, Optional, Union, cast
- from pydantic import BaseModel
-
- from litellm import Choices, completion
- from litellm.files.main import ModelResponse
-
- from scenario.cache import scenario_cache
- from scenario.utils import safe_attr_or_key
-
- from .result import ScenarioResult
-
- if TYPE_CHECKING:
-     from scenario.scenario import Scenario
-
-
- logger = logging.getLogger("scenario")
-
-
- class TestingAgent(BaseModel):
-     """
-     The Testing Agent that interacts with the agent under test.
-
-     This agent is responsible for:
-     1. Generating messages to send to the agent based on the scenario
-     2. Evaluating the responses from the agent against the success/failure criteria
-     3. Determining when to end the test and return a result
-     """
-
-     model: str
-     api_key: Optional[str] = None
-     temperature: float = 0.0
-     max_tokens: Optional[int] = None
-
-     # To prevent pytest from thinking this is actually a test class
-     __test__ = False
-
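For orientation, this is roughly how the class was constructed in the 0.2.x API; the field values below are assumptions for illustration, not taken from this diff, and any litellm-compatible model string would do:

```python
# Sketch only: field values are assumptions, not taken from this diff.
from scenario.testing_agent import TestingAgent

agent = TestingAgent(
    model="openai/gpt-4o-mini",  # any litellm-compatible model string
    temperature=0.0,             # keep test runs deterministic
)
```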
-     @scenario_cache(ignore=["scenario"])
-     def generate_next_message(
-         self,
-         scenario: "Scenario",
-         conversation: List[Dict[str, Any]],
-         first_message: bool = False,
-         last_message: bool = False,
-     ) -> Union[str, ScenarioResult]:
-         """
-         Generate the next message in the conversation based on history OR
-         return a ScenarioResult if the test should conclude.
-
-         Returns either:
-         - A string message to send to the agent (if conversation should continue)
-         - A ScenarioResult (if the test should conclude)
-         """
-
-         # Hoisted out of the f-string below: a backslash inside an f-string
-         # expression is a SyntaxError on Python versions before 3.12.
-         criteria_str = "\n".join(
-             f"{idx + 1}. {criterion}" for idx, criterion in enumerate(scenario.criteria)
-         )
-
-         messages = [
-             {
-                 "role": "system",
-                 "content": f"""
- <role>
- You are pretending to be a user; you are testing an AI Agent (shown as the user role) based on a scenario.
- Approach this naturally, as a human user would: very short inputs, few words, all lowercase, imperative, no periods, like when they google or talk to chatgpt.
- </role>
-
- <goal>
- Your goal (assistant) is to interact with the Agent Under Test (user) as if you were a human user, to see if it can complete the scenario successfully.
- </goal>
-
- <scenario>
- {scenario.description}
- </scenario>
-
- <criteria>
- {criteria_str}
- </criteria>
-
- <execution_flow>
- 1. Generate the first message to start the scenario
- 2. After the Agent Under Test (user) responds, generate the next message to send to the Agent Under Test; keep repeating step 2 until the criteria are met
- 3. If the test should end, use the finish_test tool to determine if all the criteria have been met
- </execution_flow>
-
- <rules>
- 1. The test should end immediately if a criterion mentioning something the agent should NOT do is met
- 2. The test should continue until all scenario goals have been met, to try going through all the criteria
- 3. DO NOT make any judgment calls that are not explicitly listed in the success or failure criteria; withhold judgment if necessary
- 4. DO NOT carry out any requests yourself, YOU ARE NOT the assistant today; wait for the user to do it
- </rules>
- """,
-             },
-             {"role": "assistant", "content": "Hello, how can I help you today?"},
-             *conversation,
-         ]
-
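The `<criteria>` block is simply a numbered rendering of `scenario.criteria`. For a hypothetical two-criterion scenario (criteria invented for illustration) it expands like this:

```python
# Hypothetical criteria, for illustration only.
criteria = [
    "Agent should ask for the user's location",
    "Agent should NOT invent restaurant names",
]
print("\n".join(f"{idx + 1}. {c}" for idx, c in enumerate(criteria)))
# 1. Agent should ask for the user's location
# 2. Agent should NOT invent restaurant names
```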
-         if last_message:
-             messages.append(
-                 {
-                     "role": "user",
-                     "content": """
- System:
-
- <finish_test>
- This is the last message; the conversation has reached the maximum number of turns. Give your final verdict;
- if you don't have enough information to make a verdict, say inconclusive with max turns reached.
- </finish_test>
- """,
-                 }
-             )
-
-         # User/assistant role reversal.
-         # LLM models are biased to always be the assistant, not the user, so we need
-         # this reversal; otherwise models like GPT-4.5 get very confused, and
-         # Claude 3.7 even starts throwing exceptions.
-         for message in messages:
-             # Can't reverse tool calls
-             if not safe_attr_or_key(message, "content") or safe_attr_or_key(
-                 message, "tool_calls"
-             ):
-                 continue
-
-             if isinstance(message, dict):
-                 if message["role"] == "user":
-                     message["role"] = "assistant"
-                 elif message["role"] == "assistant":
-                     message["role"] = "user"
-             else:
-                 if getattr(message, "role", None) == "user":
-                     message.role = "assistant"
-                 elif getattr(message, "role", None) == "assistant":
-                     message.role = "user"
-
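The swap itself is mechanical; a toy example of the effect (messages invented for illustration):

```python
# Toy conversation, invented for illustration. Before the flip, the testing
# agent's turns are "user" and the agent under test's turns are "assistant",
# as in a normal chat log; after the flip, the LLM plays the human side.
conversation = [
    {"role": "user", "content": "book a table for two"},           # testing agent
    {"role": "assistant", "content": "What time would you like?"}  # agent under test
]
for m in conversation:
    m["role"] = "assistant" if m["role"] == "user" else "user"
print([m["role"] for m in conversation])  # ['assistant', 'user']
```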
-         # Define the tool
-         criteria_names = [
-             re.sub(
-                 r"[^a-zA-Z0-9]",
-                 "_",
-                 criterion.replace(" ", "_").replace("'", "").lower(),
-             )[:70]
-             for criterion in scenario.criteria
-         ]
-         tools = [
-             {
-                 "type": "function",
-                 "function": {
-                     "name": "finish_test",
-                     "description": "Complete the test with a final verdict",
-                     "strict": True,
-                     "parameters": {
-                         "type": "object",
-                         "properties": {
-                             "criteria": {
-                                 "type": "object",
-                                 "properties": {
-                                     criteria_names[idx]: {
-                                         "enum": [True, False, "inconclusive"],
-                                         "description": criterion,
-                                     }
-                                     for idx, criterion in enumerate(scenario.criteria)
-                                 },
-                                 "required": criteria_names,
-                                 "additionalProperties": False,
-                                 "description": "Strict verdict for each criterion",
-                             },
-                             "reasoning": {
-                                 "type": "string",
-                                 "description": "Explanation of what the final verdict should be",
-                             },
-                             "verdict": {
-                                 "type": "string",
-                                 "enum": ["success", "failure", "inconclusive"],
-                                 "description": "The final verdict of the test",
-                             },
-                         },
-                         "required": ["criteria", "reasoning", "verdict"],
-                         "additionalProperties": False,
-                     },
-                 },
-             }
-         ]
-
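The `criteria_names` list turns each free-text criterion into a JSON-schema-safe property name. A quick check of the sanitization (criterion invented for illustration):

```python
# Illustrative only: how one criterion is sanitized into a property name.
import re

criterion = "Agent should NOT invent restaurant names"
name = re.sub(
    r"[^a-zA-Z0-9]", "_", criterion.replace(" ", "_").replace("'", "").lower()
)[:70]
print(name)  # agent_should_not_invent_restaurant_names
```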
-         response = cast(
-             ModelResponse,
-             completion(
-                 model=self.model,
-                 messages=messages,
-                 temperature=self.temperature,
-                 max_tokens=self.max_tokens,
-                 tools=tools if not first_message else None,
-                 tool_choice="required" if last_message else None,
-             ),
-         )
-
-         # Extract the content from the response
-         if hasattr(response, "choices") and len(response.choices) > 0:
-             message = cast(Choices, response.choices[0]).message
-
-             # Check if the LLM chose to use the tool
-             if message.tool_calls:
-                 tool_call = message.tool_calls[0]
-                 if tool_call.function.name == "finish_test":
-                     # Parse the tool call arguments
-                     try:
-                         args = json.loads(tool_call.function.arguments)
-                         verdict = args.get("verdict", "inconclusive")
-                         reasoning = args.get("reasoning", "No reasoning provided")
-                         criteria = args.get("criteria", {})
-
-                         passed_criteria = [
-                             scenario.criteria[idx]
-                             for idx, criterion in enumerate(criteria.values())
-                             if criterion is True
-                         ]
-                         failed_criteria = [
-                             scenario.criteria[idx]
-                             for idx, criterion in enumerate(criteria.values())
-                             if criterion is False
-                         ]
-
-                         # Return the appropriate ScenarioResult based on the verdict
-                         if verdict == "success":
-                             return ScenarioResult.success_result(
-                                 conversation=conversation,
-                                 reasoning=reasoning,
-                                 passed_criteria=passed_criteria,
-                             )
-                         elif verdict == "failure":
-                             return ScenarioResult.failure_result(
-                                 conversation=conversation,
-                                 reasoning=reasoning,
-                                 passed_criteria=passed_criteria,
-                                 failed_criteria=failed_criteria,
-                             )
-                         else:  # inconclusive
-                             return ScenarioResult(
-                                 success=False,
-                                 conversation=conversation,
-                                 reasoning=reasoning,
-                                 passed_criteria=passed_criteria,
-                                 failed_criteria=failed_criteria,
-                             )
-                     except json.JSONDecodeError:
-                         logger.error("Failed to parse tool call arguments")
-
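For reference, a well-formed `finish_test` payload under the schema above might look like the following (criterion names and verdict invented for illustration). Because each criterion can be `True`, `False`, or `"inconclusive"`, the `is True` / `is False` checks keep inconclusive criteria out of both lists:

```python
# Invented example of finish_test arguments matching the schema above.
args = {
    "criteria": {
        "agent_should_ask_for_the_users_location": True,
        "agent_should_not_invent_restaurant_names": "inconclusive",
    },
    "reasoning": "Location was requested; no restaurant was ever named.",
    "verdict": "inconclusive",
}
passed = [k for k, v in args["criteria"].items() if v is True]   # first key only
failed = [k for k, v in args["criteria"].items() if v is False]  # empty
```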
-             # If there is no tool call, use the message content as the next message
-             message_content = message.content
-             if message_content is None:
-                 # If the tool call was invalid, raise an error
-                 if message.tool_calls:
-                     raise Exception(
-                         f"Invalid tool call from testing agent: {repr(message.tool_calls)}"
-                     )
-                 raise Exception(f"No response from LLM: {repr(response)}")
-
-             return message_content
-         else:
-             raise Exception(
-                 f"Unexpected response format from LLM: {repr(response)}"
-             )
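To close the picture, here is a rough sketch of the driver loop this method was designed for; `my_scenario`, `testing_agent`, `call_agent_under_test`, and the turn limit are placeholders for illustration, not APIs taken from this diff:

```python
# Hypothetical driver loop; testing_agent, my_scenario, and
# call_agent_under_test are placeholders defined elsewhere.
from scenario.result import ScenarioResult

conversation = []
max_turns = 10  # arbitrary cap for the sketch
for turn in range(max_turns):
    next_msg = testing_agent.generate_next_message(
        my_scenario,
        conversation,
        first_message=(turn == 0),
        last_message=(turn == max_turns - 1),
    )
    if isinstance(next_msg, ScenarioResult):  # verdict reached: stop
        result = next_msg
        break
    conversation.append({"role": "user", "content": next_msg})
    reply = call_agent_under_test(next_msg)  # placeholder call
    conversation.append({"role": "assistant", "content": reply})
```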