signalwire-agents 0.1.1__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- signalwire_agents/__init__.py +1 -1
- signalwire_agents/agent_server.py +1 -1
- signalwire_agents/core/__init__.py +29 -0
- signalwire_agents/core/agent_base.py +2541 -0
- signalwire_agents/core/function_result.py +123 -0
- signalwire_agents/core/pom_builder.py +204 -0
- signalwire_agents/core/security/__init__.py +9 -0
- signalwire_agents/core/security/session_manager.py +179 -0
- signalwire_agents/core/state/__init__.py +17 -0
- signalwire_agents/core/state/file_state_manager.py +219 -0
- signalwire_agents/core/state/state_manager.py +101 -0
- signalwire_agents/core/swaig_function.py +172 -0
- signalwire_agents/core/swml_builder.py +214 -0
- signalwire_agents/core/swml_handler.py +227 -0
- signalwire_agents/core/swml_renderer.py +368 -0
- signalwire_agents/core/swml_service.py +1057 -0
- signalwire_agents/prefabs/__init__.py +26 -0
- signalwire_agents/prefabs/concierge.py +267 -0
- signalwire_agents/prefabs/faq_bot.py +305 -0
- signalwire_agents/prefabs/info_gatherer.py +263 -0
- signalwire_agents/prefabs/receptionist.py +295 -0
- signalwire_agents/prefabs/survey.py +378 -0
- signalwire_agents/utils/__init__.py +9 -0
- signalwire_agents/utils/pom_utils.py +9 -0
- signalwire_agents/utils/schema_utils.py +357 -0
- signalwire_agents/utils/token_generators.py +9 -0
- signalwire_agents/utils/validators.py +9 -0
- {signalwire_agents-0.1.1.dist-info → signalwire_agents-0.1.5.dist-info}/METADATA +1 -1
- signalwire_agents-0.1.5.dist-info/RECORD +34 -0
- signalwire_agents-0.1.1.dist-info/RECORD +0 -9
- {signalwire_agents-0.1.1.data → signalwire_agents-0.1.5.data}/data/schema.json +0 -0
- {signalwire_agents-0.1.1.dist-info → signalwire_agents-0.1.5.dist-info}/WHEEL +0 -0
- {signalwire_agents-0.1.1.dist-info → signalwire_agents-0.1.5.dist-info}/licenses/LICENSE +0 -0
- {signalwire_agents-0.1.1.dist-info → signalwire_agents-0.1.5.dist-info}/top_level.txt +0 -0
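Most of the new code lands in the `core` framework modules and the new `prefabs` package; the hunk below covers `signalwire_agents/prefabs/survey.py`. For orientation, a minimal usage sketch of the new prefab, assembled from the class docstring in this diff, might look like the following (assuming `prefabs/__init__.py` re-exports `SurveyAgent` and that a `serve()` entry point is inherited from `AgentBase`; neither is confirmed by this hunk):

    from signalwire_agents.prefabs import SurveyAgent

    # Question dicts follow the schema documented in __init__ below.
    agent = SurveyAgent(
        survey_name="Customer Satisfaction Survey",
        questions=[
            {"id": "satisfaction", "text": "How satisfied were you with our service?",
             "type": "rating", "scale": 5, "required": True},
            {"id": "comments", "text": "Do you have any additional comments?",
             "type": "open_ended", "required": False},
        ],
        brand_name="Acme",
    )

    # Assumed AgentBase entry point; adjust to the SDK's actual server API.
    agent.serve(host="0.0.0.0", port=3000)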
@@ -0,0 +1,378 @@
+"""
+Copyright (c) 2025 SignalWire
+
+This file is part of the SignalWire AI Agents SDK.
+
+Licensed under the MIT License.
+See LICENSE file in the project root for full license information.
+"""
+
+"""
+SurveyAgent - Prefab agent for conducting automated surveys
+"""
+
+from typing import List, Dict, Any, Optional, Union
+import json
+import os
+from datetime import datetime
+
+from signalwire_agents.core.agent_base import AgentBase
+from signalwire_agents.core.function_result import SwaigFunctionResult
+
+
+class SurveyAgent(AgentBase):
+    """
+    A prefab agent designed to conduct automated surveys with users.
+
+    This agent will:
+    1. Introduce the survey purpose and structure
+    2. Ask predefined questions in sequence
+    3. Collect and validate responses
+    4. Provide a summary of collected responses
+
+    Example:
+        agent = SurveyAgent(
+            survey_name="Customer Satisfaction Survey",
+            introduction="We'd like to get your feedback on your recent experience.",
+            questions=[
+                {
+                    "id": "satisfaction",
+                    "text": "How satisfied were you with our service?",
+                    "type": "rating",
+                    "scale": 5,
+                    "required": True
+                },
+                {
+                    "id": "comments",
+                    "text": "Do you have any additional comments?",
+                    "type": "open_ended",
+                    "required": False
+                }
+            ]
+        )
+    """
+
+    def __init__(
+        self,
+        survey_name: str,
+        questions: List[Dict[str, Any]],
+        introduction: Optional[str] = None,
+        conclusion: Optional[str] = None,
+        brand_name: Optional[str] = None,
+        max_retries: int = 2,
+        **kwargs
+    ):
+        """
+        Initialize a survey agent
+
+        Args:
+            survey_name: Name of the survey
+            questions: List of question objects with the following keys:
+                - id: Unique identifier for the question
+                - text: The question text to ask
+                - type: Type of question (rating, multiple_choice, yes_no, open_ended)
+                - options: List of options for multiple_choice questions
+                - scale: For rating questions, the scale (e.g., 1-5)
+                - required: Whether the question requires an answer
+            introduction: Optional custom introduction message
+            conclusion: Optional custom conclusion message
+            brand_name: Optional brand or company name
+            max_retries: Maximum number of times to retry invalid answers
+            **kwargs: Additional arguments for AgentBase
+        """
+        # Initialize the base agent
+        super().__init__(
+            name="survey",
+            route="/survey",
+            use_pom=True,
+            **kwargs
+        )
+
+        # Store configuration
+        self.survey_name = survey_name
+        self.questions = questions
+        self.brand_name = brand_name or "Our Company"
+        self.max_retries = max_retries
+
+        # Default messages if not provided
+        self.introduction = introduction or f"Welcome to our {survey_name}. We appreciate your participation."
+        self.conclusion = conclusion or "Thank you for completing our survey. Your feedback is valuable to us."
+
+        # Validate questions
+        self._validate_questions()
+
+        # Set up the agent's configuration
+        self._setup_survey_agent()
+
+    def _validate_questions(self):
+        """Validate the question format and structure"""
+        valid_types = ["rating", "multiple_choice", "yes_no", "open_ended"]
+
+        for i, question in enumerate(self.questions):
+            # Ensure required fields are present
+            if "id" not in question or not question["id"]:
+                question["id"] = f"question_{i+1}"
+
+            if "text" not in question or not question["text"]:
+                raise ValueError(f"Question {i+1} is missing the 'text' field")
+
+            if "type" not in question or question["type"] not in valid_types:
+                raise ValueError(f"Question {i+1} has an invalid type. Must be one of: {', '.join(valid_types)}")
+
+            # Set defaults for optional fields
+            if "required" not in question:
+                question["required"] = True
+
+            # Type-specific validation
+            if question["type"] == "multiple_choice" and ("options" not in question or not question["options"]):
+                raise ValueError(f"Multiple choice question '{question['id']}' must have options")
+
+            if question["type"] == "rating" and "scale" not in question:
+                question["scale"] = 5  # Default to 5-point scale
+
+    def _setup_survey_agent(self):
+        """Configure the survey agent with appropriate settings"""
+        # Basic personality and instructions
+        self.prompt_add_section("Personality",
+            body=f"You are a friendly and professional survey agent representing {self.brand_name}."
+        )
+
+        self.prompt_add_section("Goal",
+            body=f"Conduct the '{self.survey_name}' survey by asking questions and collecting responses."
+        )
+
+        # Build detailed instructions
+        instructions = [
+            "Guide the user through each survey question in sequence.",
+            "Ask only one question at a time and wait for a response.",
+            "For rating questions, explain the scale (e.g., 1-5, where 5 is best).",
+            "For multiple choice questions, list all the options.",
+            f"If a response is invalid, explain and retry up to {self.max_retries} times.",
+            "Be conversational but stay focused on collecting the survey data.",
+            "After all questions are answered, thank the user for their participation."
+        ]
+
+        self.prompt_add_section("Instructions", bullets=instructions)
+
+        # Add introduction section
+        self.prompt_add_section("Introduction",
+            body=f"Begin with this introduction: {self.introduction}"
+        )
+
+        # Questions section with all the survey questions
+        questions_subsections = []
+        for q in self.questions:
+            # Build a description based on question type
+            description = f"ID: {q['id']}\nType: {q['type']}\nRequired: {q['required']}"
+
+            if q["type"] == "rating":
+                description += f"\nScale: 1-{q['scale']}"
+
+            if q["type"] == "multiple_choice" and "options" in q:
+                options_list = ", ".join(q["options"])
+                description += f"\nOptions: {options_list}"
+
+            questions_subsections.append({
+                "title": q["text"],
+                "body": description
+            })
+
+        self.prompt_add_section("Survey Questions",
+            body="Ask these questions in order:",
+            subsections=questions_subsections
+        )
+
+        # Add conclusion section
+        self.prompt_add_section("Conclusion",
+            body=f"End with this conclusion: {self.conclusion}"
+        )
+
+        # Set up the post-prompt for summary
+        post_prompt = """
+        Return a JSON summary of the survey responses:
+        {
+            "survey_name": "SURVEY_NAME",
+            "responses": {
+                "QUESTION_ID_1": "RESPONSE_1",
+                "QUESTION_ID_2": "RESPONSE_2",
+                ...
+            },
+            "completion_status": "complete/incomplete",
+            "timestamp": "CURRENT_TIMESTAMP"
+        }
+        """
+        self.set_post_prompt(post_prompt)
+
+        # Configure hints to help the AI understand survey terminology
+        type_terms = []
+        for q in self.questions:
+            if q["type"] == "rating":
+                type_terms.extend([str(i) for i in range(1, q["scale"]+1)])
+            elif q["type"] == "multiple_choice" and "options" in q:
+                type_terms.extend(q["options"])
+            elif q["type"] == "yes_no":
+                type_terms.extend(["yes", "no"])
+
+        self.add_hints([
+            self.survey_name,
+            self.brand_name,
+            *type_terms
+        ])
+
+        # Set AI behavior parameters
+        self.set_params({
+            "wait_for_user": False,
+            "end_of_speech_timeout": 1500,  # Longer timeout for thoughtful responses
+            "ai_volume": 5,
+            "static_greeting": self.introduction,
+            "static_greeting_no_barge": True
+        })
+
+        # Add global data available to the AI
+        self.set_global_data({
+            "survey_name": self.survey_name,
+            "brand_name": self.brand_name,
+            "questions": self.questions,
+            "max_retries": self.max_retries
+        })
+
+        # Configure native functions
+        self.set_native_functions(["check_time"])
+
+    @AgentBase.tool(
+        name="validate_response",
+        description="Validate if a response meets the requirements for a specific question",
+        parameters={
+            "question_id": {
+                "type": "string",
+                "description": "The ID of the question"
+            },
+            "response": {
+                "type": "string",
+                "description": "The user's response to validate"
+            }
+        }
+    )
+    def validate_response(self, args, raw_data):
+        """
+        Validate if a response meets the requirements for a specific question
+
+        This function checks if a user's response is valid for the specified question
+        based on the question type and constraints.
+        """
+        question_id = args.get("question_id", "")
+        response = args.get("response", "")
+
+        # Find the question by ID
+        question = None
+        for q in self.questions:
+            if q["id"] == question_id:
+                question = q
+                break
+
+        if not question:
+            return SwaigFunctionResult(f"Error: Question with ID '{question_id}' not found.")
+
+        # Validate based on question type
+        valid = True
+        message = f"Response to '{question_id}' is valid."
+
+        if question["type"] == "rating":
+            try:
+                rating = int(response.strip())
+                if rating < 1 or rating > question.get("scale", 5):
+                    valid = False
+                    message = f"Invalid rating. Please provide a number between 1 and {question.get('scale', 5)}."
+            except ValueError:
+                valid = False
+                message = f"Invalid rating. Please provide a number between 1 and {question.get('scale', 5)}."
+
+        elif question["type"] == "multiple_choice":
+            options = question.get("options", [])
+            if not any(response.lower().strip() == option.lower() for option in options):
+                valid = False
+                message = f"Invalid choice. Please select one of: {', '.join(options)}."
+
+        elif question["type"] == "yes_no":
+            response_lower = response.lower().strip()
+            if response_lower not in ["yes", "no", "y", "n"]:
+                valid = False
+                message = "Please answer with 'yes' or 'no'."
+
+        # For open-ended, any non-empty response is valid
+        elif question["type"] == "open_ended":
+            if not response.strip() and question.get("required", True):
+                valid = False
+                message = "A response is required for this question."
+
+        return SwaigFunctionResult({
+            "response": message,
+            "valid": valid,
+            "question_id": question_id
+        })
+
+    @AgentBase.tool(
+        name="log_response",
+        description="Log a validated response to a survey question",
+        parameters={
+            "question_id": {
+                "type": "string",
+                "description": "The ID of the question"
+            },
+            "response": {
+                "type": "string",
+                "description": "The user's validated response"
+            }
+        }
+    )
+    def log_response(self, args, raw_data):
+        """
+        Log a validated response to a survey question
+
+        This function would typically connect to a database or API to store the response.
+        In this example, it just acknowledges that the response was received.
+        """
+        question_id = args.get("question_id", "")
+        response = args.get("response", "")
+
+        # Find the question by ID for a more informative message
+        question_text = ""
+        for q in self.questions:
+            if q["id"] == question_id:
+                question_text = q["text"]
+                break
+
+        # In a real implementation, you would store this response in a database
+        # For this example, we just acknowledge it
+        message = f"Response to '{question_text}' has been recorded."
+
+        return SwaigFunctionResult({
+            "response": message,
+            "question_id": question_id,
+            "success": True
+        })
+
+    def on_summary(self, summary, raw_data=None):
+        """
+        Process the survey results summary
+
+        Args:
+            summary: Summary data containing survey responses
+            raw_data: The complete raw POST data from the request
+        """
+        if summary:
+            try:
+                # Log survey completion
+                if isinstance(summary, dict):
+                    print(f"Survey completed: {json.dumps(summary, indent=2)}")
+
+                    # Here you would typically:
+                    # 1. Store the responses in a database
+                    # 2. Trigger any follow-up actions
+                    # 3. Send notifications if needed
+
+                else:
+                    print(f"Survey summary (unstructured): {summary}")
+
+            except Exception as e:
+                print(f"Error processing survey summary: {str(e)}")
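The stock `on_summary` above only prints. To persist results instead, a subclass can override it; here is a minimal sketch, assuming the post-prompt yields a dict shaped like its JSON template (the exact envelope depends on how `AgentBase` parses the post-prompt result):

    from signalwire_agents.prefabs import SurveyAgent

    class PersistingSurveyAgent(SurveyAgent):
        def on_summary(self, summary, raw_data=None):
            # Expected shape, per the post-prompt template in _setup_survey_agent:
            # {"survey_name": "...", "responses": {"satisfaction": "5", ...},
            #  "completion_status": "complete", "timestamp": "..."}
            if not isinstance(summary, dict):
                return
            for question_id, answer in summary.get("responses", {}).items():
                # Placeholder for a real datastore write.
                print(f"{question_id}: {answer}")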