signalwire_agents-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. signalwire_agents/__init__.py +17 -0
  2. signalwire_agents/agent_server.py +336 -0
  3. signalwire_agents/core/__init__.py +20 -0
  4. signalwire_agents/core/agent_base.py +2449 -0
  5. signalwire_agents/core/function_result.py +104 -0
  6. signalwire_agents/core/pom_builder.py +195 -0
  7. signalwire_agents/core/security/__init__.py +0 -0
  8. signalwire_agents/core/security/session_manager.py +170 -0
  9. signalwire_agents/core/state/__init__.py +8 -0
  10. signalwire_agents/core/state/file_state_manager.py +210 -0
  11. signalwire_agents/core/state/state_manager.py +92 -0
  12. signalwire_agents/core/swaig_function.py +163 -0
  13. signalwire_agents/core/swml_builder.py +205 -0
  14. signalwire_agents/core/swml_handler.py +218 -0
  15. signalwire_agents/core/swml_renderer.py +359 -0
  16. signalwire_agents/core/swml_service.py +1009 -0
  17. signalwire_agents/prefabs/__init__.py +15 -0
  18. signalwire_agents/prefabs/concierge.py +276 -0
  19. signalwire_agents/prefabs/faq_bot.py +314 -0
  20. signalwire_agents/prefabs/info_gatherer.py +253 -0
  21. signalwire_agents/prefabs/survey.py +387 -0
  22. signalwire_agents/schema.json +5611 -0
  23. signalwire_agents/utils/__init__.py +0 -0
  24. signalwire_agents/utils/pom_utils.py +0 -0
  25. signalwire_agents/utils/schema_utils.py +348 -0
  26. signalwire_agents/utils/token_generators.py +0 -0
  27. signalwire_agents/utils/validators.py +0 -0
  28. signalwire_agents-0.1.0.data/data/schema.json +5611 -0
  29. signalwire_agents-0.1.0.dist-info/METADATA +154 -0
  30. signalwire_agents-0.1.0.dist-info/RECORD +32 -0
  31. signalwire_agents-0.1.0.dist-info/WHEEL +5 -0
  32. signalwire_agents-0.1.0.dist-info/top_level.txt +1 -0
signalwire_agents/prefabs/info_gatherer.py
@@ -0,0 +1,253 @@
+"""
+InfoGathererAgent - Prefab agent for collecting structured information from users
+"""
+
+from typing import List, Dict, Any, Optional, Union
+import json
+import os
+
+from signalwire_agents.core.agent_base import AgentBase
+from signalwire_agents.core.function_result import SwaigFunctionResult
+
+
+class InfoGathererAgent(AgentBase):
+    """
+    A prefab agent designed to collect specific fields of information from a user.
+
+    This agent will:
+    1. Ask for each requested field
+    2. Confirm the collected information
+    3. Return a structured JSON summary
+
+    Example:
+        agent = InfoGathererAgent(
+            fields=[
+                {"name": "full_name", "prompt": "What is your full name?"},
+                {"name": "reason", "prompt": "How can I help you today?"}
+            ],
+            confirmation_template="Thanks {full_name}, I'll help you with {reason}."
+        )
+    """
+
+    def __init__(
+        self,
+        fields: List[Dict[str, str]],
+        confirmation_template: Optional[str] = None,
+        summary_format: Optional[Dict[str, Any]] = None,
+        name: str = "info_gatherer",
+        route: str = "/info_gatherer",
+        schema_path: Optional[str] = None,
+        **kwargs
+    ):
+        """
+        Initialize an information gathering agent
+
+        Args:
+            fields: List of fields to collect, each with:
+                - name: Field name (for storage)
+                - prompt: Question to ask to collect the field
+                - validation: Optional regex or description of valid inputs
+            confirmation_template: Optional template string for confirming collected info.
+                Format with field names in {brackets}, e.g. "Thanks {name}!"
+            summary_format: Optional JSON template for the post_prompt summary
+            name: Agent name for the route
+            route: HTTP route for this agent
+            schema_path: Optional path to a custom schema
+            **kwargs: Additional arguments for AgentBase
+        """
+        # Find schema.json if not provided
+        if not schema_path:
+            current_dir = os.path.dirname(os.path.abspath(__file__))
+            parent_dir = os.path.dirname(os.path.dirname(current_dir))
+
+            schema_locations = [
+                os.path.join(current_dir, "schema.json"),
+                os.path.join(parent_dir, "schema.json")
+            ]
+
+            for loc in schema_locations:
+                if os.path.exists(loc):
+                    schema_path = loc
+                    break
+
+        # Initialize the base agent
+        super().__init__(
+            name=name,
+            route=route,
+            use_pom=True,
+            schema_path=schema_path,
+            **kwargs
+        )
+
+        self.fields = fields
+        self.confirmation_template = confirmation_template
+        self.summary_format = summary_format
+
+        # Build the prompt
+        self._build_info_gatherer_prompt()
+
+        # Set up the post-prompt template
+        self._setup_post_prompt()
+
+        # Configure additional agent settings
+        self._configure_agent_settings()
+
+    def _build_info_gatherer_prompt(self):
+        """Build the agent prompt for information gathering"""
+        # Create base instructions
+        instructions = [
+            "Ask for ONLY ONE piece of information at a time.",
+            "Confirm each answer before moving to the next question.",
+            "Do not ask for information not in your field list.",
+            "Be polite but direct with your questions."
+        ]
+
+        # Add field-specific instructions
+        for i, field in enumerate(self.fields, 1):
+            field_name = field.get("name")
+            field_prompt = field.get("prompt")
+            validation = field.get("validation", "")
+
+            field_text = f"{i}. {field_name}: \"{field_prompt}\""
+            if validation:
+                field_text += f" ({validation})"
+
+            instructions.append(field_text)
+
+        # Add confirmation instruction if a template is provided
+        if self.confirmation_template:
+            instructions.append(
+                f"After collecting all fields, confirm with: {self.confirmation_template}"
+            )
+
+        # Create the prompt sections directly using prompt_add_section
+        self.prompt_add_section(
+            "Personality",
+            body="You are a friendly and efficient virtual assistant."
+        )
+
+        self.prompt_add_section(
+            "Goal",
+            body="Your job is to collect specific information from the user."
+        )
+
+        self.prompt_add_section(
+            "Instructions",
+            bullets=instructions
+        )
+
+    def _setup_post_prompt(self):
+        """Set up the post-prompt for summary formatting"""
+        # Build a JSON template for the collected data
+        if not self.summary_format:
+            # Default format: a flat dictionary of field values
+            field_list = ", ".join([f'"{f["name"]}": "%{{{f["name"]}}}"' for f in self.fields])
+            post_prompt = f"""
+            Return a JSON object with all the information collected:
+            {{
+            {field_list}
+            }}
+            """
+        else:
+            # Format is provided as a template - just serialize it
+            post_prompt = f"""
+            Return the following JSON structure with the collected information:
+            {json.dumps(self.summary_format, indent=2)}
+            """
+
+        self.set_post_prompt(post_prompt)
+
+    def _configure_agent_settings(self):
+        """Configure additional agent settings"""
+        # Add field names as hints to help the AI recognize them
+        field_names = [field.get("name") for field in self.fields if "name" in field]
+        self.add_hints(field_names)
+
+        # Set AI behavior parameters for better information collection
+        self.set_params({
+            "wait_for_user": False,
+            "end_of_speech_timeout": 1200,  # Slightly longer for thoughtful responses
+            "ai_volume": 5,
+            "digit_timeout": 3000,  # 3 seconds for DTMF input timeout
+            "energy_level": 50  # Medium energy threshold
+        })
+
+        # Add global data with the fields structure
+        self.set_global_data({
+            "fields": [
+                {
+                    "name": field.get("name"),
+                    "prompt": field.get("prompt")
+                }
+                for field in self.fields
+            ]
+        })
+
+    @AgentBase.tool(
+        name="validate_field",
+        description="Validate if the provided value is valid for a specific field",
+        parameters={
+            "field_name": {
+                "type": "string",
+                "description": "The name of the field to validate"
+            },
+            "value": {
+                "type": "string",
+                "description": "The value provided by the user"
+            }
+        }
+    )
+    def validate_field(self, args, raw_data):
+        """
+        Validate if a provided value is valid for a specific field
+
+        This function checks if a user's input meets any validation criteria
+        specified for the field.
+        """
+        field_name = args.get("field_name", "")
+        value = args.get("value", "")
+
+        # Find the field by name
+        field = None
+        for f in self.fields:
+            if f.get("name") == field_name:
+                field = f
+                break
+
+        if not field:
+            return SwaigFunctionResult(f"Error: Field '{field_name}' not found in configuration.")
+
+        # Check if the field has validation requirements
+        validation = field.get("validation", "")
+
+        # Simple validation check (in a real implementation, you would perform
+        # more sophisticated validation based on the validation rules)
+        if validation and not value.strip():
+            return SwaigFunctionResult({
+                "response": f"The field '{field_name}' cannot be empty.",
+                "valid": False
+            })
+
+        # For this simple example, we'll consider any non-empty value valid
+        return SwaigFunctionResult({
+            "response": f"The value for '{field_name}' is valid.",
+            "valid": True
+        })
+
+    def on_summary(self, summary, raw_data=None):
+        """
+        Process the collected information summary
+
+        Args:
+            summary: Dictionary of collected field values
+            raw_data: The complete raw POST data from the request
+
+        Override this method in subclasses to use the collected data.
+        """
+        if summary:
+            if isinstance(summary, dict):
+                print(f"Information collected: {json.dumps(summary, indent=2)}")
+            else:
+                print(f"Information collected: {summary}")
+
+        # Subclasses should override this to save or process the collected data
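
For orientation, here is a minimal sketch of how the prefab above might be used. The constructor arguments mirror the class docstring, and the import path follows the package layout in the file list; the serve() call and its parameters are assumptions (this diff only shows that an agent_server.py module exists), not an API confirmed by the diff.

    from signalwire_agents.prefabs.info_gatherer import InfoGathererAgent

    agent = InfoGathererAgent(
        fields=[
            {"name": "full_name", "prompt": "What is your full name?"},
            {"name": "reason", "prompt": "How can I help you today?"}
        ],
        confirmation_template="Thanks {full_name}, I'll help you with {reason}."
    )

    # Hypothetical entry point; the real serving API lives in
    # signalwire_agents/agent_server.py and may differ.
    agent.serve(host="0.0.0.0", port=3000)

With these two fields, _setup_post_prompt() renders the default post-prompt asking the model to return {"full_name": "%{full_name}", "reason": "%{reason}"}.
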
signalwire_agents/prefabs/survey.py
@@ -0,0 +1,387 @@
+"""
+SurveyAgent - Prefab agent for conducting automated surveys
+"""
+
+from typing import List, Dict, Any, Optional, Union
+import json
+import os
+from datetime import datetime
+
+from signalwire_agents.core.agent_base import AgentBase
+from signalwire_agents.core.function_result import SwaigFunctionResult
+
+
+class SurveyAgent(AgentBase):
+    """
+    A prefab agent designed to conduct automated surveys with users.
+
+    This agent will:
+    1. Introduce the survey purpose and structure
+    2. Ask predefined questions in sequence
+    3. Collect and validate responses
+    4. Provide a summary of collected responses
+
+    Example:
+        agent = SurveyAgent(
+            survey_name="Customer Satisfaction Survey",
+            introduction="We'd like to get your feedback on your recent experience.",
+            questions=[
+                {
+                    "id": "satisfaction",
+                    "text": "How satisfied were you with our service?",
+                    "type": "rating",
+                    "scale": 5,
+                    "required": True
+                },
+                {
+                    "id": "comments",
+                    "text": "Do you have any additional comments?",
+                    "type": "open_ended",
+                    "required": False
+                }
+            ]
+        )
+    """
+
+    def __init__(
+        self,
+        survey_name: str,
+        questions: List[Dict[str, Any]],
+        introduction: Optional[str] = None,
+        conclusion: Optional[str] = None,
+        brand_name: Optional[str] = None,
+        max_retries: int = 2,
+        schema_path: Optional[str] = None,
+        **kwargs
+    ):
+        """
+        Initialize a survey agent
+
+        Args:
+            survey_name: Name of the survey
+            questions: List of question objects with the following keys:
+                - id: Unique identifier for the question
+                - text: The question text to ask
+                - type: Type of question (rating, multiple_choice, yes_no, open_ended)
+                - options: List of options for multiple_choice questions
+                - scale: For rating questions, the scale (e.g., 1-5)
+                - required: Whether the question requires an answer
+            introduction: Optional custom introduction message
+            conclusion: Optional custom conclusion message
+            brand_name: Optional brand or company name
+            max_retries: Maximum number of times to retry invalid answers
+            schema_path: Optional path to a custom schema
+            **kwargs: Additional arguments for AgentBase
+        """
+        # Find schema.json if not provided
+        if not schema_path:
+            current_dir = os.path.dirname(os.path.abspath(__file__))
+            parent_dir = os.path.dirname(os.path.dirname(current_dir))
+
+            schema_locations = [
+                os.path.join(current_dir, "schema.json"),
+                os.path.join(parent_dir, "schema.json")
+            ]
+
+            for loc in schema_locations:
+                if os.path.exists(loc):
+                    schema_path = loc
+                    break
+
+        # Initialize the base agent
+        super().__init__(
+            name="survey",
+            route="/survey",
+            use_pom=True,
+            schema_path=schema_path,
+            **kwargs
+        )
+
+        # Store configuration
+        self.survey_name = survey_name
+        self.questions = questions
+        self.brand_name = brand_name or "Our Company"
+        self.max_retries = max_retries
+
+        # Default messages if not provided
+        self.introduction = introduction or f"Welcome to our {survey_name}. We appreciate your participation."
+        self.conclusion = conclusion or "Thank you for completing our survey. Your feedback is valuable to us."
+
+        # Validate questions
+        self._validate_questions()
+
+        # Set up the agent's configuration
+        self._setup_survey_agent()
+
+    def _validate_questions(self):
+        """Validate the question format and structure"""
+        valid_types = ["rating", "multiple_choice", "yes_no", "open_ended"]
+
+        for i, question in enumerate(self.questions):
+            # Ensure required fields are present
+            if "id" not in question or not question["id"]:
+                question["id"] = f"question_{i+1}"
+
+            if "text" not in question or not question["text"]:
+                raise ValueError(f"Question {i+1} is missing the 'text' field")
+
+            if "type" not in question or question["type"] not in valid_types:
+                raise ValueError(f"Question {i+1} has an invalid type. Must be one of: {', '.join(valid_types)}")
+
+            # Set defaults for optional fields
+            if "required" not in question:
+                question["required"] = True
+
+            # Type-specific validation
+            if question["type"] == "multiple_choice" and ("options" not in question or not question["options"]):
+                raise ValueError(f"Multiple choice question '{question['id']}' must have options")
+
+            if question["type"] == "rating" and "scale" not in question:
+                question["scale"] = 5  # Default to 5-point scale
+
+    def _setup_survey_agent(self):
+        """Configure the survey agent with appropriate settings"""
+        # Basic personality and instructions
+        self.prompt_add_section("Personality",
+            body=f"You are a friendly and professional survey agent representing {self.brand_name}."
+        )
+
+        self.prompt_add_section("Goal",
+            body=f"Conduct the '{self.survey_name}' survey by asking questions and collecting responses."
+        )
+
+        # Build detailed instructions
+        instructions = [
+            "Guide the user through each survey question in sequence.",
+            "Ask only one question at a time and wait for a response.",
+            "For rating questions, explain the scale (e.g., 1-5, where 5 is best).",
+            "For multiple choice questions, list all the options.",
+            f"If a response is invalid, explain and retry up to {self.max_retries} times.",
+            "Be conversational but stay focused on collecting the survey data.",
+            "After all questions are answered, thank the user for their participation."
+        ]
+
+        self.prompt_add_section("Instructions", bullets=instructions)
+
+        # Add introduction section
+        self.prompt_add_section("Introduction",
+            body=f"Begin with this introduction: {self.introduction}"
+        )
+
+        # Questions section with all the survey questions
+        questions_subsections = []
+        for q in self.questions:
+            # Build a description based on question type
+            description = f"ID: {q['id']}\nType: {q['type']}\nRequired: {q['required']}"
+
+            if q["type"] == "rating":
+                description += f"\nScale: 1-{q['scale']}"
+
+            if q["type"] == "multiple_choice" and "options" in q:
+                options_list = ", ".join(q["options"])
+                description += f"\nOptions: {options_list}"
+
+            questions_subsections.append({
+                "title": q["text"],
+                "body": description
+            })
+
+        self.prompt_add_section("Survey Questions",
+            body="Ask these questions in order:",
+            subsections=questions_subsections
+        )
+
+        # Add conclusion section
+        self.prompt_add_section("Conclusion",
+            body=f"End with this conclusion: {self.conclusion}"
+        )
+
+        # Set up the post-prompt for summary
+        post_prompt = """
+        Return a JSON summary of the survey responses:
+        {
+            "survey_name": "SURVEY_NAME",
+            "responses": {
+                "QUESTION_ID_1": "RESPONSE_1",
+                "QUESTION_ID_2": "RESPONSE_2",
+                ...
+            },
+            "completion_status": "complete/incomplete",
+            "timestamp": "CURRENT_TIMESTAMP"
+        }
+        """
+        self.set_post_prompt(post_prompt)
+
+        # Configure hints to help the AI understand survey terminology
+        type_terms = []
+        for q in self.questions:
+            if q["type"] == "rating":
+                type_terms.extend([str(i) for i in range(1, q["scale"] + 1)])
+            elif q["type"] == "multiple_choice" and "options" in q:
+                type_terms.extend(q["options"])
+            elif q["type"] == "yes_no":
+                type_terms.extend(["yes", "no"])
+
+        self.add_hints([
+            self.survey_name,
+            self.brand_name,
+            *type_terms
+        ])
+
+        # Set AI behavior parameters
+        self.set_params({
+            "wait_for_user": False,
+            "end_of_speech_timeout": 1500,  # Longer timeout for thoughtful responses
+            "ai_volume": 5,
+            "static_greeting": self.introduction,
+            "static_greeting_no_barge": True
+        })
+
+        # Add global data available to the AI
+        self.set_global_data({
+            "survey_name": self.survey_name,
+            "brand_name": self.brand_name,
+            "questions": self.questions,
+            "max_retries": self.max_retries
+        })
+
+        # Configure native functions
+        self.set_native_functions(["check_time"])
+
+    @AgentBase.tool(
+        name="validate_response",
+        description="Validate if a response meets the requirements for a specific question",
+        parameters={
+            "question_id": {
+                "type": "string",
+                "description": "The ID of the question"
+            },
+            "response": {
+                "type": "string",
+                "description": "The user's response to validate"
+            }
+        }
+    )
+    def validate_response(self, args, raw_data):
+        """
+        Validate if a response meets the requirements for a specific question
+
+        This function checks if a user's response is valid for the specified question
+        based on the question type and constraints.
+        """
+        question_id = args.get("question_id", "")
+        response = args.get("response", "")
+
+        # Find the question by ID
+        question = None
+        for q in self.questions:
+            if q["id"] == question_id:
+                question = q
+                break
+
+        if not question:
+            return SwaigFunctionResult(f"Error: Question with ID '{question_id}' not found.")
+
+        # Validate based on question type
+        valid = True
+        message = f"Response to '{question_id}' is valid."
+
+        if question["type"] == "rating":
+            try:
+                rating = int(response.strip())
+                if rating < 1 or rating > question.get("scale", 5):
+                    valid = False
+                    message = f"Invalid rating. Please provide a number between 1 and {question.get('scale', 5)}."
+            except ValueError:
+                valid = False
+                message = f"Invalid rating. Please provide a number between 1 and {question.get('scale', 5)}."
+
+        elif question["type"] == "multiple_choice":
+            options = question.get("options", [])
+            if not any(response.lower().strip() == option.lower() for option in options):
+                valid = False
+                message = f"Invalid choice. Please select one of: {', '.join(options)}."
+
+        elif question["type"] == "yes_no":
+            response_lower = response.lower().strip()
+            if response_lower not in ["yes", "no", "y", "n"]:
+                valid = False
+                message = "Please answer with 'yes' or 'no'."
+
+        # For open-ended, any non-empty response is valid
+        elif question["type"] == "open_ended":
+            if not response.strip() and question.get("required", True):
+                valid = False
+                message = "A response is required for this question."
+
+        return SwaigFunctionResult({
+            "response": message,
+            "valid": valid,
+            "question_id": question_id
+        })
+
+    @AgentBase.tool(
+        name="log_response",
+        description="Log a validated response to a survey question",
+        parameters={
+            "question_id": {
+                "type": "string",
+                "description": "The ID of the question"
+            },
+            "response": {
+                "type": "string",
+                "description": "The user's validated response"
+            }
+        }
+    )
+    def log_response(self, args, raw_data):
+        """
+        Log a validated response to a survey question
+
+        This function would typically connect to a database or API to store the response.
+        In this example, it just acknowledges that the response was received.
+        """
+        question_id = args.get("question_id", "")
+        response = args.get("response", "")
+
+        # Find the question by ID for a more informative message
+        question_text = ""
+        for q in self.questions:
+            if q["id"] == question_id:
+                question_text = q["text"]
+                break
+
+        # In a real implementation, you would store this response in a database
+        # For this example, we just acknowledge it
+        message = f"Response to '{question_text}' has been recorded."
+
+        return SwaigFunctionResult({
+            "response": message,
+            "question_id": question_id,
+            "success": True
+        })
+
+    def on_summary(self, summary, raw_data=None):
+        """
+        Process the survey results summary
+
+        Args:
+            summary: Summary data containing survey responses
+            raw_data: The complete raw POST data from the request
+        """
+        if summary:
+            try:
+                # Log survey completion
+                if isinstance(summary, dict):
+                    print(f"Survey completed: {json.dumps(summary, indent=2)}")
+
+                    # Here you would typically:
+                    # 1. Store the responses in a database
+                    # 2. Trigger any follow-up actions
+                    # 3. Send notifications if needed
+
+                else:
+                    print(f"Survey summary (unstructured): {summary}")
+
+            except Exception as e:
+                print(f"Error processing survey summary: {str(e)}")