signalwire-agents 0.1.46__py3-none-any.whl → 0.1.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,7 +18,7 @@ A package for building AI agents using SignalWire's AI and SWML capabilities.
18
18
  from .core.logging_config import configure_logging
19
19
  configure_logging()
20
20
 
21
- __version__ = "0.1.46"
21
+ __version__ = "0.1.47"
22
22
 
23
23
  # Import core classes for easier access
24
24
  from .core.agent_base import AgentBase
@@ -66,6 +66,11 @@ Examples:
66
66
  sw-search ./docs \\
67
67
  --chunking-strategy qa
68
68
 
69
+
70
+ # JSON-based chunking (pre-chunked content)
71
+ sw-search ./api_chunks.json \\
72
+ --chunking-strategy json \\
73
+ --file-types json
69
74
  # Full configuration example
70
75
  sw-search ./docs ./examples README.md \\
71
76
  --output ./knowledge.swsearch \\
@@ -141,7 +146,7 @@ Examples:
141
146
 
142
147
  parser.add_argument(
143
148
  '--chunking-strategy',
144
- choices=['sentence', 'sliding', 'paragraph', 'page', 'semantic', 'topic', 'qa'],
149
+ choices=['sentence', 'sliding', 'paragraph', 'page', 'semantic', 'topic', 'qa', 'json'],
145
150
  default='sentence',
146
151
  help='Chunking strategy to use (default: sentence)'
147
152
  )
@@ -250,20 +250,9 @@ class AgentBase(
250
250
  self._params = {}
251
251
  self._global_data = {}
252
252
  self._function_includes = []
253
- # Initialize with default LLM params
254
- self._prompt_llm_params = {
255
- 'temperature': 0.3,
256
- 'top_p': 1.0,
257
- 'barge_confidence': 0.0,
258
- 'presence_penalty': 0.1,
259
- 'frequency_penalty': 0.1
260
- }
261
- self._post_prompt_llm_params = {
262
- 'temperature': 0.0,
263
- 'top_p': 1.0,
264
- 'presence_penalty': 0.0,
265
- 'frequency_penalty': 0.0
266
- }
253
+ # Initialize LLM params as empty - only send if explicitly set
254
+ self._prompt_llm_params = {}
255
+ self._post_prompt_llm_params = {}
267
256
 
268
257
  # Dynamic configuration callback
269
258
  self._dynamic_config_callback = None
@@ -692,7 +681,7 @@ class AgentBase(
692
681
  "parameters": func._ensure_parameter_structure()
693
682
  }
694
683
 
695
- # Add wait_file if present (SignalWire SWML expects wait_file, not fillers)
684
+ # Add wait_file if present (audio/video file URL)
696
685
  if hasattr(func, 'wait_file') and func.wait_file:
697
686
  wait_file_url = func.wait_file
698
687
  # If wait_file is a relative URL, convert it to absolute using agent's base URL
@@ -704,9 +693,10 @@ class AgentBase(
704
693
  wait_file_url = '/' + wait_file_url
705
694
  wait_file_url = f"{base_url}{wait_file_url}"
706
695
  function_entry["wait_file"] = wait_file_url
707
- elif func.fillers:
708
- # Backward compatibility: use fillers as wait_file if wait_file not specified
709
- function_entry["wait_file"] = func.fillers
696
+
697
+ # Add fillers if present (text phrases to say while processing)
698
+ if hasattr(func, 'fillers') and func.fillers:
699
+ function_entry["fillers"] = func.fillers
710
700
 
711
701
  # Add wait_file_loops if present
712
702
  if hasattr(func, 'wait_file_loops') and func.wait_file_loops is not None:
@@ -833,27 +823,29 @@ class AgentBase(
833
823
 
834
824
  # Always add LLM parameters to prompt
835
825
  if "prompt" in ai_config:
836
- # Update existing prompt with LLM params
837
- if isinstance(ai_config["prompt"], dict):
838
- ai_config["prompt"].update(agent_to_use._prompt_llm_params)
839
- elif isinstance(ai_config["prompt"], str):
840
- # Convert string prompt to dict format
841
- ai_config["prompt"] = {
842
- "text": ai_config["prompt"],
843
- **agent_to_use._prompt_llm_params
844
- }
826
+ # Only add LLM params if explicitly set
827
+ if agent_to_use._prompt_llm_params:
828
+ if isinstance(ai_config["prompt"], dict):
829
+ ai_config["prompt"].update(agent_to_use._prompt_llm_params)
830
+ elif isinstance(ai_config["prompt"], str):
831
+ # Convert string prompt to dict format
832
+ ai_config["prompt"] = {
833
+ "text": ai_config["prompt"],
834
+ **agent_to_use._prompt_llm_params
835
+ }
845
836
 
846
- # Always add LLM parameters to post_prompt if post_prompt exists
837
+ # Only add LLM parameters to post_prompt if explicitly set
847
838
  if post_prompt and "post_prompt" in ai_config:
848
- # Update existing post_prompt with LLM params
849
- if isinstance(ai_config["post_prompt"], dict):
850
- ai_config["post_prompt"].update(agent_to_use._post_prompt_llm_params)
851
- elif isinstance(ai_config["post_prompt"], str):
852
- # Convert string post_prompt to dict format
853
- ai_config["post_prompt"] = {
854
- "text": ai_config["post_prompt"],
855
- **agent_to_use._post_prompt_llm_params
856
- }
839
+ # Only add LLM params if explicitly set
840
+ if agent_to_use._post_prompt_llm_params:
841
+ if isinstance(ai_config["post_prompt"], dict):
842
+ ai_config["post_prompt"].update(agent_to_use._post_prompt_llm_params)
843
+ elif isinstance(ai_config["post_prompt"], str):
844
+ # Convert string post_prompt to dict format
845
+ ai_config["post_prompt"] = {
846
+ "text": ai_config["post_prompt"],
847
+ **agent_to_use._post_prompt_llm_params
848
+ }
857
849
 
858
850
  except ValueError as e:
859
851
  if not agent_to_use._suppress_logs:
@@ -372,28 +372,22 @@ class AIConfigMixin:
372
372
  self._function_includes = valid_includes
373
373
  return self
374
374
 
375
- def set_prompt_llm_params(
376
- self,
377
- temperature: Optional[float] = None,
378
- top_p: Optional[float] = None,
379
- barge_confidence: Optional[float] = None,
380
- presence_penalty: Optional[float] = None,
381
- frequency_penalty: Optional[float] = None
382
- ) -> 'AgentBase':
375
+ def set_prompt_llm_params(self, **params) -> 'AgentBase':
383
376
  """
384
377
  Set LLM parameters for the main prompt.
385
378
 
386
- Args:
387
- temperature: Randomness setting (0.0-1.5). Lower values make output more deterministic.
388
- Default: 0.3
389
- top_p: Alternative to temperature (0.0-1.0). Controls nucleus sampling.
390
- Default: 1.0
391
- barge_confidence: ASR confidence to interrupt (0.0-1.0). Higher values make it harder to interrupt.
392
- Default: 0.0
393
- presence_penalty: Topic diversity (-2.0 to 2.0). Positive values encourage new topics.
394
- Default: 0.1
395
- frequency_penalty: Repetition control (-2.0 to 2.0). Positive values reduce repetition.
396
- Default: 0.1
379
+ Accepts any parameters which will be passed through to the SignalWire server.
380
+ The server will validate and apply parameters based on the target model's capabilities.
381
+
382
+ Common parameters include:
383
+ temperature: Randomness setting. Lower values make output more deterministic.
384
+ top_p: Alternative to temperature. Controls nucleus sampling.
385
+ barge_confidence: ASR confidence to interrupt. Higher values make it harder to interrupt.
386
+ presence_penalty: Topic diversity. Positive values encourage new topics.
387
+ frequency_penalty: Repetition control. Positive values reduce repetition.
388
+
389
+ Note: Parameters are model-specific and will be validated by the server.
390
+ Invalid parameters for the selected model will be handled/ignored by the server.
397
391
 
398
392
  Returns:
399
393
  Self for method chaining
@@ -405,57 +399,28 @@ class AIConfigMixin:
405
399
  barge_confidence=0.6
406
400
  )
407
401
  """
408
- # Validate and set temperature
409
- if temperature is not None:
410
- if not 0.0 <= temperature <= 1.5:
411
- raise ValueError("temperature must be between 0.0 and 1.5")
412
- self._prompt_llm_params['temperature'] = temperature
413
-
414
- # Validate and set top_p
415
- if top_p is not None:
416
- if not 0.0 <= top_p <= 1.0:
417
- raise ValueError("top_p must be between 0.0 and 1.0")
418
- self._prompt_llm_params['top_p'] = top_p
419
-
420
- # Validate and set barge_confidence
421
- if barge_confidence is not None:
422
- if not 0.0 <= barge_confidence <= 1.0:
423
- raise ValueError("barge_confidence must be between 0.0 and 1.0")
424
- self._prompt_llm_params['barge_confidence'] = barge_confidence
425
-
426
- # Validate and set presence_penalty
427
- if presence_penalty is not None:
428
- if not -2.0 <= presence_penalty <= 2.0:
429
- raise ValueError("presence_penalty must be between -2.0 and 2.0")
430
- self._prompt_llm_params['presence_penalty'] = presence_penalty
431
-
432
- # Validate and set frequency_penalty
433
- if frequency_penalty is not None:
434
- if not -2.0 <= frequency_penalty <= 2.0:
435
- raise ValueError("frequency_penalty must be between -2.0 and 2.0")
436
- self._prompt_llm_params['frequency_penalty'] = frequency_penalty
402
+ # Accept any parameters without validation
403
+ if params:
404
+ self._prompt_llm_params.update(params)
437
405
 
438
406
  return self
439
407
 
440
- def set_post_prompt_llm_params(
441
- self,
442
- temperature: Optional[float] = None,
443
- top_p: Optional[float] = None,
444
- presence_penalty: Optional[float] = None,
445
- frequency_penalty: Optional[float] = None
446
- ) -> 'AgentBase':
408
+ def set_post_prompt_llm_params(self, **params) -> 'AgentBase':
447
409
  """
448
410
  Set LLM parameters for the post-prompt.
449
411
 
450
- Args:
451
- temperature: Randomness setting (0.0-1.5). Lower values make output more deterministic.
452
- Default: 0.0
453
- top_p: Alternative to temperature (0.0-1.0). Controls nucleus sampling.
454
- Default: 1.0
455
- presence_penalty: Topic diversity (-2.0 to 2.0). Positive values encourage new topics.
456
- Default: 0.0
457
- frequency_penalty: Repetition control (-2.0 to 2.0). Positive values reduce repetition.
458
- Default: 0.0
412
+ Accepts any parameters which will be passed through to the SignalWire server.
413
+ The server will validate and apply parameters based on the target model's capabilities.
414
+
415
+ Common parameters include:
416
+ temperature: Randomness setting. Lower values make output more deterministic.
417
+ top_p: Alternative to temperature. Controls nucleus sampling.
418
+ presence_penalty: Topic diversity. Positive values encourage new topics.
419
+ frequency_penalty: Repetition control. Positive values reduce repetition.
420
+
421
+ Note: Parameters are model-specific and will be validated by the server.
422
+ Invalid parameters for the selected model will be handled/ignored by the server.
423
+ barge_confidence is not applicable to post-prompt.
459
424
 
460
425
  Returns:
461
426
  Self for method chaining
@@ -466,28 +431,8 @@ class AIConfigMixin:
466
431
  top_p=0.9
467
432
  )
468
433
  """
469
- # Validate and set temperature
470
- if temperature is not None:
471
- if not 0.0 <= temperature <= 1.5:
472
- raise ValueError("temperature must be between 0.0 and 1.5")
473
- self._post_prompt_llm_params['temperature'] = temperature
474
-
475
- # Validate and set top_p
476
- if top_p is not None:
477
- if not 0.0 <= top_p <= 1.0:
478
- raise ValueError("top_p must be between 0.0 and 1.0")
479
- self._post_prompt_llm_params['top_p'] = top_p
480
-
481
- # Validate and set presence_penalty
482
- if presence_penalty is not None:
483
- if not -2.0 <= presence_penalty <= 2.0:
484
- raise ValueError("presence_penalty must be between -2.0 and 2.0")
485
- self._post_prompt_llm_params['presence_penalty'] = presence_penalty
486
-
487
- # Validate and set frequency_penalty
488
- if frequency_penalty is not None:
489
- if not -2.0 <= frequency_penalty <= 2.0:
490
- raise ValueError("frequency_penalty must be between -2.0 and 2.0")
491
- self._post_prompt_llm_params['frequency_penalty'] = frequency_penalty
434
+ # Accept any parameters without validation
435
+ if params:
436
+ self._post_prompt_llm_params.update(params)
492
437
 
493
438
  return self
@@ -57,8 +57,8 @@ class SWAIGFunction:
57
57
  self.description = description
58
58
  self.parameters = parameters or {}
59
59
  self.secure = secure
60
- self.fillers = fillers # Keep for backward compatibility
61
- self.wait_file = wait_file or fillers # Use wait_file if provided, else fall back to fillers
60
+ self.fillers = fillers # Text phrases to say while processing
61
+ self.wait_file = wait_file # URL to audio/video file to play while waiting
62
62
  self.wait_file_loops = wait_file_loops
63
63
  self.webhook_url = webhook_url
64
64
  self.required = required or []
@@ -140,6 +140,8 @@ class DocumentProcessor:
140
140
  return self._chunk_by_topics(content, filename, file_type)
141
141
  elif self.chunking_strategy == 'qa':
142
142
  return self._chunk_by_qa_optimization(content, filename, file_type)
143
+ elif self.chunking_strategy == 'json':
144
+ return self._chunk_from_json(content, filename, file_type)
143
145
  else:
144
146
  # Fallback to sentence-based chunking
145
147
  return self._chunk_by_sentences(content, filename, file_type)
@@ -1022,4 +1024,103 @@ class DocumentProcessor:
1022
1024
  ))
1023
1025
 
1024
1026
  return chunks if chunks else [self._create_chunk(content, filename, "QA Section 1",
1025
- metadata={'chunk_method': 'qa_optimized', 'chunk_index': 0})]
1027
+ metadata={'chunk_method': 'qa_optimized', 'chunk_index': 0})]
1028
+
1029
+ def _chunk_from_json(self, content: str, filename: str, file_type: str) -> List[Dict[str, Any]]:
1030
+ """
1031
+ Create chunks from pre-processed JSON content
1032
+
1033
+ This strategy expects content to be a JSON string with the following structure:
1034
+ {
1035
+ "chunks": [
1036
+ {
1037
+ "chunk_id": "unique_id",
1038
+ "type": "content|toc",
1039
+ "content": "text content",
1040
+ "metadata": {
1041
+ "url": "https://...",
1042
+ "section_number": 1,
1043
+ "related_toc": "toc_id",
1044
+ ...
1045
+ }
1046
+ },
1047
+ ...
1048
+ ]
1049
+ }
1050
+
1051
+ Args:
1052
+ content: JSON string containing pre-chunked content
1053
+ filename: Name of the source file
1054
+ file_type: Should be 'json'
1055
+
1056
+ Returns:
1057
+ List of chunk dictionaries formatted for the search index
1058
+ """
1059
+ try:
1060
+ # Parse JSON content
1061
+ data = json.loads(content)
1062
+
1063
+ if not isinstance(data, dict) or 'chunks' not in data:
1064
+ logger.error(f"Invalid JSON structure in {filename}: expected 'chunks' key")
1065
+ # Fallback to treating it as plain text
1066
+ return self._chunk_by_sentences(content, filename, file_type)
1067
+
1068
+ chunks = []
1069
+ for idx, json_chunk in enumerate(data['chunks']):
1070
+ if not isinstance(json_chunk, dict) or 'content' not in json_chunk:
1071
+ logger.warning(f"Skipping invalid chunk {idx} in {filename}")
1072
+ continue
1073
+
1074
+ # Extract metadata from JSON chunk
1075
+ json_metadata = json_chunk.get('metadata', {})
1076
+ chunk_type = json_chunk.get('type', 'content')
1077
+
1078
+ # Build chunk metadata
1079
+ metadata = {
1080
+ 'chunk_method': 'json',
1081
+ 'chunk_index': idx,
1082
+ 'chunk_type': chunk_type,
1083
+ 'original_chunk_id': json_chunk.get('chunk_id', f'chunk_{idx}')
1084
+ }
1085
+
1086
+ # Merge JSON metadata
1087
+ metadata.update(json_metadata)
1088
+
1089
+ # Determine section name
1090
+ if chunk_type == 'toc':
1091
+ section = f"TOC: {json_chunk.get('content', '')[:50]}"
1092
+ else:
1093
+ section = json_metadata.get('section', f"Section {json_metadata.get('section_number', idx + 1)}")
1094
+
1095
+ # Create chunk with proper structure
1096
+ chunk = self._create_chunk(
1097
+ content=json_chunk['content'],
1098
+ filename=filename,
1099
+ section=section,
1100
+ metadata=metadata
1101
+ )
1102
+
1103
+ # Add any additional fields from JSON
1104
+ if 'tags' in json_chunk:
1105
+ chunk['tags'] = json_chunk['tags']
1106
+
1107
+ # For TOC entries, we might want to add special tags
1108
+ if chunk_type == 'toc' and 'tags' not in chunk:
1109
+ chunk['tags'] = ['toc', 'navigation']
1110
+
1111
+ chunks.append(chunk)
1112
+
1113
+ if not chunks:
1114
+ logger.warning(f"No valid chunks found in JSON file {filename}")
1115
+ return self._chunk_by_sentences(str(data), filename, file_type)
1116
+
1117
+ logger.info(f"Created {len(chunks)} chunks from JSON file {filename}")
1118
+ return chunks
1119
+
1120
+ except json.JSONDecodeError as e:
1121
+ logger.error(f"Failed to parse JSON in {filename}: {e}")
1122
+ # Fallback to sentence chunking
1123
+ return self._chunk_by_sentences(content, filename, file_type)
1124
+ except Exception as e:
1125
+ logger.error(f"Unexpected error processing JSON chunks in {filename}: {e}")
1126
+ return self._chunk_by_sentences(content, filename, file_type)
@@ -55,7 +55,7 @@ class IndexBuilder:
55
55
 
56
56
  Args:
57
57
  model_name: Name of the sentence transformer model to use
58
- chunking_strategy: Strategy for chunking documents ('sentence', 'sliding', 'paragraph', 'page', 'semantic', 'topic', 'qa')
58
+ chunking_strategy: Strategy for chunking documents ('sentence', 'sliding', 'paragraph', 'page', 'semantic', 'topic', 'qa', 'json')
59
59
  max_sentences_per_chunk: For sentence strategy (default: 5)
60
60
  chunk_size: For sliding strategy - words per chunk (default: 50)
61
61
  chunk_overlap: For sliding strategy - overlap in words (default: 10)
@@ -176,7 +176,7 @@ class WeatherApiSkill(SkillBase):
176
176
  "data_map": {
177
177
  "webhooks": [
178
178
  {
179
- "url": f"https://api.weatherapi.com/v1/current.json?key={self.api_key}&q=${{args.location}}&aqi=no",
179
+ "url": f"https://api.weatherapi.com/v1/current.json?key={self.api_key}&q=${{lc:enc:args.location}}&aqi=no",
180
180
  "method": "GET",
181
181
  "output": SwaigFunctionResult(weather_template).to_dict()
182
182
  }
@@ -188,4 +188,4 @@ class WeatherApiSkill(SkillBase):
188
188
  }
189
189
  }
190
190
 
191
- return [tool]
191
+ return [tool]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: signalwire_agents
3
- Version: 0.1.46
3
+ Version: 0.1.47
4
4
  Summary: SignalWire AI Agents SDK
5
5
  Author-email: SignalWire Team <info@signalwire.com>
6
6
  License: MIT
@@ -664,6 +664,7 @@ class PreciseAgent(AgentBase):
664
664
  self.prompt_add_section("Instructions", "Provide accurate, detailed information.")
665
665
 
666
666
  # Set custom LLM parameters for the main prompt
667
+ # These parameters are passed to the server which validates them based on the model
667
668
  self.set_prompt_llm_params(
668
669
  temperature=0.3, # Low temperature for more consistent responses
669
670
  top_p=0.9, # Slightly reduced for focused responses
@@ -685,13 +686,17 @@ agent = PreciseAgent()
685
686
  agent.serve()
686
687
  ```
687
688
 
688
- #### Available LLM Parameters
689
+ #### Common LLM Parameters
689
690
 
690
- - **temperature** (0.0-1.5): Controls randomness. Lower = more focused, higher = more creative
691
- - **top_p** (0.0-1.0): Nucleus sampling. Lower = more focused on likely tokens
692
- - **barge_confidence** (0.0-1.0): ASR confidence to interrupt. Higher = harder to interrupt
693
- - **presence_penalty** (-2.0-2.0): Topic diversity. Positive = new topics
694
- - **frequency_penalty** (-2.0-2.0): Repetition control. Positive = varied vocabulary
691
+ The SDK accepts any parameters which are passed to the server for validation based on the model. Common parameters include:
692
+
693
+ - **temperature**: Controls randomness. Lower = more focused, higher = more creative
694
+ - **top_p**: Nucleus sampling. Lower = more focused on likely tokens
695
+ - **barge_confidence**: ASR confidence to interrupt. Higher = harder to interrupt (main prompt only)
696
+ - **presence_penalty**: Topic diversity. Positive = new topics
697
+ - **frequency_penalty**: Repetition control. Positive = varied vocabulary
698
+
699
+ Note: No defaults are sent unless explicitly set. The server handles validation and applies appropriate defaults based on the model.
695
700
 
696
701
  For more details on LLM parameter tuning, see [LLM Parameters Guide](docs/llm_parameters.md).
697
702
 
@@ -1,9 +1,9 @@
1
- signalwire_agents/__init__.py,sha256=Hv5VQbfoIDYAidp17GYz_wbKRqSGKGQ87ZlHhhZ0DSM,5031
1
+ signalwire_agents/__init__.py,sha256=VjQ0bhIJ_R2wvxwnWiIyZzzGP7hVFpdeOoAgEuhZ_gg,5031
2
2
  signalwire_agents/agent_server.py,sha256=x9HyWia8D3r6KMqY-Q4DtNVivfJWLTx8B-KzUI8okuA,26880
3
3
  signalwire_agents/schema.json,sha256=6-7ccbt39iM1CO36dOfvupRPfd0gnQ0XoAdyo-EFyjo,238042
4
4
  signalwire_agents/agents/bedrock.py,sha256=J582gooNtxtep4xdVOfyDzRtHp_XrurPMS93xf2Xod0,10836
5
5
  signalwire_agents/cli/__init__.py,sha256=XbxAQFaCIdGXIXJiriVBWoFPOJsC401u21588nO4TG8,388
6
- signalwire_agents/cli/build_search.py,sha256=Mqs9gSh43-0vv_4qrFEADt2b4dJqlGTfIFORK3ZlQPk,31895
6
+ signalwire_agents/cli/build_search.py,sha256=UXKyW27Xr-gGO7ooztMLb3YD_tEdJO6ptDSIUGgwZ0w,32035
7
7
  signalwire_agents/cli/config.py,sha256=2i4e0BArdKsaXxjeueYYRNke7GWicHPYC2wuitVrP7A,2541
8
8
  signalwire_agents/cli/swaig_test_wrapper.py,sha256=t63HQpEc1Up5AcysEHP1OsEQcgSMKH-9H1L2IhFso18,1533
9
9
  signalwire_agents/cli/test_swaig.py,sha256=-v-XjTUWZNxmMJuOF5_cB1Jz8x8emJoqgqS_8jLeT4Y,31487
@@ -24,7 +24,7 @@ signalwire_agents/cli/simulation/data_generation.py,sha256=pxa9aJ6XkI0O8yAIGvBTU
24
24
  signalwire_agents/cli/simulation/data_overrides.py,sha256=3_3pT6j-q2gRufPX2bZ1BrmY7u1IdloLooKAJil33vI,6319
25
25
  signalwire_agents/cli/simulation/mock_env.py,sha256=fvaR_xdLMm8AbpNUbTJOFG9THcti3Zds-0QNDbKMaYk,10249
26
26
  signalwire_agents/core/__init__.py,sha256=xjPq8DmUnWYUG28sd17n430VWPmMH9oZ9W14gYwG96g,806
27
- signalwire_agents/core/agent_base.py,sha256=vAx9sQ985juMi0lnfP7bw1fl_1iYRzXVIpPyE-xEFBE,50139
27
+ signalwire_agents/core/agent_base.py,sha256=Mq-2rqE_ntsxNMgY0FqBvpSWoT-RqMu_OtI8WQCmNhQ,50012
28
28
  signalwire_agents/core/auth_handler.py,sha256=jXrof9WZ1W9qqlQT9WElcmSRafL2kG7207x5SqWN9MU,8481
29
29
  signalwire_agents/core/config_loader.py,sha256=rStVRRUaeMGrMc44ocr0diMQQARZhbKqwMqQ6kqUNos,8722
30
30
  signalwire_agents/core/contexts.py,sha256=g9FgOGMfGCUWlm57YZcv7CvOf-Ub9FdKZIOMu14ADfE,24428
@@ -35,7 +35,7 @@ signalwire_agents/core/pom_builder.py,sha256=ywuiIfP8BeLBPo_G4X1teZlG6zTCMkW71CZ
35
35
  signalwire_agents/core/security_config.py,sha256=iAnAzKEJQiXL6mMpDaYm3Sjkxwm4x2N9HD6DeWSI8yI,12536
36
36
  signalwire_agents/core/skill_base.py,sha256=1b_4ht_T1BVnfzHYqoILb3idrrPYMs5-G-adHo2IVss,6903
37
37
  signalwire_agents/core/skill_manager.py,sha256=D4erpz0tmSYLqyfeteNNIY0VRWDtX0rDw3n7Z_f0W5U,10493
38
- signalwire_agents/core/swaig_function.py,sha256=Zf1RQOadBgV4oxXJY7n4IfueYu0dKinfjB5RkBoHbrI,7534
38
+ signalwire_agents/core/swaig_function.py,sha256=KnUQ2g99kDSzOzD1PJ0Iqs8DeeZ6jDIIN54C5MA4TWw,7521
39
39
  signalwire_agents/core/swml_builder.py,sha256=tJBFDAVTENEfjGLp2h9_AKOYt5O9FrSYLI-nZZVwM1E,15604
40
40
  signalwire_agents/core/swml_handler.py,sha256=hFDq41dQWL3EdFbq6h0hizE1dIqdVeiTeCrujbZsPzo,8397
41
41
  signalwire_agents/core/swml_renderer.py,sha256=-WAB_5ss836a8nBo5zlb6SaQKFNF4XIo1odWIXM4eE8,6860
@@ -53,7 +53,7 @@ signalwire_agents/core/agent/tools/__init__.py,sha256=eOcmyeGm6qogT3wsBx7QvdjmTb
53
53
  signalwire_agents/core/agent/tools/decorator.py,sha256=pC6j1114GwVBd2U3h23I9gKLtu8AgeiuWV0lUzz682U,2961
54
54
  signalwire_agents/core/agent/tools/registry.py,sha256=HScbKKwpJqFZ_odmeFklSQ0p0EMasEyKSxNwX568OPo,8054
55
55
  signalwire_agents/core/mixins/__init__.py,sha256=NsFpfF7TDP_lNR0Riw4Nbvt4fDbv_A3OoVbBqRrtXQM,652
56
- signalwire_agents/core/mixins/ai_config_mixin.py,sha256=kT-xVVWMIE6RQe6qQFCRYxli345bxOs5uS1dCtMTeTc,18232
56
+ signalwire_agents/core/mixins/ai_config_mixin.py,sha256=_m2cVgauRegDVPFW3bVmpskr40UrEcOZto4IXGDkUX4,15897
57
57
  signalwire_agents/core/mixins/auth_mixin.py,sha256=Y9kR423-76U_pKL7KXzseeXX2a-4WxNWyo3odS7TDQM,9879
58
58
  signalwire_agents/core/mixins/prompt_mixin.py,sha256=bEsuw9J2F_upFYI02KyC7o2eGZjwOKQ352rmJBZirAM,13729
59
59
  signalwire_agents/core/mixins/serverless_mixin.py,sha256=QIIbl_-16XFJi5aqrWpNzORbyCJQmhaplWXnW6U9i68,16137
@@ -70,8 +70,8 @@ signalwire_agents/prefabs/info_gatherer.py,sha256=0LpYTaU7C76Efp3yUIdNX6xzWH7mj5
70
70
  signalwire_agents/prefabs/receptionist.py,sha256=em0uk_F0tmePvzE6Hi9HFlL3MHChH0RaHHqSvww9pK0,10323
71
71
  signalwire_agents/prefabs/survey.py,sha256=a-0-xAnQYhdX4Lzgyna14lpNfaCV-rLUFkQF6QOCQAY,14534
72
72
  signalwire_agents/search/__init__.py,sha256=x7saU_MDbhoOIzcvCT1-gnqyH2rrMpzB4ZUqk-av-lI,3958
73
- signalwire_agents/search/document_processor.py,sha256=J4OG640qbqGslbVevvD4J2cbTmFCZiGJ1bLX2yDayaE,43699
74
- signalwire_agents/search/index_builder.py,sha256=EqvX-yjcSYAsNFaFnkzQewUVISl1v452OEgfuwwQsZ4,29268
73
+ signalwire_agents/search/document_processor.py,sha256=Q6EDRu9GKJkWLeJREvppAuxuZ5wSiZXoolp7CgKK2f4,47941
74
+ signalwire_agents/search/index_builder.py,sha256=bjctP8SVG8QMIJyiqRnjZEi_OKohx2jyuk2vneERslk,29276
75
75
  signalwire_agents/search/pgvector_backend.py,sha256=7OerJvzCGQigbb_RnV2M5PEOHR2EUMBn4n2bHML08I0,19172
76
76
  signalwire_agents/search/query_processor.py,sha256=WMm_jjArQ6-Jpy0Cc0sUI4saidOtDRKx_XLv0qi3N3k,16739
77
77
  signalwire_agents/search/search_engine.py,sha256=rGRTs8qRX4biXhsOg7jnt6YvoetoN_KG3ByKwtX7h6o,16635
@@ -114,7 +114,7 @@ signalwire_agents/skills/swml_transfer/__init__.py,sha256=YyfxRpbgT4ZpEjGolwffKq
114
114
  signalwire_agents/skills/swml_transfer/skill.py,sha256=_qzJRd9P5VN8flTDe9N-9cvsLU0sN7XuY5yjk-DNlv8,15363
115
115
  signalwire_agents/skills/weather_api/README.md,sha256=buzCtrhxXAxZ8k7Qt_lR62E_tqnpXmXRsTarb_F43zg,6439
116
116
  signalwire_agents/skills/weather_api/__init__.py,sha256=WCS--GFBX8straIZPuGAmTDZ7t-y7VI6ioB1Kf8eeP4,257
117
- signalwire_agents/skills/weather_api/skill.py,sha256=KftYleCBKuqa72dcGZcCieRshI3BTdCrClenKVyM0ac,7196
117
+ signalwire_agents/skills/weather_api/skill.py,sha256=LNJItYzgRSZYNYcH7Z37BOjjPy3aaM0OjMRnAxiUhOI,7204
118
118
  signalwire_agents/skills/web_search/README.md,sha256=Y95cxEScMzhmslUJF8u_Nh15FbEBuus4P-E8_kk2an0,5438
119
119
  signalwire_agents/skills/web_search/__init__.py,sha256=kv4CzmF1lldRZcL_HivieslP7gtTFvxcfprKG4n6b-Q,236
120
120
  signalwire_agents/skills/web_search/skill.py,sha256=EGu6ff9aAb2W323_XDCcVDW1wbAKTZYK8HQOT__iqtE,12660
@@ -128,9 +128,9 @@ signalwire_agents/utils/token_generators.py,sha256=4Mr7baQ_xR_hfJ72YxQRAT_GFa663
128
128
  signalwire_agents/utils/validators.py,sha256=4Mr7baQ_xR_hfJ72YxQRAT_GFa663YjFX_PumJ35Xds,191
129
129
  signalwire_agents/web/__init__.py,sha256=XE_pSTY9Aalzr7J7wqFth1Zr3cccQHPPcF5HWNrOpz8,383
130
130
  signalwire_agents/web/web_service.py,sha256=a2PSHJgX1tlZr0Iz1A1UouZjXEePJAZL632evvLVM38,21071
131
- signalwire_agents-0.1.46.dist-info/licenses/LICENSE,sha256=NYvAsB-rTcSvG9cqHt9EUHAWLiA9YzM4Qfz-mPdvDR0,1067
132
- signalwire_agents-0.1.46.dist-info/METADATA,sha256=8Ek6hbt3XtY4I_7nZj4LKk8qJL0KZogp6tOzmd-ioro,41281
133
- signalwire_agents-0.1.46.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
134
- signalwire_agents-0.1.46.dist-info/entry_points.txt,sha256=ZDT65zfTO_YyDzi_hwQbCxIhrUfu_t8RpNXMMXlUPWI,144
135
- signalwire_agents-0.1.46.dist-info/top_level.txt,sha256=kDGS6ZYv84K9P5Kyg9_S8P_pbUXoHkso0On_DB5bbWc,18
136
- signalwire_agents-0.1.46.dist-info/RECORD,,
131
+ signalwire_agents-0.1.47.dist-info/licenses/LICENSE,sha256=NYvAsB-rTcSvG9cqHt9EUHAWLiA9YzM4Qfz-mPdvDR0,1067
132
+ signalwire_agents-0.1.47.dist-info/METADATA,sha256=-QAbmvCBQGH41IiHCUu8OKpWp3prVNguEf0xeITLPwY,41596
133
+ signalwire_agents-0.1.47.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
134
+ signalwire_agents-0.1.47.dist-info/entry_points.txt,sha256=ZDT65zfTO_YyDzi_hwQbCxIhrUfu_t8RpNXMMXlUPWI,144
135
+ signalwire_agents-0.1.47.dist-info/top_level.txt,sha256=kDGS6ZYv84K9P5Kyg9_S8P_pbUXoHkso0On_DB5bbWc,18
136
+ signalwire_agents-0.1.47.dist-info/RECORD,,