signalwire-agents 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (24)
  1. signalwire_agents/__init__.py +5 -1
  2. signalwire_agents/agent_server.py +222 -13
  3. signalwire_agents/cli/build_search.py +457 -0
  4. signalwire_agents/cli/test_swaig.py +177 -113
  5. signalwire_agents/core/agent_base.py +1 -1
  6. signalwire_agents/core/logging_config.py +232 -0
  7. signalwire_agents/search/__init__.py +131 -0
  8. signalwire_agents/search/document_processor.py +764 -0
  9. signalwire_agents/search/index_builder.py +534 -0
  10. signalwire_agents/search/query_processor.py +371 -0
  11. signalwire_agents/search/search_engine.py +383 -0
  12. signalwire_agents/search/search_service.py +251 -0
  13. signalwire_agents/skills/native_vector_search/__init__.py +1 -0
  14. signalwire_agents/skills/native_vector_search/skill.py +352 -0
  15. signalwire_agents/skills/registry.py +2 -15
  16. signalwire_agents/utils/__init__.py +13 -1
  17. {signalwire_agents-0.1.11.dist-info → signalwire_agents-0.1.12.dist-info}/METADATA +110 -3
  18. {signalwire_agents-0.1.11.dist-info → signalwire_agents-0.1.12.dist-info}/RECORD +23 -14
  19. {signalwire_agents-0.1.11.dist-info → signalwire_agents-0.1.12.dist-info}/entry_points.txt +1 -0
  20. signalwire_agents/utils/serverless.py +0 -38
  21. {signalwire_agents-0.1.11.data → signalwire_agents-0.1.12.data}/data/schema.json +0 -0
  22. {signalwire_agents-0.1.11.dist-info → signalwire_agents-0.1.12.dist-info}/WHEEL +0 -0
  23. {signalwire_agents-0.1.11.dist-info → signalwire_agents-0.1.12.dist-info}/licenses/LICENSE +0 -0
  24. {signalwire_agents-0.1.11.dist-info → signalwire_agents-0.1.12.dist-info}/top_level.txt +0 -0
signalwire_agents/search/search_engine.py
@@ -0,0 +1,383 @@
+ """
+ Copyright (c) 2025 SignalWire
+
+ This file is part of the SignalWire AI Agents SDK.
+
+ Licensed under the MIT License.
+ See LICENSE file in the project root for full license information.
+ """
+
+ import sqlite3
+ import json
+ import logging
+ from typing import List, Dict, Any, Optional, Union
+
+ try:
+     import numpy as np
+     from sklearn.metrics.pairwise import cosine_similarity
+     NDArray = np.ndarray
+ except ImportError:
+     np = None
+     cosine_similarity = None
+     NDArray = Any  # Fallback type for when numpy is not available
+
+ logger = logging.getLogger(__name__)
+
+ class SearchEngine:
+     """Hybrid search engine for vector and keyword search"""
+
+     def __init__(self, index_path: str, model=None):
+         self.index_path = index_path
+         self.model = model
+         self.config = self._load_config()
+         self.embedding_dim = int(self.config.get('embedding_dimensions', 768))
+
+     def _load_config(self) -> Dict[str, str]:
+         """Load index configuration"""
+         try:
+             conn = sqlite3.connect(self.index_path)
+             cursor = conn.cursor()
+             cursor.execute("SELECT key, value FROM config")
+             config = dict(cursor.fetchall())
+             conn.close()
+             return config
+         except Exception as e:
+             logger.error(f"Error loading config from {self.index_path}: {e}")
+             return {}
+
+     def search(self, query_vector: List[float], enhanced_text: str,
+                count: int = 3, distance_threshold: float = 0.0,
+                tags: Optional[List[str]] = None) -> List[Dict[str, Any]]:
+         """
+         Perform hybrid search (vector + keyword)
+
+         Args:
+             query_vector: Embedding vector for the query
+             enhanced_text: Processed query text for keyword search
+             count: Number of results to return
+             distance_threshold: Minimum similarity score
+             tags: Filter by tags
+
+         Returns:
+             List of search results with scores and metadata
+         """
+
+         if not np or not cosine_similarity:
+             logger.warning("NumPy or scikit-learn not available. Using keyword search only.")
+             return self._keyword_search_only(enhanced_text, count, tags)
+
+         # Convert query vector to numpy array
+         try:
+             query_array = np.array(query_vector).reshape(1, -1)
+         except Exception as e:
+             logger.error(f"Error converting query vector: {e}")
+             return self._keyword_search_only(enhanced_text, count, tags)
+
+         # Vector search
+         vector_results = self._vector_search(query_array, count * 2)
+
+         # Keyword search
+         keyword_results = self._keyword_search(enhanced_text, count * 2)
+
+         # Merge and rank results
+         merged_results = self._merge_results(vector_results, keyword_results)
+
+         # Filter by tags if specified
+         if tags:
+             merged_results = self._filter_by_tags(merged_results, tags)
+
+         # Filter by distance threshold
+         filtered_results = [
+             r for r in merged_results
+             if r['score'] >= distance_threshold
+         ]
+
+         return filtered_results[:count]
+
+     def _keyword_search_only(self, enhanced_text: str, count: int,
+                              tags: Optional[List[str]] = None) -> List[Dict[str, Any]]:
+         """Fallback to keyword search only when vector search is unavailable"""
+         keyword_results = self._keyword_search(enhanced_text, count)
+
+         if tags:
+             keyword_results = self._filter_by_tags(keyword_results, tags)
+
+         return keyword_results[:count]
+
+     def _vector_search(self, query_vector: Union[NDArray, Any], count: int) -> List[Dict[str, Any]]:
+         """Perform vector similarity search"""
+         if not np or not cosine_similarity:
+             return []
+
+         try:
+             conn = sqlite3.connect(self.index_path)
+             cursor = conn.cursor()
+
+             # Get all embeddings (for small datasets, this is fine)
+             # For large datasets, we'd use FAISS or similar
+             cursor.execute('''
+                 SELECT id, content, embedding, filename, section, tags, metadata
+                 FROM chunks
+                 WHERE embedding IS NOT NULL AND embedding != ''
+             ''')
+
+             results = []
+             for row in cursor.fetchall():
+                 chunk_id, content, embedding_blob, filename, section, tags_json, metadata_json = row
+
+                 if not embedding_blob:
+                     continue
+
+                 try:
+                     # Convert embedding back to numpy array
+                     embedding = np.frombuffer(embedding_blob, dtype=np.float32).reshape(1, -1)
+
+                     # Calculate similarity
+                     similarity = cosine_similarity(query_vector, embedding)[0][0]
+
+                     results.append({
+                         'id': chunk_id,
+                         'content': content,
+                         'score': float(similarity),
+                         'metadata': {
+                             'filename': filename,
+                             'section': section,
+                             'tags': json.loads(tags_json) if tags_json else [],
+                             'metadata': json.loads(metadata_json) if metadata_json else {}
+                         },
+                         'search_type': 'vector'
+                     })
+                 except Exception as e:
+                     logger.warning(f"Error processing embedding for chunk {chunk_id}: {e}")
+                     continue
+
+             conn.close()
+
+             # Sort by similarity score
+             results.sort(key=lambda x: x['score'], reverse=True)
+             return results[:count]
+
+         except Exception as e:
+             logger.error(f"Error in vector search: {e}")
+             return []
+
+     def _keyword_search(self, enhanced_text: str, count: int) -> List[Dict[str, Any]]:
+         """Perform full-text search"""
+         try:
+             conn = sqlite3.connect(self.index_path)
+             cursor = conn.cursor()
+
+             # Escape FTS5 special characters
+             escaped_text = self._escape_fts_query(enhanced_text)
+
+             # FTS5 search
+             cursor.execute('''
+                 SELECT c.id, c.content, c.filename, c.section, c.tags, c.metadata,
+                        chunks_fts.rank
+                 FROM chunks_fts
+                 JOIN chunks c ON chunks_fts.rowid = c.id
+                 WHERE chunks_fts MATCH ?
+                 ORDER BY chunks_fts.rank
+                 LIMIT ?
+             ''', (escaped_text, count))
+
+             results = []
+             for row in cursor.fetchall():
+                 chunk_id, content, filename, section, tags_json, metadata_json, rank = row
+
+                 # Convert FTS rank to similarity score (higher rank = lower score)
+                 # FTS5 rank is negative, so we convert it to a positive similarity score
+                 score = 1.0 / (1.0 + abs(rank))
+
+                 results.append({
+                     'id': chunk_id,
+                     'content': content,
+                     'score': float(score),
+                     'metadata': {
+                         'filename': filename,
+                         'section': section,
+                         'tags': json.loads(tags_json) if tags_json else [],
+                         'metadata': json.loads(metadata_json) if metadata_json else {}
+                     },
+                     'search_type': 'keyword'
+                 })
+
+             conn.close()
+             return results
+
+         except Exception as e:
+             logger.error(f"Error in keyword search: {e}")
+             # Fallback to simple LIKE search
+             return self._fallback_search(enhanced_text, count)
+
+     def _escape_fts_query(self, query: str) -> str:
+         """Escape special characters for FTS5 queries"""
+         # FTS5 special characters that need escaping
+         special_chars = ['"', "'", '(', ')', '*', '-', '+', ':', '^']
+
+         escaped = query
+         for char in special_chars:
+             escaped = escaped.replace(char, f'\\{char}')
+
+         return escaped
+
+     def _fallback_search(self, enhanced_text: str, count: int) -> List[Dict[str, Any]]:
+         """Fallback search using LIKE when FTS fails"""
+         try:
+             conn = sqlite3.connect(self.index_path)
+             cursor = conn.cursor()
+
+             # Simple LIKE search
+             search_terms = enhanced_text.lower().split()
+             like_conditions = []
+             params = []
+
+             for term in search_terms[:5]:  # Limit to 5 terms to avoid too complex queries
+                 like_conditions.append("LOWER(processed_content) LIKE ?")
+                 params.append(f"%{term}%")
+
+             if not like_conditions:
+                 return []
+
+             query = f'''
+                 SELECT id, content, filename, section, tags, metadata
+                 FROM chunks
+                 WHERE {" OR ".join(like_conditions)}
+                 LIMIT ?
+             '''
+             params.append(count)
+
+             cursor.execute(query, params)
+
+             results = []
+             for row in cursor.fetchall():
+                 chunk_id, content, filename, section, tags_json, metadata_json = row
+
+                 # Simple scoring based on term matches
+                 content_lower = content.lower()
+                 score = sum(1 for term in search_terms if term.lower() in content_lower) / len(search_terms)
+
+                 results.append({
+                     'id': chunk_id,
+                     'content': content,
+                     'score': float(score),
+                     'metadata': {
+                         'filename': filename,
+                         'section': section,
+                         'tags': json.loads(tags_json) if tags_json else [],
+                         'metadata': json.loads(metadata_json) if metadata_json else {}
+                     },
+                     'search_type': 'fallback'
+                 })
+
+             conn.close()
+
+             # Sort by score
+             results.sort(key=lambda x: x['score'], reverse=True)
+             return results
+
+         except Exception as e:
+             logger.error(f"Error in fallback search: {e}")
+             return []
+
+     def _merge_results(self, vector_results: List[Dict], keyword_results: List[Dict]) -> List[Dict[str, Any]]:
+         """Merge and rank vector and keyword search results"""
+         # Create a combined list with weighted scores
+         combined = {}
+
+         # Add vector results with weight
+         for result in vector_results:
+             chunk_id = result['id']
+             combined[chunk_id] = result.copy()
+             combined[chunk_id]['vector_score'] = result['score']
+             combined[chunk_id]['keyword_score'] = 0.0
+
+         # Add keyword results with weight
+         for result in keyword_results:
+             chunk_id = result['id']
+             if chunk_id in combined:
+                 combined[chunk_id]['keyword_score'] = result['score']
+             else:
+                 combined[chunk_id] = result.copy()
+                 combined[chunk_id]['vector_score'] = 0.0
+                 combined[chunk_id]['keyword_score'] = result['score']
+
+         # Calculate combined score (weighted average)
+         vector_weight = 0.7
+         keyword_weight = 0.3
+
+         for chunk_id, result in combined.items():
+             vector_score = result.get('vector_score', 0.0)
+             keyword_score = result.get('keyword_score', 0.0)
+             result['score'] = (vector_score * vector_weight + keyword_score * keyword_weight)
+
+             # Add debug info
+             result['metadata']['search_scores'] = {
+                 'vector': vector_score,
+                 'keyword': keyword_score,
+                 'combined': result['score']
+             }
+
+         # Sort by combined score
+         sorted_results = sorted(combined.values(), key=lambda x: x['score'], reverse=True)
+         return sorted_results
+
+     def _filter_by_tags(self, results: List[Dict], required_tags: List[str]) -> List[Dict[str, Any]]:
+         """Filter results by required tags"""
+         filtered = []
+         for result in results:
+             result_tags = result['metadata'].get('tags', [])
+             if any(tag in result_tags for tag in required_tags):
+                 filtered.append(result)
+         return filtered
+
+     def get_stats(self) -> Dict[str, Any]:
+         """Get statistics about the search index"""
+         conn = sqlite3.connect(self.index_path)
+         cursor = conn.cursor()
+
+         try:
+             # Get total chunks
+             cursor.execute("SELECT COUNT(*) FROM chunks")
+             total_chunks = cursor.fetchone()[0]
+
+             # Get total files
+             cursor.execute("SELECT COUNT(DISTINCT filename) FROM chunks")
+             total_files = cursor.fetchone()[0]
+
+             # Get average chunk size
+             cursor.execute("SELECT AVG(LENGTH(content)) FROM chunks")
+             avg_chunk_size = cursor.fetchone()[0] or 0
+
+             # Get file types
+             cursor.execute("""
+                 SELECT
+                     CASE
+                         WHEN filename LIKE '%.md' THEN 'markdown'
+                         WHEN filename LIKE '%.py' THEN 'python'
+                         WHEN filename LIKE '%.txt' THEN 'text'
+                         WHEN filename LIKE '%.pdf' THEN 'pdf'
+                         WHEN filename LIKE '%.docx' THEN 'docx'
+                         ELSE 'other'
+                     END as file_type,
+                     COUNT(DISTINCT filename) as count
+                 FROM chunks
+                 GROUP BY file_type
+             """)
+             file_types = dict(cursor.fetchall())
+
+             # Get languages
+             cursor.execute("SELECT language, COUNT(*) FROM chunks GROUP BY language")
+             languages = dict(cursor.fetchall())
+
+             return {
+                 'total_chunks': total_chunks,
+                 'total_files': total_files,
+                 'avg_chunk_size': int(avg_chunk_size),
+                 'file_types': file_types,
+                 'languages': languages,
+                 'config': self.config
+             }
+
+         finally:
+             conn.close()
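
As a quick orientation to the new module above: SearchEngine runs both retrieval paths and merges them with fixed weights (0.7 vector, 0.3 keyword), mapping negative FTS5 ranks to a positive score via 1 / (1 + |rank|). Below is a minimal usage sketch, assuming an index built with the new build_search CLI added in this release and the preprocess_query helper from query_processor.py (file 10 above); the index filename is illustrative, not part of the release.

    # Sketch only: "docs.swsearch" is a hypothetical index path.
    from signalwire_agents.search.query_processor import preprocess_query
    from signalwire_agents.search.search_engine import SearchEngine

    engine = SearchEngine("docs.swsearch")

    # preprocess_query returns the query embedding under 'vector' and the
    # normalized query text under 'enhanced_text' (see its use in
    # search_service.py below).
    enhanced = preprocess_query("how do I transfer a call?", language='auto', vector=True)

    results = engine.search(
        query_vector=enhanced.get('vector', []),
        enhanced_text=enhanced['enhanced_text'],
        count=3,
        distance_threshold=0.0,
    )
    for r in results:
        # Combined score = 0.7 * cosine similarity + 0.3 * keyword score,
        # e.g. 0.7 * 0.82 + 0.3 * (1 / (1 + 2.5)) ≈ 0.66.
        print(r['score'], r['search_type'], r['metadata']['filename'])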
signalwire_agents/search/search_service.py
@@ -0,0 +1,251 @@
+ """
+ Copyright (c) 2025 SignalWire
+
+ This file is part of the SignalWire AI Agents SDK.
+
+ Licensed under the MIT License.
+ See LICENSE file in the project root for full license information.
+ """
+
+ import logging
+ from typing import Dict, Any, List, Optional
+
+ try:
+     from fastapi import FastAPI, HTTPException
+     from pydantic import BaseModel
+ except ImportError:
+     FastAPI = None
+     HTTPException = None
+     BaseModel = None
+
+ try:
+     from sentence_transformers import SentenceTransformer
+ except ImportError:
+     SentenceTransformer = None
+
+ from .query_processor import preprocess_query
+ from .search_engine import SearchEngine
+
+ logger = logging.getLogger(__name__)
+
+ # Pydantic models for API
+ if BaseModel:
+     class SearchRequest(BaseModel):
+         query: str
+         index_name: str = "default"
+         count: int = 3
+         distance: float = 0.0
+         tags: Optional[List[str]] = None
+         language: Optional[str] = None
+
+     class SearchResult(BaseModel):
+         content: str
+         score: float
+         metadata: Dict[str, Any]
+
+     class SearchResponse(BaseModel):
+         results: List[SearchResult]
+         query_analysis: Optional[Dict[str, Any]] = None
+ else:
+     # Fallback classes when FastAPI is not available
+     class SearchRequest:
+         def __init__(self, query: str, index_name: str = "default", count: int = 3,
+                      distance: float = 0.0, tags: Optional[List[str]] = None,
+                      language: Optional[str] = None):
+             self.query = query
+             self.index_name = index_name
+             self.count = count
+             self.distance = distance
+             self.tags = tags
+             self.language = language
+
+     class SearchResult:
+         def __init__(self, content: str, score: float, metadata: Dict[str, Any]):
+             self.content = content
+             self.score = score
+             self.metadata = metadata
+
+     class SearchResponse:
+         def __init__(self, results: List[SearchResult], query_analysis: Optional[Dict[str, Any]] = None):
+             self.results = results
+             self.query_analysis = query_analysis
+
+ class SearchService:
+     """Local search service with HTTP API"""
+
+     def __init__(self, port: int = 8001, indexes: Dict[str, str] = None):
+         self.port = port
+         self.indexes = indexes or {}
+         self.search_engines = {}
+         self.model = None
+
+         if FastAPI:
+             self.app = FastAPI(title="SignalWire Local Search Service")
+             self._setup_routes()
+         else:
+             self.app = None
+             logger.warning("FastAPI not available. HTTP service will not be available.")
+
+         self._load_resources()
+
+     def _setup_routes(self):
+         """Setup FastAPI routes"""
+         if not self.app:
+             return
+
+         @self.app.post("/search", response_model=SearchResponse)
+         async def search(request: SearchRequest):
+             return await self._handle_search(request)
+
+         @self.app.get("/health")
+         async def health():
+             return {"status": "healthy", "indexes": list(self.indexes.keys())}
+
+         @self.app.post("/reload_index")
+         async def reload_index(index_name: str, index_path: str):
+             """Reload or add new index"""
+             self.indexes[index_name] = index_path
+             self.search_engines[index_name] = SearchEngine(index_path, self.model)
+             return {"status": "reloaded", "index": index_name}
+
+     def _load_resources(self):
+         """Load embedding model and search indexes"""
+         # Load model (shared across all indexes)
+         if self.indexes and SentenceTransformer:
+             # Get model name from first index
+             sample_index = next(iter(self.indexes.values()))
+             model_name = self._get_model_name(sample_index)
+             try:
+                 self.model = SentenceTransformer(model_name)
+             except Exception as e:
+                 logger.warning(f"Could not load sentence transformer model: {e}")
+                 self.model = None
+
+         # Load search engines for each index
+         for index_name, index_path in self.indexes.items():
+             try:
+                 self.search_engines[index_name] = SearchEngine(index_path, self.model)
+             except Exception as e:
+                 logger.error(f"Error loading search engine for {index_name}: {e}")
+
+     def _get_model_name(self, index_path: str) -> str:
+         """Get embedding model name from index config"""
+         try:
+             import sqlite3
+             conn = sqlite3.connect(index_path)
+             cursor = conn.cursor()
+             cursor.execute("SELECT value FROM config WHERE key = 'embedding_model'")
+             result = cursor.fetchone()
+             conn.close()
+             return result[0] if result else 'sentence-transformers/all-mpnet-base-v2'
+         except Exception as e:
+             logger.warning(f"Could not get model name from index: {e}")
+             return 'sentence-transformers/all-mpnet-base-v2'
+
+     async def _handle_search(self, request: SearchRequest) -> SearchResponse:
+         """Handle search request"""
+         if request.index_name not in self.search_engines:
+             if HTTPException:
+                 raise HTTPException(status_code=404, detail=f"Index '{request.index_name}' not found")
+             else:
+                 raise ValueError(f"Index '{request.index_name}' not found")
+
+         search_engine = self.search_engines[request.index_name]
+
+         # Enhance query
+         try:
+             enhanced = preprocess_query(
+                 request.query,
+                 language=request.language or 'auto',
+                 vector=True
+             )
+         except Exception as e:
+             logger.error(f"Error preprocessing query: {e}")
+             enhanced = {
+                 'enhanced_text': request.query,
+                 'vector': [],
+                 'language': 'en'
+             }
+
+         # Perform search
+         try:
+             results = search_engine.search(
+                 query_vector=enhanced.get('vector', []),
+                 enhanced_text=enhanced['enhanced_text'],
+                 count=request.count,
+                 distance_threshold=request.distance,
+                 tags=request.tags
+             )
+         except Exception as e:
+             logger.error(f"Error performing search: {e}")
+             results = []
+
+         # Format response
+         search_results = [
+             SearchResult(
+                 content=result['content'],
+                 score=result['score'],
+                 metadata=result['metadata']
+             )
+             for result in results
+         ]
+
+         return SearchResponse(
+             results=search_results,
+             query_analysis={
+                 'original_query': request.query,
+                 'enhanced_query': enhanced['enhanced_text'],
+                 'detected_language': enhanced.get('language'),
+                 'pos_analysis': enhanced.get('POS')
+             }
+         )
+
+     def search_direct(self, query: str, index_name: str = "default", count: int = 3,
+                       distance: float = 0.0, tags: Optional[List[str]] = None,
+                       language: Optional[str] = None) -> Dict[str, Any]:
+         """Direct search method (non-async) for programmatic use"""
+         request = SearchRequest(
+             query=query,
+             index_name=index_name,
+             count=count,
+             distance=distance,
+             tags=tags,
+             language=language
+         )
+
+         # Use asyncio to run the async method
+         import asyncio
+         try:
+             loop = asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+
+         response = loop.run_until_complete(self._handle_search(request))
+
+         return {
+             'results': [
+                 {
+                     'content': r.content,
+                     'score': r.score,
+                     'metadata': r.metadata
+                 }
+                 for r in response.results
+             ],
+             'query_analysis': response.query_analysis
+         }
+
+     def start(self):
+         """Start the service"""
+         if not self.app:
+             raise RuntimeError("FastAPI not available. Cannot start HTTP service.")
+
+         try:
+             import uvicorn
+             uvicorn.run(self.app, host="0.0.0.0", port=self.port)
+         except ImportError:
+             raise RuntimeError("uvicorn not available. Cannot start HTTP service.")
+
+     def stop(self):
+         """Stop the service (placeholder for cleanup)"""
+         pass
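
The service wrapper above can be driven either in-process or over HTTP. A short sketch under the same assumptions as before (illustrative index path and port; fastapi/uvicorn and sentence-transformers are optional at runtime, as the try/except imports show):

    from signalwire_agents.search.search_service import SearchService

    service = SearchService(port=8001, indexes={"docs": "docs.swsearch"})

    # In-process call, no HTTP stack needed:
    out = service.search_direct("how do I transfer a call?", index_name="docs", count=3)
    print(out['query_analysis']['enhanced_query'], len(out['results']))

    # Or serve POST /search, GET /health, and POST /reload_index over HTTP
    # (requires fastapi + uvicorn):
    # service.start()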
signalwire_agents/skills/native_vector_search/__init__.py
@@ -0,0 +1 @@
+ # Native Vector Search Skill
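
The one-line __init__.py above only marks the skill package; the skill logic itself is in skill.py (+352 lines, file 14 in the list, collapsed in this diff). Given the SDK's existing skill registry, wiring it into an agent presumably looks like the sketch below; the option name is an assumption, not confirmed by this diff:

    # Hypothetical wiring: add_skill() is the SDK's skill-loading entry point;
    # the "index_file" option name is assumed, since skill.py is collapsed above.
    from signalwire_agents import AgentBase

    class DocsAgent(AgentBase):
        def __init__(self):
            super().__init__(name="docs-agent")
            self.add_skill("native_vector_search", {
                "index_file": "docs.swsearch",  # assumed option name
            })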