vectra-client 0.3.3 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +10 -0
- data/CHANGELOG.md +26 -8
- data/README.md +35 -2
- data/docs/_layouts/default.html +1 -0
- data/docs/_layouts/home.html +44 -3
- data/docs/_layouts/page.html +42 -9
- data/docs/assets/style.css +226 -1
- data/docs/examples/index.md +9 -0
- data/docs/examples/real-world.md +576 -0
- data/docs/grafana_final.png +0 -0
- data/docs/guides/getting-started.md +70 -2
- data/docs/guides/monitoring.md +50 -0
- data/docs/providers/index.md +12 -0
- data/docs/providers/memory.md +145 -0
- data/docs/providers/weaviate.md +84 -25
- data/examples/GRAFANA_QUICKSTART.md +158 -0
- data/examples/README.md +332 -0
- data/examples/comprehensive_demo.rb +1116 -0
- data/examples/grafana-dashboard.json +878 -0
- data/examples/grafana-setup.md +340 -0
- data/examples/prometheus-exporter.rb +229 -0
- data/lib/vectra/batch.rb +63 -8
- data/lib/vectra/client.rb +188 -1
- data/lib/vectra/configuration.rb +4 -2
- data/lib/vectra/credential_rotation.rb +2 -3
- data/lib/vectra/providers/base.rb +19 -1
- data/lib/vectra/providers/memory.rb +298 -0
- data/lib/vectra/providers/qdrant.rb +31 -0
- data/lib/vectra/providers/weaviate.rb +454 -10
- data/lib/vectra/vector.rb +56 -0
- data/lib/vectra/version.rb +1 -1
- data/lib/vectra.rb +20 -0
- data/vectra.gemspec +56 -0
- metadata +12 -1

data/docs/examples/real-world.md (new file):

@@ -0,0 +1,576 @@
---
layout: page
title: Real-World Examples
permalink: /examples/real-world/
---

# Real-World Examples

Production-ready examples demonstrating Vectra in real-world scenarios.

## E-Commerce Product Search

Semantic product search with filtering, caching, and performance optimization.

```ruby
require "vectra"

class ProductSearchService
  def initialize
    @client = Vectra.pinecone(
      api_key: ENV["PINECONE_API_KEY"],
      environment: "us-east-1"
    )

    # Performance optimizations
    @cache = Vectra::Cache.new(ttl: 600, max_size: 5000)
    @cached_client = Vectra::CachedClient.new(@client, cache: @cache)

    # Resilience patterns
    @rate_limiter = Vectra::RateLimiter.new(requests_per_second: 100)
    @circuit_breaker = Vectra::CircuitBreaker.new(
      name: "product-search",
      failure_threshold: 5,
      recovery_timeout: 60
    )
  end

  def search(query:, category: nil, price_range: nil, limit: 20)
    query_embedding = generate_embedding(query)
    # Normalize for better cosine similarity
    query_embedding = Vectra::Vector.normalize(query_embedding)

    filter = {}
    filter[:category] = category if category
    filter[:price_min] = price_range[:min] if price_range&.dig(:min)
    filter[:price_max] = price_range[:max] if price_range&.dig(:max)

    @rate_limiter.acquire do
      @circuit_breaker.call do
        @cached_client.query(
          index: "products",
          vector: query_embedding,
          top_k: limit,
          filter: filter,
          include_metadata: true
        )
      end
    end
  rescue Vectra::RateLimitError => e
    # Handle rate limiting gracefully
    Rails.logger.warn("Rate limit hit: #{e.retry_after}s")
    sleep(e.retry_after || 1)
    retry
  rescue Vectra::CircuitBreakerOpenError
    # Fallback to cached results or alternative search
    fallback_search(query, category)
  end

  private

  def generate_embedding(text)
    # Use your embedding model (OpenAI, sentence-transformers, etc.)
    embedding = OpenAI::Client.new.embeddings(
      parameters: { model: "text-embedding-ada-002", input: text }
    )["data"][0]["embedding"]

    # Normalize embeddings before storing
    Vectra::Vector.normalize(embedding)
  end

  def fallback_search(query, category)
    # Fallback to database search or cached results
    Product.where("name ILIKE ?", "%#{query}%")
           .where(category: category)
           .limit(20)
  end
end

# Usage
service = ProductSearchService.new
results = service.search(
  query: "wireless headphones with noise cancellation",
  category: "Electronics",
  price_range: { min: 50, max: 200 }
)

results.each do |product|
  puts "#{product.metadata[:name]} - $#{product.metadata[:price]}"
end
```
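
Because every search goes through `Vectra::CachedClient`, a repeated identical query should be answered from the in-process cache instead of hitting Pinecone. A quick local sanity check, as a sketch (it assumes the cache keys on the full set of query arguments, which the example above does not spell out):

```ruby
require "benchmark"

service = ProductSearchService.new

# First call goes to the provider and populates the cache.
cold = Benchmark.realtime do
  service.search(query: "wireless headphones with noise cancellation")
end

# Second identical call should be served by Vectra::Cache (ttl: 600).
warm = Benchmark.realtime do
  service.search(query: "wireless headphones with noise cancellation")
end

puts format("cold: %.4fs, warm: %.4fs", cold, warm)
```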

## RAG Chatbot with Streaming

Retrieval-Augmented Generation chatbot with streaming responses and error handling.

```ruby
require "vectra"

class RAGChatbot
  def initialize
    @vectra_client = Vectra.qdrant(
      host: ENV["QDRANT_HOST"],
      api_key: ENV["QDRANT_API_KEY"]
    )

    @llm_client = OpenAI::Client.new
    @streaming = Vectra::Streaming.new(@vectra_client)

    # Monitoring
    Vectra::Instrumentation::Sentry.setup! if defined?(Sentry)
    Vectra::Logging.setup!(output: "log/chatbot.json.log")
  end

  def chat(user_message:, conversation_id:, &block)
    # 1. Retrieve relevant context
    context = retrieve_context(user_message, limit: 5)

    # 2. Build prompt with context
    prompt = build_prompt(user_message, context)

    # 3. Stream LLM response
    stream_llm_response(prompt, conversation_id, &block)

    # 4. Log interaction
    log_interaction(user_message, context, conversation_id)
  end

  private

  def retrieve_context(query, limit:)
    query_embedding = generate_embedding(query)

    results = @streaming.query_each(
      index: "knowledge_base",
      vector: query_embedding,
      top_k: limit,
      batch_size: 10,
      include_metadata: true
    ) do |batch|
      # Process in batches for memory efficiency
      batch
    end

    results.map { |r| r.metadata[:content] }.join("\n\n")
  end

  def build_prompt(user_message, context)
    <<~PROMPT
      You are a helpful assistant. Use the following context to answer the question.

      Context:
      #{context}

      Question: #{user_message}

      Answer:
    PROMPT
  end

  def stream_llm_response(prompt, conversation_id, &block)
    @llm_client.chat(
      parameters: {
        model: "gpt-4",
        messages: [{ role: "user", content: prompt }],
        stream: proc { |chunk, _bytesize|
          block.call(chunk) if block
        }
      }
    )
  end

  def log_interaction(user_message, context, conversation_id)
    Vectra::Logging.log_info(
      "Chat interaction",
      conversation_id: conversation_id,
      query_length: user_message.length,
      context_snippets: context.split("\n\n").size
    )
  end

  def generate_embedding(text)
    # Embedding generation
    @llm_client.embeddings(
      parameters: { model: "text-embedding-ada-002", input: text }
    )["data"][0]["embedding"]
  end
end

# Usage with streaming
chatbot = RAGChatbot.new

chatbot.chat(
  user_message: "How do I implement authentication in Rails?",
  conversation_id: "conv-123"
) do |chunk|
  print chunk.dig("choices", 0, "delta", "content")
end
```
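
If you also need the complete answer as a string (for persistence, moderation, or follow-up prompts), accumulate the streamed deltas inside the block. A small sketch reusing the `chatbot` above and plain Ruby:

```ruby
answer = +"" # mutable string buffer

chatbot.chat(
  user_message: "How do I implement authentication in Rails?",
  conversation_id: "conv-123"
) do |chunk|
  delta = chunk.dig("choices", 0, "delta", "content")
  next unless delta

  print delta     # stream to the terminal as before
  answer << delta # keep the full text for later
end

puts "\nFull answer was #{answer.length} characters"
```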

## Multi-Tenant SaaS Application

SaaS application with tenant isolation, audit logging, and health monitoring.

```ruby
require "vectra"

class TenantDocumentService
  def initialize(tenant_id:)
    @tenant_id = tenant_id
    @client = Vectra.pgvector(
      connection_url: ENV["DATABASE_URL"],
      pool_size: 10
    )

    # Audit logging for compliance
    @audit = Vectra::AuditLog.new(
      output: "log/audit.json.log",
      enabled: true
    )

    # Health monitoring
    @health_checker = Vectra::AggregateHealthCheck.new(
      primary: @client
    )
  end

  def index_document(document_id:, content:, metadata: {})
    embedding = generate_embedding(content)

    result = @client.upsert(
      index: "documents",
      vectors: [{
        id: document_id,
        values: embedding,
        metadata: metadata.merge(tenant_id: @tenant_id)
      }],
      namespace: "tenant-#{@tenant_id}"
    )

    # Audit log
    @audit.log_data_modification(
      user_id: current_user&.id,
      operation: "upsert",
      index: "documents",
      record_count: 1
    )

    result
  end

  def search_documents(query:, limit: 20)
    query_embedding = generate_embedding(query)

    # Ensure tenant isolation via namespace
    results = @client
              .query("documents")
              .vector(query_embedding)
              .top_k(limit)
              .namespace("tenant-#{@tenant_id}")
              .filter(tenant_id: @tenant_id) # Double protection
              .with_metadata
              .execute

    # Audit log
    @audit.log_access(
      user_id: current_user&.id,
      operation: "query",
      index: "documents",
      result_count: results.size
    )

    results
  end

  def health_status
    @health_checker.check_all
  end

  private

  def generate_embedding(text)
    # Your embedding generation
  end
end

# Usage per tenant
tenant_service = TenantDocumentService.new(tenant_id: "acme-corp")

# Index document (isolated to tenant)
tenant_service.index_document(
  document_id: "doc-123",
  content: "Important business document...",
  metadata: { title: "Q4 Report", category: "Finance" }
)

# Search (only returns tenant's documents)
results = tenant_service.search_documents(query: "financial report")

# Health check
health = tenant_service.health_status
puts "System healthy: #{health[:overall_healthy]}"
```
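
Creating a `TenantDocumentService` per request also creates a fresh pgvector connection pool per request. One way to amortize that is to memoize one service per tenant; the registry below is a sketch (the `TenantServiceRegistry` class and `for_tenant` helper are illustrative, not part of Vectra):

```ruby
class TenantServiceRegistry
  @services = {}
  @mutex = Mutex.new

  class << self
    # Memoizes one TenantDocumentService (and thus one connection pool)
    # per tenant, guarded by a mutex for thread safety.
    def for_tenant(tenant_id)
      @mutex.synchronize do
        @services[tenant_id] ||= TenantDocumentService.new(tenant_id: tenant_id)
      end
    end
  end
end

# Usage
service = TenantServiceRegistry.for_tenant("acme-corp")
service.search_documents(query: "financial report")
```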

## High-Performance Batch Processing

Processing large datasets with async batch operations and progress tracking.

```ruby
require "vectra"

class DocumentIndexer
  def initialize
    @client = Vectra.pinecone(
      api_key: ENV["PINECONE_API_KEY"],
      environment: "us-east-1"
    )
  end

  def index_large_dataset(documents, concurrency: 4)
    total = documents.size

    # Create a batch client with the specified concurrency
    batch_client = Vectra::Batch.new(@client, concurrency: concurrency)

    # Convert documents to vectors
    vectors = documents.map do |doc|
      {
        id: doc[:id],
        values: generate_embedding(doc[:content]),
        metadata: doc[:metadata]
      }
    end

    # Process in async batches with progress tracking
    result = batch_client.upsert_async(
      index: "documents",
      vectors: vectors,
      chunk_size: 100,
      on_progress: proc { |stats|
        chunk = stats[:current_chunk] + 1

        puts "Progress: #{stats[:percentage]}% (#{stats[:processed]}/#{stats[:total]})"
        puts "  Chunk #{chunk}/#{stats[:total_chunks]} | Success: #{stats[:success_count]}, Failed: #{stats[:failed_count]}"
      }
    )

    {
      success: result[:upserted_count],
      failed: result[:errors].size,
      errors: result[:errors],
      total: total
    }
  end

  private

  def generate_embedding(text)
    # Embedding generation
  end
end

# Usage
indexer = DocumentIndexer.new

# Index 10,000 documents with 4 concurrent workers
result = indexer.index_large_dataset(
  large_document_array,
  concurrency: 4
)

puts "Indexed: #{result[:success]}"
puts "Failed: #{result[:failed]}"
puts "Errors: #{result[:errors].size}"
```
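
Mapping every document to a vector up front holds all embeddings in memory at once. For datasets that do not fit comfortably, you can embed and upsert slice by slice; a sketch of an alternative `DocumentIndexer` method (it assumes `upsert_async` can be called repeatedly against the same index, which the example above neither confirms nor denies):

```ruby
def index_in_slices(documents, slice_size: 1_000)
  batch_client = Vectra::Batch.new(@client, concurrency: 4)
  totals = { success: 0, errors: [] }

  # Embed and upsert one slice at a time to bound peak memory use.
  documents.each_slice(slice_size) do |slice|
    vectors = slice.map do |doc|
      { id: doc[:id], values: generate_embedding(doc[:content]), metadata: doc[:metadata] }
    end

    result = batch_client.upsert_async(index: "documents", vectors: vectors, chunk_size: 100)
    totals[:success] += result[:upserted_count]
    totals[:errors].concat(result[:errors])
  end

  totals
end
```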

## Production-Ready Configuration

Complete production setup with all monitoring, resilience, and performance features.

```ruby
# config/initializers/vectra.rb
require "vectra"

Vectra.configure do |config|
  config.provider = :pinecone
  config.api_key = Rails.application.credentials.dig(:vectra, :api_key)
  config.environment = ENV["PINECONE_ENVIRONMENT"] || "us-east-1"

  # Performance
  config.cache_enabled = true
  config.cache_ttl = 600
  config.cache_max_size = 10000
  config.async_concurrency = 4
  config.batch_size = 100

  # Resilience
  config.max_retries = 3
  config.retry_delay = 1
  config.timeout = 30

  # Logging
  config.logger = Rails.logger
end

# Set up error monitoring
if defined?(Sentry)
  Vectra::Instrumentation::Sentry.setup!
end

if defined?(Honeybadger)
  Vectra::Instrumentation::Honeybadger.setup!
end

# Set up structured logging
Vectra::Logging.setup!(
  output: Rails.root.join("log", "vectra.json.log"),
  app: Rails.application.class.module_parent_name,
  env: Rails.env
)

# Set up audit logging
Vectra::AuditLogging.setup!(
  output: Rails.root.join("log", "audit.json.log"),
  enabled: Rails.env.production?,
  app: Rails.application.class.module_parent_name,
  env: Rails.env
)

# Global rate limiter
$vectra_rate_limiter = Vectra::RateLimiter.new(
  requests_per_second: ENV.fetch("VECTRA_RATE_LIMIT", 100).to_i,
  burst_size: ENV.fetch("VECTRA_BURST_SIZE", 200).to_i
)

# Global circuit breaker
$vectra_circuit_breaker = Vectra::CircuitBreaker.new(
  name: "vectra-main",
  failure_threshold: 5,
  recovery_timeout: 60
)

# Application helper
module VectraHelper
  def vectra_client
    @vectra_client ||= begin
      client = Vectra::Client.new
      Vectra::CachedClient.new(client)
    end
  end

  def safe_vectra_query(**args)
    $vectra_rate_limiter.acquire do
      $vectra_circuit_breaker.call do
        vectra_client.query(**args)
      end
    end
  rescue Vectra::CircuitBreakerOpenError
    # Fallback logic
    Rails.logger.error("Circuit breaker open, using fallback")
    fallback_search(args)
  rescue Vectra::RateLimitError => e
    Rails.logger.warn("Rate limit: #{e.retry_after}s")
    sleep(e.retry_after || 1)
    retry
  end
end
```
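
With the initializer in place, a controller can include `VectraHelper` so every search flows through the rate limiter and circuit breaker. A minimal sketch (the `SearchController` and its `fallback_search` body are illustrative, not part of the gem):

```ruby
class SearchController < ApplicationController
  include VectraHelper

  def show
    results = safe_vectra_query(
      index: "documents",
      vector: params.require(:vector).map(&:to_f),
      top_k: 10,
      include_metadata: true
    )

    render json: results
  end

  private

  # Invoked by safe_vectra_query when the circuit breaker is open.
  def fallback_search(_args)
    []
  end
end
```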

## Testing with the Memory Provider

For fast, deterministic tests you can run Vectra entirely in memory without any external services:

```ruby
# config/initializers/vectra.rb (test environment)
Vectra.configure do |config|
  config.provider = :memory if Rails.env.test?
end

RSpec.describe ProductSearchService do
  let(:client) { Vectra::Client.new } # uses memory provider in test

  before do
    client.provider.clear! if client.provider.respond_to?(:clear!)

    client.upsert(
      index: "products",
      vectors: [
        { id: "p1", values: [0.1, 0.2], metadata: { name: "Test Product" } }
      ]
    )
  end

  it "returns relevant products" do
    results = client.query(index: "products", vector: [0.1, 0.2], top_k: 5)
    expect(results.ids).to include("p1")
  end
end
```
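
The same in-memory setup can assert the tenant isolation shown in the multi-tenant example. A sketch, assuming the memory provider honors the `namespace:` argument the same way the pgvector example above does:

```ruby
RSpec.describe "tenant isolation" do
  let(:client) { Vectra::Client.new } # memory provider in test

  it "does not leak vectors across namespaces" do
    client.upsert(
      index: "documents",
      vectors: [{ id: "d1", values: [0.1, 0.2], metadata: { tenant_id: "acme" } }],
      namespace: "tenant-acme"
    )

    results = client
              .query("documents")
              .vector([0.1, 0.2])
              .top_k(5)
              .namespace("tenant-other")
              .execute

    expect(results.ids).to be_empty
  end
end
```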

## Best Practices

### 1. Always Use Caching for Frequent Queries

```ruby
cache = Vectra::Cache.new(ttl: 600, max_size: 10000)
cached_client = Vectra::CachedClient.new(client, cache: cache)
```

### 2. Implement Rate Limiting

```ruby
limiter = Vectra::RateLimiter.new(requests_per_second: 100)
limiter.acquire { client.query(...) }
```

### 3. Use Circuit Breaker for Resilience

```ruby
breaker = Vectra::CircuitBreaker.new(failure_threshold: 5)
breaker.call { client.query(...) }
```

### 4. Enable Monitoring

```ruby
Vectra::Instrumentation::Sentry.setup!
Vectra::Logging.setup!(output: "log/vectra.json.log")
```

### 5. Audit Critical Operations

```ruby
audit = Vectra::AuditLog.new(output: "log/audit.json.log")
audit.log_access(user_id: user.id, operation: "query", index: "docs")
```

### 6. Use Streaming for Large Queries

```ruby
streaming = Vectra::Streaming.new(client)
streaming.query_each(index: "docs", vector: vec, batch_size: 100) do |batch|
  process_batch(batch)
end
```

### 7. Health Checks in Production

```ruby
health = client.health_check(include_stats: true)
raise "Unhealthy" unless health.healthy?
```

## Next Steps

- [Comprehensive Demo](../examples/) - Full feature demonstration
- [Performance Guide](../guides/performance/) - Optimization strategies
- [Monitoring Guide](../guides/monitoring/) - Production monitoring
- [Security Guide](../guides/security/) - Security best practices

data/docs/grafana_final.png: binary file (contents not shown)

data/docs/guides/getting-started.md:

@@ -43,17 +43,49 @@ client.upsert(

### Query (Search)

```ruby
# Classic API
results = client.query(
  vector: [0.1, 0.2, 0.3],
  top_k: 5,
  include_metadata: true
)

results.each do |match|
  puts "ID: #{match.id}, Score: #{match.score}"
end

# Chainable Query Builder
results = client
  .query("my-index")
  .vector([0.1, 0.2, 0.3])
  .top_k(5)
  .with_metadata
  .execute

results.each do |match|
  puts "ID: #{match.id}, Score: #{match.score}"
end
```
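
The builder chains the other query options the same way; the `.namespace` and `.filter` steps below mirror the ones used elsewhere in these docs, with illustrative field values:

```ruby
results = client
  .query("my-index")
  .vector([0.1, 0.2, 0.3])
  .top_k(5)
  .namespace("tenant-acme")        # scope the search to one namespace
  .filter(category: "Electronics") # metadata filter
  .with_metadata
  .execute
```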

### Normalize Embeddings

For better cosine similarity results, normalize your embeddings before upserting:

```ruby
# Normalize OpenAI embeddings (recommended)
embedding = openai_response['data'][0]['embedding']
normalized = Vectra::Vector.normalize(embedding)
client.upsert(vectors: [{ id: 'doc-1', values: normalized }])

# Or normalize in-place
vector = Vectra::Vector.new(id: 'doc-1', values: embedding)
vector.normalize! # L2 normalization (default, unit vector)
client.upsert(vectors: [vector])

# L1 normalization (sum of absolute values = 1)
vector.normalize!(type: :l1)
```
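
L2 normalization divides each component by the vector's Euclidean length, so the result is a unit vector and the dot product of two normalized vectors equals their cosine similarity. The same computation in plain Ruby, for reference:

```ruby
values = [3.0, 4.0]

l2_norm = Math.sqrt(values.sum { |v| v * v }) # => 5.0
unit    = values.map { |v| v / l2_norm }      # => [0.6, 0.8]

Math.sqrt(unit.sum { |v| v * v })             # => 1.0 (unit length)
```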

### Delete Vectors

```ruby

@@ -68,6 +100,42 @@ puts "Index dimension: #{stats['dimension']}"
puts "Vector count: #{stats['vector_count']}"
```

### Health Check & Ping

```ruby
# Quick health check
if client.healthy?
  client.upsert(...)
else
  handle_unhealthy_connection
end

# Ping with latency measurement
status = client.ping
puts "Provider: #{status[:provider]}"
puts "Healthy: #{status[:healthy]}"
puts "Latency: #{status[:latency_ms]}ms"

if status[:error]
  puts "Error: #{status[:error_message]}"
end
```
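
`ping` maps naturally onto a load-balancer health endpoint in Rails; a minimal sketch (the route and controller are illustrative, not part of the gem):

```ruby
# config/routes.rb: get "/healthz", to: "health#show"
class HealthController < ActionController::API
  def show
    status = Vectra::Client.new.ping

    if status[:healthy]
      render json: { ok: true, latency_ms: status[:latency_ms] }
    else
      render json: { ok: false, error: status[:error_message] },
             status: :service_unavailable
    end
  end
end
```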

### Dimension Validation

Vectra automatically validates that all vectors in a batch have the same dimension:

```ruby
# This will raise ValidationError
vectors = [
  { id: "vec1", values: [0.1, 0.2, 0.3] }, # 3 dimensions
  { id: "vec2", values: [0.4, 0.5] }       # 2 dimensions - ERROR!
]

client.upsert(vectors: vectors)
# => ValidationError: Inconsistent vector dimensions at index 1: expected 3, got 2
```
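
If a batch is assembled from heterogeneous sources, you can partition out mismatched vectors ahead of time instead of rescuing the error; plain Ruby around the same `upsert` call:

```ruby
expected = vectors.first[:values].size
good, bad = vectors.partition { |v| v[:values].size == expected }

warn "Skipping #{bad.size} vector(s) with unexpected dimension" if bad.any?
client.upsert(vectors: good)
```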

## Configuration

Create a configuration file (Rails: `config/initializers/vectra.rb`):
|