dspy 0.5.0 → 0.6.0
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +1 -0
- data/lib/dspy/code_act.rb +463 -0
- data/lib/dspy/instrumentation.rb +15 -0
- data/lib/dspy/lm/adapters/anthropic_adapter.rb +106 -0
- data/lib/dspy/lm.rb +28 -26
- data/lib/dspy/memory/embedding_engine.rb +68 -0
- data/lib/dspy/memory/in_memory_store.rb +216 -0
- data/lib/dspy/memory/local_embedding_engine.rb +241 -0
- data/lib/dspy/memory/memory_compactor.rb +299 -0
- data/lib/dspy/memory/memory_manager.rb +248 -0
- data/lib/dspy/memory/memory_record.rb +163 -0
- data/lib/dspy/memory/memory_store.rb +90 -0
- data/lib/dspy/memory.rb +30 -0
- data/lib/dspy/mixins/instrumentation_helpers.rb +12 -28
- data/lib/dspy/mixins/type_coercion.rb +3 -0
- data/lib/dspy/prompt.rb +48 -1
- data/lib/dspy/subscribers/logger_subscriber.rb +91 -1
- data/lib/dspy/tools/base.rb +1 -1
- data/lib/dspy/tools/memory_toolset.rb +117 -0
- data/lib/dspy/tools/text_processing_toolset.rb +186 -0
- data/lib/dspy/tools/toolset.rb +223 -0
- data/lib/dspy/tools.rb +1 -0
- data/lib/dspy/version.rb +1 -1
- data/lib/dspy.rb +2 -2
- metadata +28 -2
data/lib/dspy/lm.rb
CHANGED
````diff
@@ -39,15 +39,12 @@ module DSPy
       input_text = messages.map { |m| m[:content] }.join(' ')
       input_size = input_text.length
 
-      # ...
-      trace_level = DSPy.config.instrumentation.trace_level
-
-      # Extract token usage and prepare consolidated payload
+      # Use smart consolidation: emit LM events only when not in nested context
       response = nil
       token_usage = {}
 
-      if should_emit_lm_events?
-        # ...
+      if should_emit_lm_events?
+        # Emit all LM events when not in nested context
         response = Instrumentation.instrument('dspy.lm.request', {
           gen_ai_operation_name: 'chat',
           gen_ai_system: provider,
@@ -92,19 +89,10 @@ module DSPy
 
     private
 
-    # Determines if LM-level events should be emitted
-    def should_emit_lm_events?
-      case DSPy.config.instrumentation.trace_level
-      when :minimal
-        false # Never emit LM events in minimal mode
-      when :standard
-        # In standard mode, emit LM events only if we're not in a nested context
-        !is_nested_context?
-      when :detailed
-        true # Always emit LM events in detailed mode
-      else
-        true
-      end
+    # Determines if LM-level events should be emitted using smart consolidation
+    def should_emit_lm_events?
+      # Emit LM events only if we're not in a nested context (smart consolidation)
+      !is_nested_context?
     end
 
     # Determines if we're in a nested context where higher-level events are being emitted
@@ -151,11 +139,16 @@ module DSPy
       # Try to parse the response as JSON
       content = response.content
 
-      # Extract JSON if it's in a code block
-      if content.include?('```json')
-        content = content.split('```json').last.split('```').first.strip
-      elsif content.include?('```')
-        content = content.split('```').last.split('```').first.strip
+      # Let adapters handle their own extraction logic if available
+      if adapter && adapter.respond_to?(:extract_json_from_response, true)
+        content = adapter.send(:extract_json_from_response, content)
+      else
+        # Fallback: Extract JSON if it's in a code block (legacy behavior)
+        if content.include?('```json')
+          content = content.split('```json').last.split('```').first.strip
+        elsif content.include?('```')
+          content = content.split('```').last.split('```').first.strip
+        end
       end
 
       begin
@@ -164,8 +157,17 @@ module DSPy
       # For Sorbet signatures, just return the parsed JSON
       # The Predict will handle validation
       json_payload
-    rescue JSON::ParserError
-      raise "Failed to parse LLM response as JSON"
+    rescue JSON::ParserError => e
+      # Enhanced error message with debugging information
+      error_details = {
+        original_content: response.content,
+        extracted_content: content,
+        provider: provider,
+        model: model
+      }
+
+      DSPy.logger.debug("JSON parsing failed: #{error_details}")
+      raise "Failed to parse LLM response as JSON: #{e.message}. Original content length: #{response.content&.length || 0} chars"
     end
   end
 end
````
data/lib/dspy/memory/embedding_engine.rb
ADDED
```ruby
# frozen_string_literal: true

require 'sorbet-runtime'

module DSPy
  module Memory
    # Abstract base class for embedding engines
    class EmbeddingEngine
      extend T::Sig
      extend T::Helpers
      abstract!

      # Generate embeddings for a single text
      sig { abstract.params(text: String).returns(T::Array[Float]) }
      def embed(text); end

      # Generate embeddings for multiple texts (batch processing)
      sig { abstract.params(texts: T::Array[String]).returns(T::Array[T::Array[Float]]) }
      def embed_batch(texts); end

      # Get the dimension of embeddings produced by this engine
      sig { abstract.returns(Integer) }
      def embedding_dimension; end

      # Get the model name/identifier
      sig { abstract.returns(String) }
      def model_name; end

      # Check if the engine is ready to use
      sig { returns(T::Boolean) }
      def ready?
        true
      end

      # Get engine statistics
      sig { returns(T::Hash[Symbol, T.untyped]) }
      def stats
        {
          model_name: model_name,
          embedding_dimension: embedding_dimension,
          ready: ready?
        }
      end

      # Normalize a vector to unit length
      sig { params(vector: T::Array[Float]).returns(T::Array[Float]) }
      def normalize_vector(vector)
        magnitude = Math.sqrt(vector.sum { |x| x * x })
        return vector if magnitude == 0.0
        vector.map { |x| x / magnitude }
      end

      # Calculate cosine similarity between two vectors
      sig { params(a: T::Array[Float], b: T::Array[Float]).returns(Float) }
      def cosine_similarity(a, b)
        return 0.0 if a.empty? || b.empty? || a.size != b.size

        dot_product = a.zip(b).sum { |x, y| x * y }
        magnitude_a = Math.sqrt(a.sum { |x| x * x })
        magnitude_b = Math.sqrt(b.sum { |x| x * x })

        return 0.0 if magnitude_a == 0.0 || magnitude_b == 0.0

        dot_product / (magnitude_a * magnitude_b)
      end
    end
  end
end
```
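Because `EmbeddingEngine` is declared `abstract!` with Sorbet, a concrete engine only has to supply the four abstract methods; `stats`, `normalize_vector`, and `cosine_similarity` are inherited. A toy subclass to illustrate the contract (the constant-vector behavior is made up for this example, not part of the gem):

```ruby
require 'sorbet-runtime'

# Toy engine: maps every text to the same unit vector.
class ConstantEmbeddingEngine < DSPy::Memory::EmbeddingEngine
  extend T::Sig

  sig { override.params(text: String).returns(T::Array[Float]) }
  def embed(text)
    normalize_vector(Array.new(embedding_dimension, 1.0))
  end

  sig { override.params(texts: T::Array[String]).returns(T::Array[T::Array[Float]]) }
  def embed_batch(texts)
    texts.map { |t| embed(t) }
  end

  sig { override.returns(Integer) }
  def embedding_dimension
    8
  end

  sig { override.returns(String) }
  def model_name
    'constant-test'
  end
end

engine = ConstantEmbeddingEngine.new
engine.stats
# => { model_name: "constant-test", embedding_dimension: 8, ready: true }
engine.cosine_similarity(engine.embed('a'), engine.embed('b')) # => ~1.0
```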
data/lib/dspy/memory/in_memory_store.rb
ADDED
```ruby
# frozen_string_literal: true

require 'sorbet-runtime'
require_relative 'memory_store'

module DSPy
  module Memory
    # In-memory implementation of MemoryStore for development and testing
    class InMemoryStore < MemoryStore
      extend T::Sig

      sig { void }
      def initialize
        @memories = T.let({}, T::Hash[String, MemoryRecord])
        @mutex = T.let(Mutex.new, Mutex)
      end

      sig { override.params(record: MemoryRecord).returns(T::Boolean) }
      def store(record)
        @mutex.synchronize do
          @memories[record.id] = record
          true
        end
      end

      sig { override.params(id: String).returns(T.nilable(MemoryRecord)) }
      def retrieve(id)
        @mutex.synchronize do
          record = @memories[id]
          record&.record_access!
          record
        end
      end

      sig { override.params(record: MemoryRecord).returns(T::Boolean) }
      def update(record)
        @mutex.synchronize do
          if @memories.key?(record.id)
            @memories[record.id] = record
            true
          else
            false
          end
        end
      end

      sig { override.params(id: String).returns(T::Boolean) }
      def delete(id)
        @mutex.synchronize do
          @memories.delete(id) ? true : false
        end
      end

      sig { override.params(user_id: T.nilable(String), limit: T.nilable(Integer), offset: T.nilable(Integer)).returns(T::Array[MemoryRecord]) }
      def list(user_id: nil, limit: nil, offset: nil)
        @mutex.synchronize do
          records = @memories.values

          # Filter by user_id if provided
          records = records.select { |r| r.user_id == user_id } if user_id

          # Sort by created_at (newest first)
          records = records.sort_by(&:created_at).reverse

          # Apply offset and limit
          records = records.drop(offset) if offset
          records = records.take(limit) if limit

          records
        end
      end

      sig { override.params(query: String, user_id: T.nilable(String), limit: T.nilable(Integer)).returns(T::Array[MemoryRecord]) }
      def search(query, user_id: nil, limit: nil)
        @mutex.synchronize do
          regex = Regexp.new(Regexp.escape(query), Regexp::IGNORECASE)

          records = @memories.values.select do |record|
            # Filter by user_id if provided
            next false if user_id && record.user_id != user_id

            # Search in content and tags
            record.content.match?(regex) || record.tags.any? { |tag| tag.match?(regex) }
          end

          # Sort by relevance (exact matches first, then by recency)
          records = records.sort_by do |record|
            exact_match = record.content.downcase.include?(query.downcase) ? 0 : 1
            [exact_match, -record.created_at.to_f]
          end

          records = records.take(limit) if limit
          records
        end
      end

      sig { override.params(tags: T::Array[String], user_id: T.nilable(String), limit: T.nilable(Integer)).returns(T::Array[MemoryRecord]) }
      def search_by_tags(tags, user_id: nil, limit: nil)
        @mutex.synchronize do
          records = @memories.values.select do |record|
            # Filter by user_id if provided
            next false if user_id && record.user_id != user_id

            # Check if record has any of the specified tags
            tags.any? { |tag| record.has_tag?(tag) }
          end

          # Sort by number of matching tags, then by recency
          records = records.sort_by do |record|
            matching_tags = tags.count { |tag| record.has_tag?(tag) }
            [-matching_tags, -record.created_at.to_f]
          end

          records = records.take(limit) if limit
          records
        end
      end

      sig { override.params(embedding: T::Array[Float], user_id: T.nilable(String), limit: T.nilable(Integer), threshold: T.nilable(Float)).returns(T::Array[MemoryRecord]) }
      def vector_search(embedding, user_id: nil, limit: nil, threshold: nil)
        @mutex.synchronize do
          records_with_similarity = []

          @memories.values.each do |record|
            # Filter by user_id if provided
            next if user_id && record.user_id != user_id

            # Skip records without embeddings
            next unless record.embedding

            # Calculate cosine similarity
            similarity = cosine_similarity(embedding, record.embedding)

            # Apply threshold if provided
            next if threshold && similarity < threshold

            records_with_similarity << [record, similarity]
          end

          # Sort by similarity (highest first)
          records_with_similarity.sort_by! { |_, similarity| -similarity }

          # Apply limit
          records_with_similarity = records_with_similarity.take(limit) if limit

          # Return just the records
          records_with_similarity.map(&:first)
        end
      end

      sig { override.params(user_id: T.nilable(String)).returns(Integer) }
      def count(user_id: nil)
        @mutex.synchronize do
          if user_id
            @memories.values.count { |record| record.user_id == user_id }
          else
            @memories.size
          end
        end
      end

      sig { override.params(user_id: T.nilable(String)).returns(Integer) }
      def clear(user_id: nil)
        @mutex.synchronize do
          if user_id
            count = @memories.values.count { |record| record.user_id == user_id }
            @memories.reject! { |_, record| record.user_id == user_id }
            count
          else
            count = @memories.size
            @memories.clear
            count
          end
        end
      end

      sig { override.returns(T::Boolean) }
      def supports_vector_search?
        true
      end

      sig { override.returns(T::Hash[Symbol, T.untyped]) }
      def stats
        @mutex.synchronize do
          total = @memories.size
          with_embeddings = @memories.values.count(&:embedding)
          users = @memories.values.map(&:user_id).compact.uniq.size

          {
            total_memories: total,
            memories_with_embeddings: with_embeddings,
            unique_users: users,
            supports_vector_search: supports_vector_search?,
            avg_access_count: total > 0 ? @memories.values.sum(&:access_count) / total.to_f : 0
          }
        end
      end

      private

      # Calculate cosine similarity between two vectors
      sig { params(a: T::Array[Float], b: T::Array[Float]).returns(Float) }
      def cosine_similarity(a, b)
        return 0.0 if a.empty? || b.empty? || a.size != b.size

        dot_product = a.zip(b).sum { |x, y| x * y }
        magnitude_a = Math.sqrt(a.sum { |x| x * x })
        magnitude_b = Math.sqrt(b.sum { |x| x * x })

        return 0.0 if magnitude_a == 0.0 || magnitude_b == 0.0

        dot_product / (magnitude_a * magnitude_b)
      end
    end
  end
end
```
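A quick sketch of exercising the store. `MemoryRecord` is defined in `memory_record.rb` (added in this release but not shown here), so the keyword arguments below are assumptions based on the attributes the store reads (`id`, `user_id`, `content`, `tags`, `embedding`, `created_at`):

```ruby
store = DSPy::Memory::InMemoryStore.new

# Assumed constructor; see data/lib/dspy/memory/memory_record.rb for the
# real signature.
record = DSPy::Memory::MemoryRecord.new(
  content: 'Prefers Ruby for quick prototypes',
  user_id: 'user-1',
  tags: ['preference', 'ruby']
)

store.store(record)
store.search('ruby', user_id: 'user-1')  # case-insensitive match on content/tags
store.search_by_tags(['preference'])     # ranked by number of matching tags
store.count(user_id: 'user-1')           # => 1
```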
data/lib/dspy/memory/local_embedding_engine.rb
ADDED
```ruby
# frozen_string_literal: true

require 'sorbet-runtime'

require 'informers'

require_relative 'embedding_engine'

module DSPy
  module Memory
    # Local embedding engine using ankane/informers for privacy-preserving embeddings
    class LocalEmbeddingEngine < EmbeddingEngine
      extend T::Sig

      # Default models supported by informers
      DEFAULT_MODEL = 'Xenova/all-MiniLM-L6-v2'
      SUPPORTED_MODELS = [
        'Xenova/all-MiniLM-L6-v2',
        'Xenova/all-MiniLM-L12-v2',
        'Xenova/multi-qa-MiniLM-L6-cos-v1',
        'Xenova/paraphrase-MiniLM-L6-v2'
      ].freeze

      sig { returns(String) }
      attr_reader :model_name

      sig { params(model_name: String).void }
      def initialize(model_name = DEFAULT_MODEL)
        @model_name = model_name
        @model = T.let(nil, T.nilable(T.untyped))
        @embedding_dim = T.let(nil, T.nilable(Integer))
        @ready = T.let(false, T::Boolean)

        load_model!
      end

      sig { override.params(text: String).returns(T::Array[Float]) }
      def embed(text)
        ensure_ready!

        # Preprocess text
        cleaned_text = preprocess_text(text)

        # Generate embedding
        result = @model.call(cleaned_text)

        # Extract embedding array and normalize
        embedding = result.first.to_a
        normalize_vector(embedding)
      end

      sig { override.params(texts: T::Array[String]).returns(T::Array[T::Array[Float]]) }
      def embed_batch(texts)
        ensure_ready!

        # Preprocess all texts
        cleaned_texts = texts.map { |text| preprocess_text(text) }

        # Generate embeddings in batch
        results = @model.call(cleaned_texts)

        # Extract and normalize embeddings
        results.map do |result|
          # Handle both single embeddings and batch results
          embedding = case result
                      when Array
                        result.flatten # Flatten in case of nested arrays
                      else
                        result.to_a.flatten
                      end
          normalize_vector(embedding)
        end
      end

      sig { override.returns(Integer) }
      def embedding_dimension
        @embedding_dim || load_model_info!
      end

      sig { override.returns(String) }
      def model_name
        @model_name
      end

      sig { override.returns(T::Boolean) }
      def ready?
        @ready
      end

      sig { override.returns(T::Hash[Symbol, T.untyped]) }
      def stats
        {
          model_name: @model_name,
          embedding_dimension: embedding_dimension,
          ready: ready?,
          supported_models: SUPPORTED_MODELS,
          backend: 'informers'
        }
      end

      # Check if a model is supported
      sig { params(model_name: String).returns(T::Boolean) }
      def self.model_supported?(model_name)
        SUPPORTED_MODELS.include?(model_name)
      end

      # List all supported models
      sig { returns(T::Array[String]) }
      def self.supported_models
        SUPPORTED_MODELS
      end

      private

      # Load the embedding model
      sig { void }
      def load_model!
        begin
          @model = Informers.pipeline('feature-extraction', @model_name)
          @ready = true
          load_model_info!
        rescue => e
          @ready = false
          raise "Failed to load embedding model '#{@model_name}': #{e.message}"
        end
      end

      # Load model information (dimension, etc.)
      sig { returns(Integer) }
      def load_model_info!
        return @embedding_dim if @embedding_dim

        # Test with a simple string to get dimension
        test_result = @model.call("test")
        @embedding_dim = test_result.first.size
      end

      # Ensure the model is ready
      sig { void }
      def ensure_ready!
        unless @ready
          raise "Embedding engine not ready. Model '#{@model_name}' failed to load."
        end
      end

      # Preprocess text for better embeddings
      sig { params(text: String).returns(String) }
      def preprocess_text(text)
        # Basic text preprocessing
        cleaned = text.strip

        # Remove excessive whitespace
        cleaned = cleaned.gsub(/\s+/, ' ')

        # Truncate if too long (most models have token limits)
        if cleaned.length > 8192 # Conservative limit
          cleaned = cleaned[0..8191]
        end

        cleaned
      end
    end

    # Fallback embedding engine when informers is not available
    class NoOpEmbeddingEngine < EmbeddingEngine
      extend T::Sig

      sig { override.params(text: String).returns(T::Array[Float]) }
      def embed(text)
        # Return a simple hash-based embedding for basic functionality
        simple_hash_embedding(text)
      end

      sig { override.params(texts: T::Array[String]).returns(T::Array[T::Array[Float]]) }
      def embed_batch(texts)
        texts.map { |text| embed(text) }
      end

      sig { override.returns(Integer) }
      def embedding_dimension
        128 # Fixed dimension for hash-based embeddings
      end

      sig { override.returns(String) }
      def model_name
        'simple-hash'
      end

      sig { override.returns(T::Boolean) }
      def ready?
        true
      end

      private

      # Generate a simple hash-based embedding that captures semantic similarity
      sig { params(text: String).returns(T::Array[Float]) }
      def simple_hash_embedding(text)
        # Create a deterministic but semantically aware embedding
        words = text.downcase.split(/\W+/).reject(&:empty?)

        # Initialize embedding vector
        embedding = Array.new(128, 0.0)

        # Create base embedding from all words
        words.each_with_index do |word, word_idx|
          word_hash = word.sum(&:ord)

          # Distribute word influence across dimensions
          (0..7).each do |i|
            dim = (word_hash + i * 13) % 128
            weight = Math.sin(word_hash + i) * 0.2
            embedding[dim] += weight / Math.sqrt(words.length + 1)
          end
        end

        # Add semantic clusters for common words
        semantic_clusters = {
          ['programming', 'code', 'software', 'development'] => (0..15),
          ['ruby', 'python', 'java', 'javascript'] => (16..31),
          ['work', 'project', 'task', 'job'] => (32..47),
          ['tutorial', 'guide', 'learning', 'education'] => (48..63),
          ['memory', 'storage', 'data', 'information'] => (64..79),
          ['personal', 'private', 'individual', 'own'] => (80..95),
          ['important', 'critical', 'key', 'essential'] => (96..111),
          ['test', 'testing', 'spec', 'example'] => (112..127)
        }

        semantic_clusters.each do |cluster_words, range|
          cluster_weight = words.count { |word| cluster_words.include?(word) }
          if cluster_weight > 0
            range.each { |dim| embedding[dim] += cluster_weight * 0.3 }
          end
        end

        # Normalize to unit vector
        normalize_vector(embedding)
      end
    end
  end
end
```
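To see the engine end to end: constructing `LocalEmbeddingEngine` eagerly loads the informers pipeline (downloading the model on first use), and the inherited `cosine_similarity` compares the resulting unit vectors. A small sketch; the similarity value is indicative, not a fixed output:

```ruby
engine = DSPy::Memory::LocalEmbeddingEngine.new  # defaults to Xenova/all-MiniLM-L6-v2

a = engine.embed('storing notes about a project')
b = engine.embed('saving project notes')

engine.embedding_dimension      # 384 for the default MiniLM model
engine.cosine_similarity(a, b)  # near 1.0 for closely related texts
```

When informers cannot be used, `NoOpEmbeddingEngine` offers the same interface with deterministic 128-dimensional hash-based vectors, which keeps `vector_search` functional in tests without any model download.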