htm 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.architecture/decisions/adrs/001-use-postgresql-timescaledb-storage.md +227 -0
- data/.architecture/decisions/adrs/002-two-tier-memory-architecture.md +322 -0
- data/.architecture/decisions/adrs/003-ollama-default-embedding-provider.md +339 -0
- data/.architecture/decisions/adrs/004-multi-robot-shared-memory-hive-mind.md +374 -0
- data/.architecture/decisions/adrs/005-rag-based-retrieval-with-hybrid-search.md +443 -0
- data/.architecture/decisions/adrs/006-context-assembly-strategies.md +444 -0
- data/.architecture/decisions/adrs/007-working-memory-eviction-strategy.md +461 -0
- data/.architecture/decisions/adrs/008-robot-identification-system.md +550 -0
- data/.architecture/decisions/adrs/009-never-forget-explicit-deletion-only.md +570 -0
- data/.architecture/decisions/adrs/010-redis-working-memory-rejected.md +323 -0
- data/.architecture/decisions/adrs/011-database-side-embedding-generation-with-pgai.md +585 -0
- data/.architecture/decisions/adrs/012-llm-driven-ontology-topic-extraction.md +583 -0
- data/.architecture/decisions/adrs/013-activerecord-orm-and-many-to-many-tagging.md +299 -0
- data/.architecture/decisions/adrs/014-client-side-embedding-generation-workflow.md +569 -0
- data/.architecture/decisions/adrs/015-hierarchical-tag-ontology-and-llm-extraction.md +701 -0
- data/.architecture/decisions/adrs/016-async-embedding-and-tag-generation.md +694 -0
- data/.architecture/members.yml +144 -0
- data/.architecture/reviews/2025-10-29-llm-configuration-and-async-processing-review.md +1137 -0
- data/.architecture/reviews/initial-system-analysis.md +330 -0
- data/.envrc +32 -0
- data/.irbrc +145 -0
- data/CHANGELOG.md +150 -0
- data/COMMITS.md +196 -0
- data/LICENSE +21 -0
- data/README.md +1347 -0
- data/Rakefile +51 -0
- data/SETUP.md +268 -0
- data/config/database.yml +67 -0
- data/db/migrate/20250101000001_enable_extensions.rb +14 -0
- data/db/migrate/20250101000002_create_robots.rb +14 -0
- data/db/migrate/20250101000003_create_nodes.rb +42 -0
- data/db/migrate/20250101000005_create_tags.rb +38 -0
- data/db/migrate/20250101000007_add_node_vector_indexes.rb +30 -0
- data/db/schema.sql +473 -0
- data/db/seed_data/README.md +100 -0
- data/db/seed_data/presidents.md +136 -0
- data/db/seed_data/states.md +151 -0
- data/db/seeds.rb +208 -0
- data/dbdoc/README.md +173 -0
- data/dbdoc/public.node_stats.md +48 -0
- data/dbdoc/public.node_stats.svg +41 -0
- data/dbdoc/public.node_tags.md +40 -0
- data/dbdoc/public.node_tags.svg +112 -0
- data/dbdoc/public.nodes.md +54 -0
- data/dbdoc/public.nodes.svg +118 -0
- data/dbdoc/public.nodes_tags.md +39 -0
- data/dbdoc/public.nodes_tags.svg +112 -0
- data/dbdoc/public.ontology_structure.md +48 -0
- data/dbdoc/public.ontology_structure.svg +38 -0
- data/dbdoc/public.operations_log.md +42 -0
- data/dbdoc/public.operations_log.svg +130 -0
- data/dbdoc/public.relationships.md +39 -0
- data/dbdoc/public.relationships.svg +41 -0
- data/dbdoc/public.robot_activity.md +46 -0
- data/dbdoc/public.robot_activity.svg +35 -0
- data/dbdoc/public.robots.md +35 -0
- data/dbdoc/public.robots.svg +90 -0
- data/dbdoc/public.schema_migrations.md +29 -0
- data/dbdoc/public.schema_migrations.svg +26 -0
- data/dbdoc/public.tags.md +35 -0
- data/dbdoc/public.tags.svg +60 -0
- data/dbdoc/public.topic_relationships.md +45 -0
- data/dbdoc/public.topic_relationships.svg +32 -0
- data/dbdoc/schema.json +1437 -0
- data/dbdoc/schema.svg +154 -0
- data/docs/api/database.md +806 -0
- data/docs/api/embedding-service.md +532 -0
- data/docs/api/htm.md +797 -0
- data/docs/api/index.md +259 -0
- data/docs/api/long-term-memory.md +1096 -0
- data/docs/api/working-memory.md +665 -0
- data/docs/architecture/adrs/001-postgresql-timescaledb.md +314 -0
- data/docs/architecture/adrs/002-two-tier-memory.md +411 -0
- data/docs/architecture/adrs/003-ollama-embeddings.md +421 -0
- data/docs/architecture/adrs/004-hive-mind.md +437 -0
- data/docs/architecture/adrs/005-rag-retrieval.md +531 -0
- data/docs/architecture/adrs/006-context-assembly.md +496 -0
- data/docs/architecture/adrs/007-eviction-strategy.md +645 -0
- data/docs/architecture/adrs/008-robot-identification.md +625 -0
- data/docs/architecture/adrs/009-never-forget.md +648 -0
- data/docs/architecture/adrs/010-redis-working-memory-rejected.md +323 -0
- data/docs/architecture/adrs/011-pgai-integration.md +494 -0
- data/docs/architecture/adrs/index.md +215 -0
- data/docs/architecture/hive-mind.md +736 -0
- data/docs/architecture/index.md +351 -0
- data/docs/architecture/overview.md +538 -0
- data/docs/architecture/two-tier-memory.md +873 -0
- data/docs/assets/css/custom.css +83 -0
- data/docs/assets/images/htm-core-components.svg +63 -0
- data/docs/assets/images/htm-database-schema.svg +93 -0
- data/docs/assets/images/htm-hive-mind-architecture.svg +125 -0
- data/docs/assets/images/htm-importance-scoring-framework.svg +83 -0
- data/docs/assets/images/htm-layered-architecture.svg +71 -0
- data/docs/assets/images/htm-long-term-memory-architecture.svg +115 -0
- data/docs/assets/images/htm-working-memory-architecture.svg +120 -0
- data/docs/assets/images/htm.jpg +0 -0
- data/docs/assets/images/htm_demo.gif +0 -0
- data/docs/assets/js/mathjax.js +18 -0
- data/docs/assets/videos/htm_video.mp4 +0 -0
- data/docs/database_rake_tasks.md +322 -0
- data/docs/development/contributing.md +787 -0
- data/docs/development/index.md +336 -0
- data/docs/development/schema.md +596 -0
- data/docs/development/setup.md +719 -0
- data/docs/development/testing.md +819 -0
- data/docs/guides/adding-memories.md +824 -0
- data/docs/guides/context-assembly.md +1009 -0
- data/docs/guides/getting-started.md +577 -0
- data/docs/guides/index.md +118 -0
- data/docs/guides/long-term-memory.md +941 -0
- data/docs/guides/multi-robot.md +866 -0
- data/docs/guides/recalling-memories.md +927 -0
- data/docs/guides/search-strategies.md +953 -0
- data/docs/guides/working-memory.md +717 -0
- data/docs/index.md +214 -0
- data/docs/installation.md +477 -0
- data/docs/multi_framework_support.md +519 -0
- data/docs/quick-start.md +655 -0
- data/docs/setup_local_database.md +302 -0
- data/docs/using_rake_tasks_in_your_app.md +383 -0
- data/examples/basic_usage.rb +93 -0
- data/examples/cli_app/README.md +317 -0
- data/examples/cli_app/htm_cli.rb +270 -0
- data/examples/custom_llm_configuration.rb +183 -0
- data/examples/example_app/Rakefile +71 -0
- data/examples/example_app/app.rb +206 -0
- data/examples/sinatra_app/Gemfile +21 -0
- data/examples/sinatra_app/app.rb +335 -0
- data/lib/htm/active_record_config.rb +113 -0
- data/lib/htm/configuration.rb +342 -0
- data/lib/htm/database.rb +594 -0
- data/lib/htm/embedding_service.rb +115 -0
- data/lib/htm/errors.rb +34 -0
- data/lib/htm/job_adapter.rb +154 -0
- data/lib/htm/jobs/generate_embedding_job.rb +65 -0
- data/lib/htm/jobs/generate_tags_job.rb +82 -0
- data/lib/htm/long_term_memory.rb +965 -0
- data/lib/htm/models/node.rb +109 -0
- data/lib/htm/models/node_tag.rb +33 -0
- data/lib/htm/models/robot.rb +52 -0
- data/lib/htm/models/tag.rb +76 -0
- data/lib/htm/railtie.rb +76 -0
- data/lib/htm/sinatra.rb +157 -0
- data/lib/htm/tag_service.rb +135 -0
- data/lib/htm/tasks.rb +38 -0
- data/lib/htm/version.rb +5 -0
- data/lib/htm/working_memory.rb +182 -0
- data/lib/htm.rb +400 -0
- data/lib/tasks/db.rake +19 -0
- data/lib/tasks/htm.rake +147 -0
- data/lib/tasks/jobs.rake +312 -0
- data/mkdocs.yml +190 -0
- data/scripts/install_local_database.sh +309 -0
- metadata +341 -0
data/lib/htm/long_term_memory.rb
@@ -0,0 +1,965 @@
# frozen_string_literal: true

require 'pg'
require 'json'
require 'lru_redux'
require 'digest'

class HTM
  # Long-term Memory - PostgreSQL/TimescaleDB-backed permanent storage
  #
  # LongTermMemory provides durable storage for all memory nodes with:
  # - Vector similarity search (RAG)
  # - Full-text search
  # - Time-range queries
  # - Relationship graphs
  # - Tag system
  # - ActiveRecord ORM for data access
  # - Query result caching for efficiency
  #
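  # A minimal usage sketch, assuming an ActiveRecord connection is already
  # established and that `htm_config` is whatever configuration object the
  # surrounding HTM setup supplies:
  #
  # @example Hypothetical basic usage
  #   memory  = HTM::LongTermMemory.new(htm_config)
  #   node_id = memory.add(content: "PostgreSQL 16 released",
  #                        source: "user",
  #                        token_count: 5,
  #                        robot_id: "robot-1")
  #   memory.retrieve(node_id) #=> hash of node attributes
  #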
  class LongTermMemory
    DEFAULT_QUERY_TIMEOUT = 30_000 # milliseconds (30 seconds)
    MAX_VECTOR_DIMENSION = 2000 # Maximum supported dimension with HNSW index (pgvector limitation)
    DEFAULT_CACHE_SIZE = 1000 # Number of queries to cache
    DEFAULT_CACHE_TTL = 300 # Cache lifetime in seconds (5 minutes)

    attr_reader :query_timeout

    def initialize(config, pool_size: nil, query_timeout: DEFAULT_QUERY_TIMEOUT, cache_size: DEFAULT_CACHE_SIZE, cache_ttl: DEFAULT_CACHE_TTL)
      @config = config
      @query_timeout = query_timeout # in milliseconds

      # Set statement timeout for ActiveRecord queries
      ActiveRecord::Base.connection.execute("SET statement_timeout = #{@query_timeout}")

      # Initialize query result cache (disable with cache_size: 0)
      if cache_size > 0
        @query_cache = LruRedux::TTL::ThreadSafeCache.new(cache_size, cache_ttl)
        @cache_stats = { hits: 0, misses: 0 }
      end
    end

    # Add a node to long-term memory
    #
    # Embeddings should be generated client-side and provided via the embedding parameter.
    #
    # @param content [String] Conversation message/utterance
    # @param source [String] Who said it: 'user' or robot name
    # @param token_count [Integer] Token count
    # @param robot_id [String] Robot identifier
    # @param embedding [Array<Float>, nil] Pre-generated embedding vector
    # @return [Integer] Node database ID
    #
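    # A hedged sketch, assuming `memory` is a LongTermMemory instance and that
    # `embedding_service` responds to #embed; vectors shorter than 2000
    # dimensions are zero-padded before storage:
    #
    # @example Hypothetical add with a client-side embedding
    #   vector  = embedding_service.embed("The capital of France is Paris")
    #   node_id = memory.add(content: "The capital of France is Paris",
    #                        source: "user",
    #                        token_count: 7,
    #                        robot_id: "robot-1",
    #                        embedding: vector)
    #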
    def add(content:, source:, token_count: 0, robot_id:, embedding: nil)
      # Prepare embedding if provided
      if embedding
        # Pad embedding to 2000 dimensions if needed
        actual_dimension = embedding.length
        if actual_dimension < 2000
          padded_embedding = embedding + Array.new(2000 - actual_dimension, 0.0)
        else
          padded_embedding = embedding
        end
        embedding_str = "[#{padded_embedding.join(',')}]"
      end

      # Create node using ActiveRecord
      node = HTM::Models::Node.create!(
        content: content,
        source: source,
        token_count: token_count,
        robot_id: robot_id,
        embedding: embedding ? embedding_str : nil,
        embedding_dimension: embedding ? embedding.length : nil
      )

      # Invalidate cache since database content changed
      invalidate_cache!

      node.id
    end

    # Retrieve a node by ID
    #
    # Automatically tracks access by incrementing access_count and updating last_accessed
    #
    # @param node_id [Integer] Node database ID
    # @return [Hash, nil] Node data or nil
    #
    def retrieve(node_id)
      node = HTM::Models::Node.find_by(id: node_id)
      return nil unless node

      # Track access (atomic increment)
      node.increment!(:access_count)
      node.touch(:last_accessed)

      node.attributes
    end

    # Update last_accessed timestamp
    #
    # @param node_id [Integer] Node database ID
    # @return [void]
    #
    def update_last_accessed(node_id)
      node = HTM::Models::Node.find_by(id: node_id)
      node&.update(last_accessed: Time.current)
    end

    # Delete a node
    #
    # @param node_id [Integer] Node database ID
    # @return [void]
    #
    def delete(node_id)
      node = HTM::Models::Node.find_by(id: node_id)
      node&.destroy

      # Invalidate cache since database content changed
      invalidate_cache!
    end

    # Check if a node exists
    #
    # @param node_id [Integer] Node database ID
    # @return [Boolean] True if node exists
    #
    def exists?(node_id)
      HTM::Models::Node.exists?(node_id)
    end

    # Vector similarity search
    #
    # @param timeframe [Range] Time range to search
    # @param query [String] Search query
    # @param limit [Integer] Maximum results
    # @param embedding_service [Object] Service to generate embeddings
    # @return [Array<Hash>] Matching nodes
    #
    def search(timeframe:, query:, limit:, embedding_service:)
      # Return uncached if cache disabled
      return search_uncached(timeframe: timeframe, query: query, limit: limit, embedding_service: embedding_service) unless @query_cache

      # Generate cache key
      cache_key = cache_key_for(:search, timeframe, query, limit)

      # Try to get from cache
      cached = @query_cache[cache_key]
      if cached
        @cache_stats[:hits] += 1
        return cached
      end

      # Cache miss - execute query
      @cache_stats[:misses] += 1
      result = search_uncached(timeframe: timeframe, query: query, limit: limit, embedding_service: embedding_service)

      # Store in cache
      @query_cache[cache_key] = result
      result
    end

    # Full-text search
    #
    # @param timeframe [Range] Time range to search
    # @param query [String] Search query
    # @param limit [Integer] Maximum results
    # @return [Array<Hash>] Matching nodes
    #
    def search_fulltext(timeframe:, query:, limit:)
      # Return uncached if cache disabled
      return search_fulltext_uncached(timeframe: timeframe, query: query, limit: limit) unless @query_cache

      # Generate cache key
      cache_key = cache_key_for(:fulltext, timeframe, query, limit)

      # Try to get from cache
      cached = @query_cache[cache_key]
      if cached
        @cache_stats[:hits] += 1
        return cached
      end

      # Cache miss - execute query
      @cache_stats[:misses] += 1
      result = search_fulltext_uncached(timeframe: timeframe, query: query, limit: limit)

      # Store in cache
      @query_cache[cache_key] = result
      result
    end

    # Hybrid search (full-text + vector)
    #
    # @param timeframe [Range] Time range to search
    # @param query [String] Search query
    # @param limit [Integer] Maximum results
    # @param embedding_service [Object] Service to generate embeddings
    # @param prefilter_limit [Integer] Candidates to consider (default: 100)
    # @return [Array<Hash>] Matching nodes
    #
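    # An illustrative call, assuming `memory` is a LongTermMemory instance and
    # `embedder` responds to #embed; the full-text pass prefilters candidates,
    # which vector distance then ranks:
    #
    # @example Hypothetical hybrid search over the last week
    #   memory.search_hybrid(timeframe: (Time.now - 7 * 86_400)..Time.now,
    #                        query: "timescaledb hypertables",
    #                        limit: 10,
    #                        embedding_service: embedder,
    #                        prefilter_limit: 200)
    #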
    def search_hybrid(timeframe:, query:, limit:, embedding_service:, prefilter_limit: 100)
      # Return uncached if cache disabled
      return search_hybrid_uncached(timeframe: timeframe, query: query, limit: limit, embedding_service: embedding_service, prefilter_limit: prefilter_limit) unless @query_cache

      # Generate cache key
      cache_key = cache_key_for(:hybrid, timeframe, query, limit, prefilter_limit)

      # Try to get from cache
      cached = @query_cache[cache_key]
      if cached
        @cache_stats[:hits] += 1
        return cached
      end

      # Cache miss - execute query
      @cache_stats[:misses] += 1
      result = search_hybrid_uncached(timeframe: timeframe, query: query, limit: limit, embedding_service: embedding_service, prefilter_limit: prefilter_limit)

      # Store in cache
      @query_cache[cache_key] = result
      result
    end

    # Add a tag to a node
    #
    # @param node_id [Integer] Node database ID
    # @param tag [String] Tag name
    # @return [void]
    #
    def add_tag(node_id:, tag:)
      tag_record = HTM::Models::Tag.find_or_create_by(name: tag)
      HTM::Models::NodeTag.create(
        node_id: node_id,
        tag_id: tag_record.id
      )
    rescue ActiveRecord::RecordNotUnique
      # Tag association already exists, ignore
    end

    # Mark nodes as evicted from working memory
    #
    # @param node_ids [Array<Integer>] Node IDs
    # @return [void]
    #
    def mark_evicted(node_ids)
      return if node_ids.empty?

      HTM::Models::Node.where(id: node_ids).update_all(in_working_memory: false)
    end

    # Track access for multiple nodes (bulk operation)
    #
    # Updates access_count and last_accessed for all nodes in the array
    #
    # @param node_ids [Array<Integer>] Node IDs that were accessed
    # @return [void]
    #
    def track_access(node_ids)
      return if node_ids.empty?

      # Atomic batch update
      HTM::Models::Node.where(id: node_ids).update_all(
        "access_count = access_count + 1, last_accessed = NOW()"
      )
    end

    # Register a robot
    #
    # @param robot_name [String] Robot name
    # @return [Integer] Robot database ID
    #
    def register_robot(robot_name)
      robot = HTM::Models::Robot.find_or_create_by(name: robot_name)
      robot.update(last_active: Time.current)
      robot.id
    end

    # Update robot activity timestamp
    #
    # @param robot_id [String] Robot identifier
    # @return [void]
    #
    def update_robot_activity(robot_id)
      robot = HTM::Models::Robot.find_by(id: robot_id)
      robot&.update(last_active: Time.current)
    end

    # Get memory statistics
    #
    # @return [Hash] Statistics
    #
    def stats
      base_stats = {
        total_nodes: HTM::Models::Node.count,
        nodes_by_robot: HTM::Models::Node.group(:robot_id).count,
        total_tags: HTM::Models::Tag.count,
        oldest_memory: HTM::Models::Node.minimum(:created_at),
        newest_memory: HTM::Models::Node.maximum(:created_at),
        active_robots: HTM::Models::Robot.count,
        robot_activity: HTM::Models::Robot.select(:id, :name, :last_active).map(&:attributes),
        database_size: ActiveRecord::Base.connection.select_value("SELECT pg_database_size(current_database())").to_i
      }

      # Include cache statistics if cache is enabled
      if @query_cache
        base_stats[:cache] = cache_stats
      end

      base_stats
    end

    # Shutdown - no-op with ActiveRecord (connection pool managed by ActiveRecord)
    def shutdown
      # ActiveRecord handles connection pool shutdown
      # This method kept for API compatibility
    end

    # For backwards compatibility with tests/code that expect pool_size
    def pool_size
      ActiveRecord::Base.connection_pool.size
    end

    # Retrieve nodes by ontological topic
    #
    # @param topic_path [String] Topic hierarchy path
    # @param exact [Boolean] Exact match or prefix match
    # @param limit [Integer] Maximum results
    # @return [Array<Hash>] Matching nodes
    #
    def nodes_by_topic(topic_path, exact: false, limit: 50)
      if exact
        nodes = HTM::Models::Node
          .joins(:tags)
          .where(tags: { name: topic_path })
          .distinct
          .order(created_at: :desc)
          .limit(limit)
      else
        nodes = HTM::Models::Node
          .joins(:tags)
          .where("tags.name LIKE ?", "#{topic_path}%")
          .distinct
          .order(created_at: :desc)
          .limit(limit)
      end

      nodes.map(&:attributes)
    end

    # Get ontology structure view
    #
    # @return [Array<Hash>] Ontology structure
    #
    def ontology_structure
      result = ActiveRecord::Base.connection.select_all(
        "SELECT * FROM ontology_structure WHERE root_topic IS NOT NULL ORDER BY root_topic, level1_topic, level2_topic"
      )
      result.to_a
    end

    # Get topic relationships (co-occurrence)
    #
    # @param min_shared_nodes [Integer] Minimum shared nodes
    # @param limit [Integer] Maximum relationships
    # @return [Array<Hash>] Topic relationships
    #
    def topic_relationships(min_shared_nodes: 2, limit: 50)
      result = ActiveRecord::Base.connection.select_all(
        <<~SQL,
          SELECT t1.name AS topic1, t2.name AS topic2, COUNT(DISTINCT nt1.node_id) AS shared_nodes
          FROM tags t1
          JOIN node_tags nt1 ON t1.id = nt1.tag_id
          JOIN node_tags nt2 ON nt1.node_id = nt2.node_id
          JOIN tags t2 ON nt2.tag_id = t2.id
          WHERE t1.name < t2.name
          GROUP BY t1.name, t2.name
          HAVING COUNT(DISTINCT nt1.node_id) >= #{min_shared_nodes.to_i}
          ORDER BY shared_nodes DESC
          LIMIT #{limit.to_i}
        SQL
      )
      result.to_a
    end

    # Get topics for a specific node
    #
    # @param node_id [Integer] Node database ID
    # @return [Array<String>] Topic paths
    #
    def node_topics(node_id)
      HTM::Models::Tag
        .joins(:node_tags)
        .where(node_tags: { node_id: node_id })
        .order(:name)
        .pluck(:name)
    end

    # Calculate dynamic relevance score for a node given query context
    #
    # Combines multiple signals:
    # - Vector similarity (semantic match)
    # - Tag overlap (categorical match)
    # - Recency (freshness)
    # - Access frequency (popularity/utility)
    #
    # @param node [Hash] Node data with similarity, tags, created_at, access_count
    # @param query_tags [Array<String>] Tags associated with the query
    # @param vector_similarity [Float, nil] Pre-computed vector similarity (0-1)
    # @return [Float] Composite relevance score (0-10)
    #
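    # A worked sketch with assumed inputs (vector similarity 0.8, tag overlap
    # 0.6, a 24-hour-old node accessed 5 times):
    #   semantic: 0.8                 * 0.5 = 0.400
    #   tags:     0.6                 * 0.3 = 0.180
    #   recency:  e^(-24/168) ≈ 0.87  * 0.1 ≈ 0.087
    #   access:   ln(1+5)/10  ≈ 0.18  * 0.1 ≈ 0.018
    #   relevance ≈ 0.68 * 10 ≈ 6.8
    #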
    def calculate_relevance(node:, query_tags: [], vector_similarity: nil)
      # 1. Vector similarity (semantic match) - weight: 0.5
      semantic_score = if vector_similarity
        vector_similarity
      elsif node['similarity']
        node['similarity'].to_f
      else
        0.5 # Neutral if no embedding
      end

      # 2. Tag overlap (categorical relevance) - weight: 0.3
      node_tags = get_node_tags(node['id'])
      tag_score = if query_tags.any? && node_tags.any?
        weighted_hierarchical_jaccard(query_tags, node_tags)
      else
        0.5 # Neutral if no tags
      end

      # 3. Recency (temporal relevance) - weight: 0.1
      age_hours = (Time.now - Time.parse(node['created_at'].to_s)) / 3600.0
      recency_score = Math.exp(-age_hours / 168.0) # 1-week half-life

      # 4. Access frequency (behavioral signal) - weight: 0.1
      access_count = node['access_count'] || 0
      access_score = Math.log(1 + access_count) / 10.0 # Normalize to 0-1

      # Weighted composite (scale to 0-10)
      relevance = (
        (semantic_score * 0.5) +
        (tag_score * 0.3) +
        (recency_score * 0.1) +
        (access_score * 0.1)
      ) * 10.0

      relevance.clamp(0.0, 10.0)
    end

    # Search with dynamic relevance scoring
    #
    # Returns nodes with calculated relevance scores based on query context
    #
    # @param timeframe [Range] Time range to search
    # @param query [String, nil] Search query
    # @param query_tags [Array<String>] Tags to match
    # @param limit [Integer] Maximum results
    # @param embedding_service [Object, nil] Service to generate embeddings
    # @return [Array<Hash>] Nodes with relevance scores
    #
    def search_with_relevance(timeframe:, query: nil, query_tags: [], limit: 20, embedding_service: nil)
      # Get candidates from appropriate search method
      candidates = if query && embedding_service
        # Vector search
        search_uncached(timeframe: timeframe, query: query, limit: limit * 2, embedding_service: embedding_service)
      elsif query
        # Full-text search
        search_fulltext_uncached(timeframe: timeframe, query: query, limit: limit * 2)
      else
        # Time-range only
        HTM::Models::Node
          .where(created_at: timeframe)
          .order(created_at: :desc)
          .limit(limit * 2)
          .map(&:attributes)
      end

      # Calculate relevance for each candidate
      scored_nodes = candidates.map do |node|
        relevance = calculate_relevance(
          node: node,
          query_tags: query_tags,
          vector_similarity: node['similarity']&.to_f
        )

        node.merge({
          'relevance' => relevance,
          'tags' => get_node_tags(node['id'])
        })
      end

      # Sort by relevance and return top K
      scored_nodes
        .sort_by { |n| -n['relevance'] }
        .take(limit)
    end

    # Get tags for a specific node
    #
    # @param node_id [Integer] Node database ID
    # @return [Array<String>] Tag names
    #
    def get_node_tags(node_id)
      HTM::Models::Tag
        .joins(:node_tags)
        .where(node_tags: { node_id: node_id })
        .pluck(:name)
    rescue
      []
    end

    # Search nodes by tags
    #
    # @param tags [Array<String>] Tags to search for
    # @param match_all [Boolean] If true, match ALL tags; if false, match ANY tag
    # @param timeframe [Range, nil] Optional time range filter
    # @param limit [Integer] Maximum results
    # @return [Array<Hash>] Matching nodes with relevance scores
    #
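    # An illustrative call, assuming `memory` is a LongTermMemory instance;
    # with match_all: true only nodes carrying every listed tag are returned:
    #
    # @example Hypothetical tag search
    #   memory.search_by_tags(tags: ["database:postgresql", "ruby"],
    #                         match_all: true,
    #                         limit: 5)
    #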
    def search_by_tags(tags:, match_all: false, timeframe: nil, limit: 20)
      return [] if tags.empty?

      # Build base query
      query = HTM::Models::Node
        .joins(:tags)
        .where(tags: { name: tags })
        .distinct

      # Apply timeframe filter if provided
      query = query.where(created_at: timeframe) if timeframe

      if match_all
        # Match ALL tags (intersection)
        query = query
          .group('nodes.id')
          .having('COUNT(DISTINCT tags.name) = ?', tags.size)
      end

      # Get results
      nodes = query.limit(limit).map(&:attributes)

      # Calculate relevance and enrich with tags
      nodes.map do |node|
        relevance = calculate_relevance(
          node: node,
          query_tags: tags
        )

        node.merge({
          'relevance' => relevance,
          'tags' => get_node_tags(node['id'])
        })
      end.sort_by { |n| -n['relevance'] }
    end

    # Get most popular tags
    #
    # @param limit [Integer] Number of tags to return
    # @param timeframe [Range, nil] Optional time range filter
    # @return [Array<Hash>] Tags with usage counts
    #
    def popular_tags(limit: 20, timeframe: nil)
      query = HTM::Models::Tag
        .joins(:node_tags)
        .joins('INNER JOIN nodes ON nodes.id = node_tags.node_id')
        .group('tags.id', 'tags.name')
        .select('tags.name, COUNT(node_tags.id) as usage_count')

      query = query.where('nodes.created_at >= ? AND nodes.created_at <= ?', timeframe.begin, timeframe.end) if timeframe

      query
        .order('usage_count DESC')
        .limit(limit)
        .map { |tag| { name: tag.name, usage_count: tag.usage_count } }
    end

    private

    # Generate cache key for query
    #
    # @param method [Symbol] Search method name
    # @param timeframe [Range] Time range
    # @param query [String] Search query
    # @param limit [Integer] Result limit
    # @param args [Array] Additional arguments
    # @return [String] Cache key
    #
    def cache_key_for(method, timeframe, query, limit, *args)
      key_parts = [
        method,
        timeframe.begin.to_i,
        timeframe.end.to_i,
        query,
        limit,
        *args
      ]
      Digest::SHA256.hexdigest(key_parts.join('|'))
    end

    # Get cache statistics
    #
    # @return [Hash, nil] Cache stats or nil if cache disabled
    #
    def cache_stats
      return nil unless @query_cache

      total = @cache_stats[:hits] + @cache_stats[:misses]
      hit_rate = total > 0 ? (@cache_stats[:hits].to_f / total * 100).round(2) : 0.0

      {
        hits: @cache_stats[:hits],
        misses: @cache_stats[:misses],
        hit_rate: hit_rate,
        size: @query_cache.count
      }
    end

    # Calculate Jaccard similarity between two sets
    #
    # @param set_a [Array] First set
    # @param set_b [Array] Second set
    # @return [Float] Jaccard similarity (0.0-1.0)
    #
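    # Illustrative arithmetic: jaccard_similarity(["ruby", "database"], ["database", "postgres"])
    # shares 1 of 3 distinct elements, so it returns 1/3 ≈ 0.33.
    #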
    def jaccard_similarity(set_a, set_b)
      return 0.0 if set_a.empty? && set_b.empty?
      return 0.0 if set_a.empty? || set_b.empty?

      intersection = (set_a & set_b).size
      union = (set_a | set_b).size

      intersection.to_f / union
    end

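    # Weighted, hierarchy-aware tag overlap: each tag pair is scored by
    # calculate_hierarchical_similarity and the results are averaged with the
    # returned depth weights. A sketch of the behaviour, assuming colon-delimited
    # tag paths:
    #   weighted_hierarchical_jaccard(["music:country"], ["music:country:willie-nelson"])
    #     #=> ≈ 0.67 (two of three levels shared)
    #   weighted_hierarchical_jaccard(["ruby:activerecord"], ["python:django"])
    #     #=> 0.0 (no shared prefix)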
    def weighted_hierarchical_jaccard(set_a, set_b)
      return 0.0 if set_a.empty? || set_b.empty?

      total_weighted_similarity = 0.0
      total_weights = 0.0

      set_a.each do |tag_a|
        set_b.each do |tag_b|
          similarity, weight = calculate_hierarchical_similarity(tag_a, tag_b)
          total_weighted_similarity += similarity * weight
          total_weights += weight
        end
      end

      total_weights > 0 ? total_weighted_similarity / total_weights : 0.0
    end

    # Invalidate (clear) the query cache
    #
    # @return [void]
    #
    def invalidate_cache!
      @query_cache.clear if @query_cache
    end

    # Uncached vector similarity search
    #
    # Generates query embedding client-side and performs vector search in database.
    #
    # @param timeframe [Range] Time range to search
    # @param query [String] Search query
    # @param limit [Integer] Maximum results
    # @param embedding_service [Object] Service to generate query embedding
    # @return [Array<Hash>] Matching nodes
    #
    def search_uncached(timeframe:, query:, limit:, embedding_service:)
      # Generate query embedding client-side
      query_embedding = embedding_service.embed(query)

      # Pad embedding to 2000 dimensions if needed (to match nodes.embedding vector(2000))
      if query_embedding.length < 2000
        query_embedding = query_embedding + Array.new(2000 - query_embedding.length, 0.0)
      end

      # Convert to PostgreSQL vector format
      embedding_str = "[#{query_embedding.join(',')}]"

      result = ActiveRecord::Base.connection.select_all(
        <<~SQL,
          SELECT id, content, source, access_count, created_at, robot_id, token_count,
                 1 - (embedding <=> '#{embedding_str}'::vector) as similarity
          FROM nodes
          WHERE created_at BETWEEN '#{timeframe.begin.iso8601}' AND '#{timeframe.end.iso8601}'
            AND embedding IS NOT NULL
          ORDER BY embedding <=> '#{embedding_str}'::vector
          LIMIT #{limit.to_i}
        SQL
      )

      # Track access for retrieved nodes
      node_ids = result.map { |r| r['id'] }
      track_access(node_ids)

      result.to_a
    end

    # Uncached full-text search
    #
    # @param timeframe [Range] Time range to search
    # @param query [String] Search query
    # @param limit [Integer] Maximum results
    # @return [Array<Hash>] Matching nodes
    #
    def search_fulltext_uncached(timeframe:, query:, limit:)
      result = ActiveRecord::Base.connection.select_all(
        ActiveRecord::Base.sanitize_sql_array([
          <<~SQL,
            SELECT id, content, source, access_count, created_at, robot_id, token_count,
                   ts_rank(to_tsvector('english', content), plainto_tsquery('english', ?)) as rank
            FROM nodes
            WHERE created_at BETWEEN ? AND ?
              AND to_tsvector('english', content) @@ plainto_tsquery('english', ?)
            ORDER BY rank DESC
            LIMIT ?
          SQL
          query, timeframe.begin, timeframe.end, query, limit
        ])
      )

      # Track access for retrieved nodes
      node_ids = result.map { |r| r['id'] }
      track_access(node_ids)

      result.to_a
    end

    # Uncached hybrid search
    #
    # Generates query embedding client-side, then combines full-text search for
    # candidate selection with vector similarity for ranking.
    #
    # @param timeframe [Range] Time range to search
    # @param query [String] Search query
    # @param limit [Integer] Maximum results
    # @param embedding_service [Object] Service to generate query embedding
    # @param prefilter_limit [Integer] Candidates to consider
    # @return [Array<Hash>] Matching nodes
    #
    def search_hybrid_uncached(timeframe:, query:, limit:, embedding_service:, prefilter_limit:)
      # Generate query embedding client-side
      query_embedding = embedding_service.embed(query)

      # Pad embedding to 2000 dimensions if needed
      if query_embedding.length < 2000
        query_embedding = query_embedding + Array.new(2000 - query_embedding.length, 0.0)
      end

      # Convert to PostgreSQL vector format
      embedding_str = "[#{query_embedding.join(',')}]"

      result = ActiveRecord::Base.connection.select_all(
        ActiveRecord::Base.sanitize_sql_array([
          <<~SQL,
            WITH candidates AS (
              SELECT id, content, source, access_count, created_at, robot_id, token_count, embedding
              FROM nodes
              WHERE created_at BETWEEN ? AND ?
                AND to_tsvector('english', content) @@ plainto_tsquery('english', ?)
                AND embedding IS NOT NULL
              LIMIT ?
            )
            SELECT id, content, source, access_count, created_at, robot_id, token_count,
                   1 - (embedding <=> '#{embedding_str}'::vector) as similarity
            FROM candidates
            ORDER BY embedding <=> '#{embedding_str}'::vector
            LIMIT ?
          SQL
          timeframe.begin, timeframe.end, query, prefilter_limit, limit
        ])
      )

      # Track access for retrieved nodes
      node_ids = result.map { |r| r['id'] }
      track_access(node_ids)

      result.to_a
    end

    def calculate_hierarchical_similarity(tag_a, tag_b)
      parts_a = tag_a.split(':')
      parts_b = tag_b.split(':')

      # Calculate overlap at each level
      common_levels = 0
      max_depth = [parts_a.length, parts_b.length].max

      (0...max_depth).each do |i|
        if i < parts_a.length && i < parts_b.length && parts_a[i] == parts_b[i]
          common_levels += 1
        else
          break
        end
      end

      # Calculate weight based on hierarchy depth (higher levels = more weight)
      depth_weight = 1.0 / max_depth

      # Calculate normalized similarity (0-1)
      similarity = max_depth > 0 ? (common_levels.to_f / max_depth) : 0.0

      [similarity, depth_weight]
    end

    #######################################
=begin

    # Enhanced hierarchical similarity (with term_bonus for deep term matches like "country-music")
    # Replaces your private calculate_hierarchical_similarity
    def calculate_hierarchical_similarity(tag_a, tag_b, max_depth: 5)
      return [0.0, 1.0] if tag_a.empty? || tag_b.empty? # [similarity, weight]

      parts_a = tag_a.split(':').reject(&:empty?)
      parts_b = tag_b.split(':').reject(&:empty?)
      return [0.0, 1.0] if parts_a.empty? || parts_b.empty?

      # Prefix similarity
      local_max = [parts_a.length, parts_b.length].max
      common_levels = 0
      (0...local_max).each do |i|
        if i < parts_a.length && i < parts_b.length && parts_a[i] == parts_b[i]
          common_levels += 1
        else
          break
        end
      end
      prefix_sim = local_max > 0 ? common_levels.to_f / local_max : 0.0

      # Term bonus: Shared terms weighted by avg depth
      common_terms = parts_a.to_set & parts_b.to_set
      term_bonus = 0.0
      common_terms.each do |term|
        depth_a = parts_a.index(term) + 1
        depth_b = parts_b.index(term) + 1
        avg_depth = (depth_a + depth_b) / 2.0
        depth_weight = avg_depth / max_depth.to_f
        term_bonus += depth_weight * 0.8 # Increased from 0.5 for more aggression
      end
      term_bonus = [1.0, term_bonus].min

      # Combined similarity (your weight now favors deeper via local_max)
      sim = (prefix_sim + term_bonus) / 2.0
      weight = local_max.to_f / max_depth # Deeper = higher weight (flipped from your 1/max)

      [sim, weight]
    end

    # Enhanced weighted_hierarchical_jaccard (uses new similarity; adds max_pairs fallback)
    # Replaces your private weighted_hierarchical_jaccard
    def weighted_hierarchical_jaccard(set_a, set_b, max_depth: 5, max_pairs: 1000)
      return 0.0 if set_a.empty? || set_b.empty?

      # Fallback to flat Jaccard for large sets (your jaccard_similarity)
      if set_a.size * set_b.size > max_pairs
        terms_a = set_a.flat_map { |tag| tag.split(':').reject(&:empty?) }.to_set
        terms_b = set_b.flat_map { |tag| tag.split(':').reject(&:empty?) }.to_set
        return jaccard_similarity(terms_a.to_a, terms_b.to_a)
      end

      total_weighted_similarity = 0.0
      total_weights = 0.0
      set_a.each do |tag_a|
        set_b.each do |tag_b|
          similarity, weight = calculate_hierarchical_similarity(tag_a, tag_b, max_depth: max_depth)
          total_weighted_similarity += similarity * weight
          total_weights += weight
        end
      end
      total_weights > 0 ? total_weighted_similarity / total_weights : 0.0
    end

    # Updated calculate_relevance (adds ont_weight param; scales to 0-100 option)
    # Enhances your existing method
    def calculate_relevance(node:, query_tags: [], vector_similarity: nil, ont_weight: 1.0, scale_to_100: false)
      # 1. Vector similarity (semantic) - weight: 0.5
      semantic_score = if vector_similarity
        vector_similarity
      elsif node['similarity']
        node['similarity'].to_f
      else
        0.5
      end

      # 2. Tag overlap (ontology) - weight: 0.3, boosted by ont_weight
      node_tags = get_node_tags(node['id'])
      tag_score = if query_tags.any? && node_tags.any?
        weighted_hierarchical_jaccard(query_tags, node_tags) * ont_weight
      else
        0.5
      end
      tag_score = [tag_score, 1.0].min # Cap boosted score

      # 3. Recency - weight: 0.1
      age_hours = (Time.current - Time.parse(node['created_at'].to_s)) / 3600.0
      recency_score = Math.exp(-age_hours / 168.0)

      # 4. Access frequency - weight: 0.1
      access_count = node['access_count'] || 0
      access_score = Math.log(1 + access_count) / 10.0

      # Weighted composite (0-10 base)
      relevance_0_10 = (
        (semantic_score * 0.5) +
        (tag_score * 0.3) +
        (recency_score * 0.1) +
        (access_score * 0.1)
      ).clamp(0.0, 10.0)

      # Scale to 0-100 if requested
      final_relevance = scale_to_100 ? (relevance_0_10 * 10.0).round(2) : relevance_0_10

      final_relevance
    end

    # Updated search_with_relevance (adds threshold: for 0-100 filtering; ont_weight)
    # Enhances your existing method
    def search_with_relevance(timeframe:, query: nil, query_tags: [], limit: 20, embedding_service: nil, threshold: nil, ont_weight: 1.0, scale_to_100: true)
      # Get candidates (your logic)
      candidates = if query && embedding_service
        search_uncached(timeframe: timeframe, query: query, limit: limit * 3, embedding_service: embedding_service) # Oversample more for thresholds
      elsif query
        search_fulltext_uncached(timeframe: timeframe, query: query, limit: limit * 3)
      else
        HTM::Models::Node
          .where(created_at: timeframe)
          .order(created_at: :desc)
          .limit(limit * 3)
          .map(&:attributes)
      end

      # Score and enrich
      scored_nodes = candidates.map do |node|
        relevance = calculate_relevance(
          node: node,
          query_tags: query_tags,
          vector_similarity: node['similarity']&.to_f,
          ont_weight: ont_weight,
          scale_to_100: scale_to_100
        )
        node.merge({
          'relevance' => relevance,
          'tags' => get_node_tags(node['id'])
        })
      end

      # Filter by threshold if provided (e.g., >=80 for 0-100 scale)
      scored_nodes = scored_nodes.select { |n| threshold.nil? || n['relevance'] >= threshold }

      # Sort by relevance DESC, take limit (or all if threshold used)
      scored_nodes
        .sort_by { |n| -n['relevance'] }
        .take(limit)
    end

=end

  end
end