deepsearch-rb 0.1.0
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +8 -0
- data/LICENSE.txt +21 -0
- data/README.md +138 -0
- data/lib/deepsearch/configuration.rb +88 -0
- data/lib/deepsearch/engine/pipeline.rb +126 -0
- data/lib/deepsearch/engine/steps/data_aggregation/parsed_website.rb +122 -0
- data/lib/deepsearch/engine/steps/data_aggregation/process.rb +56 -0
- data/lib/deepsearch/engine/steps/data_aggregation/result.rb +28 -0
- data/lib/deepsearch/engine/steps/parallel_search/process.rb +42 -0
- data/lib/deepsearch/engine/steps/parallel_search/result.rb +28 -0
- data/lib/deepsearch/engine/steps/parallel_search/search.rb +95 -0
- data/lib/deepsearch/engine/steps/prepare_subqueries/process.rb +87 -0
- data/lib/deepsearch/engine/steps/prepare_subqueries/result.rb +30 -0
- data/lib/deepsearch/engine/steps/rag/chunker.rb +31 -0
- data/lib/deepsearch/engine/steps/rag/process.rb +79 -0
- data/lib/deepsearch/engine/steps/rag/similarity.rb +60 -0
- data/lib/deepsearch/engine/steps/rag/values/chunk.rb +23 -0
- data/lib/deepsearch/engine/steps/rag/values/query.rb +44 -0
- data/lib/deepsearch/engine/steps/rag/values/result.rb +33 -0
- data/lib/deepsearch/engine/steps/summarization/process.rb +53 -0
- data/lib/deepsearch/engine/steps/summarization/values/result.rb +31 -0
- data/lib/deepsearch/engine.rb +25 -0
- data/lib/deepsearch/logger.rb +32 -0
- data/lib/deepsearch/prompts_config.rb +82 -0
- data/lib/deepsearch/version.rb +5 -0
- data/lib/deepsearch.rb +39 -0
- data/lib/search_adapters/mock_adapter.rb +73 -0
- data/lib/search_adapters/serper_adapter.rb +106 -0
- data/lib/search_adapters/tavily_adapter.rb +113 -0
- data/lib/search_adapters.rb +24 -0
- metadata +186 -0
data/lib/deepsearch/engine/steps/parallel_search/search.rb
@@ -0,0 +1,95 @@
# frozen_string_literal: true

require 'async'
require 'async/semaphore'

module Deepsearch
  class Engine
    module Steps
      module ParallelSearch
        # Performs concurrent web searches for a list of queries using a given search adapter.
        # It manages concurrency, retries with exponential backoff for failed searches,
        # and aggregates the unique results.
        class Search
          MAX_CONCURRENCY = 2
          MAX_RETRIES = 1
          INITIAL_BACKOFF = 1

          def initialize(initial_query, sub_queries, search_adapter, **options)
            @all_queries = [initial_query] + sub_queries
            @search_adapter = search_adapter
            @max_total_search_results = options[:max_total_search_results]
            @search_options = build_search_options
            @logger = Deepsearch.configuration.logger
          end

          def output
            return [] if @all_queries.empty?

            results = perform_all_searches
            results.flatten.uniq { |result| result['url'] }
          end

          private

          def build_search_options
            return {} unless @max_total_search_results

            max_results_per_search = (@max_total_search_results.to_f / @all_queries.size).ceil
            { max_results: max_results_per_search }
          end

          def perform_all_searches
            @logger.debug("Starting parallel search for #{@all_queries.size} queries with max concurrency of #{MAX_CONCURRENCY}")

            Sync do |task|
              semaphore = Async::Semaphore.new(MAX_CONCURRENCY, parent: task)

              tasks = @all_queries.each_with_index.map do |query, index|
                # Add a small delay for subsequent tasks to avoid overwhelming the search API
                sleep(1) if index > 0

                semaphore.async do |sub_task|
                  sub_task.annotate("query ##{index + 1}: #{query}")
                  perform_search_with_retries(query, index + 1)
                end
              end

              tasks.map(&:wait)
            end
          end

          def perform_search_with_retries(query, query_number)
            (MAX_RETRIES + 1).times do |attempt|
              @logger.debug("Task #{query_number}: Searching '#{query}' (Attempt #{attempt + 1})")

              results = @search_adapter.search(query, @search_options)
              extracted = extract_results(results)
              @logger.debug("✓ Task #{query_number} completed with #{extracted.size} results for '#{query}'")
              return extracted
            rescue StandardError => e
              @logger.debug("✗ Task #{query_number} error for '#{query}': #{e.message}")

              break if attempt >= MAX_RETRIES

              sleep_duration = (INITIAL_BACKOFF * (2**attempt)) + rand(0.1..0.5)
              @logger.debug("  Retrying Task #{query_number} in #{sleep_duration.round(2)}s...")
              sleep(sleep_duration)
            end

            @logger.debug("✗ Task #{query_number} failed permanently for '#{query}' after #{MAX_RETRIES} retries.")
            []
          end

          def extract_results(results)
            return [] if results.nil?
            return results unless results.is_a?(Hash)

            results['results'] || results[:results] || []
          end
        end
      end
    end
  end
end
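A minimal usage sketch of this step, assuming the gem is loaded and configured (the constructor reads Deepsearch.configuration.logger). The adapter name below is hypothetical; judging by extract_results, the only contract assumed is that the adapter responds to search(query, options) and returns either an array of results or a hash with a 'results' key:

# StubAdapter is an illustrative stand-in, not part of the gem.
class StubAdapter
  def search(query, options = {})
    { 'results' => [{ 'url' => "https://example.com/#{query.hash.abs}", 'title' => query }] }
  end
end

search = Deepsearch::Engine::Steps::ParallelSearch::Search.new(
  "ruby fibers",                          # initial query
  ["ruby fiber scheduler", "async ruby"], # sub-queries
  StubAdapter.new,
  max_total_search_results: 9             # ceil(9 / 3 queries) => max_results: 3 per search
)
search.output # => flattened result hashes, de-duplicated by 'url'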
data/lib/deepsearch/engine/steps/prepare_subqueries/process.rb
@@ -0,0 +1,87 @@
# frozen_string_literal: true

require_relative 'result'

module Deepsearch
  class Engine
    module Steps
      module PrepareSubqueries
        # Cleans the original query and uses an LLM to generate sub-queries
        # that broaden search coverage.
        class Process
          def initialize(original_query)
            @original_query = original_query
          end

          def execute
            validate_input
            process_query
          rescue StandardError => e
            PrepareSubqueries::Result.new(
              cleaned_query: "",
              sub_queries: [],
              original_query: @original_query.to_s,
              error: e.message
            )
          end

          private

          def validate_input
            unless @original_query && !@original_query.strip.empty?
              raise StandardError, "Original query is required for preprocessing"
            end
          end

          def process_query
            cleaned_query = clean_query(@original_query)
            subqueries = generate_subqueries(cleaned_query)

            PrepareSubqueries::Result.new(
              cleaned_query: cleaned_query,
              original_query: @original_query,
              sub_queries: subqueries
            )
          end

          def clean_query(query)
            query.strip.gsub(/\s+/, ' ')
          end

          def generate_subqueries(query)
            Deepsearch.configuration.logger.debug("Attempting to generate subqueries using LLM...")
            chat = RubyLLM.chat

            prompt = Deepsearch.configuration.prompts.subquery_prompt(query: query)
            Deepsearch.configuration.logger.debug("Sending prompt to LLM...")
            response = chat.ask(prompt)

            Deepsearch.configuration.logger.debug("Received response from LLM")
            subqueries = parse_subqueries(response.content)
            Deepsearch.configuration.logger.debug("Generated #{subqueries.size} subqueries")
            subqueries
          rescue StandardError => e
            Deepsearch.configuration.logger.debug("Error generating subqueries: #{e.message}")
            Deepsearch.configuration.logger.debug("Error class: #{e.class}")
            Deepsearch.configuration.logger.debug("Backtrace: #{e.backtrace.first(3).join("\n")}")
            []
          end

          def parse_subqueries(response_content)
            return [] unless response_content

            response_content.split("\n")
                            .map(&:strip)
                            .reject(&:empty?)
                            .map { |line| line.gsub(/^\d+\.\s*|^[-*]\s*/, '') }
                            .map { |query| query.gsub(/^["']|["']$/, '') }
                            .reject(&:empty?)
                            .first(5)
          end
        end
      end
    end
  end
end
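The normalization chain in parse_subqueries is easy to verify in isolation. A standalone sketch of the same transformations applied to a typical numbered-and-bulleted LLM response:

response = <<~TEXT
  1. "What are Ruby fibers?"
  2. How does the fiber scheduler work?
  - async gem event loop
TEXT

response.split("\n")
        .map(&:strip)
        .reject(&:empty?)
        .map { |line| line.gsub(/^\d+\.\s*|^[-*]\s*/, '') } # strip "1. ", "- ", "* " markers
        .map { |query| query.gsub(/^["']|["']$/, '') }      # strip surrounding quotes
        .reject(&:empty?)
        .first(5)
# => ["What are Ruby fibers?", "How does the fiber scheduler work?", "async gem event loop"]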
data/lib/deepsearch/engine/steps/prepare_subqueries/result.rb
@@ -0,0 +1,30 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module PrepareSubqueries
        # Represents the result of the sub-query preparation step.
        # It holds the cleaned original query, the generated sub-queries, and any potential error.
        class Result
          attr_reader :cleaned_query, :sub_queries, :original_query, :error

          def initialize(cleaned_query:, sub_queries:, original_query:, error: nil)
            @cleaned_query = cleaned_query
            @sub_queries = sub_queries
            @original_query = original_query
            @error = error
          end

          def success?
            error.nil?
          end

          def failure?
            !success?
          end
        end
      end
    end
  end
end
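All of the step results in this release share this success/failure shape. A short sketch of how the preparation step reads end to end (a configured LLM provider is assumed for sub-query generation; on LLM failure the step still succeeds with an empty list):

step = Deepsearch::Engine::Steps::PrepareSubqueries::Process.new("  how   do ruby fibers work?  ")
result = step.execute

result.success?      # => true unless validation or an unexpected error raised
result.cleaned_query # => "how do ruby fibers work?" (whitespace collapsed)
result.sub_queries   # => up to 5 LLM-generated sub-queries ([] if the LLM call failed)
result.error         # => nil on success, a message string on failure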
data/lib/deepsearch/engine/steps/rag/chunker.rb
@@ -0,0 +1,31 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module Rag
        # Splits a large piece of text content into smaller, overlapping chunks.
        # This is a prerequisite for generating embeddings and performing similarity searches in a RAG pipeline.
        class Chunker
          MAX_CHUNK_SIZE = 7500
          OVERLAP_SIZE = 300

          def chunk(content)
            return [Values::Chunk.new(text: content)] if content.length <= MAX_CHUNK_SIZE

            chunks = []
            step = MAX_CHUNK_SIZE - OVERLAP_SIZE

            i = 0
            while i < content.length
              chunk_text = content.slice(i, MAX_CHUNK_SIZE)
              chunks << Values::Chunk.new(text: chunk_text)
              i += step
            end
            chunks
          end
        end
      end
    end
  end
end
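The window advances by MAX_CHUNK_SIZE - OVERLAP_SIZE = 7200 characters, so consecutive chunks share a 300-character overlap. For example, a 20,000-character document yields slices starting at offsets 0, 7,200, and 14,400:

chunker = Deepsearch::Engine::Steps::Rag::Chunker.new
chunks = chunker.chunk("x" * 20_000)
chunks.map { |c| c.text.length } # => [7500, 7500, 5600]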
data/lib/deepsearch/engine/steps/rag/process.rb
@@ -0,0 +1,79 @@
# frozen_string_literal: true

require_relative 'values/chunk'
require_relative 'values/query'
require_relative 'values/result'
require_relative 'chunker'
require_relative 'similarity'

module Deepsearch
  class Engine
    module Steps
      module Rag
        # Implements the core Retrieval-Augmented Generation (RAG) logic.
        # It takes a query and a set of parsed websites, then:
        # 1. Chunks the website content into smaller pieces.
        # 2. Generates embeddings for all text chunks in batches.
        # 3. Uses a similarity search to find the chunks most relevant to the query.
        # 4. Returns a result containing the relevant chunks.
        class Process
          CHUNK_BATCH_SIZE = 100
          MAX_TOTAL_CHUNKS = 500
          MAX_CHUNKS_PER_WEBSITE = 15

          def initialize(query:, parsed_websites:)
            @query = Values::Query.new(text: query)
            @documents = parsed_websites.map do |website|
              { url: website.url, content: website.content }
            end
          end

          def execute
            chunker = Chunker.new
            all_chunks = @documents.each_with_object([]) do |doc, chunks|
              next if doc[:content].to_s.strip.empty?

              doc_chunks = chunker.chunk(doc[:content])
              if doc_chunks.count > MAX_CHUNKS_PER_WEBSITE
                Deepsearch.configuration.logger.debug("Truncating chunks for #{doc[:url]} from #{doc_chunks.count} to #{MAX_CHUNKS_PER_WEBSITE}")
                doc_chunks = doc_chunks.first(MAX_CHUNKS_PER_WEBSITE)
              end
              doc_chunks.each { |chunk| chunk.document_url = doc[:url] }
              chunks.concat(doc_chunks)
            end

            Deepsearch.configuration.logger.debug("Chunked #{@documents.count} documents into #{all_chunks.count} chunks")

            if all_chunks.count > MAX_TOTAL_CHUNKS
              Deepsearch.configuration.logger.debug("Chunk count (#{all_chunks.count}) exceeds limit of #{MAX_TOTAL_CHUNKS}. Truncating.")
              all_chunks = all_chunks.first(MAX_TOTAL_CHUNKS)
            end

            all_chunks.each_slice(CHUNK_BATCH_SIZE) do |batch|
              texts = batch.map(&:text)
              embeddings = RubyLLM.embed(texts).vectors
              batch.each_with_index { |chunk, i| chunk.embedding = embeddings[i] }
            end

            Deepsearch.configuration.logger.debug("Generated embeddings for #{all_chunks.count} chunks, initiating similarity match...")
            relevant_chunks = Similarity.new.find_relevant(@query, all_chunks)
            Deepsearch.configuration.logger.debug("Found #{relevant_chunks.count} relevant chunks for query: '#{@query.text}'")

            Values::Result.new(
              query: @query,
              relevant_chunks: relevant_chunks
            )
          rescue StandardError => e
            Values::Result.new(
              query: @query,
              relevant_chunks: [],
              error: e.message
            )
          end
        end
      end
    end
  end
end
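A sketch of driving this step directly. The Struct below is an illustrative stand-in for the gem's ParsedWebsite (only #url and #content are read); RubyLLM must be backed by a configured embedding provider, and note that Values::Query.new triggers an embedding call during initialization, before execute's rescue applies:

Website = Struct.new(:url, :content, keyword_init: true) # hypothetical stand-in

result = Deepsearch::Engine::Steps::Rag::Process.new(
  query: "how do ruby fibers work?",
  parsed_websites: [
    Website.new(url: "https://example.com/fibers", content: "Fibers are lightweight...")
  ]
).execute

result.relevant_chunks.map(&:document_url) if result.success?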
data/lib/deepsearch/engine/steps/rag/similarity.rb
@@ -0,0 +1,60 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module Rag
        # Calculates and filters text chunks based on their semantic similarity to a query.
        # It uses cosine similarity to score chunks against a query embedding and employs a two-step
        # filtering process: first, it retrieves a fixed number of top candidates (top-k), and
        # second, it filters these candidates based on a score relative to the best-scoring chunk.
        class Similarity
          TOP_K_CANDIDATES = 75
          RELATIVE_SCORE_THRESHOLD = 0.85

          def find_relevant(query, chunks, threshold: RELATIVE_SCORE_THRESHOLD)
            return [] if chunks.empty?

            similarities = calculate(chunks.map(&:embedding), query.embedding)

            top_candidates = top_k_with_scores(similarities, TOP_K_CANDIDATES)

            return [] if top_candidates.empty?

            best_score = top_candidates.first.first
            cutoff_score = best_score * threshold

            top_candidates.select { |score, _| score >= cutoff_score }
                          .map { |_, index| chunks[index] }
          end

          private

          def calculate(embeddings, query_embedding)
            embeddings.map { |embedding| cosine_similarity(embedding, query_embedding) }
          end

          def top_k_with_scores(similarities, k)
            similarities.each_with_index
                        .sort_by { |score, _| -score }
                        .first(k)
          end

          def cosine_similarity(vec_a, vec_b)
            return 0.0 unless vec_a.is_a?(Array) && vec_b.is_a?(Array)
            return 0.0 if vec_a.empty? || vec_b.empty?

            dot_product = vec_a.zip(vec_b).sum { |a, b| a * b }
            magnitude_a = Math.sqrt(vec_a.sum { |v| v**2 })
            magnitude_b = Math.sqrt(vec_b.sum { |v| v**2 })

            return 0.0 if magnitude_a.zero? || magnitude_b.zero?
            dot_product / (magnitude_a * magnitude_b)
          end
        end
      end
    end
  end
end
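The two-stage filter is observable with hand-built vectors. With the default RELATIVE_SCORE_THRESHOLD of 0.85, a chunk survives only if its cosine score is at least 85% of the best score. The Structs below are stand-ins; find_relevant only requires #embedding on the query and on each chunk:

Stub = Struct.new(:embedding, :label)

query  = Stub.new([1.0, 0.0])
chunks = [
  Stub.new([1.0, 0.0], "identical"), # cosine 1.0   -> kept (best score)
  Stub.new([1.0, 0.3], "close"),     # cosine ~0.96 -> kept (>= 1.0 * 0.85)
  Stub.new([0.3, 1.0], "off-topic")  # cosine ~0.29 -> dropped
]

similarity = Deepsearch::Engine::Steps::Rag::Similarity.new
similarity.find_relevant(query, chunks).map(&:label)
# => ["identical", "close"]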
data/lib/deepsearch/engine/steps/rag/values/chunk.rb
@@ -0,0 +1,23 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module Rag
        module Values
          # Represents a chunk of text from a document, along with its embedding and source URL.
          # This is the fundamental unit of data used in the RAG process.
          class Chunk
            attr_accessor :text, :embedding, :document_url

            def initialize(text:, embedding: nil, document_url: nil)
              @text = text
              @embedding = embedding
              @document_url = document_url
            end
          end
        end
      end
    end
  end
end
data/lib/deepsearch/engine/steps/rag/values/query.rb
@@ -0,0 +1,44 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module Rag
        module Values
          # Represents a user query that has been prepared for the RAG process.
          # It enriches the original query text with LLM-generated tags to improve
          # embedding quality and then computes the embedding vector.
          class Query
            attr_reader :text, :embedding

            def initialize(text:)
              raise ArgumentError, "Query text cannot be blank" if text.to_s.strip.empty?

              @text = text
              enriched_text = enrich_query_with_tags(text)
              @embedding = RubyLLM.embed(enriched_text).vectors
            end

            private

            def enrich_query_with_tags(original_text)
              prompt = Deepsearch.configuration.prompts.enrich_query_prompt(query: original_text)

              Deepsearch.configuration.logger.debug("Enriching query with LLM-generated tags...")
              response = RubyLLM.chat.ask(prompt)
              tags_list = response.content.strip
              Deepsearch.configuration.logger.debug("Generated tags for query enrichment: #{tags_list}")

              enriched_text = "#{original_text} - related concepts: #{tags_list}"
              Deepsearch.configuration.logger.debug("Enriched query for embedding: \"#{enriched_text}\"")
              enriched_text
            rescue StandardError => e
              Deepsearch.configuration.logger.debug("Failed to enrich query due to '#{e.message}'. Using original query for embedding.")
              original_text
            end
          end
        end
      end
    end
  end
end
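The enriched string has a fixed shape, so if the tagging call raises, the rescue degrades gracefully to embedding the raw query text. A concrete illustration (the tags are hypothetical LLM output):

# Given the query "ruby async io" and hypothetical LLM tags "fibers, event loop, reactor",
# the text passed to RubyLLM.embed is:
"ruby async io - related concepts: fibers, event loop, reactor"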
data/lib/deepsearch/engine/steps/rag/values/result.rb
@@ -0,0 +1,33 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module Rag
        module Values
          # Represents the result of the RAG processing step.
          # It contains the original query object and a list of text chunks
          # deemed most relevant to the query.
          class Result
            attr_reader :query, :relevant_chunks, :error, :success

            def initialize(query: nil, relevant_chunks: [], error: nil)
              @query = query
              @relevant_chunks = relevant_chunks
              @success = error.nil?
              @error = error
            end

            def success?
              @success
            end

            def failure?
              !success?
            end
          end
        end
      end
    end
  end
end
data/lib/deepsearch/engine/steps/summarization/process.rb
@@ -0,0 +1,53 @@
# frozen_string_literal: true

require_relative 'values/result'

module Deepsearch
  class Engine
    module Steps
      module Summarization
        # Generates a final, synthesized answer to the user's query based on relevant text chunks.
        # It constructs a detailed prompt for an LLM, including the query, context from chunks,
        # and instructions for citing sources, then returns the LLM's response.
        class Process
          attr_reader :query, :relevant_chunks

          def initialize(query:, relevant_chunks:)
            @query = query
            @relevant_chunks = relevant_chunks
          end

          def execute
            return Values::Result.new(summary: "No relevant content found to summarize.") if relevant_chunks.empty?

            prompt = build_summary_prompt
            Deepsearch.configuration.logger.debug("Summarizing content with LLM...")
            response = RubyLLM.chat.ask(prompt)
            Deepsearch.configuration.logger.debug("Summarization complete.")

            Values::Result.new(summary: response.content)
          rescue StandardError => e
            Deepsearch.configuration.logger.debug("Error during summarization: #{e.message}")
            Values::Result.new(summary: nil, error: e.message)
          end

          private

          def build_summary_prompt
            chunks_by_url = relevant_chunks.group_by(&:document_url)
            citation_map = chunks_by_url.keys.each_with_index.to_h { |url, i| [url, i + 1] }

            context_text = chunks_by_url.map do |url, chunks|
              citation_number = citation_map[url]
              chunk_contents = chunks.map(&:text).join("\n\n")
              "Source [#{citation_number}]:\n#{chunk_contents}"
            end.join("\n\n---\n\n")

            sources_list = citation_map.map { |url, number| "[#{number}]: #{url}" }.join("\n")
            Deepsearch.configuration.prompts.summarization_prompt(query: @query.text, context_text: context_text, sources_list: sources_list)
          end
        end
      end
    end
  end
end
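The citation bookkeeping in build_summary_prompt can be sketched standalone: chunks are grouped by source URL, each URL gets a stable citation number, and the prompt receives both the numbered context and a matching sources list:

Chunk = Struct.new(:text, :document_url) # stand-in for Rag::Values::Chunk
chunks = [
  Chunk.new("Fibers are lightweight.", "https://a.example"),
  Chunk.new("The scheduler multiplexes fibers.", "https://a.example"),
  Chunk.new("Async wraps the scheduler.", "https://b.example")
]

chunks_by_url = chunks.group_by(&:document_url)
citation_map  = chunks_by_url.keys.each_with_index.to_h { |url, i| [url, i + 1] }

citation_map
# => {"https://a.example"=>1, "https://b.example"=>2}
citation_map.map { |url, n| "[#{n}]: #{url}" }.join("\n")
# => "[1]: https://a.example\n[2]: https://b.example"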
data/lib/deepsearch/engine/steps/summarization/values/result.rb
@@ -0,0 +1,31 @@
# frozen_string_literal: true

module Deepsearch
  class Engine
    module Steps
      module Summarization
        module Values
          # Represents the result of the summarization step.
          # It holds the final, synthesized summary and any potential error message.
          class Result
            attr_reader :summary, :error, :success

            def initialize(summary: nil, error: nil)
              @summary = summary
              @success = error.nil?
              @error = error
            end

            def success?
              @success
            end

            def failure?
              !success?
            end
          end
        end
      end
    end
  end
end
data/lib/deepsearch/engine.rb
@@ -0,0 +1,25 @@
# frozen_string_literal: true

require_relative "engine/pipeline"

module Deepsearch
  # The main entry point for performing a deep search.
  # This class initializes the search pipeline with the configured or specified
  # search adapter and provides a `search` method to execute the query.
  class Engine
    attr_reader :pipeline

    def initialize(adapter_type: nil)
      adapter_source = adapter_type ||
                       Deepsearch.configuration.custom_search_adapter_class ||
                       Deepsearch.configuration.search_adapter

      search_adapter = Deepsearch::SearchAdapters.create(adapter_source)
      @pipeline = Engine::Pipeline.new(search_adapter)
    end

    def search(query, **options)
      @pipeline.execute(query, **options)
    end
  end
end
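End to end, the engine is the only class a caller needs. A hedged sketch: the :mock adapter type is an assumption inferred from search_adapters/mock_adapter.rb in the file list, and the option name is taken from the parallel-search step; the exact configuration keys live in configuration.rb, whose diff is not shown here:

require 'deepsearch'

engine = Deepsearch::Engine.new(adapter_type: :mock) # assumed symbol for the bundled mock adapter
result = engine.search("how do ruby fibers work?", max_total_search_results: 10)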
data/lib/deepsearch/logger.rb
@@ -0,0 +1,32 @@
# frozen_string_literal: true

require 'logger'
require 'forwardable'

module Deepsearch
  # A custom logger that wraps Ruby's standard `::Logger` to provide a default format.
  class Logger
    extend Forwardable

    def_delegators :@logger, :debug, :level=, :level, :progname=, :progname, :formatter=, :formatter

    # Re-exporting constants from ::Logger for compatibility.
    DEBUG = ::Logger::DEBUG

    def initialize(logdev, level: DEBUG, progname: 'DeepSearch', formatter: nil)
      @logger = ::Logger.new(logdev)
      @logger.level = level
      @logger.progname = progname
      @logger.formatter = formatter || default_formatter
    end

    private

    def default_formatter
      proc do |severity, datetime, progname, msg|
        formatted_time = datetime.strftime('%Y-%m-%d %H:%M:%S.%L')
        "[#{formatted_time}] #{severity.ljust(5)} -- #{progname}: #{msg}\n"
      end
    end
  end
end
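The default formatter produces one line per entry. For example:

logger = Deepsearch::Logger.new($stdout)
logger.debug("starting pipeline")
# [2025-01-01 12:00:00.000] DEBUG -- DeepSearch: starting pipeline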