llm_chain 0.5.4 → 0.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f7c55f5d0965bd544f4f3eb647e85127207f41d257803023d4a5df854faa3b7b
-  data.tar.gz: 988b46a9300325e3ab6c4bf94ea07c016c79d308b7c8c0b6bde7a12ec8fcbdca
+  metadata.gz: 1963821b40f17c255520b55422bbc7cf6603be88555a99403a8ce24b1c4914f4
+  data.tar.gz: f27902811ba56ee4b1d9da09b22bd99204f01573b848c16e6a9e368f68eb40a4
 SHA512:
-  metadata.gz: 1f59c7598a9a84d10bde4100aba65c7d55d5a986e0d7bfe2af71a08d83ae5ea689acc4ed22349377b023a7e23ba85facb151b6f70da3d9bd6549ddb8d7552f40
-  data.tar.gz: dc672ea465a8d0f91591b00b3f75ff0c95319e2eee54cad0709976b254f9173bd701c607e2fbe7025fb01e044fbe20d6f9f0998c61c80da7128b1c0daeea6ed8
+  metadata.gz: 4a3611448d4e85e2f3bcec56f0c8d1c599bcf9da467a963b23fcffe6466e28e1d99dcae34449bffde4bb4672b1be5a6fe4dc35146c97dba8d968ab1fd6b77af1
+  data.tar.gz: e918646ffa824e762d7cefa92da2effcb5a23b412202c6d967e99e6d719035fc89834c0c6f65aea68fe131a92e41474bcf20d93d6ec1cdc2900943f0d51faf39
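These digests cover the two archives packed inside the gem. A minimal verification sketch, assuming the gem file has been unpacked first (e.g. `tar -xf llm_chain-0.5.5.gem`) so that `checksums.yaml`, `metadata.gz`, and `data.tar.gz` sit in the working directory:

```ruby
require 'digest'
require 'yaml'

# checksums.yaml maps algorithm name => { archive filename => hex digest }
sums = YAML.safe_load(File.read('checksums.yaml'))

%w[metadata.gz data.tar.gz].each do |file|
  actual = Digest::SHA256.file(file).hexdigest
  puts "#{file}: #{actual == sums['SHA256'][file] ? 'OK' : 'MISMATCH'}"
end
```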
data/CHANGELOG.md CHANGED
@@ -7,7 +7,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
-## [0.5.4] - 2025-01-17
+## [0.5.5] - 2025-07-17
+
+### Changed
+* **Major refactor:**
+  * Core classes have been extensively refactored for improved modularity, clarity, and maintainability:
+    * Adopted SOLID principles throughout the codebase.
+    * Extracted interfaces for memory, tool management, and all builder components, now located in a dedicated `interfaces` namespace.
+    * Introduced a `builders` folder and builder pattern for prompt, memory context, tool responses, RAG documents, and retriever context.
+    * Improved dependency injection and separation of concerns, making the codebase easier to extend and test.
+    * Centralized error handling and configuration validation.
+    * Enhanced documentation and type signatures for all major classes.
+  * The public API remains minimal and idiomatic, with extensibility via interfaces and factories.
+* `Chain#ask` method rewritten following Ruby best practices: now declarative, each pipeline stage is a private method, code is cleaner and easier to extend.
+* All ToolManager creation and configuration is now done via a dedicated factory: `ToolManagerFactory`. Old calls (`ToolManager.create_default_toolset`, `ToolManager.from_config`) have been replaced with factory methods.
+
+## [0.5.4] - 2025-07-08
 
 ### Added
 * **Deepseek-Coder-V2 Client** - Support for Deepseek-Coder-V2 models via Ollama
@@ -100,4 +115,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 [0.5.3]: https://github.com/FuryCow/llm_chain/compare/v0.5.2...v0.5.3
 [0.5.2]: https://github.com/FuryCow/llm_chain/compare/v0.5.1...v0.5.2
 [0.5.1]: https://github.com/FuryCow/llm_chain/compare/v0.5.0...v0.5.1
-[0.5.0]: https://github.com/FuryCow/llm_chain/releases/tag/v0.5.0
+[0.5.0]: https://github.com/FuryCow/llm_chain/releases/tag/v0.5.0
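For callers, the `ToolManagerFactory` change is a one-for-one rename. A migration sketch using only the calls named in the changelog (`tools_config` stands in for an existing configuration array):

```ruby
# Up to 0.5.4:
tool_manager = LLMChain::Tools::ToolManager.create_default_toolset
tool_manager = LLMChain::Tools::ToolManager.from_config(tools_config)

# From 0.5.5, the same toolsets come from the factory:
tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
tool_manager = LLMChain::Tools::ToolManagerFactory.from_config(tools_config)
```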
data/README.md CHANGED
@@ -159,7 +159,7 @@ chain = LLMChain.quick_chain(
 # Manual validation
 LLMChain::ConfigurationValidator.validate_chain_config!(
   model: "qwen3:1.7b",
-  tools: LLMChain::Tools::ToolManager.create_default_toolset
+  tools: LLMChain::Tools::ToolManagerFactory.create_default_toolset
 )
 ```
 
@@ -182,7 +182,7 @@ chain.ask("Execute code: puts (1..10).sum")
 # 💻 Result: 55
 
 # Traditional setup
-tool_manager = LLMChain::Tools::ToolManager.create_default_toolset
+tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
 chain = LLMChain::Chain.new(
   model: "qwen3:1.7b",
   tools: tool_manager
@@ -342,6 +342,7 @@ end
 
 # Usage
 weather = WeatherTool.new(api_key: "your-key")
+tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
 tool_manager.register_tool(weather)
 ```
 
@@ -488,7 +489,7 @@ chain.ask("Tell me about Ruby history", stream: true) do |chunk|
 end
 
 # Streaming with tools
-tool_manager = LLMChain::Tools::ToolManager.create_default_toolset
+tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
 chain = LLMChain::Chain.new(
   model: "qwen3:1.7b",
   tools: tool_manager
@@ -543,7 +544,7 @@ tools_config = [
   }
 ]
 
-tool_manager = LLMChain::Tools::ToolManager.from_config(tools_config)
+tool_manager = LLMChain::Tools::ToolManagerFactory.from_config(tools_config)
 ```
 
 ### Client Settings
@@ -633,7 +634,7 @@ require 'llm_chain'
 
 class ChatBot
   def initialize
-    @tool_manager = LLMChain::Tools::ToolManager.create_default_toolset
+    @tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
     @memory = LLMChain::Memory::Array.new(max_size: 20)
     @chain = LLMChain::Chain.new(
       model: "qwen3:1.7b",
@@ -668,7 +669,7 @@ bot.chat_loop
 ```ruby
 data_chain = LLMChain::Chain.new(
   model: "qwen3:7b",
-  tools: LLMChain::Tools::ToolManager.create_default_toolset
+  tools: LLMChain::Tools::ToolManagerFactory.create_default_toolset
 )
 
 # Analyze CSV data
@@ -71,7 +71,7 @@ end
 # 5. Chain with tools
 puts "\n5. 🛠️ Chain with automatic tools"
 begin
-  tool_manager = LLMChain::Tools::ToolManager.create_default_toolset
+  tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
   smart_chain = LLMChain::Chain.new(
     model: "qwen3:1.7b",
     tools: tool_manager,
@@ -101,7 +101,7 @@ end
 
 # 4. Tool Manager Usage
 puts "\n4. 🎯 Tool Manager"
-tool_manager = LLMChain::Tools::ToolManager.create_default_toolset
+tool_manager = LLMChain::Tools::ToolManagerFactory.create_default_toolset
 
 puts "Registered tools: #{tool_manager.list_tools.map(&:name).join(', ')}"
 
@@ -243,7 +243,7 @@ tools_config = [
   }
 ]
 
-config_tool_manager = LLMChain::Tools::ToolManager.from_config(tools_config)
+config_tool_manager = LLMChain::Tools::ToolManagerFactory.from_config(tools_config)
 puts "Tools from config: #{config_tool_manager.list_tools.map(&:name).join(', ')}"
 
 # Test configuration-based setup
data/exe/llm-chain CHANGED
@@ -112,7 +112,7 @@ when "tools"
   sub = ARGV.shift
   case sub
   when "list"
-    tm = LLMChain::Tools::ToolManager.create_default_toolset
+    tm = LLMChain::Tools::ToolManagerFactory.create_default_toolset
     puts tm.tools_description
   else
    warn "Unknown tools subcommand"; warn USAGE
data/lib/llm_chain/builders/memory_context.rb ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+require_relative '../interfaces/builders/memory_context_builder'
+
+module LLMChain
+  module Builders
+    # Production implementation of memory context builder for LLMChain.
+    # Formats conversation history for inclusion in the prompt.
+    class MemoryContext < Interfaces::Builders::MemoryContext
+      # Build the memory context string for the prompt.
+      # @param memory_history [Array<Hash>] conversation history
+      # @return [String] formatted memory context
+      def build(memory_history)
+        return "" if memory_history.nil? || memory_history.empty?
+        parts = ["Dialogue history:"]
+        memory_history.each do |item|
+          parts << "User: #{item[:prompt]}"
+          parts << "Assistant: #{item[:response]}"
+        end
+        parts.join("\n")
+      end
+    end
+  end
+end
data/lib/llm_chain/builders/prompt.rb ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+require_relative '../interfaces/builders/prompt_builder'
+
+module LLMChain
+  module Builders
+    # Production implementation of prompt builder for LLMChain.
+    # Assembles the final prompt from memory, tools, RAG, and user prompt.
+    class Prompt < Interfaces::Builders::Prompt
+      # Build the final prompt for the LLM.
+      # @param memory_context [String]
+      # @param tool_responses [String]
+      # @param rag_documents [String]
+      # @param prompt [String]
+      # @return [String] final prompt for LLM
+      def build(memory_context:, tool_responses:, rag_documents:, prompt:)
+        parts = []
+        parts << memory_context if memory_context && !memory_context.empty?
+        parts << rag_documents if rag_documents && !rag_documents.empty?
+        parts << tool_responses if tool_responses && !tool_responses.empty?
+        parts << "Current question: #{prompt}"
+        parts.join("\n\n")
+      end
+    end
+  end
+end
data/lib/llm_chain/builders/rag_documents.rb ADDED
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+require_relative '../interfaces/builders/rag_documents_builder'
+
+module LLMChain
+  module Builders
+    # Production implementation of RAG documents builder for LLMChain.
+    # Formats retrieved documents for inclusion in the prompt.
+    class RagDocuments < Interfaces::Builders::RagDocuments
+      # Build the RAG documents string for the prompt.
+      # @param rag_documents [Array<Hash>] list of retrieved documents
+      # @return [String] formatted RAG context
+      def build(rag_documents)
+        return "" if rag_documents.nil? || rag_documents.empty?
+        parts = ["Relevant documents:"]
+        rag_documents.each_with_index do |doc, i|
+          parts << "Document #{i + 1}: #{doc['content'] || doc[:content]}"
+          meta = doc['metadata'] || doc[:metadata]
+          parts << "Metadata: #{meta.to_json}" if meta
+        end
+        parts.join("\n")
+      end
+    end
+  end
+end
data/lib/llm_chain/builders/retriever_context.rb ADDED
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+require_relative '../interfaces/builders/retriever_context_builder'
+
+module LLMChain
+  module Builders
+    # Production implementation of retriever context builder for LLMChain.
+    # Retrieves and formats RAG context documents.
+    class RetrieverContext < Interfaces::Builders::RetrieverContext
+      # Retrieve and format RAG context documents.
+      # @param retriever [Object] retriever instance (must respond to #search)
+      # @param query [String] user query
+      # @param options [Hash]
+      # @return [Array<Hash>] list of retrieved documents
+      def retrieve(retriever, query, options = {})
+        return [] unless retriever && retriever.respond_to?(:search)
+        limit = options[:limit] || 3
+        retriever.search(query, limit: limit)
+      rescue => e
+        warn "[RetrieverContext] Error retrieving context: #{e.message}"
+        []
+      end
+    end
+  end
+end
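`RetrieverContext#retrieve` only assumes the retriever responds to `#search(query, limit:)` and falls back to an empty list on error. A sketch with a hypothetical in-memory retriever (`StaticRetriever` is not part of the gem):

```ruby
require 'llm_chain'

# Hypothetical stand-in: any object with #search(query, limit:) qualifies.
class StaticRetriever
  def initialize(docs)
    @docs = docs
  end

  def search(query, limit: 3)
    @docs.select { |doc| doc[:content].include?(query) }.first(limit)
  end
end

retriever = StaticRetriever.new([{ content: "Ruby was released in 1995." }])
docs = LLMChain::Builders::RetrieverContext.new.retrieve(retriever, "Ruby", limit: 5)
# => [{ content: "Ruby was released in 1995." }]
```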
data/lib/llm_chain/builders/tool_responses.rb ADDED
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+require_relative '../interfaces/builders/tool_responses_builder'
+
+module LLMChain
+  module Builders
+    # Production implementation of tool responses builder for LLMChain.
+    # Formats tool results for inclusion in the prompt.
+    class ToolResponses < Interfaces::Builders::ToolResponses
+      # Build the tool responses string for the prompt.
+      # @param tool_results [Hash] tool name => result
+      # @return [String] formatted tool responses
+      def build(tool_results)
+        return "" if tool_results.nil? || tool_results.empty?
+        parts = ["Tool results:"]
+        tool_results.each do |name, response|
+          if response.is_a?(Hash) && response[:formatted]
+            parts << "#{name}: #{response[:formatted]}"
+          else
+            parts << "#{name}: #{response}"
+          end
+        end
+        parts.join("\n")
+      end
+    end
+  end
+end
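Taken together, the builders are plain string assemblers. A sketch of how they compose into the prompt that `Chain#ask` ultimately sends (sample data only, with `llm_chain` loaded):

```ruby
require 'llm_chain'

memory = LLMChain::Builders::MemoryContext.new.build(
  [{ prompt: "What is 2 + 2?", response: "4" }]
)
tools = LLMChain::Builders::ToolResponses.new.build(
  "calculator" => { formatted: "15 * 7 = 105" }
)
rag = LLMChain::Builders::RagDocuments.new.build(
  [{ "content" => "Ruby was created by Yukihiro Matsumoto.",
     "metadata" => { "source" => "wiki" } }]
)

puts LLMChain::Builders::Prompt.new.build(
  memory_context: memory,
  tool_responses: tools,
  rag_documents: rag,
  prompt: "Who created Ruby?"
)
# Sections appear in the order fixed by Builders::Prompt:
# dialogue history, relevant documents, tool results, then
# "Current question: Who created Ruby?"
```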
data/lib/llm_chain/chain.rb CHANGED
@@ -1,4 +1,11 @@
 require 'json'
+require_relative 'memory/array'
+require_relative 'tools/tool_manager_factory'
+require_relative 'builders/prompt'
+require_relative 'builders/memory_context'
+require_relative 'builders/tool_responses'
+require_relative 'builders/rag_documents'
+require_relative 'builders/retriever_context'
 
 module LLMChain
   # High-level interface that ties together an LLM client, optional memory,
@@ -14,18 +21,29 @@ module LLMChain
     # Create a new chain.
     #
     # @param model [String] model name, e.g. "gpt-4" or "qwen3:1.7b"
-    # @param memory [#recall, #store, nil] conversation memory backend
-    # @param tools [Array<Tools::Base>, Tools::ToolManager, nil]
+    # @param memory [LLMChain::Interfaces::Memory] conversation memory backend
+    # @param tools [LLMChain::Interfaces::ToolManager, Array, true, false, nil]
     # @param retriever [#search, false, nil] document retriever for RAG
+    # @param prompt_builder [LLMChain::Interfaces::Builders::Prompt]
+    # @param memory_context_builder [LLMChain::Interfaces::Builders::MemoryContext]
+    # @param tool_responses_builder [LLMChain::Interfaces::Builders::ToolResponses]
+    # @param rag_documents_builder [LLMChain::Interfaces::Builders::RagDocuments]
+    # @param retriever_context_builder [LLMChain::Interfaces::Builders::RetrieverContext]
     # @param validate_config [Boolean] run {ConfigurationValidator}
    # @param client_options [Hash] extra LLM-client options (api_key etc.)
-    def initialize(model: nil, memory: nil, tools: [], retriever: false, validate_config: true, **client_options)
-      # Configuration validation (can be disabled via validate_config: false)
+    def initialize(
+      model: nil,
+      memory: nil,
+      tools: true,
+      retriever: false,
+      validate_config: true,
+      **client_options
+    )
       if validate_config
        begin
          ConfigurationValidator.validate_chain_config!(
-            model: model,
-            tools: tools,
+            model: model,
+            tools: tools,
            memory: memory,
            retriever: retriever,
            **client_options
@@ -37,7 +55,18 @@ module LLMChain
 
       @model = model
       @memory = memory || Memory::Array.new
-      @tools = tools
+      @tools =
+        if tools == true
+          Tools::ToolManagerFactory.create_default_toolset
+        elsif tools.is_a?(Array)
+          Tools::ToolManager.new(tools: tools)
+        elsif tools.is_a?(Tools::ToolManager)
+          tools
+        elsif tools.is_a?(Hash) && tools[:config]
+          Tools::ToolManagerFactory.from_config(tools[:config])
+        else
+          nil
+        end
       @retriever = if retriever.nil?
                      Embeddings::Clients::Local::WeaviateRetriever.new
                    elsif retriever == false
@@ -46,6 +75,13 @@ module LLMChain
                      retriever
                    end
       @client = ClientRegistry.client_for(model, **client_options)
+
+      # Always use default builders
+      @prompt_builder = Builders::Prompt.new
+      @memory_context_builder = Builders::MemoryContext.new
+      @tool_responses_builder = Builders::ToolResponses.new
+      @rag_documents_builder = Builders::RagDocuments.new
+      @retriever_context_builder = Builders::RetrieverContext.new
     end
 
     # Main inference entrypoint.
@@ -57,94 +93,45 @@ module LLMChain
     # @yield [String] chunk — called when `stream` is true
     # @return [String] assistant response
     def ask(prompt, stream: false, rag_context: false, rag_options: {}, &block)
-      context = collect_context(prompt, rag_context, rag_options)
-      full_prompt = build_prompt(prompt: prompt, **context)
+      memory_context = build_memory_context(prompt)
+      tool_responses = build_tool_responses(prompt)
+      rag_documents = build_rag_documents(prompt, rag_context, rag_options)
+      full_prompt = build_full_prompt(prompt, memory_context, tool_responses, rag_documents)
+
       response = generate_response(full_prompt, stream: stream, &block)
-      memory.store(prompt, response)
+      store_memory(prompt, response)
       response
     end
 
-    # Collect memory, tool results and RAG docs for current request.
-    # @api private
-    def collect_context(prompt, rag_context, rag_options)
-      context = memory.recall(prompt)
-      tool_responses = process_tools(prompt)
-      rag_documents = retrieve_rag_context(prompt, rag_options) if rag_context
-      { memory_context: context, tool_responses: tool_responses, rag_documents: rag_documents }
-    end
-
     private
 
-    def retrieve_rag_context(query, options = {})
-      return [] unless @retriever
-
-      limit = options[:limit] || 3
-      @retriever.search(query, limit: limit)
-    rescue => e
-      raise Error, "Cannot retrieve rag context"
+    def build_memory_context(prompt)
+      history = @memory&.recall(prompt)
+      @memory_context_builder.build(history)
     end
 
-    def process_tools(prompt)
-      return {} if @tools.nil? || (@tools.respond_to?(:empty?) && @tools.empty?)
-
-      # If @tools is a ToolManager
-      if @tools.respond_to?(:auto_execute)
-        @tools.auto_execute(prompt)
-      elsif @tools.is_a?(Array)
-        # Old logic for a plain array of tools
-        @tools.each_with_object({}) do |tool, acc|
-          if tool.match?(prompt)
-            response = tool.call(prompt)
-            acc[tool.name] = response unless response.nil?
-          end
-        end
-      else
-        {}
-      end
-    end
-
-    def build_prompt(prompt:, memory_context: nil, tool_responses: {}, rag_documents: nil)
-      parts = []
-      parts << build_memory_context(memory_context) if memory_context&.any?
-      parts << build_rag_documents(rag_documents) if rag_documents&.any?
-      parts << build_tool_responses(tool_responses) unless tool_responses.empty?
-      parts << "Сurrent question: #{prompt}"
-      parts.join("\n\n")
+    def build_tool_responses(prompt)
+      results = @tools&.execute_tools(prompt) || {}
+      @tool_responses_builder.build(results)
     end
 
-    def build_memory_context(memory_context)
-      parts = ["Dialogue history:"]
-      memory_context.each do |item|
-        parts << "User: #{item[:prompt]}"
-        parts << "Assistant: #{item[:response]}"
-      end
-      parts.join("\n")
+    def build_rag_documents(prompt, rag_context, rag_options)
+      return "" unless rag_context && @retriever
+      docs = @retriever_context_builder.retrieve(@retriever, prompt, rag_options)
+      @rag_documents_builder.build(docs)
     end
 
-    def build_rag_documents(rag_documents)
-      parts = ["Relevant documents:"]
-      rag_documents.each_with_index do |doc, i|
-        parts << "Document #{i + 1}: #{doc['content']}"
-        parts << "Metadata: #{doc['metadata'].to_json}" if doc['metadata']
-      end
-      parts.join("\n")
+    def build_full_prompt(prompt, memory_context, tool_responses, rag_documents)
+      @prompt_builder.build(
+        memory_context: memory_context,
+        tool_responses: tool_responses,
+        rag_documents: rag_documents,
+        prompt: prompt
+      )
     end
 
-    def build_tool_responses(tool_responses)
-      parts = ["Tool results:"]
-      tool_responses.each do |name, response|
-        if response.is_a?(Hash) && response[:formatted]
-          # Special handling for search without results
-          if name == "web_search" && response[:results] && response[:results].empty?
-            parts << "#{name}: No search results found. Please answer based on your knowledge, but indicate that search was unavailable."
-          else
-            parts << "#{name}: #{response[:formatted]}"
-          end
-        else
-          parts << "#{name}: #{response}"
-        end
-      end
-      parts.join("\n")
+    def store_memory(prompt, response)
+      @memory&.store(prompt, response)
     end
 
     def generate_response(prompt, stream: false, &block)
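The constructor's `tools:` normalization above accepts several shapes. A sketch of each accepted value (model name and `WeatherTool` borrowed from the README examples; `tools_config` stands in for a real config array):

```ruby
# true -> default toolset via ToolManagerFactory (the new default)
LLMChain::Chain.new(model: "qwen3:1.7b", tools: true)

# Array of tools -> wrapped in a ToolManager
LLMChain::Chain.new(model: "qwen3:1.7b", tools: [WeatherTool.new(api_key: "your-key")])

# Existing ToolManager -> used as-is
tm = LLMChain::Tools::ToolManagerFactory.create_default_toolset
LLMChain::Chain.new(model: "qwen3:1.7b", tools: tm)

# Hash with :config -> built via ToolManagerFactory.from_config
LLMChain::Chain.new(model: "qwen3:1.7b", tools: { config: tools_config })

# false or nil -> no tools at all
LLMChain::Chain.new(model: "qwen3:1.7b", tools: false)
```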
data/lib/llm_chain/interfaces/builders/memory_context_builder.rb ADDED
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+module LLMChain
+  module Interfaces
+    module Builders
+      # Abstract interface for building memory context in LLMChain.
+      # Implementations must provide a method to format conversation history for the prompt.
+      #
+      # @abstract
+      class MemoryContext
+        # Build the memory context string for the prompt.
+        # @param memory_history [Array<Hash>] conversation history
+        # @return [String] formatted memory context
+        def build(memory_history)
+          raise NotImplementedError, "Implement in subclass"
+        end
+      end
+    end
+  end
+end
data/lib/llm_chain/interfaces/builders/prompt_builder.rb ADDED
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+module LLMChain
+  module Interfaces
+    module Builders
+      # Abstract interface for prompt building in LLMChain.
+      # Implementations must provide a method to assemble the final prompt for the LLM.
+      #
+      # @abstract
+      class Prompt
+        # Build the final prompt for the LLM.
+        # @param memory_context [String]
+        # @param tool_responses [String]
+        # @param rag_documents [String]
+        # @param prompt [String]
+        # @return [String] final prompt for LLM
+        def build(memory_context:, tool_responses:, rag_documents:, prompt:)
+          raise NotImplementedError, "Implement in subclass"
+        end
+      end
+    end
+  end
+end
data/lib/llm_chain/interfaces/builders/rag_documents_builder.rb ADDED
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+module LLMChain
+  module Interfaces
+    module Builders
+      # Abstract interface for building RAG documents context in LLMChain.
+      # Implementations must provide a method to format retrieved documents for the prompt.
+      #
+      # @abstract
+      class RagDocuments
+        # Build the RAG documents string for the prompt.
+        # @param rag_documents [Array<Hash>] list of retrieved documents
+        # @return [String] formatted RAG context
+        def build(rag_documents)
+          raise NotImplementedError, "Implement in subclass"
+        end
+      end
+    end
+  end
+end
data/lib/llm_chain/interfaces/builders/retriever_context_builder.rb ADDED
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module LLMChain
+  module Interfaces
+    module Builders
+      # Abstract interface for retrieving and formatting RAG context in LLMChain.
+      # Implementations must provide a method to retrieve and format context documents.
+      #
+      # @abstract
+      class RetrieverContext
+        # Retrieve and format RAG context documents.
+        # @param retriever [Object] retriever instance
+        # @param query [String] user query
+        # @param options [Hash]
+        # @return [Array<Hash>] list of retrieved documents
+        def retrieve(retriever, query, options = {})
+          raise NotImplementedError, "Implement in subclass"
+        end
+      end
+    end
+  end
+end
data/lib/llm_chain/interfaces/builders/tool_responses_builder.rb ADDED
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+module LLMChain
+  module Interfaces
+    module Builders
+      # Abstract interface for building tool responses context in LLMChain.
+      # Implementations must provide a method to format tool results for the prompt.
+      #
+      # @abstract
+      class ToolResponses
+        # Build the tool responses string for the prompt.
+        # @param tool_results [Hash] tool name => result
+        # @return [String] formatted tool responses
+        def build(tool_results)
+          raise NotImplementedError, "Implement in subclass"
+        end
+      end
+    end
+  end
+end
data/lib/llm_chain/interfaces/memory.rb ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module LLMChain
+  module Interfaces
+    # Abstract interface for memory adapters in LLMChain.
+    # Implementations must provide methods for storing and recalling conversation history.
+    #
+    # @abstract
+    class Memory
+      # Store a prompt/response pair in memory.
+      # @param prompt [String]
+      # @param response [String]
+      # @return [void]
+      def store(prompt, response)
+        raise NotImplementedError, "Implement in subclass"
+      end
+
+      # Recall conversation history (optionally filtered by prompt).
+      # @param prompt [String, nil]
+      # @return [Array<Hash>] [{ prompt: ..., response: ... }, ...]
+      def recall(prompt = nil)
+        raise NotImplementedError, "Implement in subclass"
+      end
+
+      # Clear all memory.
+      # @return [void]
+      def clear
+        raise NotImplementedError, "Implement in subclass"
+      end
+
+      # Return number of stored items.
+      # @return [Integer]
+      def size
+        raise NotImplementedError, "Implement in subclass"
+      end
+    end
+  end
+end
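Because every method raises `NotImplementedError`, the interface doubles as a template for custom backends. A minimal conforming adapter, a hypothetical size-capped ring buffer not shipped with the gem (assuming the configuration validator accepts any object implementing this interface):

```ruby
require 'llm_chain'

class RingMemory < LLMChain::Interfaces::Memory
  def initialize(max_size: 10)
    @items = []
    @max_size = max_size
  end

  # Keep only the most recent max_size exchanges.
  def store(prompt, response)
    @items << { prompt: prompt, response: response }
    @items.shift while @items.size > @max_size
    nil
  end

  # Chain#ask passes the current prompt; this sketch ignores it
  # and returns the full history.
  def recall(prompt = nil)
    @items
  end

  def clear
    @items.clear
  end

  def size
    @items.size
  end
end

chain = LLMChain::Chain.new(model: "qwen3:1.7b", memory: RingMemory.new(max_size: 20))
```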