llm_chain 0.5.3 → 0.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +30 -2
  3. data/README.md +15 -6
  4. data/examples/quick_demo.rb +1 -1
  5. data/examples/tools_example.rb +2 -2
  6. data/exe/llm-chain +7 -4
  7. data/lib/llm_chain/builders/memory_context.rb +24 -0
  8. data/lib/llm_chain/builders/prompt.rb +26 -0
  9. data/lib/llm_chain/builders/rag_documents.rb +25 -0
  10. data/lib/llm_chain/builders/retriever_context.rb +25 -0
  11. data/lib/llm_chain/builders/tool_responses.rb +27 -0
  12. data/lib/llm_chain/chain.rb +89 -88
  13. data/lib/llm_chain/client_registry.rb +2 -0
  14. data/lib/llm_chain/clients/base.rb +24 -2
  15. data/lib/llm_chain/clients/deepseek_coder_v2.rb +32 -0
  16. data/lib/llm_chain/configuration_validator.rb +1 -1
  17. data/lib/llm_chain/interfaces/builders/memory_context_builder.rb +20 -0
  18. data/lib/llm_chain/interfaces/builders/prompt_builder.rb +23 -0
  19. data/lib/llm_chain/interfaces/builders/rag_documents_builder.rb +20 -0
  20. data/lib/llm_chain/interfaces/builders/retriever_context_builder.rb +22 -0
  21. data/lib/llm_chain/interfaces/builders/tool_responses_builder.rb +20 -0
  22. data/lib/llm_chain/interfaces/memory.rb +38 -0
  23. data/lib/llm_chain/interfaces/tool_manager.rb +87 -0
  24. data/lib/llm_chain/memory/array.rb +18 -1
  25. data/lib/llm_chain/memory/redis.rb +20 -3
  26. data/lib/llm_chain/system_diagnostics.rb +73 -0
  27. data/lib/llm_chain/tools/base.rb +103 -0
  28. data/lib/llm_chain/tools/base_tool.rb +6 -76
  29. data/lib/llm_chain/tools/calculator.rb +118 -45
  30. data/lib/llm_chain/tools/code_interpreter.rb +43 -43
  31. data/lib/llm_chain/tools/date_time.rb +58 -0
  32. data/lib/llm_chain/tools/tool_manager.rb +46 -88
  33. data/lib/llm_chain/tools/tool_manager_factory.rb +44 -0
  34. data/lib/llm_chain/tools/web_search.rb +168 -336
  35. data/lib/llm_chain/version.rb +1 -1
  36. data/lib/llm_chain.rb +58 -56
  37. metadata +19 -2
data/lib/llm_chain/interfaces/builders/memory_context_builder.rb
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     module Builders
+       # Abstract interface for building memory context in LLMChain.
+       # Implementations must provide a method to format conversation history for the prompt.
+       #
+       # @abstract
+       class MemoryContext
+         # Build the memory context string for the prompt.
+         # @param memory_history [Array<Hash>] conversation history
+         # @return [String] formatted memory context
+         def build(memory_history)
+           raise NotImplementedError, "Implement in subclass"
+         end
+       end
+     end
+   end
+ end
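The shipped implementation (data/lib/llm_chain/builders/memory_context.rb, +24 lines) is not part of this diff; as a rough sketch, a conforming builder only needs to subclass the interface and implement #build. The entry keys :prompt and :response match what the memory adapters later in this diff store; the class name and formatting below are illustrative.

require 'llm_chain'

# Hypothetical builder sketch; not the gem's actual Builders::MemoryContext.
class SimpleMemoryContext < LLMChain::Interfaces::Builders::MemoryContext
  # @param memory_history [Array<Hash>] e.g. [{ prompt: "Hi", response: "Hello" }]
  # @return [String]
  def build(memory_history)
    return "" if memory_history.nil? || memory_history.empty?

    lines = memory_history.map do |entry|
      "User: #{entry[:prompt]}\nAssistant: #{entry[:response]}"
    end
    "Conversation history:\n#{lines.join("\n")}"
  end
end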
data/lib/llm_chain/interfaces/builders/prompt_builder.rb
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     module Builders
+       # Abstract interface for prompt building in LLMChain.
+       # Implementations must provide a method to assemble the final prompt for the LLM.
+       #
+       # @abstract
+       class Prompt
+         # Build the final prompt for the LLM.
+         # @param memory_context [String]
+         # @param tool_responses [String]
+         # @param rag_documents [String]
+         # @param prompt [String]
+         # @return [String] final prompt for LLM
+         def build(memory_context:, tool_responses:, rag_documents:, prompt:)
+           raise NotImplementedError, "Implement in subclass"
+         end
+       end
+     end
+   end
+ end
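A conforming prompt builder simply assembles the already-formatted sections into one string. The shipped Builders::Prompt (+26 lines) is not shown here; the sketch below is illustrative only.

# Hypothetical prompt builder sketch; joins the non-empty sections in a fixed order.
class SimplePrompt < LLMChain::Interfaces::Builders::Prompt
  def build(memory_context:, tool_responses:, rag_documents:, prompt:)
    [memory_context, rag_documents, tool_responses, prompt]
      .reject { |part| part.nil? || part.strip.empty? }
      .join("\n\n")
  end
end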
data/lib/llm_chain/interfaces/builders/rag_documents_builder.rb
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     module Builders
+       # Abstract interface for building RAG documents context in LLMChain.
+       # Implementations must provide a method to format retrieved documents for the prompt.
+       #
+       # @abstract
+       class RagDocuments
+         # Build the RAG documents string for the prompt.
+         # @param rag_documents [Array<Hash>] list of retrieved documents
+         # @return [String] formatted RAG context
+         def build(rag_documents)
+           raise NotImplementedError, "Implement in subclass"
+         end
+       end
+     end
+   end
+ end
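Again, an implementation only formats what the retriever returned. The document keys used below (:content, :source) are assumptions for illustration; the diff does not show the shipped Builders::RagDocuments.

# Hypothetical RAG documents builder sketch.
class SimpleRagDocuments < LLMChain::Interfaces::Builders::RagDocuments
  def build(rag_documents)
    return "" if rag_documents.nil? || rag_documents.empty?

    formatted = rag_documents.each_with_index.map do |doc, i|
      "[#{i + 1}] #{doc[:content]} (source: #{doc[:source]})"  # assumed document shape
    end
    "Relevant documents:\n#{formatted.join("\n")}"
  end
end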
data/lib/llm_chain/interfaces/builders/retriever_context_builder.rb
@@ -0,0 +1,22 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     module Builders
+       # Abstract interface for retrieving and formatting RAG context in LLMChain.
+       # Implementations must provide a method to retrieve and format context documents.
+       #
+       # @abstract
+       class RetrieverContext
+         # Retrieve and format RAG context documents.
+         # @param retriever [Object] retriever instance
+         # @param query [String] user query
+         # @param options [Hash]
+         # @return [Array<Hash>] list of retrieved documents
+         def retrieve(retriever, query, options = {})
+           raise NotImplementedError, "Implement in subclass"
+         end
+       end
+     end
+   end
+ end
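An implementation delegates to whatever retriever object the chain was configured with. The retriever's #search method and the :limit option in the sketch below are assumptions made for illustration; the actual Builders::RetrieverContext is not shown in this diff.

# Hypothetical retriever-context builder sketch.
class SimpleRetrieverContext < LLMChain::Interfaces::Builders::RetrieverContext
  def retrieve(retriever, query, options = {})
    return [] unless retriever

    retriever.search(query, limit: options.fetch(:limit, 3)) # assumed retriever API
  rescue StandardError
    [] # fail soft: RAG context is optional
  end
end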
data/lib/llm_chain/interfaces/builders/tool_responses_builder.rb
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     module Builders
+       # Abstract interface for building tool responses context in LLMChain.
+       # Implementations must provide a method to format tool results for the prompt.
+       #
+       # @abstract
+       class ToolResponses
+         # Build the tool responses string for the prompt.
+         # @param tool_results [Hash] tool name => result
+         # @return [String] formatted tool responses
+         def build(tool_results)
+           raise NotImplementedError, "Implement in subclass"
+         end
+       end
+     end
+   end
+ end
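This builder receives the hash produced by the tool manager (tool name => result) and flattens it into prompt text. An illustrative sketch, not the shipped Builders::ToolResponses:

# Hypothetical tool-responses builder sketch.
class SimpleToolResponses < LLMChain::Interfaces::Builders::ToolResponses
  def build(tool_results)
    return "" if tool_results.nil? || tool_results.empty?

    lines = tool_results.map { |name, result| "#{name}: #{result}" }
    "Tool results:\n#{lines.join("\n")}"
  end
end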
data/lib/llm_chain/interfaces/memory.rb
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     # Abstract interface for memory adapters in LLMChain.
+     # Implementations must provide methods for storing and recalling conversation history.
+     #
+     # @abstract
+     class Memory
+       # Store a prompt/response pair in memory.
+       # @param prompt [String]
+       # @param response [String]
+       # @return [void]
+       def store(prompt, response)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Recall conversation history (optionally filtered by prompt).
+       # @param prompt [String, nil]
+       # @return [Array<Hash>] [{ prompt: ..., response: ... }, ...]
+       def recall(prompt = nil)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Clear all memory.
+       # @return [void]
+       def clear
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Return number of stored items.
+       # @return [Integer]
+       def size
+         raise NotImplementedError, "Implement in subclass"
+       end
+     end
+   end
+ end
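Beyond the bundled Array and Redis adapters (updated further down to subclass this interface), any store works as long as it implements these four methods. A minimal JSON-file-backed sketch, purely illustrative and not part of the gem:

require 'llm_chain'
require 'json'

# Hypothetical custom memory adapter persisting history to a JSON file.
class FileMemory < LLMChain::Interfaces::Memory
  def initialize(path: 'memory.json')
    @path = path
  end

  def store(prompt, response)
    entries = recall
    entries << { prompt: prompt, response: response }
    File.write(@path, JSON.dump(entries))
  end

  def recall(_prompt = nil)
    File.exist?(@path) ? JSON.parse(File.read(@path), symbolize_names: true) : []
  end

  def clear
    File.delete(@path) if File.exist?(@path)
  end

  def size
    recall.size
  end
end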
data/lib/llm_chain/interfaces/tool_manager.rb
@@ -0,0 +1,87 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   module Interfaces
+     # Abstract interface for tool management in LLMChain.
+     # Implementations must provide methods for registering, finding, and executing tools.
+     #
+     # @abstract
+     class ToolManager
+       # Register a new tool instance.
+       # @param tool [LLMChain::Tools::Base]
+       # @return [void]
+       def register_tool(tool)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Unregister a tool by name.
+       # @param name [String]
+       # @return [void]
+       def unregister_tool(name)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Fetch a tool by its name.
+       # @param name [String]
+       # @return [LLMChain::Tools::Base, nil]
+       def get_tool(name)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # List all registered tools.
+       # @return [Array<LLMChain::Tools::Base>]
+       def list_tools
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Find tools whose #match? returns true for the prompt.
+       # @param prompt [String]
+       # @return [Array<LLMChain::Tools::Base>]
+       def find_matching_tools(prompt)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Execute every matching tool and collect results.
+       # @param prompt [String]
+       # @param context [Hash]
+       # @return [Hash] mapping tool name → result hash
+       def execute_tools(prompt, context: {})
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Format tool execution results for inclusion into an LLM prompt.
+       # @param results [Hash]
+       # @return [String]
+       def format_tool_results(results)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Human-readable list of available tools.
+       # @return [String]
+       def tools_description
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Determine if prompt likely needs tool usage.
+       # @param prompt [String]
+       # @return [Boolean]
+       def needs_tools?(prompt)
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Auto-select and execute best tools for prompt.
+       # @param prompt [String]
+       # @param context [Hash]
+       # @return [Hash]
+       def auto_execute(prompt, context: {})
+         raise NotImplementedError, "Implement in subclass"
+       end
+
+       # Build JSON schemas for all registered tools.
+       # @return [Array<Hash>]
+       def get_tools_schema
+         raise NotImplementedError, "Implement in subclass"
+       end
+     end
+   end
+ end
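The chain talks to a tool manager only through this interface, so the typical call sequence looks roughly like the sketch below. Here tool_manager stands for any conforming implementation (for example the refactored Tools::ToolManager); the return shapes follow the YARD docs above.

# Illustrative call sequence against an Interfaces::ToolManager implementation.
prompt = "What is 25 * 17?"

if tool_manager.needs_tools?(prompt)
  results      = tool_manager.execute_tools(prompt)         # Hash: tool name => result hash
  tool_context = tool_manager.format_tool_results(results)  # String handed to the prompt builder
  schemas      = tool_manager.get_tools_schema               # Array of JSON schemas for function calling
end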
data/lib/llm_chain/memory/array.rb
@@ -1,24 +1,41 @@
+ # frozen_string_literal: true
+
+ require_relative '../interfaces/memory'
+
  module LLMChain
    module Memory
-     class Array
+     # In-memory array-based memory adapter for LLMChain.
+     # Stores conversation history in a simple Ruby array.
+     class Array < Interfaces::Memory
        def initialize(max_size: 10)
          @storage = []
          @max_size = max_size
        end

+       # Store a prompt/response pair in memory.
+       # @param prompt [String]
+       # @param response [String]
+       # @return [void]
        def store(prompt, response)
          @storage << { prompt: prompt, response: response }
          @storage.shift if @storage.size > @max_size
        end

+       # Recall conversation history (optionally filtered by prompt).
+       # @param prompt [String, nil]
+       # @return [Array<Hash>]
        def recall(_ = nil)
          @storage.dup
        end

+       # Clear all memory.
+       # @return [void]
        def clear
          @storage.clear
        end

+       # Return number of stored items.
+       # @return [Integer]
        def size
          @storage.size
        end
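Usage is unchanged by the refactor; only the superclass is new:

require 'llm_chain'

memory = LLMChain::Memory::Array.new(max_size: 5)
memory.store("What is Ruby?", "A dynamic, open source programming language.")
memory.recall  # => [{ prompt: "What is Ruby?", response: "A dynamic, open source programming language." }]
memory.size    # => 1
memory.clear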
data/lib/llm_chain/memory/redis.rb
@@ -1,9 +1,15 @@
+ # frozen_string_literal: true
+
+ require_relative '../interfaces/memory'
+
  require 'redis'
  require 'json'

  module LLMChain
    module Memory
-     class Redis
+     # Redis-based memory adapter for LLMChain.
+     # Stores conversation history in a Redis list.
+     class Redis < Interfaces::Memory
        class Error < StandardError; end

        def initialize(max_size: 10, redis_url: nil, namespace: 'llm_chain')
@@ -13,27 +19,38 @@ module LLMChain
          @session_key = "#{namespace}:session"
        end

+       # Store a prompt/response pair in memory.
+       # @param prompt [String]
+       # @param response [String]
+       # @return [void]
        def store(prompt, response)
          entry = { prompt: prompt, response: response, timestamp: Time.now.to_i }.to_json
          @redis.multi do
            @redis.rpush(@session_key, entry)
-           @redis.ltrim(@session_key, -@max_size, -1) # Keep only the last max_size entries
+           @redis.ltrim(@session_key, -@max_size, -1)
          end
        end

+       # Recall conversation history (optionally filtered by prompt).
+       # @param prompt [String, nil]
+       # @return [Array<Hash>]
        def recall(_ = nil)
          entries = @redis.lrange(@session_key, 0, -1)
          entries.map { |e| symbolize_keys(JSON.parse(e)) }
        rescue JSON::ParserError
          []
        rescue ::Redis::CannotConnectError
-         raise MemoryError, "Cannot connect to Redis server"
+         raise Error, "Cannot connect to Redis server"
        end

+       # Clear all memory.
+       # @return [void]
        def clear
          @redis.del(@session_key)
        end

+       # Return number of stored items.
+       # @return [Integer]
        def size
          @redis.llen(@session_key)
        end
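The constructor is unchanged; the notable fix is that a connection failure during #recall now raises the adapter's own Error class instead of the undefined MemoryError. A usage sketch, assuming a Redis server is reachable at the given URL:

require 'llm_chain'

memory = LLMChain::Memory::Redis.new(
  max_size: 20,
  redis_url: 'redis://localhost:6379',  # assumed local Redis instance
  namespace: 'my_app'
)
memory.store("Hello", "Hi there!")
memory.recall  # => [{ prompt: "Hello", response: "Hi there!", timestamp: 1700000000 }]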
data/lib/llm_chain/system_diagnostics.rb
@@ -0,0 +1,73 @@
+ # frozen_string_literal: true
+
+ module LLMChain
+   # System diagnostics utility for checking LLMChain environment
+   class SystemDiagnostics
+     DIAGNOSTICS_HEADER = "🔍 LLMChain System Diagnostics"
+     SEPARATOR = "=" * 50
+
+     def self.run
+       new.run
+     end
+
+     def run
+       puts_header
+       results = ConfigurationValidator.validate_environment
+       display_results(results)
+       display_recommendations(results)
+       puts_footer
+       results
+     end
+
+     private
+
+     def puts_header
+       puts DIAGNOSTICS_HEADER
+       puts SEPARATOR
+     end
+
+     def puts_footer
+       puts SEPARATOR
+     end
+
+     def display_results(results)
+       display_system_components(results)
+       display_api_keys(results)
+       display_warnings(results)
+     end
+
+     def display_system_components(results)
+       puts "\n📋 System Components:"
+       puts " Ruby: #{status_icon(results[:ruby])} (#{RUBY_VERSION})"
+       puts " Python: #{status_icon(results[:python])}"
+       puts " Node.js: #{status_icon(results[:node])}"
+       puts " Internet: #{status_icon(results[:internet])}"
+       puts " Ollama: #{status_icon(results[:ollama])}"
+     end
+
+     def display_api_keys(results)
+       puts "\n🔑 API Keys:"
+       results[:apis].each do |api, available|
+         puts " #{api.to_s.capitalize}: #{status_icon(available)}"
+       end
+     end
+
+     def display_warnings(results)
+       return unless results[:warnings].any?
+
+       puts "\n⚠️ Warnings:"
+       results[:warnings].each { |warning| puts " • #{warning}" }
+     end
+
+     def display_recommendations(results)
+       puts "\n💡 Recommendations:"
+       puts " • Install missing components for full functionality"
+       puts " • Configure API keys for enhanced features"
+       puts " • Start Ollama server: ollama serve" unless results[:ollama]
+     end
+
+     def status_icon(status)
+       status ? '✅' : '❌'
+     end
+   end
+ end
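Since the class method just instantiates and runs, the whole report is one call; it prints the component and API-key summary and returns the validator's results hash:

require 'llm_chain'

results = LLMChain::SystemDiagnostics.run
results[:ollama]  # => true if an Ollama server was detected, false otherwise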
data/lib/llm_chain/tools/base.rb
@@ -0,0 +1,103 @@
+ # frozen_string_literal: true
+ module LLMChain
+   module Tools
+     # Base class for all LLMChain tools.
+     #
+     # Subclasses must implement:
+     # * {#match?} – decide whether the tool should run for a given prompt.
+     # * {#call} – perform the work and return result (`String` or `Hash`).
+     #
+     # Optional overrides: {#extract_parameters}, {#format_result}.
+     #
+     # @abstract
+     class Base
+       attr_reader :name, :description, :parameters
+
+       # @param name [String]
+       # @param description [String]
+       # @param parameters [Hash]
+       def initialize(name:, description:, parameters: {})
+         @name = name
+         @description = description
+         @parameters = parameters
+       end
+
+       # Check whether this tool matches the given prompt.
+       # @param prompt [String]
+       # @return [Boolean]
+       def match?(prompt)
+         raise NotImplementedError, "Subclasses must implement #match?"
+       end
+
+       # Perform the tool action.
+       # @param prompt [String]
+       # @param context [Hash]
+       # @return [String, Hash]
+       def call(prompt, context: {})
+         raise NotImplementedError, "Subclasses must implement #call"
+       end
+
+       # Build a JSON schema describing the tool interface for LLMs.
+       # @return [Hash]
+       def to_schema
+         {
+           name: @name,
+           description: @description,
+           parameters: {
+             type: "object",
+             properties: @parameters,
+             required: required_parameters
+           }
+         }
+       end
+
+       # Extract parameters from prompt if needed.
+       # @param prompt [String]
+       # @return [Hash]
+       def extract_parameters(prompt)
+         {}
+       end
+
+       # Format result for inclusion into LLM prompt.
+       # @param result [Object]
+       # @return [String]
+       def format_result(result)
+         case result
+         when String then result
+         when Hash, Array then JSON.pretty_generate(result)
+         else result.to_s
+         end
+       end
+
+       protected
+
+       # List of required parameter names
+       # @return [Array<String>]
+       def required_parameters
+         []
+       end
+
+       # Helper: checks if prompt contains any keyword
+       # @param prompt [String]
+       # @param keywords [Array<String>]
+       # @return [Boolean]
+       def contains_keywords?(prompt, keywords)
+         keywords.any? { |keyword| prompt.downcase.include?(keyword.downcase) }
+       end
+
+       # Helper: extract numeric values from text
+       # @param text [String]
+       # @return [Array<Float>]
+       def extract_numbers(text)
+         text.scan(/-?\d+\.?\d*/).map(&:to_f)
+       end
+
+       # Helper: extract URLs from text
+       # @param text [String]
+       # @return [Array<String>]
+       def extract_urls(text)
+         text.scan(%r{https?://[^\s]+})
+       end
+     end
+   end
+ end
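A custom tool subclasses Base, passes its metadata to super, and implements #match? and #call; the protected helpers (contains_keywords?, extract_numbers, extract_urls) are available to subclasses. The word-count tool below is an illustrative example, not part of the gem:

require 'llm_chain'
require 'json'

# Hypothetical example tool built on the new Tools::Base class.
class WordCounter < LLMChain::Tools::Base
  def initialize
    super(
      name: "word_counter",
      description: "Counts words in the given text",
      parameters: { text: { type: "string", description: "Text to count words in" } }
    )
  end

  def match?(prompt)
    contains_keywords?(prompt, ["count words", "word count"])
  end

  def call(prompt, context: {})
    { words: prompt.split.size }
  end
end

tool = WordCounter.new
tool.match?("Please give me a word count for this sentence")  # => true
tool.format_result(tool.call("one two three"))                 # => pretty-printed JSON, {"words": 3}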
data/lib/llm_chain/tools/base_tool.rb
@@ -1,81 +1,11 @@
+ # frozen_string_literal: true
+ require_relative 'base'
+
  module LLMChain
    module Tools
-     class BaseTool
-       attr_reader :name, :description, :parameters
-
-       def initialize(name:, description:, parameters: {})
-         @name = name
-         @description = description
-         @parameters = parameters
-       end
-
-       # Checks whether the tool is suitable for the given prompt
-       # @param prompt [String] user input prompt
-       # @return [Boolean] true if the tool should be invoked
-       def match?(prompt)
-         raise NotImplementedError, "Subclasses must implement #match?"
-       end
-
-       # Executes the tool
-       # @param prompt [String] user input prompt
-       # @param context [Hash] additional context
-       # @return [String, Hash] tool execution result
-       def call(prompt, context: {})
-         raise NotImplementedError, "Subclasses must implement #call"
-       end
-
-       # Returns a JSON schema for the LLM
-       def to_schema
-         {
-           name: @name,
-           description: @description,
-           parameters: {
-             type: "object",
-             properties: @parameters,
-             required: required_parameters
-           }
-         }
-       end
-
-       # Extracts parameters from the prompt (for automatic parsing)
-       # @param prompt [String] input prompt
-       # @return [Hash] extracted parameters
-       def extract_parameters(prompt)
-         {}
-       end
-
-       # Formats the result for inclusion into the prompt
-       # @param result [Object] tool execution result
-       # @return [String] formatted result
-       def format_result(result)
-         case result
-         when String then result
-         when Hash, Array then JSON.pretty_generate(result)
-         else result.to_s
-         end
-       end
-
-       protected
-
-       # List of required parameters
-       def required_parameters
-         []
-       end
-
-       # Helper for checking keywords in the prompt
-       def contains_keywords?(prompt, keywords)
-         keywords.any? { |keyword| prompt.downcase.include?(keyword.downcase) }
-       end
-
-       # Helper for extracting numeric values
-       def extract_numbers(text)
-         text.scan(/-?\d+\.?\d*/).map(&:to_f)
-       end
-
-       # Helper for extracting URLs
-       def extract_urls(text)
-         text.scan(%r{https?://[^\s]+})
-       end
+     # @deprecated Use {LLMChain::Tools::Base}. Will be removed in 0.7.0.
+     class BaseTool < Base
+       # Empty shim for backward compatibility
      end
    end
  end
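For existing custom tools, migration is just a matter of changing the superclass before BaseTool is removed in 0.7.0 (MyTool below is a placeholder name):

# Before (deprecated, removed in 0.7.0):
class MyTool < LLMChain::Tools::BaseTool
end

# After:
class MyTool < LLMChain::Tools::Base
end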