llm_chain 0.5.3 → 0.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 8c94520a98ace3d2bb61ca2b470af041e0f95d6818120ce01dfc5a9cfe76316b
4
- data.tar.gz: 5155099cdf777ecaed3a30c9da1e7e200476258369ef3b976967888daf001876
3
+ metadata.gz: f7c55f5d0965bd544f4f3eb647e85127207f41d257803023d4a5df854faa3b7b
4
+ data.tar.gz: 988b46a9300325e3ab6c4bf94ea07c016c79d308b7c8c0b6bde7a12ec8fcbdca
5
5
  SHA512:
6
- metadata.gz: b37c6ed16c6b6a66d1f9c6e2a62717e576c5211aba605b98b97e5ba54b436d9a38602aabcedf34a23963ee034d6332dadefe03fa6c6a5d4d3f4941b2df9efd88
7
- data.tar.gz: 4e5c3b6921e4f0656ea8266ca67a00dbcbc8c11736573df9172b8cb2d8821d3799f87085f31e7975d916d56c29f10a4637c6df5f102520fc2da3f5284143ef79
6
+ metadata.gz: 1f59c7598a9a84d10bde4100aba65c7d55d5a986e0d7bfe2af71a08d83ae5ea689acc4ed22349377b023a7e23ba85facb151b6f70da3d9bd6549ddb8d7552f40
7
+ data.tar.gz: dc672ea465a8d0f91591b00b3f75ff0c95319e2eee54cad0709976b254f9173bd701c607e2fbe7025fb01e044fbe20d6f9f0998c61c80da7128b1c0daeea6ed8
data/CHANGELOG.md CHANGED
@@ -7,6 +7,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7
7
 
8
8
  ## [Unreleased]
9
9
 
10
+ ## [0.5.4] - 2025-07-08
11
+
12
+ ### Added
13
+ * **Deepseek-Coder-V2 Client** - Support for Deepseek-Coder-V2 models via Ollama
14
+ * Available variants: `deepseek-coder-v2:latest`, `deepseek-coder-v2:16b`, `deepseek-coder-v2:236b`
15
+ * Optimized settings for code generation tasks (low temperature, large context)
16
+ * Integrated with existing tool ecosystem (CodeInterpreter, WebSearch, Calculator)
17
+ * Full compatibility with Chain, ClientRegistry, and CLI
18
+
19
+ ### Changed
20
+ * Updated model support table in README with Deepseek-Coder-V2 information
21
+
10
22
  ## [0.5.3] - 2025-07-05
11
23
 
12
24
  ### Added
@@ -83,7 +95,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
83
95
  ### Changed
84
96
  - Initial stable release with core functionality
85
97
 
86
- [Unreleased]: https://github.com/FuryCow/llm_chain/compare/v0.5.3...HEAD
98
+ [Unreleased]: https://github.com/FuryCow/llm_chain/compare/v0.5.4...HEAD
99
+ [0.5.4]: https://github.com/FuryCow/llm_chain/compare/v0.5.3...v0.5.4
87
100
  [0.5.3]: https://github.com/FuryCow/llm_chain/compare/v0.5.2...v0.5.3
88
101
  [0.5.2]: https://github.com/FuryCow/llm_chain/compare/v0.5.1...v0.5.2
89
102
  [0.5.1]: https://github.com/FuryCow/llm_chain/compare/v0.5.0...v0.5.1
data/README.md CHANGED
@@ -353,6 +353,7 @@ tool_manager.register_tool(weather)
353
353
  | **Qwen/Qwen2** | Ollama | ✅ Supported | 0.5B - 72B parameters |
354
354
  | **LLaMA2/3** | Ollama | ✅ Supported | 7B, 13B, 70B |
355
355
  | **Gemma** | Ollama | ✅ Supported | 2B, 7B, 9B, 27B |
356
+ | **Deepseek-Coder-V2** | Ollama | ✅ Supported | 16B, 236B - Code specialist |
356
357
  | **Mistral/Mixtral** | Ollama | 🔄 In development | 7B, 8x7B |
357
358
  | **Claude** | Anthropic | 🔄 Planned | Haiku, Sonnet, Opus |
358
359
  | **Command R+** | Cohere | 🔄 Planned | Optimized for RAG |
@@ -375,6 +376,13 @@ llama_chain = LLMChain::Chain.new(
375
376
  temperature: 0.8,
376
377
  top_p: 0.95
377
378
  )
379
+
380
+ # Deepseek-Coder-V2 for code tasks
381
+ deepseek_chain = LLMChain::Chain.new(model: "deepseek-coder-v2:16b")
382
+
383
+ # Direct client usage
384
+ deepseek_client = LLMChain::Clients::DeepseekCoderV2.new(model: "deepseek-coder-v2:16b")
385
+ response = deepseek_client.chat("Create a Ruby method to sort an array")
378
386
  ```
379
387
 
380
388
  ## 💾 Memory System
data/exe/llm-chain CHANGED
@@ -2,15 +2,18 @@
2
2
  # frozen_string_literal: true
3
3
 
4
4
  # Load Bundler only if running from the development repo (Gemfile present)
5
- if File.exist?(File.expand_path("../../Gemfile", __dir__))
5
+ if File.exist?(File.expand_path("../Gemfile", __dir__))
6
+ # In development mode, always load local version
7
+ require_relative "../lib/llm_chain"
6
8
  begin
7
9
  require "bundler/setup"
8
10
  rescue LoadError
9
11
  warn "[llm-chain] Bundler not available; continuing without it"
10
12
  end
13
+ else
14
+ # In production mode, load installed gem
15
+ require "llm_chain"
11
16
  end
12
-
13
- require "llm_chain"
14
17
  require "optparse"
15
18
  require "readline"
16
19
 
@@ -1,15 +1,25 @@
1
1
  require 'json'
2
2
 
3
3
  module LLMChain
4
+ # High-level interface that ties together an LLM client, optional memory,
5
+ # tool system and RAG retriever. Use {LLMChain.quick_chain} for the common
6
+ # defaults or build manually via this class.
4
7
  class Chain
8
+ # @return [String] selected model identifier
9
+ # @return [Object] memory backend
10
+ # @return [Array, Tools::ToolManager, nil] tools collection
11
+ # @return [Object, nil] RAG retriever
5
12
  attr_reader :model, :memory, :tools, :retriever
6
13
 
7
- # @param model [String] Имя модели (gpt-4, llama3 и т.д.)
8
- # @param memory [#recall, #store] Объект памяти
9
- # @param tools [Array<Tool>] Массив инструментов
10
- # @param retriever [#search] RAG-ретривер (Weaviate, Pinecone и т.д.)
11
- # @param client_options [Hash] Опции для клиента LLM
12
- def initialize(model: nil, memory: nil, tools: [], retriever: nil, validate_config: true, **client_options)
14
+ # Create a new chain.
15
+ #
16
+ # @param model [String] model name, e.g. "gpt-4" or "qwen3:1.7b"
17
+ # @param memory [#recall, #store, nil] conversation memory backend
18
+ # @param tools [Array<Tools::Base>, Tools::ToolManager, nil]
19
+ # @param retriever [#search, false, nil] document retriever for RAG
20
+ # @param validate_config [Boolean] run {ConfigurationValidator}
21
+ # @param client_options [Hash] extra LLM-client options (api_key etc.)
22
+ def initialize(model: nil, memory: nil, tools: [], retriever: false, validate_config: true, **client_options)
13
23
  # Валидация конфигурации (можно отключить через validate_config: false)
14
24
  if validate_config
15
25
  begin
@@ -38,12 +48,14 @@ module LLMChain
38
48
  @client = ClientRegistry.client_for(model, **client_options)
39
49
  end
40
50
 
41
- # Основной метод для взаимодействия с цепочкой
42
- # @param prompt [String] Входной промпт
43
- # @param stream [Boolean] Использовать ли потоковый вывод
44
- # @param rag_context [Boolean] Использовать ли RAG-контекст
45
- # @param rag_options [Hash] Опции для RAG-поиска
46
- # @yield [String] Передает чанки ответа если stream=true
51
+ # Main inference entrypoint.
52
+ #
53
+ # @param prompt [String] user prompt
54
+ # @param stream [Boolean] if `true` yields chunks and returns full string
55
+ # @param rag_context [Boolean] whether to include retriever context
56
+ # @param rag_options [Hash] options passed to retriever (eg. :limit)
57
+ # @yield [String] chunk — called when `stream` is true
58
+ # @return [String] assistant response
47
59
  def ask(prompt, stream: false, rag_context: false, rag_options: {}, &block)
48
60
  context = collect_context(prompt, rag_context, rag_options)
49
61
  full_prompt = build_prompt(prompt: prompt, **context)
@@ -52,10 +64,12 @@ module LLMChain
52
64
  response
53
65
  end
54
66
 
67
+ # Collect memory, tool results and RAG docs for current request.
68
+ # @api private
55
69
  def collect_context(prompt, rag_context, rag_options)
56
- context = memory.recall(prompt)
70
+ context = memory.recall(prompt)
57
71
  tool_responses = process_tools(prompt)
58
- rag_documents = retrieve_rag_context(prompt, rag_options) if rag_context
72
+ rag_documents = retrieve_rag_context(prompt, rag_options) if rag_context
59
73
  { memory_context: context, tool_responses: tool_responses, rag_documents: rag_documents }
60
74
  end
61
75
 
@@ -16,6 +16,8 @@ module LLMChain
16
16
  Clients::Llama2
17
17
  when /gemma3/
18
18
  Clients::Gemma3
19
+ when /deepseek-coder-v2/
20
+ Clients::DeepseekCoderV2
19
21
  else
20
22
  raise UnknownModelError, "Unknown model: #{model}"
21
23
  end
@@ -1,15 +1,37 @@
1
1
  module LLMChain
2
2
  module Clients
3
+ # Abstract base class for an LLM client adapter.
4
+ #
5
+ # Concrete clients **must** implement two methods:
6
+ # * `#chat(prompt, **options)` – single-shot request
7
+ # * `#stream_chat(prompt, **options)` – streaming request yielding chunks
8
+ #
9
+ # Constructor should accept `model:` plus any client-specific options
10
+ # (`api_key`, `base_url`, …).
11
+ #
12
+ # @abstract
3
13
  class Base
14
+ # @param model [String]
4
15
  def initialize(model)
5
16
  @model = model
6
17
  end
7
18
 
8
- def chat(_prompt)
19
+ # Send a non-streaming chat request.
20
+ #
21
+ # @param prompt [String]
22
+ # @param options [Hash]
23
+ # @return [String] assistant response
24
+ def chat(prompt, **options)
9
25
  raise NotImplementedError
10
26
  end
11
27
 
12
- def stream_chat(_prompt)
28
+ # Send a streaming chat request.
29
+ #
30
+ # @param prompt [String]
31
+ # @param options [Hash]
32
+ # @yieldparam chunk [String] partial response chunk
33
+ # @return [String] full concatenated response
34
+ def stream_chat(prompt, **options, &block)
13
35
  raise NotImplementedError
14
36
  end
15
37
  end
@@ -0,0 +1,32 @@
1
+ module LLMChain
2
+ module Clients
3
+ # Deepseek-Coder-V2 client for Ollama
4
+ #
5
+ # An open-source Mixture-of-Experts (MoE) code language model that achieves
6
+ # performance comparable to GPT4-Turbo in code-specific tasks.
7
+ #
8
+ # @example Using default model
9
+ # client = LLMChain::Clients::DeepseekCoderV2.new
10
+ # response = client.chat("Write a Python function to sort a list")
11
+ #
12
+ # @example Using specific model variant
13
+ # client = LLMChain::Clients::DeepseekCoderV2.new(model: "deepseek-coder-v2:16b")
14
+ # response = client.chat("Explain this algorithm")
15
+ #
16
+ class DeepseekCoderV2 < OllamaBase
17
+ DEFAULT_MODEL = "deepseek-coder-v2:latest".freeze
18
+
19
+ # Optimized settings for code generation tasks
20
+ DEFAULT_OPTIONS = {
21
+ temperature: 0.1, # Lower temperature for more precise code
22
+ top_p: 0.95, # High top_p for diverse but relevant responses
23
+ num_ctx: 8192, # Large context for complex code analysis
24
+ stop: ["User:", "Assistant:"] # Stop tokens for chat format
25
+ }.freeze
26
+
27
+ def initialize(model: DEFAULT_MODEL, base_url: nil, **options)
28
+ super(model: model, base_url: base_url, default_options: DEFAULT_OPTIONS.merge(options))
29
+ end
30
+ end
31
+ end
32
+ end
@@ -57,7 +57,7 @@ module LLMChain
57
57
  case model.to_s
58
58
  when /^gpt/
59
59
  validate_openai_requirements!(model)
60
- when /qwen|llama|gemma/
60
+ when /qwen|llama|gemma|deepseek-coder-v2/
61
61
  validate_ollama_requirements!(model)
62
62
  else
63
63
  add_warning("Unknown model type: #{model}. Proceeding with default settings.")
@@ -0,0 +1,103 @@
1
+ # frozen_string_literal: true
2
+ module LLMChain
3
+ module Tools
4
+ # Base class for all LLMChain tools.
5
+ #
6
+ # Subclasses must implement:
7
+ # * {#match?} – decide whether the tool should run for a given prompt.
8
+ # * {#call} – perform the work and return result (`String` or `Hash`).
9
+ #
10
+ # Optional overrides: {#extract_parameters}, {#format_result}.
11
+ #
12
+ # @abstract
13
+ class Base
14
+ attr_reader :name, :description, :parameters
15
+
16
+ # @param name [String]
17
+ # @param description [String]
18
+ # @param parameters [Hash]
19
+ def initialize(name:, description:, parameters: {})
20
+ @name = name
21
+ @description = description
22
+ @parameters = parameters
23
+ end
24
+
25
+ # Check whether this tool matches the given prompt.
26
+ # @param prompt [String]
27
+ # @return [Boolean]
28
+ def match?(prompt)
29
+ raise NotImplementedError, "Subclasses must implement #match?"
30
+ end
31
+
32
+ # Perform the tool action.
33
+ # @param prompt [String]
34
+ # @param context [Hash]
35
+ # @return [String, Hash]
36
+ def call(prompt, context: {})
37
+ raise NotImplementedError, "Subclasses must implement #call"
38
+ end
39
+
40
+ # Build a JSON schema describing the tool interface for LLMs.
41
+ # @return [Hash]
42
+ def to_schema
43
+ {
44
+ name: @name,
45
+ description: @description,
46
+ parameters: {
47
+ type: "object",
48
+ properties: @parameters,
49
+ required: required_parameters
50
+ }
51
+ }
52
+ end
53
+
54
+ # Extract parameters from prompt if needed.
55
+ # @param prompt [String]
56
+ # @return [Hash]
57
+ def extract_parameters(prompt)
58
+ {}
59
+ end
60
+
61
+ # Format result for inclusion into LLM prompt.
62
+ # @param result [Object]
63
+ # @return [String]
64
+ def format_result(result)
65
+ case result
66
+ when String then result
67
+ when Hash, Array then JSON.pretty_generate(result)
68
+ else result.to_s
69
+ end
70
+ end
71
+
72
+ protected
73
+
74
+ # List of required parameter names
75
+ # @return [Array<String>]
76
+ def required_parameters
77
+ []
78
+ end
79
+
80
+ # Helper: checks if prompt contains any keyword
81
+ # @param prompt [String]
82
+ # @param keywords [Array<String>]
83
+ # @return [Boolean]
84
+ def contains_keywords?(prompt, keywords)
85
+ keywords.any? { |keyword| prompt.downcase.include?(keyword.downcase) }
86
+ end
87
+
88
+ # Helper: extract numeric values from text
89
+ # @param text [String]
90
+ # @return [Array<Float>]
91
+ def extract_numbers(text)
92
+ text.scan(/-?\d+\.?\d*/).map(&:to_f)
93
+ end
94
+
95
+ # Helper: extract URLs from text
96
+ # @param text [String]
97
+ # @return [Array<String>]
98
+ def extract_urls(text)
99
+ text.scan(%r{https?://[^\s]+})
100
+ end
101
+ end
102
+ end
103
+ end
@@ -1,81 +1,11 @@
1
+ # frozen_string_literal: true
2
+ require_relative 'base'
3
+
1
4
  module LLMChain
2
5
  module Tools
3
- class BaseTool
4
- attr_reader :name, :description, :parameters
5
-
6
- def initialize(name:, description:, parameters: {})
7
- @name = name
8
- @description = description
9
- @parameters = parameters
10
- end
11
-
12
- # Проверяет, подходит ли инструмент для данного промпта
13
- # @param prompt [String] Входной промпт от пользователя
14
- # @return [Boolean] true если инструмент должен быть вызван
15
- def match?(prompt)
16
- raise NotImplementedError, "Subclasses must implement #match?"
17
- end
18
-
19
- # Выполняет инструмент
20
- # @param prompt [String] Входной промпт от пользователя
21
- # @param context [Hash] Дополнительный контекст
22
- # @return [String, Hash] Результат выполнения инструмента
23
- def call(prompt, context: {})
24
- raise NotImplementedError, "Subclasses must implement #call"
25
- end
26
-
27
- # Возвращает JSON-схему для LLM
28
- def to_schema
29
- {
30
- name: @name,
31
- description: @description,
32
- parameters: {
33
- type: "object",
34
- properties: @parameters,
35
- required: required_parameters
36
- }
37
- }
38
- end
39
-
40
- # Извлекает параметры из промпта (для автоматического парсинга)
41
- # @param prompt [String] Входной промпт
42
- # @return [Hash] Извлеченные параметры
43
- def extract_parameters(prompt)
44
- {}
45
- end
46
-
47
- # Форматирует результат для включения в промпт
48
- # @param result [Object] Результат выполнения инструмента
49
- # @return [String] Форматированный результат
50
- def format_result(result)
51
- case result
52
- when String then result
53
- when Hash, Array then JSON.pretty_generate(result)
54
- else result.to_s
55
- end
56
- end
57
-
58
- protected
59
-
60
- # Список обязательных параметров
61
- def required_parameters
62
- []
63
- end
64
-
65
- # Помощник для проверки ключевых слов в промпте
66
- def contains_keywords?(prompt, keywords)
67
- keywords.any? { |keyword| prompt.downcase.include?(keyword.downcase) }
68
- end
69
-
70
- # Помощник для извлечения числовых значений
71
- def extract_numbers(text)
72
- text.scan(/-?\d+\.?\d*/).map(&:to_f)
73
- end
74
-
75
- # Помощник для извлечения URL
76
- def extract_urls(text)
77
- text.scan(%r{https?://[^\s]+})
78
- end
6
+ # @deprecated Use {LLMChain::Tools::Base}. Will be removed in 0.7.0.
7
+ class BaseTool < Base
8
+ # Empty shim for backward compatibility
79
9
  end
80
10
  end
81
11
  end
@@ -2,7 +2,7 @@ require 'bigdecimal'
2
2
 
3
3
  module LLMChain
4
4
  module Tools
5
- class Calculator < BaseTool
5
+ class Calculator < Base
6
6
  KEYWORDS = %w[
7
7
  calculate compute math equation formula
8
8
  add subtract multiply divide
@@ -3,7 +3,7 @@ require 'timeout'
3
3
 
4
4
  module LLMChain
5
5
  module Tools
6
- class CodeInterpreter < BaseTool
6
+ class CodeInterpreter < Base
7
7
  KEYWORDS = %w[
8
8
  code run execute script program
9
9
  ruby python javascript
@@ -108,26 +108,26 @@ module LLMChain
108
108
  end
109
109
 
110
110
  def extract_code(prompt)
111
- # Нормализуем line endings
111
+ # Normalize line endings (CRLF -> LF)
112
112
  normalized_prompt = normalize_line_endings(prompt)
113
113
 
114
- # 1. Пробуем различные паттерны markdown блоков
114
+ # 1. Try various markdown block patterns
115
115
  code = extract_markdown_code_blocks(normalized_prompt)
116
116
  return clean_code(code) if code && !code.empty?
117
117
 
118
- # 2. Ищем код после ключевых команд в одной строке
118
+ # 2. Attempt inline "run code:" patterns
119
119
  code = extract_inline_code_commands(normalized_prompt)
120
120
  return clean_code(code) if code && !code.empty?
121
121
 
122
- # 3. Ищем код после ключевых слов в разных строках
122
+ # 3. Look for code after keywords across multiple lines
123
123
  code = extract_multiline_code_blocks(normalized_prompt)
124
124
  return clean_code(code) if code && !code.empty?
125
125
 
126
- # 4. Ищем строки, которые выглядят как код
126
+ # 4. Fallback: detect code-like lines
127
127
  code = extract_code_like_lines(normalized_prompt)
128
128
  return clean_code(code) if code && !code.empty?
129
129
 
130
- # 5. Последняя попытка - весь текст после первого кода
130
+ # 5. Last resort: take everything after the first code-looking line
131
131
  code = extract_fallback_code(normalized_prompt)
132
132
  clean_code(code)
133
133
  end
@@ -139,19 +139,19 @@ module LLMChain
139
139
  end
140
140
 
141
141
  def extract_markdown_code_blocks(prompt)
142
- # Различные паттерны для markdown блоков
142
+ # Pattern list for markdown code blocks
143
143
  patterns = [
144
- # Стандартный markdown с указанием языка
144
+ # Standard fenced block with language tag
145
145
  /```(?:ruby|python|javascript|js)\s*\n(.*?)\n```/mi,
146
- # Markdown без указания языка
146
+ # Fenced block without language tag
147
147
  /```\s*\n(.*?)\n```/mi,
148
- # Markdown с любым языком
148
+ # Fenced block any language
149
149
  /```\w*\s*\n(.*?)\n```/mi,
150
- # Тильды вместо backticks
150
+ # Using ~~~ instead of ```
151
151
  /~~~(?:ruby|python|javascript|js)?\s*\n(.*?)\n~~~/mi,
152
- # Без переносов строк
152
+ # Single-line fenced block
153
153
  /```(?:ruby|python|javascript|js)?(.*?)```/mi,
154
- # Четыре пробела (indented code blocks)
154
+ # Indented code block (4 spaces)
155
155
  /^ (.+)$/m
156
156
  ]
157
157
 
@@ -164,7 +164,7 @@ module LLMChain
164
164
  end
165
165
 
166
166
  def extract_inline_code_commands(prompt)
167
- # Команды в одной строке
167
+ # Inline "run code" commands
168
168
  inline_patterns = [
169
169
  /execute\s+code:\s*(.+)/i,
170
170
  /run\s+code:\s*(.+)/i,
@@ -189,30 +189,30 @@ module LLMChain
189
189
  keyword_line_index = lines.find_index { |line| line.downcase.include?(keyword.downcase) }
190
190
  next unless keyword_line_index
191
191
 
192
- # Берем строки после ключевого слова
192
+ # Take lines after the keyword
193
193
  code_lines = lines[(keyword_line_index + 1)..-1]
194
194
  next unless code_lines
195
195
 
196
- # Найдем первую непустую строку
196
+ # Find the first non-empty line
197
197
  first_code_line = code_lines.find_index { |line| !line.strip.empty? }
198
198
  next unless first_code_line
199
199
 
200
- # Берем все строки начиная с первой непустой
200
+ # Take all lines starting from the first non-empty line
201
201
  relevant_lines = code_lines[first_code_line..-1]
202
202
 
203
- # Определяем отступ первой строки кода
203
+ # Determine indentation of the first code line
204
204
  first_line = relevant_lines.first
205
205
  indent = first_line.match(/^(\s*)/)[1].length
206
206
 
207
- # Собираем все строки с таким же или большим отступом
207
+ # Collect all lines with the same or greater indentation
208
208
  code_block = []
209
209
  relevant_lines.each do |line|
210
210
  if line.strip.empty?
211
- code_block << "" # Сохраняем пустые строки
211
+ code_block << "" # Preserve empty lines
212
212
  elsif line.match(/^(\s*)/)[1].length >= indent
213
213
  code_block << line
214
214
  else
215
- break # Прекращаем при уменьшении отступа
215
+ break # Stop when indentation decreases
216
216
  end
217
217
  end
218
218
 
@@ -229,26 +229,26 @@ module LLMChain
229
229
  stripped = line.strip
230
230
  next false if stripped.empty?
231
231
 
232
- # Проверяем различные паттерны кода
232
+ # Check various code patterns
233
233
  stripped.match?(/^(def|class|function|var|let|const|print|puts|console\.log)/i) ||
234
234
  stripped.match?(/^\w+\s*[=+\-*\/]\s*/) ||
235
235
  stripped.match?(/^\s*(if|for|while|return|import|require)[\s(]/i) ||
236
236
  stripped.match?(/puts\s+/) ||
237
237
  stripped.match?(/print\s*\(/) ||
238
238
  stripped.match?(/^\w+\(.*\)/) ||
239
- stripped.match?(/^\s*#.*/) || # Комментарии
240
- stripped.match?(/^\s*\/\/.*/) || # JS комментарии
241
- stripped.match?(/^\s*\/\*.*\*\//) # Блочные комментарии
239
+ stripped.match?(/^\s*#.*/) || # Comments
240
+ stripped.match?(/^\s*\/\/.*/) || # JS comments
241
+ stripped.match?(/^\s*\/\*.*\*\//) # Block comments
242
242
  end
243
243
 
244
244
  code_lines.join("\n") if code_lines.any?
245
245
  end
246
246
 
247
247
  def extract_fallback_code(prompt)
248
- # Последняя попытка - ищем что-то похожее на код
248
+ # Final attempt look for anything resembling code
249
249
  lines = prompt.split("\n")
250
250
 
251
- # Найдем первую строку, которая выглядит как код
251
+ # Find first line that looks like code
252
252
  start_index = lines.find_index do |line|
253
253
  stripped = line.strip
254
254
  stripped.match?(/^(def|class|function|puts|print|console\.log|var|let|const)/i) ||
@@ -258,14 +258,14 @@ module LLMChain
258
258
 
259
259
  return nil unless start_index
260
260
 
261
- # Берем все строки после найденной
261
+ # Take all subsequent lines
262
262
  code_lines = lines[start_index..-1]
263
263
 
264
- # Останавливаемся на первой строке, которая явно не код
264
+ # Stop when a line is clearly not code
265
265
  end_index = code_lines.find_index do |line|
266
266
  stripped = line.strip
267
- stripped.match?(/^(что|как|где|когда|зачем|почему|what|how|where|when|why)/i) ||
268
- stripped.length > 100 # Слишком длинная строка
267
+ stripped.match?(/^(что|как|где|когда|зачем|почему|what|how|where|when|why)/i) || # Russian/English question words
268
+ stripped.length > 100 # Too long -> unlikely code
269
269
  end
270
270
 
271
271
  relevant_lines = end_index ? code_lines[0...end_index] : code_lines
@@ -277,16 +277,16 @@ module LLMChain
277
277
 
278
278
  lines = code.strip.lines
279
279
 
280
- # Удаляем только комментарии, которые не являются частью кода
280
+ # Remove pure comment lines, keep inline comments
281
281
  cleaned_lines = lines.reject do |line|
282
282
  stripped = line.strip
283
- # Удаляем только строки, которые содержат ТОЛЬКО комментарии
284
- stripped.match?(/^\s*#[^{]*$/) || # Ruby комментарии (но не интерполяция)
285
- stripped.match?(/^\s*\/\/.*$/) || # JS комментарии
286
- stripped.match?(/^\s*\/\*.*\*\/\s*$/) # Блочные комментарии
283
+ # Remove only lines that contain ONLY comments
284
+ stripped.match?(/^\s*#[^{]*$/) || # Ruby comments (excluding interpolation)
285
+ stripped.match?(/^\s*\/\/.*$/) || # JS comments
286
+ stripped.match?(/^\s*\/\*.*\*\/\s*$/) # Block comments
287
287
  end
288
288
 
289
- # Убираем пустые строки в начале и конце, но сохраняем внутри
289
+ # Remove blank lines at the beginning and end, but keep them inside
290
290
  start_index = cleaned_lines.find_index { |line| !line.strip.empty? }
291
291
  return "" unless start_index
292
292
 
@@ -297,12 +297,12 @@ module LLMChain
297
297
  end
298
298
 
299
299
  def detect_language(code, prompt)
300
- # Явное указание языка
300
+ # Explicit language specification
301
301
  return 'ruby' if prompt.match?(/```ruby/i) || prompt.include?('Ruby')
302
302
  return 'python' if prompt.match?(/```python/i) || prompt.include?('Python')
303
303
  return 'javascript' if prompt.match?(/```(javascript|js)/i) || prompt.include?('JavaScript')
304
304
 
305
- # Определение по синтаксису
305
+ # Determine by syntax
306
306
  return 'ruby' if code.include?('puts') || code.include?('def ') || code.match?(/\bend\b/)
307
307
  return 'python' if code.include?('print(') || code.match?(/def \w+\(.*\):/) || code.include?('import ')
308
308
  return 'javascript' if code.include?('console.log') || code.include?('function ') || code.include?('var ') || code.include?('let ')
@@ -329,12 +329,12 @@ module LLMChain
329
329
 
330
330
  def execute_ruby(code)
331
331
  Timeout.timeout(@timeout) do
332
- # Создаем временный файл
332
+ # Create a temporary file
333
333
  Tempfile.create(['code', '.rb']) do |file|
334
334
  file.write(code)
335
335
  file.flush
336
336
 
337
- # Выполняем код в отдельном процессе
337
+ # Execute code in a separate process
338
338
  result = `ruby #{file.path} 2>&1`
339
339
 
340
340
  if $?.success?
@@ -369,7 +369,7 @@ module LLMChain
369
369
  file.write(code)
370
370
  file.flush
371
371
 
372
- # Пробуем node.js
372
+ # Try node.js
373
373
  result = `node #{file.path} 2>&1`
374
374
 
375
375
  if $?.success?
@@ -0,0 +1,58 @@
1
+ # frozen_string_literal: true
2
+ require 'time'
3
+
4
+ module LLMChain
5
+ module Tools
6
+ # Simple tool that returns current date and time.
7
+ class DateTime < Base
8
+ KEYWORDS = %w[time date today now current].freeze
9
+
10
+ def initialize
11
+ super(
12
+ name: "date_time",
13
+ description: "Returns current date and time (optionally for given timezone)",
14
+ parameters: {
15
+ timezone: {
16
+ type: "string",
17
+ description: "IANA timezone name, e.g. 'Europe/Moscow'. Defaults to system TZ"
18
+ }
19
+ }
20
+ )
21
+ end
22
+
23
+ # @param prompt [String]
24
+ # @return [Boolean]
25
+ def match?(prompt)
26
+ contains_keywords?(prompt, KEYWORDS)
27
+ end
28
+
29
+ # @param prompt [String]
30
+ # @param context [Hash]
31
+ def call(prompt, context: {})
32
+ params = extract_parameters(prompt)
33
+ tz = params[:timezone]
34
+ time = tz ? Time.now.getlocal(timezone_offset(tz)) : Time.now
35
+ {
36
+ timezone: tz || Time.now.zone,
37
+ iso: time.iso8601,
38
+ formatted: time.strftime("%Y-%m-%d %H:%M:%S %Z")
39
+ }
40
+ end
41
+
42
+ def extract_parameters(prompt)
43
+ tz_match = prompt.match(/in\s+([A-Za-z_\/]+)/)
44
+ { timezone: tz_match && tz_match[1] }
45
+ end
46
+
47
+ private
48
+
49
+ def timezone_offset(tz)
50
+ # Fallback: use TZInfo if available, else default to system
51
+ require 'tzinfo'
52
+ TZInfo::Timezone.get(tz).current_period.offset
53
+ rescue LoadError, TZInfo::InvalidTimezoneIdentifier
54
+ 0
55
+ end
56
+ end
57
+ end
58
+ end
@@ -8,40 +8,47 @@ module LLMChain
8
8
  tools.each { |tool| register_tool(tool) }
9
9
  end
10
10
 
11
- # Регистрирует новый инструмент
11
+ # Register a new tool instance.
12
+ #
13
+ # @param tool [LLMChain::Tools::Base]
14
+ # @raise [ArgumentError] if object does not inherit from Tools::Base
12
15
  def register_tool(tool)
13
- unless tool.is_a?(BaseTool)
14
- raise ArgumentError, "Tool must inherit from BaseTool"
16
+ unless tool.is_a?(Base)
17
+ raise ArgumentError, "Tool must inherit from LLMChain::Tools::Base"
15
18
  end
16
19
  @tools[tool.name] = tool
17
20
  end
18
21
 
19
- # Удаляет инструмент
22
+ # Unregister a tool by name.
20
23
  def unregister_tool(name)
21
24
  @tools.delete(name.to_s)
22
25
  end
23
26
 
24
- # Получает инструмент по имени
27
+ # Fetch a tool by its name.
25
28
  def get_tool(name)
26
29
  @tools[name.to_s]
27
30
  end
28
31
 
29
- # Возвращает список всех инструментов
32
+ # @return [Array<LLMChain::Tools::Base>] list of registered tools
30
33
  def list_tools
31
34
  @tools.values
32
35
  end
33
36
 
34
- # Получает схемы всех инструментов для LLM
37
+ # Build JSON schemas for all registered tools.
35
38
  def get_tools_schema
36
39
  @tools.values.map(&:to_schema)
37
40
  end
38
41
 
39
- # Находит подходящие инструменты для промпта
42
+ # Find tools whose {Tools::Base#match?} returns `true` for the prompt.
40
43
  def find_matching_tools(prompt)
41
44
  @tools.values.select { |tool| tool.match?(prompt) }
42
45
  end
43
46
 
44
- # Выполняет все подходящие инструменты
47
+ # Execute every matching tool and collect results.
48
+ #
49
+ # @param prompt [String]
50
+ # @param context [Hash]
51
+ # @return [Hash] mapping tool name → result hash
45
52
  def execute_tools(prompt, context: {})
46
53
  matching_tools = find_matching_tools(prompt)
47
54
 
@@ -66,7 +73,12 @@ module LLMChain
66
73
  results
67
74
  end
68
75
 
69
- # Выполняет конкретный инструмент по имени
76
+ # Execute a single tool by name.
77
+ #
78
+ # @param name [String]
79
+ # @param prompt [String]
80
+ # @param context [Hash]
81
+ # @return [Hash] result wrapper
70
82
  def execute_tool(name, prompt, context: {})
71
83
  tool = get_tool(name)
72
84
  raise ArgumentError, "Tool '#{name}' not found" unless tool
@@ -87,18 +99,19 @@ module LLMChain
87
99
  end
88
100
  end
89
101
 
90
- # Создает стандартный набор инструментов
102
+ # Create default toolset (Calculator, WebSearch, CodeInterpreter, DateTime).
91
103
  def self.create_default_toolset
92
104
  tools = [
93
105
  Calculator.new,
94
106
  WebSearch.new,
95
- CodeInterpreter.new
107
+ CodeInterpreter.new,
108
+ DateTime.new
96
109
  ]
97
110
 
98
111
  new(tools: tools)
99
112
  end
100
113
 
101
- # Создает набор инструментов из конфигурации
114
+ # Build toolset from a config array.
102
115
  def self.from_config(config)
103
116
  tools = []
104
117
 
@@ -121,7 +134,7 @@ module LLMChain
121
134
  new(tools: tools)
122
135
  end
123
136
 
124
- # Форматирует результаты выполнения для включения в промпт
137
+ # Format tool execution results for inclusion into an LLM prompt.
125
138
  def format_tool_results(results)
126
139
  return "" if results.empty?
127
140
 
@@ -132,7 +145,7 @@ module LLMChain
132
145
  "Tool Results:\n#{formatted_results.join("\n\n")}"
133
146
  end
134
147
 
135
- # Получает краткое описание доступных инструментов
148
+ # Human-readable list of available tools.
136
149
  def tools_description
137
150
  descriptions = @tools.values.map do |tool|
138
151
  "- #{tool.name}: #{tool.description}"
@@ -141,20 +154,20 @@ module LLMChain
141
154
  "Available tools:\n#{descriptions.join("\n")}"
142
155
  end
143
156
 
144
- # Проверяет, содержит ли промпт запрос на использование инструментов
157
+ # Determine if prompt likely needs tool usage.
145
158
  def needs_tools?(prompt)
146
- # Проверяем явные запросы на использование инструментов
159
+ # Check for explicit tool usage requests
147
160
  return true if prompt.match?(/\b(use tool|call tool|execute|calculate|search|run code)\b/i)
148
161
 
149
- # Проверяем, есть ли подходящие инструменты
162
+ # Check if there are any matching tools
150
163
  find_matching_tools(prompt).any?
151
164
  end
152
165
 
153
- # Автоматически решает, какие инструменты использовать
166
+ # Auto-select and execute best tools for prompt.
154
167
  def auto_execute(prompt, context: {})
155
168
  return {} unless needs_tools?(prompt)
156
169
 
157
- # Ограничиваем количество одновременно выполняемых инструментов
170
+ # Limit the number of tools executed at once
158
171
  matching_tools = find_matching_tools(prompt)
159
172
  selected_tools = select_best_tools(matching_tools, prompt)
160
173
 
@@ -181,9 +194,9 @@ module LLMChain
181
194
 
182
195
  private
183
196
 
184
- # Выбирает лучшие инструменты для выполнения (ограничение по количеству)
185
- def select_best_tools(tools, prompt, limit: 3)
186
- # Простая логика приоритизации
197
+ # Simple heuristic to rank matching tools.
198
+ def select_best_tools(tools, prompt, limit: 3)
199
+ # Simple prioritization logic
187
200
  prioritized = tools.sort_by do |tool|
188
201
  case tool.name
189
202
  when 'calculator'
@@ -4,7 +4,7 @@ require 'uri'
4
4
 
5
5
  module LLMChain
6
6
  module Tools
7
- class WebSearch < BaseTool
7
+ class WebSearch < Base
8
8
  KEYWORDS = %w[
9
9
  search find lookup google bing
10
10
  what is who is where is when is
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LlmChain
4
- VERSION = "0.5.3"
4
+ VERSION = "0.5.4"
5
5
  end
data/lib/llm_chain.rb CHANGED
@@ -9,13 +9,15 @@ require_relative "llm_chain/clients/ollama_base"
9
9
  require_relative "llm_chain/clients/qwen"
10
10
  require_relative "llm_chain/clients/llama2"
11
11
  require_relative "llm_chain/clients/gemma3"
12
+ require_relative "llm_chain/clients/deepseek_coder_v2"
12
13
  require_relative "llm_chain/memory/array"
13
14
  require_relative "llm_chain/memory/redis"
14
- require_relative "llm_chain/tools/base_tool"
15
+ require_relative "llm_chain/tools/base"
15
16
  require_relative "llm_chain/tools/calculator"
16
17
  require_relative "llm_chain/tools/web_search"
17
18
  require_relative "llm_chain/tools/code_interpreter"
18
19
  require_relative "llm_chain/tools/tool_manager"
20
+ require_relative "llm_chain/tools/date_time"
19
21
  require_relative "llm_chain/embeddings/clients/local/weaviate_vector_store"
20
22
  require_relative "llm_chain/embeddings/clients/local/weaviate_retriever"
21
23
  require_relative "llm_chain/embeddings/clients/local/ollama_client"
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llm_chain
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.5.3
4
+ version: 0.5.4
5
5
  platform: ruby
6
6
  authors:
7
7
  - FuryCow
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2025-07-05 00:00:00.000000000 Z
11
+ date: 2025-07-08 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: httparty
@@ -130,6 +130,7 @@ files:
130
130
  - lib/llm_chain/chain.rb
131
131
  - lib/llm_chain/client_registry.rb
132
132
  - lib/llm_chain/clients/base.rb
133
+ - lib/llm_chain/clients/deepseek_coder_v2.rb
133
134
  - lib/llm_chain/clients/gemma3.rb
134
135
  - lib/llm_chain/clients/llama2.rb
135
136
  - lib/llm_chain/clients/ollama_base.rb
@@ -141,9 +142,11 @@ files:
141
142
  - lib/llm_chain/embeddings/clients/local/weaviate_vector_store.rb
142
143
  - lib/llm_chain/memory/array.rb
143
144
  - lib/llm_chain/memory/redis.rb
145
+ - lib/llm_chain/tools/base.rb
144
146
  - lib/llm_chain/tools/base_tool.rb
145
147
  - lib/llm_chain/tools/calculator.rb
146
148
  - lib/llm_chain/tools/code_interpreter.rb
149
+ - lib/llm_chain/tools/date_time.rb
147
150
  - lib/llm_chain/tools/tool_manager.rb
148
151
  - lib/llm_chain/tools/web_search.rb
149
152
  - lib/llm_chain/version.rb