llm_chain 0.6.0 → 0.7.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -0
- data/README.md +10 -1
- data/lib/llm_chain/client_registry.rb +2 -0
- data/lib/llm_chain/clients/mistral.rb +42 -0
- data/lib/llm_chain/configuration_validator.rb +2 -2
- data/lib/llm_chain/version.rb +1 -1
- data/lib/llm_chain.rb +1 -0
- metadata +3 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: debff76fd3adbc6ec79cf70877975847b9e2f86795bd5b86b2ad11483775bfb9
+  data.tar.gz: f086848c7c0a01cc3baf0d29fa946c4b264c71834a87c196e7c74eb6b5ceb4f6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bbead94dc1b7a6e11ed8d628630d52254babb3d96b562bd9ed829a4f789e793d7b14857f16477c19bbf78cb8f76770ac6461027601741c78f23a0ee9d4a8ea9a
+  data.tar.gz: f63a2cf032b4cd03c4d40455fcbfc85c6217e46623ced44369f253856472c53f04c56cb6b9d3b278ebe7ccbe0077be9561713baf095f3001efda4736bb00122a
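These digests cover the `metadata.gz` and `data.tar.gz` entries packed inside the `.gem` archive, not the `.gem` file itself. A minimal sketch of checking one of them locally (file names are illustrative; extract the entries first, e.g. `tar -xf llm_chain-0.7.0.gem metadata.gz`):

    require 'digest'

    # Compare the extracted metadata.gz entry against the published SHA256 digest above.
    expected = "debff76fd3adbc6ec79cf70877975847b9e2f86795bd5b86b2ad11483775bfb9"
    actual   = Digest::SHA256.file("metadata.gz").hexdigest
    puts(actual == expected ? "metadata.gz: checksum OK" : "metadata.gz: checksum MISMATCH")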
data/CHANGELOG.md CHANGED
@@ -7,6 +7,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.7.0] - 2025-01-27
+
+### Added
+* **Mistral Client** - Support for Mistral models via Ollama:
+  * Available variants: `mistral:latest`, `mixtral:8x7b`, `mistral-small:latest`, `mistral-medium:latest`, `mistral-large:latest`
+  * Optimized settings for different model types
+  * Integrated with existing tool ecosystem and Chain functionality
+  * Full compatibility with ClientRegistry and streaming support
+  * Lean implementation following SOLID principles
+  * YARD documentation with English examples
+
 ## [0.6.0] - 2025-07-24
 
 ### Added
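To make the new entry concrete, here is a minimal usage sketch (mirroring the YARD examples in the mistral.rb diff below; it assumes the chosen variant has already been pulled, e.g. via `ollama pull mixtral:8x7b`):

    require 'llm_chain'

    # Any variant from the list above can be passed as model:.
    client = LLMChain::Clients::Mistral.new(model: "mixtral:8x7b")
    response = client.chat("Summarize the Mistral model family in two sentences.")
    puts response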
data/README.md CHANGED
@@ -376,7 +376,7 @@ tool_manager.register_tool(weather)
 | **LLaMA2/3** | Ollama | ✅ Supported | 7B, 13B, 70B |
 | **Gemma** | Ollama | ✅ Supported | 2B, 7B, 9B, 27B |
 | **Deepseek-Coder-V2** | Ollama | ✅ Supported | 16B, 236B - Code specialist |
-| **Mistral/Mixtral** | Ollama |
+| **Mistral/Mixtral** | Ollama | ✅ Supported | 7B, 8x7B, Tiny, Small, Medium, Large |
 | **Claude** | Anthropic | 🔄 Planned | Haiku, Sonnet, Opus |
 | **Command R+** | Cohere | 🔄 Planned | Optimized for RAG |
@@ -402,9 +402,18 @@ llama_chain = LLMChain::Chain.new(
 # Deepseek-Coder-V2 for code tasks
 deepseek_chain = LLMChain::Chain.new(model: "deepseek-coder-v2:16b")
 
+# Mistral via Ollama
+mistral_chain = LLMChain::Chain.new(model: "mistral:7b")
+
+# Mixtral for complex tasks
+mixtral_chain = LLMChain::Chain.new(model: "mixtral:8x7b")
+
 # Direct client usage
 deepseek_client = LLMChain::Clients::DeepseekCoderV2.new(model: "deepseek-coder-v2:16b")
 response = deepseek_client.chat("Create a Ruby method to sort an array")
+
+mistral_client = LLMChain::Clients::Mistral.new
+response = mistral_client.chat("Explain quantum computing in simple terms")
 ```
 
 ## 💾 Memory System
data/lib/llm_chain/clients/mistral.rb ADDED
@@ -0,0 +1,42 @@
+require 'faraday'
+require 'json'
+
+module LLMChain
+  module Clients
+    # Mistral client for Ollama
+    #
+    # Provides access to Mistral models through Ollama with support for
+    # streaming and non-streaming responses.
+    #
+    # @example Basic usage
+    #   client = LLMChain::Clients::Mistral.new
+    #   response = client.chat("Hello, how are you?")
+    #
+    # @example Using specific model variant
+    #   client = LLMChain::Clients::Mistral.new(model: "mixtral:8x7b")
+    #   response = client.chat("Explain quantum computing")
+    #
+    class Mistral < OllamaBase
+      DEFAULT_MODEL = "mistral:latest".freeze
+
+      # Optimized settings for Mistral models
+      # @return [Hash] Default options for Mistral models
+      DEFAULT_OPTIONS = {
+        temperature: 0.7,
+        top_p: 0.9,
+        top_k: 40,
+        repeat_penalty: 1.1,
+        num_ctx: 8192,
+        stop: ["<|im_end|>", "<|endoftext|>", "<|user|>", "<|assistant|>"]
+      }.freeze
+
+      # Initialize the Mistral client
+      # @param model [String] Model to use (defaults to mistral:latest)
+      # @param base_url [String] Custom base URL for API calls
+      # @param options [Hash] Additional options to merge with defaults
+      def initialize(model: DEFAULT_MODEL, base_url: nil, **options)
+        super(model: model, base_url: base_url, default_options: DEFAULT_OPTIONS.merge(options))
+      end
+    end
+  end
+end
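Because `initialize` merges caller-supplied keywords over `DEFAULT_OPTIONS` (last write wins), generation settings can be tuned per instance without subclassing. A hedged sketch (the override values here are illustrative, not shipped defaults):

    # Keywords other than model: and base_url: are merged over DEFAULT_OPTIONS.
    client = LLMChain::Clients::Mistral.new(
      model: "mistral-small:latest",
      temperature: 0.2, # more deterministic than the 0.7 default
      num_ctx: 4096     # smaller context window than the 8192 default
    )
    response = client.chat("Explain the repeat_penalty option in one sentence.")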
data/lib/llm_chain/configuration_validator.rb CHANGED
@@ -57,7 +57,7 @@ module LLMChain
       case model.to_s
       when /^gpt/
         validate_openai_requirements!(model)
-      when /qwen|llama|gemma|deepseek-coder-v2/
+      when /qwen|llama|gemma|deepseek-coder-v2|mistral|mixtral/
         validate_ollama_requirements!(model)
       else
         add_warning("Unknown model type: #{model}. Proceeding with default settings.")
@@ -118,7 +118,7 @@ module LLMChain
 
     def validate_client_availability!(model)
       case model.to_s
-      when /qwen|llama|gemma/
+      when /qwen|llama|gemma|mistral|mixtral/
         unless check_ollama_availability
           raise ValidationError, "Ollama server is not running for model '#{model}'"
         end
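The validator dispatches on regex matches against the model string, so any model name containing `mistral` or `mixtral` now takes the Ollama path. A standalone illustration of the updated dispatch (not code from the gem):

    # Mirrors the two case/when regexes added above.
    ["gpt-4o", "mistral:7b", "mixtral:8x7b", "qwen:7b", "phi3"].each do |model|
      route =
        case model
        when /^gpt/ then "OpenAI requirements"
        when /qwen|llama|gemma|deepseek-coder-v2|mistral|mixtral/ then "Ollama requirements"
        else "warning: unknown model type"
        end
      puts "#{model} -> #{route}"
    end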
data/lib/llm_chain/version.rb
CHANGED
data/lib/llm_chain.rb CHANGED
@@ -10,6 +10,7 @@ require_relative "llm_chain/clients/qwen"
 require_relative "llm_chain/clients/llama2"
 require_relative "llm_chain/clients/gemma3"
 require_relative "llm_chain/clients/deepseek_coder_v2"
+require_relative "llm_chain/clients/mistral"
 require_relative "llm_chain/memory/array"
 require_relative "llm_chain/memory/redis"
 require_relative "llm_chain/tools/base"
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llm_chain
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.7.0
 platform: ruby
 authors:
 - FuryCow
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2025-07-
+date: 2025-07-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: httparty
@@ -222,6 +222,7 @@ files:
 - lib/llm_chain/clients/deepseek_coder_v2.rb
 - lib/llm_chain/clients/gemma3.rb
 - lib/llm_chain/clients/llama2.rb
+- lib/llm_chain/clients/mistral.rb
 - lib/llm_chain/clients/ollama_base.rb
 - lib/llm_chain/clients/openai.rb
 - lib/llm_chain/clients/qwen.rb