smart_prompt 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 18a10421c03b8707b2ff6d7cf40a5e58eee621b62a177c7ab3d10c2b5e75b767
- data.tar.gz: 4f43968f0b7cf8e9dab3ce5959f2a837729b335b0a1fcc533852339f130b7e43
+ metadata.gz: ab0d62158bbd98af9d2f57d9708426cceb95c2d3b2797abc0094de3caf96adf8
+ data.tar.gz: d2a50c4ede575eb70caeda1893b60412b04afdec665690b8fd2d4d0f3bdd410e
  SHA512:
- metadata.gz: e0fd7f1e1995821f53135e01f92c3d3cdf1296405fec138c588ff23450c9be452d81b8a3dd42fb1e9fdf73748a1cc0119fe09285bac281eb035c2bec4742987c
- data.tar.gz: 45e702684683ff912f149a2ae2f7eb94f0f4efa183f991923db8858223a1c721e975821972f7183fcb958e902168a65b22d5ea14f84be6cea87a434ced74e447
+ metadata.gz: 6d61c7f8db100a715b16a8bd08ef27bf49f3e38127d68cb37a5fc55c3d5ee781e505d8c4acbc6ac98206bd2aeb22bcdd4dc6d749ed65293fecb5abbc361ffac9
+ data.tar.gz: 7f8ff17971d9979a95400bb1e4497737fab2c503d69a1c415a0003ff514400b55ade390d0f007a146e43569978b98e29f5e308091d74e25cd8b405689b73b136
data/lib/smart_prompt/conversation.rb CHANGED
@@ -1,7 +1,10 @@
  require 'yaml'
+ require 'retriable'
+ require "numo/narray"
 
  module SmartPrompt
  class Conversation
+ include APIHandler
  attr_reader :messages, :last_response, :config_file
 
  def initialize(engine)
@@ -26,12 +29,17 @@ module SmartPrompt
  end
 
  def prompt(template_name, params = {})
- template_name = template_name.to_s
- SmartPrompt.logger.info "Use template #{template_name}"
- raise "Template #{template_name} not found" unless @templates.key?(template_name)
- content = @templates[template_name].render(params)
- @messages << { role: 'user', content: content }
- self
+ if template_name.class == Symbol
+ template_name = template_name.to_s
+ SmartPrompt.logger.info "Use template #{template_name}"
+ raise "Template #{template_name} not found" unless @templates.key?(template_name)
+ content = @templates[template_name].render(params)
+ @messages << { role: 'user', content: content }
+ self
+ else
+ @messages << { role: 'user', content: template_name }
+ self
+ end
  end
 
  def sys_msg(message)
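With this change, only a Symbol argument is treated as a template name; a String is appended verbatim as the user message. A minimal sketch (the :summarize template and the engine setup are hypothetical):

    conv = SmartPrompt::Conversation.new(engine)
    conv.prompt(:summarize, text: "long article...")   # Symbol: renders the summarize template
    conv.prompt("What changed in 0.1.8?")              # String: sent as-is, no template lookup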
@@ -47,5 +55,43 @@ module SmartPrompt
  @messages << { role: 'system', content: @sys_msg }
  @last_response
  end
+
+ def safe_send_msg
+ Retriable.retriable(RETRY_OPTIONS) do
+ raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+ @last_response = @current_llm.send_request(@messages, @model_name)
+ @messages=[]
+ @messages << { role: 'system', content: @sys_msg }
+ @last_response
+ end
+ rescue => e
+ return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
+ end
+
+ def normalize(x, length)
+ if x.length > length
+ x = Numo::NArray.cast(x[0..length-1])
+ norm = Math.sqrt((x * x).sum)
+ return (x / norm).to_a
+ else
+ return x.concat([0] * (length - x.length))
+ end
+ end
+
+ def embeddings(length)
+ Retriable.retriable(RETRY_OPTIONS) do
+ raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+ text = ""
+ @messages.each do |msg|
+ if msg[:role]=="user"
+ text = msg[:content]
+ end
+ end
+ @last_response = @current_llm.embeddings(text, @model_name)
+ @messages=[]
+ @messages << { role: 'system', content: @sys_msg }
+ normalize(@last_response, length)
+ end
+ end
  end
  end
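A sketch of the new retry and embedding paths, assuming Engine.new accepts a config path (hypothetical here) and that RETRY_OPTIONS and MAX_RETRIES come from the included APIHandler module, which is not part of this diff:

    engine = SmartPrompt::Engine.new("config/llm_config.yml")  # hypothetical path
    conv = SmartPrompt::Conversation.new(engine)
    conv.sys_msg("You are a helpful assistant.")
    conv.prompt("Summarize retriable in one sentence.")
    answer = conv.safe_send_msg        # retried via Retriable; returns an error string on final failure

    conv.prompt("Sentence to embed")
    vector = conv.embeddings(512)      # truncated and L2-normalized, or zero-padded, to 512 values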
data/lib/smart_prompt/engine.rb CHANGED
@@ -11,26 +11,39 @@ module SmartPrompt
  end
 
  def load_config(config_file)
- @config_file = config_file
- @config = YAML.load_file(config_file)
- if @config['logger_file']
- SmartPrompt.logger = Logger.new(@config['logger_file'])
- end
- SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
- @config['adapters'].each do |adapter_name, adapter_class|
- adapter_class = SmartPrompt.const_get(adapter_class)
- @adapters[adapter_name] = adapter_class
- end
- @config['llms'].each do |llm_name,llm_config|
- adapter_class = @adapters[llm_config['adapter']]
- @llms[llm_name]=adapter_class.new(llm_config)
- end
- @current_llm = @config['default_llm'] if @config['default_llm']
- Dir.glob(File.join(@config['template_path'], '*.erb')).each do |file|
- template_name = file.gsub(@config['template_path']+"/","").gsub("\.erb","")
- @templates[template_name] = PromptTemplate.new(file)
- end
- load_workers
+ begin
+ @config_file = config_file
+ @config = YAML.load_file(config_file)
+ if @config['logger_file']
+ SmartPrompt.logger = Logger.new(@config['logger_file'])
+ end
+ SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
+ @config['adapters'].each do |adapter_name, adapter_class|
+ adapter_class = SmartPrompt.const_get(adapter_class)
+ @adapters[adapter_name] = adapter_class
+ end
+ @config['llms'].each do |llm_name,llm_config|
+ adapter_class = @adapters[llm_config['adapter']]
+ @llms[llm_name]=adapter_class.new(llm_config)
+ end
+ @current_llm = @config['default_llm'] if @config['default_llm']
+ Dir.glob(File.join(@config['template_path'], '*.erb')).each do |file|
+ template_name = file.gsub(@config['template_path']+"/","").gsub("\.erb","")
+ @templates[template_name] = PromptTemplate.new(file)
+ end
+ load_workers
+ rescue Psych::SyntaxError => ex
+ SmartPrompt.logger.error "YAML syntax error in config file: #{ex.message}"
+ raise ConfigurationError, "Invalid YAML syntax in config file: #{ex.message}"
+ rescue Errno::ENOENT => ex
+ SmartPrompt.logger.error "Config file not found: #{ex.message}"
+ raise ConfigurationError, "Config file not found: #{ex.message}"
+ rescue StandardError => ex
+ SmartPrompt.logger.error "Error loading configuration: #{ex.message}"
+ raise ConfigurationError, "Error loading configuration: #{ex.message}"
+ ensure
+ SmartPrompt.logger.info "Configuration loaded successfully"
+ end
  end
 
  def load_workers
@@ -38,6 +51,15 @@ module SmartPrompt
  require(file)
  end
  end
+
+ def check_worker(worker_name)
+ if SmartPrompt::Worker.workers[worker_name]
+ return true
+ else
+ SmartPrompt.logger.warn "Invalid worker: #{worker_name}"
+ return false
+ end
+ end
 
  def call_worker(worker_name, params = {})
  SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
data/lib/smart_prompt/llamacpp_adapter.rb ADDED
@@ -0,0 +1,68 @@
+ require 'openai'
+
+ module SmartPrompt
+ class LlamacppAdapter < LLMAdapter
+ def initialize(config)
+ super
+ begin
+ @client = OpenAI::Client.new(
+ uri_base: @config['url']
+ )
+ rescue OpenAI::ConfigurationError => e
+ SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+ raise LLMAPIError, "Invalid Llamacpp configuration: #{e.message}"
+ rescue OpenAI::AuthenticationError => e
+ SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+ raise LLMAPIError, "Llamacpp authentication failed: #{e.message}"
+ rescue SocketError => e
+ SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+ raise LLMAPIError, "Network error: Unable to connect to Llamacpp API"
+ rescue => e
+ SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+ raise Error, "Unexpected error initializing Llamacpp client: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully created a Llamacpp client."
+ end
+ end
+
+ def send_request(messages, model=nil)
+ SmartPrompt.logger.info "LlamacppAdapter: Sending request to Llamacpp"
+ begin
+ response = @client.chat(
+ parameters: {
+ messages: messages,
+ temperature: @config['temperature'] || 0.7
+ }
+ )
+ rescue OpenAI::APIError => e
+ SmartPrompt.logger.error "Llamacpp API error: #{e.message}"
+ raise LLMAPIError, "Llamacpp API error: #{e.message}"
+ rescue OpenAI::APIConnectionError => e
+ SmartPrompt.logger.error "Connection error: Unable to reach Llamacpp API"
+ raise LLMAPIError, "Connection error: Unable to reach Llamacpp API"
+ rescue OpenAI::APITimeoutError => e
+ SmartPrompt.logger.error "Request to Llamacpp API timed out"
+ raise LLMAPIError, "Request to Llamacpp API timed out"
+ rescue OpenAI::InvalidRequestError => e
+ SmartPrompt.logger.error "Invalid request to Llamacpp API: #{e.message}"
+ raise LLMAPIError, "Invalid request to Llamacpp API: #{e.message}"
+ rescue OpenAI::AuthenticationError => e
+ SmartPrompt.logger.error "Authentication error with Llamacpp API: #{e.message}"
+ raise LLMAPIError, "Authentication error with Llamacpp API: #{e.message}"
+ rescue OpenAI::RateLimitError => e
+ SmartPrompt.logger.error "Rate limit exceeded for Llamacpp API"
+ raise LLMAPIError, "Rate limit exceeded for Llamacpp API"
+ rescue JSON::ParserError => e
+ SmartPrompt.logger.error "Failed to parse Llamacpp API response"
+ raise LLMAPIError, "Failed to parse Llamacpp API response"
+ rescue => e
+ SmartPrompt.logger.error "Unexpected error during Llamacpp request: #{e.message}"
+ raise Error, "Unexpected error during Llamacpp request: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully sent a message"
+ end
+ SmartPrompt.logger.info "LlamacppAdapter: Received response from Llamacpp"
+ response.dig("choices", 0, "message", "content")
+ end
+ end
+ end
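A usage sketch for the extracted adapter; the URL is hypothetical, and no model parameter is sent because a llama.cpp server exposes a single preloaded model through its OpenAI-compatible endpoint:

    llm_config = { 'adapter' => 'llamacpp', 'url' => 'http://localhost:8080/v1', 'temperature' => 0.2 }
    adapter = SmartPrompt::LlamacppAdapter.new(llm_config)
    reply = adapter.send_request([{ role: 'user', content: 'Hello' }])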
data/lib/smart_prompt/llm_adapter.rb CHANGED
@@ -1,8 +1,6 @@
  require 'net/http'
  require 'json'
  require 'uri'
- require 'openai'
- require 'ollama-ai'
 
  module SmartPrompt
  class LLMAdapter
@@ -17,86 +15,6 @@ module SmartPrompt
  end
  end
 
- class OpenAIAdapter < LLMAdapter
- def initialize(config)
- super
- api_key = @config['api_key']
- if api_key.is_a?(String) && api_key.start_with?('ENV[') && api_key.end_with?(']')
- api_key = eval(api_key)
- end
- @client = OpenAI::Client.new(
- access_token: api_key,
- uri_base: @config['url'],
- request_timeout: 240
- )
- end
-
- def send_request(messages, model=nil)
- SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
- if model
- model_name = model
- else
- model_name = @config['model']
- end
- SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
- response = @client.chat(
- parameters: {
- model: model_name,
- messages: messages,
- temperature: @config['temperature'] || 0.7
- }
- )
- SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
- response.dig("choices", 0, "message", "content")
- end
- end
-
- class LlamacppAdapter < LLMAdapter
- def initialize(config)
- super
- @client = OpenAI::Client.new(
- uri_base: @config['url']
- )
- end
- def send_request(messages, model=nil)
- SmartPrompt.logger.info "LlamacppAdapter: Sending request to Llamacpp"
- response = @client.chat(
- parameters: {
- messages: messages,
- temperature: @config['temperature'] || 0.7
- }
- )
- SmartPrompt.logger.info "LlamacppAdapter: Received response from Llamacpp"
- response.dig("choices", 0, "message", "content")
- end
- end
-
- class OllamaAdapter < LLMAdapter
- def initialize(config)
- super
- @client = Ollama.new(credentials: { address: @config['url'] })
- end
-
- def send_request(messages, model=nil)
- SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
- if model
- model_name = model
- else
- model_name = @config['model']
- end
- SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
- response = @client.generate(
- {
- model: model_name,
- prompt: messages.to_s,
- stream: false
- }
- )
- SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
- return response[0]["response"]
- end
- end
-
  class MockAdapter < LLMAdapter
  def send_request(messages)
  puts "Mock adapter received #{messages.length} messages"
data/lib/smart_prompt/ollama_adapter.rb ADDED
@@ -0,0 +1,79 @@
+ require 'ollama-ai'
+
+ module SmartPrompt
+ class OllamaAdapter < LLMAdapter
+ def initialize(config)
+ super
+ begin
+ @client = Ollama.new(credentials: { address: @config['url'] })
+ rescue Ollama::Errors => e
+ SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
+ raise LLMAPIError, "Invalid Ollama configuration: #{e.message}"
+ rescue SocketError => e
+ SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
+ raise LLMAPIError, "Network error: Unable to connect to Ollama API"
+ rescue => e
+ SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
+ raise Error, "Unexpected error initializing Ollama client: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully created an Ollama client."
+ end
+ end
+
+ def send_request(messages, model=nil)
+ SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
+ if model
+ model_name = model
+ else
+ model_name = @config['model']
+ end
+ SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
+ begin
+ response = @client.generate(
+ {
+ model: model_name,
+ prompt: messages.to_s,
+ stream: false
+ }
+ )
+ rescue Ollama::Errors => e
+ SmartPrompt.logger.error "Ollama API error: #{e.message}"
+ raise LLMAPIError, "Ollama API error: #{e.message}"
+ rescue JSON::ParserError => e
+ SmartPrompt.logger.error "Failed to parse Ollama API response"
+ raise LLMAPIError, "Failed to parse Ollama API response"
+ rescue => e
+ SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
+ raise Error, "Unexpected error during Ollama request: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully sent a message"
+ end
+ SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
+ return response.dig(0,"response")
+ end
+
+ def embeddings(text, model)
+ SmartPrompt.logger.info "OllamaAdapter: Getting embeddings from Ollama"
+ if model
+ model_name = model
+ else
+ model_name = @config['model']
+ end
+ SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
+ begin
+ response = @client.embeddings(
+ {
+ model: model_name,
+ prompt: text.to_s
+ }
+ )
+ rescue => e
+ SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
+ raise Error, "Unexpected error during Ollama request: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully sent a message"
+ end
+ return response.dig(0,"embedding")
+ end
+ end
+ end
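A direct-call sketch mirroring what Conversation#embeddings does through the engine; the address and model names are hypothetical:

    adapter = SmartPrompt::OllamaAdapter.new('url' => 'http://localhost:11434', 'model' => 'llama3')
    vec = adapter.embeddings('hello world', 'nomic-embed-text')  # Array of floats via dig(0, "embedding")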
data/lib/smart_prompt/openai_adapter.rb ADDED
@@ -0,0 +1,93 @@
+ require 'openai'
+
+ module SmartPrompt
+ class OpenAIAdapter < LLMAdapter
+ def initialize(config)
+ super
+ api_key = @config['api_key']
+ if api_key.is_a?(String) && api_key.start_with?('ENV[') && api_key.end_with?(']')
+ api_key = eval(api_key)
+ end
+ begin
+ @client = OpenAI::Client.new(
+ access_token: api_key,
+ uri_base: @config['url'],
+ request_timeout: 240
+ )
+ rescue OpenAI::ConfigurationError => e
+ SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+ raise LLMAPIError, "Invalid OpenAI configuration: #{e.message}"
+ rescue OpenAI::Error => e
+ SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+ raise LLMAPIError, "OpenAI authentication failed: #{e.message}"
+ rescue SocketError => e
+ SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+ raise LLMAPIError, "Network error: Unable to connect to OpenAI API"
+ rescue => e
+ SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+ raise Error, "Unexpected error initializing OpenAI client: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully created an OpenAI client."
+ end
+ end
+
+ def send_request(messages, model=nil)
+ SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
+ if model
+ model_name = model
+ else
+ model_name = @config['model']
+ end
+ SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
+ begin
+ response = @client.chat(
+ parameters: {
+ model: model_name,
+ messages: messages,
+ temperature: @config['temperature'] || 0.7
+ }
+ )
+ rescue OpenAI::Error => e
+ SmartPrompt.logger.error "OpenAI API error: #{e.message}"
+ raise LLMAPIError, "OpenAI API error: #{e.message}"
+ rescue OpenAI::MiddlewareErrors => e
+ SmartPrompt.logger.error "OpenAI HTTP Error: #{e.message}"
+ raise LLMAPIError, "OpenAI HTTP Error"
+ rescue JSON::ParserError => e
+ SmartPrompt.logger.error "Failed to parse OpenAI API response"
+ raise LLMAPIError, "Failed to parse OpenAI API response"
+ rescue => e
+ SmartPrompt.logger.error "Unexpected error during OpenAI request: #{e.message}"
+ raise Error, "Unexpected error during OpenAI request: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully sent a message"
+ end
+ SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
+ response.dig("choices", 0, "message", "content")
+ end
+
+ def embeddings(text, model)
+ SmartPrompt.logger.info "OpenAIAdapter: Getting embeddings from OpenAI"
+ if model
+ model_name = model
+ else
+ model_name = @config['model']
+ end
+ SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
+ begin
+ response = @client.embeddings(
+ parameters: {
+ model: model_name,
+ input: text.to_s
+ }
+ )
+ rescue => e
+ SmartPrompt.logger.error "Unexpected error during OpenAI request: #{e.message}"
+ raise Error, "Unexpected error during OpenAI request: #{e.message}"
+ ensure
+ SmartPrompt.logger.info "Successfully sent a message"
+ end
+ return response.dig("data", 0, "embedding")
+ end
+ end
+ end
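The api_key handling above accepts either a literal key or the string form ENV['...'], which is resolved with eval at load time so the secret can stay out of the YAML file. A hypothetical llms entry (key name and model are illustrative):

    llm_config = {
      'adapter' => 'openai',
      'api_key' => "ENV['OPENAI_API_KEY']",   # eval'd; reads the environment variable
      'url'     => 'https://api.openai.com/v1',
      'model'   => 'gpt-4o-mini'
    }
    adapter = SmartPrompt::OpenAIAdapter.new(llm_config)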
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
  module SmartPrompt
- VERSION = "0.1.6"
+ VERSION = "0.1.8"
  end
data/lib/smart_prompt.rb CHANGED
@@ -1,12 +1,20 @@
  require File.expand_path('../smart_prompt/version', __FILE__)
  require File.expand_path('../smart_prompt/engine', __FILE__)
+ require File.expand_path('../smart_prompt/api_handler', __FILE__)
  require File.expand_path('../smart_prompt/conversation', __FILE__)
  require File.expand_path('../smart_prompt/llm_adapter', __FILE__)
+ require File.expand_path('../smart_prompt/openai_adapter', __FILE__)
+ require File.expand_path('../smart_prompt/llamacpp_adapter', __FILE__)
+ require File.expand_path('../smart_prompt/ollama_adapter', __FILE__)
  require File.expand_path('../smart_prompt/prompt_template', __FILE__)
  require File.expand_path('../smart_prompt/worker', __FILE__)
 
  module SmartPrompt
  class Error < StandardError; end
+ class ConfigurationError < Error; end
+ class LLMAPIError < Error; end
+ class CallWorkerError < Error; end
+
  attr_writer :logger
 
  def self.define_worker(name, &block)
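Since the new error classes all inherit from SmartPrompt::Error, callers can rescue per category or broadly. A minimal sketch, assuming Engine.new invokes load_config on a (hypothetical) config path:

    begin
      engine = SmartPrompt::Engine.new('missing.yml')
    rescue SmartPrompt::ConfigurationError => e
      warn "config problem: #{e.message}"
    rescue SmartPrompt::Error => e
      warn "smart_prompt failure: #{e.message}"
    end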
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: smart_prompt
  version: !ruby/object:Gem::Version
- version: 0.1.6
+ version: 0.1.8
  platform: ruby
  authors:
  - zhuang biaowei
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-10-08 00:00:00.000000000 Z
+ date: 2024-11-28 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: yaml
@@ -80,6 +80,20 @@ dependencies:
  - - "~>"
  - !ruby/object:Gem::Version
  version: 1.0.4
+ - !ruby/object:Gem::Dependency
+ name: retriable
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: 3.1.2
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: 3.1.2
  description: SmartPrompt provides a flexible DSL for managing prompts, interacting
  with multiple LLMs, and creating composable task workers.
  email:
@@ -95,7 +109,10 @@ files:
  - lib/smart_prompt.rb
  - lib/smart_prompt/conversation.rb
  - lib/smart_prompt/engine.rb
+ - lib/smart_prompt/llamacpp_adapter.rb
  - lib/smart_prompt/llm_adapter.rb
+ - lib/smart_prompt/ollama_adapter.rb
+ - lib/smart_prompt/openai_adapter.rb
  - lib/smart_prompt/prompt_template.rb
  - lib/smart_prompt/version.rb
  - lib/smart_prompt/worker.rb
@@ -122,7 +139,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.5.18
+ rubygems_version: 3.5.23
  signing_key:
  specification_version: 4
  summary: A smart prompt management and LLM interaction gem