smart_prompt 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 3bf70c6e843930ff7e1a03c9dd8bfc97b3d097786f84613aab65863e1e43a624
-   data.tar.gz: e9ee9d99378453d90e2ac25b88248f3226f241074c7f80e8bf35104279f05202
+   metadata.gz: dce823cedfccc6a2288e6a37b2a91571d4bc784d26440d11503697c43d5558de
+   data.tar.gz: b8f4cbb2104d640f423f727afcb38962a7cfa663887ada0d32ec3a61d2ce513c
  SHA512:
-   metadata.gz: 02fbfcdb0ae2292994404219eaa2f1218d54de132ceab061ca4466603186cdc9db5826b7e6e1fcfaa8a37f2527f42c3f863154dfd4cfe94b5b7df58969183285
-   data.tar.gz: b0034a25a0fa4bc6c197beee4635528c3b4887f36a4c8981ebb9894b7c5db61a245a0ff823f6ce171d629ddc65a61a960a172d4235d319d7220050225b7a8a48
+   metadata.gz: 70c6bd8c45f433cbc4a60bd4d3964f694b836178ff09a84873aeaedc62cc9064e5c4d043d45b7f864d95b5f60f3a8eb6571230abb9c45385a8525014417493db
+   data.tar.gz: 43eca2651ae52dab6e8ca1fff67acbc1099e3adc839c8035111148aae45b3bdcaf64f8b1f5daed8e45c4b2962449eef86c19a16d7e69fe59f874af2e4b66b4d1
data/lib/smart_prompt/api_handler.rb ADDED
@@ -0,0 +1,17 @@
+ module SmartPrompt
+   module APIHandler
+     MAX_RETRIES = 3
+     RETRY_OPTIONS = {
+       tries: MAX_RETRIES,
+       base_interval: 1,
+       max_interval: 10,
+       rand_factor: 0.5,
+       on: [
+         Errno::ECONNRESET,
+         Errno::ECONNABORTED,
+         Errno::EPIPE,
+         Errno::ETIMEDOUT
+       ]
+     }
+   end
+ end
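The new APIHandler module centralizes the retry policy for the retriable gem: up to MAX_RETRIES attempts with randomized exponential backoff (1s base, 10s cap, 0.5 jitter), retried only for the four Errno network errors listed under :on. A minimal sketch of how an including class would apply it (the wrapper method is illustrative, not part of the gem):

  require "retriable"

  class NetworkCaller
    include SmartPrompt::APIHandler

    # Runs the block, retrying up to MAX_RETRIES times with jittered
    # exponential backoff, but only for the Errno classes listed in :on.
    def with_retries(&block)
      Retriable.retriable(RETRY_OPTIONS, &block)
    end
  end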
data/lib/smart_prompt/conversation.rb CHANGED
@@ -1,7 +1,10 @@
  require 'yaml'
+ require 'retriable'
+ require "numo/narray"

  module SmartPrompt
    class Conversation
+     include APIHandler
      attr_reader :messages, :last_response, :config_file

      def initialize(engine)
@@ -25,13 +28,22 @@ module SmartPrompt
        @model_name = model_name
      end

+     def temperature(temperature)
+       @temperature = temperature
+     end
+
      def prompt(template_name, params = {})
-       template_name = template_name.to_s
-       SmartPrompt.logger.info "Use template #{template_name}"
-       raise "Template #{template_name} not found" unless @templates.key?(template_name)
-       content = @templates[template_name].render(params)
-       @messages << { role: 'user', content: content }
-       self
+       if template_name.class == Symbol
+         template_name = template_name.to_s
+         SmartPrompt.logger.info "Use template #{template_name}"
+         raise "Template #{template_name} not found" unless @templates.key?(template_name)
+         content = @templates[template_name].render(params)
+         @messages << { role: 'user', content: content }
+         self
+       else
+         @messages << { role: 'user', content: template_name }
+         self
+       end
      end

      def sys_msg(message)
@@ -42,10 +54,48 @@ module SmartPrompt

      def send_msg
        raise "No LLM selected" if @current_llm.nil?
-       @last_response = @current_llm.send_request(@messages, @model_name)
+       @last_response = @current_llm.send_request(@messages, @model_name, @temperature)
        @messages=[]
        @messages << { role: 'system', content: @sys_msg }
        @last_response
      end
+
+     def safe_send_msg
+       Retriable.retriable(RETRY_OPTIONS) do
+         raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+         @last_response = @current_llm.send_request(@messages, @model_name)
+         @messages=[]
+         @messages << { role: 'system', content: @sys_msg }
+         @last_response
+       end
+     rescue => e
+       return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
+     end
+
+     def normalize(x, length)
+       if x.length > length
+         x = Numo::NArray.cast(x[0..length-1])
+         norm = Math.sqrt((x * x).sum)
+         return (x / norm).to_a
+       else
+         return x.concat([0] * (x.length - length))
+       end
+     end
+
+     def embeddings(length)
+       Retriable.retriable(RETRY_OPTIONS) do
+         raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+         text = ""
+         @messages.each do |msg|
+           if msg[:role]=="user"
+             text = msg[:content]
+           end
+         end
+         @last_response = @current_llm.embeddings(text, @model_name)
+         @messages=[]
+         @messages << { role: 'system', content: @sys_msg }
+         normalize(@last_response, length)
+       end
+     end
    end
  end
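Taken together, the Conversation changes add a per-conversation temperature, let prompt accept either a template Symbol or a raw String, and introduce retry-wrapped safe_send_msg and embeddings methods. A usage sketch, assuming an engine and LLM configured as elsewhere in the gem (the config path and template name are illustrative):

  engine = SmartPrompt::Engine.new("config/llm_config.yml")
  conv = SmartPrompt::Conversation.new(engine)
  # an LLM must already be selected, or send_msg raises "No LLM selected"

  conv.temperature(0.2)                 # new in 0.1.9; forwarded by send_msg
  conv.prompt(:summarize, text: "...")  # Symbol: rendered through a template
  conv.prompt("Keep it to one line.")   # String: appended as a raw user message
  reply = conv.safe_send_msg            # retried per RETRY_OPTIONS; returns an error string on final failure

Two details in the released code are worth noting: safe_send_msg passes no temperature to send_request (unlike send_msg), and normalize's padding branch computes x.length - length, which is negative for short inputs, so [0] * (x.length - length) raises ArgumentError; length - x.length appears intended.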
data/lib/smart_prompt/db_adapter.rb ADDED
@@ -0,0 +1,45 @@
+ require "sequel"
+ require "json"
+
+ module SmartPrompt
+   class DBAdapter
+     attr_reader :db, :tables
+     def initialize(config)
+       db_uri = config[:database]
+       @db = Sequel.connect(db_uri)
+       @tables = {}
+       @db.tables.each do |table_name|
+         define_table(table_name)
+       end
+     end
+
+     def define_table(table_name, class_name=table_name.to_s.capitalize)
+       class_define = <<-EOT
+         class #{class_name} < Sequel::Model(:#{table_name})
+         end
+       EOT
+       eval(class_define)
+       @tables[table_name] = eval(class_name)
+     end
+
+     def get_table_schema(table_name)
+       @tables[table_name].db_schema
+     end
+
+     def get_table_schema_str(table_name)
+       JSON.pretty_generate(get_table_schema(table_name))
+     end
+
+     def get_db_schema
+       schema = {}
+       @db.tables.each do |table_name|
+         schema[table_name] = get_table_schema(table_name)
+       end
+       schema
+     end
+
+     def get_db_schema_str
+       JSON.pretty_generate(get_db_schema)
+     end
+   end
+ end
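DBAdapter introspects a Sequel connection, eval-generates one Sequel::Model subclass per table, and exposes the schema as pretty-printed JSON (convenient for pasting into prompts). A sketch against a throwaway SQLite file (the users table is illustrative):

  require "sequel"

  # Seed a database so define_table has something to wrap.
  Sequel.connect("sqlite://demo.db").create_table?(:users) do
    primary_key :id
    String :name
  end

  adapter = SmartPrompt::DBAdapter.new(database: "sqlite://demo.db")
  adapter.tables[:users]                    # => the generated Users model class
  puts adapter.get_table_schema_str(:users) # column metadata as pretty JSON
  puts adapter.get_db_schema_str            # same, for every table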
data/lib/smart_prompt/engine.rb CHANGED
@@ -51,6 +51,15 @@ module SmartPrompt
        require(file)
      end
    end
+
+   def check_worker(worker_name)
+     if SmartPrompt::Worker.workers[worker_name]
+       return true
+     else
+       SmartPrompt.logger.warn "Invalid worker: #{worker_name}"
+       return false
+     end
+   end

    def call_worker(worker_name, params = {})
      SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
data/lib/smart_prompt/ollama_adapter.rb CHANGED
@@ -6,7 +6,7 @@ module SmartPrompt
      super
      begin
        @client = Ollama.new(credentials: { address: @config['url'] })
-     rescue Ollama::Error => e
+     rescue Ollama::Errors => e
        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
        raise LLMAPIError, "Invalid Ollama configuration: #{e.message}"
      rescue SocketError => e
@@ -36,18 +36,9 @@ module SmartPrompt
          stream: false
        }
      )
-   rescue Ollama::Error => e
+   rescue Ollama::Errors => e
      SmartPrompt.logger.error "Ollama API error: #{e.message}"
-     raise LLMAPIError, "Ollama API error: #{e.message}"
-   rescue Ollama::ConnectionError => e
-     SmartPrompt.logger.error "Connection error: Unable to reach Ollama API"
-     raise LLMAPIError, "Connection error: Unable to reach Ollama API"
-   rescue Ollama::TimeoutError => e
-     SmartPrompt.logger.error "Request to Ollama API timed out"
-     raise LLMAPIError, "Request to Ollama API timed out"
-   rescue Ollama::InvalidRequestError => e
-     SmartPrompt.logger.error "Invalid request to Ollama API: #{e.message}"
-     raise LLMAPIError, "Invalid request to Ollama API: #{e.message}"
+     raise LLMAPIError, "Ollama API error: #{e.message}"
    rescue JSON::ParserError => e
      SmartPrompt.logger.error "Failed to parse Ollama API response"
      raise LLMAPIError, "Failed to parse Ollama API response"
@@ -58,7 +49,31 @@ module SmartPrompt
        SmartPrompt.logger.info "Successful send a message"
      end
      SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
-     return response[0]["response"]
+     return response.dig(0,"response")
+   end
+
+   def embeddings(text, model)
+     SmartPrompt.logger.info "OllamaAdapter: get embeddings from Ollama"
+     if model
+       model_name = model
+     else
+       model_name = @config['model']
+     end
+     SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
+     begin
+       response = @client.embeddings(
+         {
+           model: model_name,
+           prompt: text.to_s
+         }
+       )
+     rescue => e
+       SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
+       raise Error, "Unexpected error during Ollama request: #{e.message}"
+     ensure
+       SmartPrompt.logger.info "Successful send a message"
+     end
+     return response.dig(0,"embedding")
    end
  end
end
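The switch from response[0]["response"] to response.dig(0, "response") is a nil-safety fix: dig returns nil for a missing path where chained indexing raises. A quick illustration:

  response = []                 # e.g. an empty or malformed API reply
  response.dig(0, "response")   # => nil, which the caller can test for
  response[0]["response"]       # => NoMethodError: undefined method `[]' for nil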
data/lib/smart_prompt/openai_adapter.rb CHANGED
@@ -17,7 +17,7 @@ module SmartPrompt
      rescue OpenAI::ConfigurationError => e
        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
        raise LLMAPIError, "Invalid OpenAI configuration: #{e.message}"
-     rescue OpenAI::AuthenticationError => e
+     rescue OpenAI::Error => e
        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
        raise LLMAPIError, "OpenAI authentication failed: #{e.message}"
      rescue SocketError => e
@@ -31,7 +31,7 @@ module SmartPrompt
      end
    end

-   def send_request(messages, model=nil)
+   def send_request(messages, model=nil, send_request=0.7)
      SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
      if model
        model_name = model
@@ -44,27 +44,15 @@ module SmartPrompt
        parameters: {
          model: model_name,
          messages: messages,
-         temperature: @config['temperature'] || 0.7
+         temperature: @config['temperature'] || send_request
        }
      )
-   rescue OpenAI::APIError => e
+   rescue OpenAI::Error => e
      SmartPrompt.logger.error "OpenAI API error: #{e.message}"
      raise LLMAPIError, "OpenAI API error: #{e.message}"
-   rescue OpenAI::APIConnectionError => e
-     SmartPrompt.logger.error "Connection error: Unable to reach OpenAI API"
-     raise LLMAPIError, "Connection error: Unable to reach OpenAI API"
-   rescue OpenAI::APITimeoutError => e
-     SmartPrompt.logger.error "Request to OpenAI API timed out"
-     raise LLMAPIError, "Request to OpenAI API timed out"
-   rescue OpenAI::InvalidRequestError => e
-     SmartPrompt.logger.error "Invalid request to OpenAI API: #{e.message}"
-     raise LLMAPIError, "Invalid request to OpenAI API: #{e.message}"
-   rescue OpenAI::AuthenticationError => e
-     SmartPrompt.logger.error "Authentication error with OpenAI API: #{e.message}"
-     raise LLMAPIError, "Authentication error with OpenAI API: #{e.message}"
-   rescue OpenAI::RateLimitError => e
-     SmartPrompt.logger.error "Rate limit exceeded for OpenAI API"
-     raise LLMAPIError, "Rate limit exceeded for OpenAI API"
+   rescue OpenAI::MiddlewareErrors => e
+     SmartPrompt.logger.error "OpenAI HTTP Error: #{e.message}"
+     raise LLMAPIError, "OpenAI HTTP Error"
    rescue JSON::ParserError => e
      SmartPrompt.logger.error "Failed to parse OpenAI API response"
      raise LLMAPIError, "Failed to parse OpenAI API response"
@@ -77,5 +65,29 @@ module SmartPrompt
      SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
      response.dig("choices", 0, "message", "content")
    end
+
+   def embeddings(text, model)
+     SmartPrompt.logger.info "OpenAIAdapter: get embeddings from Ollama"
+     if model
+       model_name = model
+     else
+       model_name = @config['model']
+     end
+     SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
+     begin
+       response = @client.embeddings(
+         parameters: {
+           model: model_name,
+           input: text.to_s
+         }
+       )
+     rescue => e
+       SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
+       raise Error, "Unexpected error during Ollama request: #{e.message}"
+     ensure
+       SmartPrompt.logger.info "Successful send a message"
+     end
+     return response.dig("data", 0, "embedding")
+   end
  end
end
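One subtlety in the new send_request signature: the third parameter, named send_request in the released code but evidently the per-call temperature that Conversation#send_msg now passes, only takes effect when the adapter config sets no 'temperature' key, because the || gives the config value precedence:

  config = { 'temperature' => 0.1 }
  config['temperature'] || 0.9   # => 0.1, the config value wins when present

  config = {}
  config['temperature'] || 0.9   # => 0.9, the per-call value is only a fallback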
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
  module SmartPrompt
-   VERSION = "0.1.7"
+   VERSION = "0.1.9"
  end
data/lib/smart_prompt.rb CHANGED
@@ -1,5 +1,6 @@
  require File.expand_path('../smart_prompt/version', __FILE__)
  require File.expand_path('../smart_prompt/engine', __FILE__)
+ require File.expand_path('../smart_prompt/api_handler', __FILE__)
  require File.expand_path('../smart_prompt/conversation', __FILE__)
  require File.expand_path('../smart_prompt/llm_adapter', __FILE__)
  require File.expand_path('../smart_prompt/openai_adapter', __FILE__)
metadata CHANGED
@@ -1,14 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: smart_prompt
  version: !ruby/object:Gem::Version
-   version: 0.1.7
+   version: 0.1.9
  platform: ruby
  authors:
  - zhuang biaowei
- autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-10-17 00:00:00.000000000 Z
+ date: 2025-01-14 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: yaml
@@ -39,19 +38,19 @@ dependencies:
      - !ruby/object:Gem::Version
        version: 7.1.0
  - !ruby/object:Gem::Dependency
-   name: ollama-ai
+   name: ollama-ruby
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 1.3.0
+         version: 0.13.0
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 1.3.0
+         version: 0.13.0
  - !ruby/object:Gem::Dependency
    name: json
    requirement: !ruby/object:Gem::Requirement
@@ -80,6 +79,20 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: 1.0.4
+ - !ruby/object:Gem::Dependency
+   name: retriable
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 3.1.2
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 3.1.2
  description: SmartPrompt provides a flexible DSL for managing prompts, interacting
    with multiple LLMs, and creating composable task workers.
  email:
@@ -93,7 +106,9 @@ files:
  - README.md
  - Rakefile
  - lib/smart_prompt.rb
+ - lib/smart_prompt/api_handler.rb
  - lib/smart_prompt/conversation.rb
+ - lib/smart_prompt/db_adapter.rb
  - lib/smart_prompt/engine.rb
  - lib/smart_prompt/llamacpp_adapter.rb
  - lib/smart_prompt/llm_adapter.rb
@@ -110,7 +125,6 @@ metadata:
    homepage_uri: https://github.com/zhuangbiaowei/smart_prompt
    source_code_uri: https://github.com/zhuangbiaowei/smart_prompt
    changelog_uri: https://github.com/zhuangbiaowei/smart_prompt/blob/master/CHANGELOG.md
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -125,8 +139,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: '0'
  requirements: []
- rubygems_version: 3.5.22
- signing_key:
+ rubygems_version: 3.6.2
  specification_version: 4
  summary: A smart prompt management and LLM interaction gem
  test_files: []