smart_prompt 0.1.8 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: ab0d62158bbd98af9d2f57d9708426cceb95c2d3b2797abc0094de3caf96adf8
-   data.tar.gz: d2a50c4ede575eb70caeda1893b60412b04afdec665690b8fd2d4d0f3bdd410e
+   metadata.gz: 844d04d23e32f35f10feb9da06389b78f81d9f7616316cc04f0e8987e5bd6bae
+   data.tar.gz: 364703fc100dfad5a3180deb89fcb49d5aafe9686bc4bbe0f900c99a8d1fee1d
  SHA512:
-   metadata.gz: 6d61c7f8db100a715b16a8bd08ef27bf49f3e38127d68cb37a5fc55c3d5ee781e505d8c4acbc6ac98206bd2aeb22bcdd4dc6d749ed65293fecb5abbc361ffac9
-   data.tar.gz: 7f8ff17971d9979a95400bb1e4497737fab2c503d69a1c415a0003ff514400b55ade390d0f007a146e43569978b98e29f5e308091d74e25cd8b405689b73b136
+   metadata.gz: 503ade825af0875584fe49d31fba389b482ff60ae7d8deb0c657d778bc6a0be0a037e65efc95dd66bf88c52ae08508248c851828e2185ad231c0ebd78886e564
+   data.tar.gz: 07370b6a99cd6f4146c06a3b4de88d0fd0abcfce144b4e4c333cbdd39383fc601ba1f5fed536a4a05601262d643fa12ff57c9d7cb8cbb46389fa46f96644bbc2
data/lib/smart_prompt/api_handler.rb ADDED
@@ -0,0 +1,17 @@
+ module SmartPrompt
+   module APIHandler
+     MAX_RETRIES = 3
+     RETRY_OPTIONS = {
+       tries: MAX_RETRIES,
+       base_interval: 1,
+       max_interval: 10,
+       rand_factor: 0.5,
+       on: [
+         Errno::ECONNRESET,
+         Errno::ECONNABORTED,
+         Errno::EPIPE,
+         Errno::ETIMEDOUT
+       ]
+     }
+   end
+ end
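
These constants feed the Retriable.retriable calls used by Conversation#send_msg and #send_msg_by_stream in the hunks below. A minimal standalone sketch of the same retry behaviour, assuming the retriable gem is installed and that requiring smart_prompt loads api_handler.rb:

    require "retriable"
    require "smart_prompt"   # assumed to load lib/smart_prompt/api_handler.rb

    attempts = 0
    result = Retriable.retriable(SmartPrompt::APIHandler::RETRY_OPTIONS) do
      attempts += 1
      raise Errno::ECONNRESET if attempts == 1   # simulate one transient socket failure
      "ok after #{attempts} attempts"
    end
    puts result   # => "ok after 2 attempts" (after ~1s of jittered backoff)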
data/lib/smart_prompt/conversation.rb CHANGED
@@ -28,6 +28,10 @@ module SmartPrompt
        @model_name = model_name
      end

+     def temperature(temperature)
+       @temperature = temperature
+     end
+
      def prompt(template_name, params = {})
        if template_name.class == Symbol
          template_name = template_name.to_s
@@ -48,22 +52,33 @@ module SmartPrompt
        self
      end

-     def send_msg
+     def send_msg_once
        raise "No LLM selected" if @current_llm.nil?
-       @last_response = @current_llm.send_request(@messages, @model_name)
+       @last_response = @current_llm.send_request(@messages, @model_name, @temperature)
        @messages=[]
        @messages << { role: 'system', content: @sys_msg }
        @last_response
      end

-     def safe_send_msg
+     def send_msg
        Retriable.retriable(RETRY_OPTIONS) do
          raise ConfigurationError, "No LLM selected" if @current_llm.nil?
-         @last_response = @current_llm.send_request(@messages, @model_name)
+         @last_response = @current_llm.send_request(@messages, @model_name, @temperature)
          @messages=[]
          @messages << { role: 'system', content: @sys_msg }
          @last_response
        end
+     rescue => e
+       return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
+     end
+
+     def send_msg_by_stream(&proc)
+       Retriable.retriable(RETRY_OPTIONS) do
+         raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+         @current_llm.send_request(@messages, @model_name, @temperature, proc)
+         @messages=[]
+         @messages << { role: 'system', content: @sys_msg }
+       end
      rescue => e
        return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
      end
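
Taken together, the renames give Conversation three request paths: send_msg_once (single attempt), send_msg (wrapped in Retriable), and send_msg_by_stream (retried, handing each chunk to a block). A usage sketch, assuming an engine, adapter selection, and prompt template that are configured outside this diff; the block body reflects the ruby-openai chunk shape and is illustrative:

    article_text = "A long article to summarize..."
    conversation = SmartPrompt::Conversation.new(engine)  # engine construction not shown in this diff
    # ... an LLM and system message are assumed to have been selected here ...
    conversation.prompt(:summarize, text: article_text)   # :summarize template assumed to exist
    conversation.temperature(0.2)                         # new setter added in this release

    reply = conversation.send_msg                         # retried; returns the full completion
    conversation.send_msg_by_stream do |chunk, _bytesize| # chunk format depends on the adapter's client
      print chunk.dig("choices", 0, "delta", "content")
    end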
data/lib/smart_prompt/db_adapter.rb ADDED
@@ -0,0 +1,45 @@
+ require "sequel"
+ require "json"
+
+ module SmartPrompt
+   class DBAdapter
+     attr_reader :db, :tables
+     def initialize(config)
+       db_uri = config[:database]
+       @db = Sequel.connect(db_uri)
+       @tables = {}
+       @db.tables.each do |table_name|
+         define_table(table_name)
+       end
+     end
+
+     def define_table(table_name, class_name=table_name.to_s.capitalize)
+       class_define = <<-EOT
+         class #{class_name} < Sequel::Model(:#{table_name})
+         end
+       EOT
+       eval(class_define)
+       @tables[table_name] = eval(class_name)
+     end
+
+     def get_table_schema(table_name)
+       @tables[table_name].db_schema
+     end
+
+     def get_table_schema_str(table_name)
+       JSON.pretty_generate(get_table_schema(table_name))
+     end
+
+     def get_db_schema
+       schema = {}
+       @db.tables.each do |table_name|
+         schema[table_name] = get_table_schema(table_name)
+       end
+       schema
+     end
+
+     def get_db_schema_str
+       JSON.pretty_generate(get_db_schema)
+     end
+   end
+ end
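
DBAdapter is a thin wrapper over Sequel: it connects with the URI under config[:database], defines a Sequel::Model subclass for each existing table, and can dump column metadata as pretty-printed JSON. A minimal sketch, assuming an existing SQLite database file (the sqlite3 gem is needed for this URI) and an explicit require in case the gem entry point does not load the new file:

    require "smart_prompt/db_adapter"

    adapter = SmartPrompt::DBAdapter.new(database: "sqlite://my_app.db")  # URI is an example only
    puts adapter.get_table_schema_str(adapter.db.tables.first)  # one table's columns as JSON
    puts adapter.get_db_schema_str                              # every table's schema as JSON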
data/lib/smart_prompt/engine.rb CHANGED
@@ -10,11 +10,18 @@ module SmartPrompt
        SmartPrompt.logger.info "Started create the SmartPrompt engine."
      end

+     def create_dir(filename)
+       path = File::path(filename).to_s
+       parent_dir = File::dirname(path)
+       Dir.mkdir(parent_dir, 0755) unless File.directory?(parent_dir)
+     end
+
      def load_config(config_file)
        begin
          @config_file = config_file
          @config = YAML.load_file(config_file)
          if @config['logger_file']
+           create_dir(@config['logger_file'])
            SmartPrompt.logger = Logger.new(@config['logger_file'])
          end
          SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
@@ -60,11 +67,10 @@ module SmartPrompt
          return false
        end
      end
-
+
      def call_worker(worker_name, params = {})
        SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
-       worker = get_worker(worker_name)
-
+       worker = get_worker(worker_name)
        begin
          result = worker.execute(params)
          SmartPrompt.logger.info "Worker #{worker_name} executed successfully"
@@ -75,12 +81,23 @@ module SmartPrompt
          raise
        end
      end
-
-     private
-
+
+     def call_worker_by_stream(worker_name, params = {}, &proc)
+       SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
+       worker = get_worker(worker_name)
+       begin
+         worker.execute_by_stream(params, &proc)
+         SmartPrompt.logger.info "Worker #{worker_name} executed(stream) successfully"
+       rescue => e
+         SmartPrompt.logger.error "Error executing worker #{worker_name}: #{e.message}"
+         SmartPrompt.logger.debug e.backtrace.join("\n")
+         raise
+       end
+     end
+     private
      def get_worker(worker_name)
        SmartPrompt.logger.info "Creating worker instance for: #{worker_name}"
        Worker.new(worker_name, self)
      end
    end
-   end
+   end
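
call_worker_by_stream mirrors call_worker but threads the caller's block through Worker#execute_by_stream down to the adapter. A hedged usage sketch, assuming a worker named :chat is registered and the engine is backed by an OpenAI-compatible adapter (with ruby-openai the block receives each parsed chunk plus its byte size):

    engine.call_worker_by_stream(:chat, question: "What is Ruby?") do |chunk, _bytesize|
      print chunk.dig("choices", 0, "delta", "content")  # incremental text in the ruby-openai chunk shape
    end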
data/lib/smart_prompt/openai_adapter.rb CHANGED
@@ -13,7 +13,7 @@ module SmartPrompt
          access_token: api_key,
          uri_base: @config['url'],
          request_timeout: 240
-       )
+       )
      rescue OpenAI::ConfigurationError => e
        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
        raise LLMAPIError, "Invalid OpenAI configuration: #{e.message}"
@@ -31,22 +31,33 @@ module SmartPrompt
        end
      end

-     def send_request(messages, model=nil)
+     def send_request(messages, model=nil, temperature=0.7, proc)
        SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
        if model
          model_name = model
        else
-         model_name = @config['model']
+         model_name = @config['model']
        end
        SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
        begin
-         response = @client.chat(
-           parameters: {
-             model: model_name,
-             messages: messages,
-             temperature: @config['temperature'] || 0.7
-           }
-         )
+         if proc == nil
+           response = @client.chat(
+             parameters: {
+               model: model_name,
+               messages: messages,
+               temperature: @config['temperature'] || temperature
+             }
+           )
+         else
+           @client.chat(
+             parameters: {
+               model: model_name,
+               messages: messages,
+               temperature: @config['temperature'] || temperature,
+               stream: proc
+             }
+           )
+         end
        rescue OpenAI::Error => e
          SmartPrompt.logger.error "OpenAI API error: #{e.message}"
          raise LLMAPIError, "OpenAI API error: #{e.message}"
@@ -63,7 +74,9 @@ module SmartPrompt
          SmartPrompt.logger.info "Successful send a message"
        end
        SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
-       response.dig("choices", 0, "message", "content")
+       if proc == nil
+         return response.dig("choices", 0, "message", "content")
+       end
      end

      def embeddings(text, model)
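
With a proc supplied, the ruby-openai client is called with stream:, so it invokes the proc once per parsed server-sent chunk instead of returning a single response hash; send_request then returns nil in that mode, so callers accumulate text inside the proc. A sketch of that contract, where the adapter, messages array, and model name are assumptions:

    buffer = +""   # mutable String to collect the streamed text
    collector = proc do |chunk, _bytesize|
      piece = chunk.dig("choices", 0, "delta", "content")
      buffer << piece if piece
    end

    adapter.send_request(messages, "gpt-4o-mini", 0.3, collector)  # model name is an example
    puts buffer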
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
  module SmartPrompt
-   VERSION = "0.1.8"
+   VERSION = "0.2.1"
  end
data/lib/smart_prompt/worker.rb CHANGED
@@ -16,6 +16,12 @@ module SmartPrompt
        context.instance_eval(&@code)
      end

+     def execute_by_stream(params = {}, &proc)
+       conversation = Conversation.new(@engine)
+       context = WorkerContext.new(conversation, params, @engine, proc)
+       context.instance_eval(&@code)
+     end
+
      class << self
        def workers
          @workers ||= {}
@@ -28,15 +34,24 @@ module SmartPrompt
    end

    class WorkerContext
-     def initialize(conversation, params, engine)
+     def initialize(conversation, params, engine, proc=nil)
        @conversation = conversation
        @params = params
        @engine = engine
+       @proc = proc
      end

      def method_missing(method, *args, &block)
        if @conversation.respond_to?(method)
-         @conversation.send(method, *args, &block)
+         if method==:send_msg
+           if @proc==nil
+             @conversation.send_msg
+           else
+             @conversation.send_msg_by_stream(&@proc)
+           end
+         else
+           @conversation.send(method, *args, &block)
+         end
        else
          super
        end
@@ -50,9 +65,18 @@ module SmartPrompt
        @params
      end

+     def proc
+       @proc
+     end
+
      def call_worker(worker_name, params = {})
        worker = Worker.new(worker_name, @engine)
        worker.execute(params)
      end
+
+     def call_worker_by_stream(worker_name, params = {}, proc)
+       worker = Worker.new(worker_name, @engine)
+       worker.execute_by_stream(params, proc)
+     end
    end
  end
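
The net effect is that existing worker code does not change for streaming: a bare send_msg inside a worker block goes through WorkerContext#method_missing, which reroutes it to send_msg_by_stream whenever a proc was supplied to the context. A sketch exercising that routing directly; the engine, prior conversation setup, and the module-level WorkerContext namespace are assumptions based on this diff:

    conversation = SmartPrompt::Conversation.new(engine)
    printer = proc { |chunk, _bytesize| print chunk.dig("choices", 0, "delta", "content") }

    plain_ctx  = SmartPrompt::WorkerContext.new(conversation, {}, engine)           # @proc is nil
    stream_ctx = SmartPrompt::WorkerContext.new(conversation, {}, engine, printer)  # @proc is set

    plain_ctx.send_msg    # method_missing -> Conversation#send_msg (blocking, retried)
    stream_ctx.send_msg   # method_missing -> Conversation#send_msg_by_stream(&printer)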
data/lib/smart_prompt.rb CHANGED
@@ -5,7 +5,6 @@ require File.expand_path('../smart_prompt/conversation', __FILE__)
  require File.expand_path('../smart_prompt/llm_adapter', __FILE__)
  require File.expand_path('../smart_prompt/openai_adapter', __FILE__)
  require File.expand_path('../smart_prompt/llamacpp_adapter', __FILE__)
- require File.expand_path('../smart_prompt/ollama_adapter', __FILE__)
  require File.expand_path('../smart_prompt/prompt_template', __FILE__)
  require File.expand_path('../smart_prompt/worker', __FILE__)

metadata CHANGED
@@ -1,14 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: smart_prompt
  version: !ruby/object:Gem::Version
-   version: 0.1.8
+   version: 0.2.1
  platform: ruby
  authors:
  - zhuang biaowei
- autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-11-28 00:00:00.000000000 Z
+ date: 2025-03-10 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: yaml
@@ -39,19 +38,19 @@ dependencies:
      - !ruby/object:Gem::Version
        version: 7.1.0
  - !ruby/object:Gem::Dependency
-   name: ollama-ai
+   name: ollama-ruby
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 1.3.0
+         version: 0.13.0
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 1.3.0
+         version: 0.13.0
  - !ruby/object:Gem::Dependency
    name: json
    requirement: !ruby/object:Gem::Requirement
@@ -107,11 +106,12 @@ files:
  - README.md
  - Rakefile
  - lib/smart_prompt.rb
+ - lib/smart_prompt/api_handler.rb
  - lib/smart_prompt/conversation.rb
+ - lib/smart_prompt/db_adapter.rb
  - lib/smart_prompt/engine.rb
  - lib/smart_prompt/llamacpp_adapter.rb
  - lib/smart_prompt/llm_adapter.rb
- - lib/smart_prompt/ollama_adapter.rb
  - lib/smart_prompt/openai_adapter.rb
  - lib/smart_prompt/prompt_template.rb
  - lib/smart_prompt/version.rb
@@ -124,7 +124,6 @@ metadata:
    homepage_uri: https://github.com/zhuangbiaowei/smart_prompt
    source_code_uri: https://github.com/zhuangbiaowei/smart_prompt
    changelog_uri: https://github.com/zhuangbiaowei/smart_prompt/blob/master/CHANGELOG.md
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -139,8 +138,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: '0'
  requirements: []
- rubygems_version: 3.5.23
- signing_key:
+ rubygems_version: 3.6.4
  specification_version: 4
  summary: A smart prompt management and LLM interaction gem
  test_files: []
data/lib/smart_prompt/ollama_adapter.rb DELETED
@@ -1,79 +0,0 @@
- require 'ollama-ai'
-
- module SmartPrompt
-   class OllamaAdapter < LLMAdapter
-     def initialize(config)
-       super
-       begin
-         @client = Ollama.new(credentials: { address: @config['url'] })
-       rescue Ollama::Errors => e
-         SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-         raise LLMAPIError, "Invalid Ollama configuration: #{e.message}"
-       rescue SocketError => e
-         SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-         raise LLMAPIError, "Network error: Unable to connect to Ollama API"
-       rescue => e
-         SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-         raise Error, "Unexpected error initializing Ollama client: #{e.message}"
-       ensure
-         SmartPrompt.logger.info "Successful creation an Ollama client."
-       end
-     end
-
-     def send_request(messages, model=nil)
-       SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
-       if model
-         model_name = model
-       else
-         model_name = @config['model']
-       end
-       SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-       begin
-         response = @client.generate(
-           {
-             model: model_name,
-             prompt: messages.to_s,
-             stream: false
-           }
-         )
-       rescue Ollama::Errors => e
-         SmartPrompt.logger.error "Ollama API error: #{e.message}"
-         raise LLMAPIError, "Ollama API error: #{e.message}"
-       rescue JSON::ParserError => e
-         SmartPrompt.logger.error "Failed to parse Ollama API response"
-         raise LLMAPIError, "Failed to parse Ollama API response"
-       rescue => e
-         SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
-         raise Error, "Unexpected error during Ollama request: #{e.message}"
-       ensure
-         SmartPrompt.logger.info "Successful send a message"
-       end
-       SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
-       return response.dig(0,"response")
-     end
-
-     def embeddings(text, model)
-       SmartPrompt.logger.info "OllamaAdapter: get embeddings from Ollama"
-       if model
-         model_name = model
-       else
-         model_name = @config['model']
-       end
-       SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-       begin
-         response = @client.embeddings(
-           {
-             model: model_name,
-             prompt: text.to_s
-           }
-         )
-       rescue => e
-         SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
-         raise Error, "Unexpected error during Ollama request: #{e.message}"
-       ensure
-         SmartPrompt.logger.info "Successful send a message"
-       end
-       return response.dig(0,"embedding")
-     end
-   end
- end