smart_prompt 0.1.9 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: dce823cedfccc6a2288e6a37b2a91571d4bc784d26440d11503697c43d5558de
-  data.tar.gz: b8f4cbb2104d640f423f727afcb38962a7cfa663887ada0d32ec3a61d2ce513c
+  metadata.gz: 671745a347fa5443d42ce018a0c00870f5c1c13771060d1facea3ecf6cff41d6
+  data.tar.gz: 5a2f1177a1ed5de38daee6422586841c8358c869cd3e9fc71d4b67078a5438e1
 SHA512:
-  metadata.gz: 70c6bd8c45f433cbc4a60bd4d3964f694b836178ff09a84873aeaedc62cc9064e5c4d043d45b7f864d95b5f60f3a8eb6571230abb9c45385a8525014417493db
-  data.tar.gz: 43eca2651ae52dab6e8ca1fff67acbc1099e3adc839c8035111148aae45b3bdcaf64f8b1f5daed8e45c4b2962449eef86c19a16d7e69fe59f874af2e4b66b4d1
+  metadata.gz: 9fed26cf6880bf7b9bb2e71c2e4013c66a25b9b542bc88e5727025eef57731d5024dc3a01d548ed3d76f404646835666540dc7ea056477f8abd9cc2b28bddeeb
+  data.tar.gz: 1ac2f3ed8fc1b664a623888193d82fe114ef8a8a8f43b6dd8600875132d617af624ccca6d0c6f10814759ae881e51c2351ba6f4bba3b09f89b426381da470fb7
data/lib/smart_prompt/conversation.rb CHANGED
@@ -1,5 +1,5 @@
-require 'yaml'
-require 'retriable'
+require "yaml"
+require "retriable"
 require "numo/narray"

 module SmartPrompt
@@ -7,7 +7,7 @@ module SmartPrompt
     include APIHandler
     attr_reader :messages, :last_response, :config_file

-    def initialize(engine)
+    def initialize(engine, tools = nil)
       SmartPrompt.logger.info "Create Conversation"
       @messages = []
       @engine = engine
@@ -16,6 +16,7 @@ module SmartPrompt
       @templates = engine.templates
       @current_adapter = engine.current_adapter
       @last_response = nil
+      @tools = tools
     end

     def use(llm_name)
@@ -38,43 +39,57 @@ module SmartPrompt
         SmartPrompt.logger.info "Use template #{template_name}"
         raise "Template #{template_name} not found" unless @templates.key?(template_name)
         content = @templates[template_name].render(params)
-        @messages << { role: 'user', content: content }
+        @messages << { role: "user", content: content }
         self
       else
-        @messages << { role: 'user', content: template_name }
+        @messages << { role: "user", content: template_name }
         self
       end
     end

     def sys_msg(message)
       @sys_msg = message
-      @messages << { role: 'system', content: message }
+      @messages << { role: "system", content: message }
       self
     end

-    def send_msg
+    def send_msg_once
       raise "No LLM selected" if @current_llm.nil?
       @last_response = @current_llm.send_request(@messages, @model_name, @temperature)
-      @messages=[]
-      @messages << { role: 'system', content: @sys_msg }
+      @messages = []
+      @messages << { role: "system", content: @sys_msg }
       @last_response
     end

-    def safe_send_msg
+    def send_msg
       Retriable.retriable(RETRY_OPTIONS) do
         raise ConfigurationError, "No LLM selected" if @current_llm.nil?
-        @last_response = @current_llm.send_request(@messages, @model_name)
-        @messages=[]
-        @messages << { role: 'system', content: @sys_msg }
+        @last_response = @current_llm.send_request(@messages, @model_name, @temperature, @tools, nil)
+        if @last_response == ""
+          @last_response = @current_llm.last_response
+        end
+        @messages = []
+        @messages << { role: "system", content: @sys_msg }
         @last_response
       end
     rescue => e
       return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
     end

+    def send_msg_by_stream(&proc)
+      Retriable.retriable(RETRY_OPTIONS) do
+        raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+        @current_llm.send_request(@messages, @model_name, @temperature, @tools, proc)
+        @messages = []
+        @messages << { role: "system", content: @sys_msg }
+      end
+    rescue => e
+      return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
+    end
+
     def normalize(x, length)
       if x.length > length
-        x = Numo::NArray.cast(x[0..length-1])
+        x = Numo::NArray.cast(x[0..length - 1])
         norm = Math.sqrt((x * x).sum)
         return (x / norm).to_a
       else
@@ -87,15 +102,15 @@ module SmartPrompt
       raise ConfigurationError, "No LLM selected" if @current_llm.nil?
       text = ""
       @messages.each do |msg|
-        if msg[:role]=="user"
+        if msg[:role] == "user"
           text = msg[:content]
         end
       end
       @last_response = @current_llm.embeddings(text, @model_name)
-      @messages=[]
-      @messages << { role: 'system', content: @sys_msg }
+      @messages = []
+      @messages << { role: "system", content: @sys_msg }
       normalize(@last_response, length)
     end
   end
 end
-end
+end
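Taken together, the Conversation changes rename the old non-retrying send_msg to send_msg_once, promote the retrying safe_send_msg to send_msg, accept an optional tools array in the constructor, and add send_msg_by_stream, which hands a block to the adapter as a streaming callback. A rough usage sketch follows, assuming an Engine built from a config file and the conversation's prompt helper for user messages; the config path, adapter name, and chunk format (ruby-openai's delta convention) are illustrative, not taken from the gem's docs.

  require "smart_prompt"

  # Placeholders: the config path and adapter name come from your own llm_config.yml.
  engine = SmartPrompt::Engine.new("config/llm_config.yml")
  conversation = SmartPrompt::Conversation.new(engine)

  conversation.use("openai")
  conversation.sys_msg("You are a terse assistant.")
  conversation.prompt("Explain Retriable in one sentence.")
  puts conversation.send_msg            # retrying call, formerly safe_send_msg

  # Streaming: the block becomes the adapter's stream proc, so chunks arrive
  # in ruby-openai's delta format.
  conversation.prompt("Now explain Numo::NArray in one sentence.")
  conversation.send_msg_by_stream do |chunk, _bytesize|
    print chunk.dig("choices", 0, "delta", "content")
  end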
data/lib/smart_prompt/engine.rb CHANGED
@@ -10,11 +10,18 @@ module SmartPrompt
       SmartPrompt.logger.info "Started create the SmartPrompt engine."
     end

+    def create_dir(filename)
+      path = File::path(filename).to_s
+      parent_dir = File::dirname(path)
+      Dir.mkdir(parent_dir, 0755) unless File.directory?(parent_dir)
+    end
+
     def load_config(config_file)
       begin
         @config_file = config_file
         @config = YAML.load_file(config_file)
         if @config['logger_file']
+          create_dir(@config['logger_file'])
           SmartPrompt.logger = Logger.new(@config['logger_file'])
         end
         SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
@@ -60,11 +67,10 @@ module SmartPrompt
         return false
       end
     end
-
+
     def call_worker(worker_name, params = {})
       SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
-      worker = get_worker(worker_name)
-
+      worker = get_worker(worker_name)
       begin
         result = worker.execute(params)
         SmartPrompt.logger.info "Worker #{worker_name} executed successfully"
@@ -75,12 +81,23 @@ module SmartPrompt
         raise
       end
     end
-
-    private
-
+
+    def call_worker_by_stream(worker_name, params = {}, &proc)
+      SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
+      worker = get_worker(worker_name)
+      begin
+        worker.execute_by_stream(params, &proc)
+        SmartPrompt.logger.info "Worker #{worker_name} executed(stream) successfully"
+      rescue => e
+        SmartPrompt.logger.error "Error executing worker #{worker_name}: #{e.message}"
+        SmartPrompt.logger.debug e.backtrace.join("\n")
+        raise
+      end
+    end
+    private
     def get_worker(worker_name)
       SmartPrompt.logger.info "Creating worker instance for: #{worker_name}"
       Worker.new(worker_name, self)
     end
   end
-end
+end
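Two additions here: create_dir ensures the parent directory of logger_file exists before the logger is opened (it uses Dir.mkdir, so only the immediate parent is created, not a whole missing path), and call_worker_by_stream mirrors call_worker while threading a block down to Worker#execute_by_stream. A hedged sketch of the call sites, with the worker name, params, and config path as placeholders:

  engine = SmartPrompt::Engine.new("config/llm_config.yml")

  # Blocking, as before:
  result = engine.call_worker(:summary, text: "long document ...")

  # Streaming: the block ends up as the adapter's stream callback.
  engine.call_worker_by_stream(:summary, text: "long document ...") do |chunk, _bytesize|
    print chunk.dig("choices", 0, "delta", "content")
  end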
data/lib/smart_prompt/llm_adapter.rb CHANGED
@@ -4,6 +4,7 @@ require 'uri'

 module SmartPrompt
   class LLMAdapter
+    attr_accessor :last_response
     def initialize(config)
       SmartPrompt.logger.info "Start create the SmartPrompt LLMAdapter."
       @config = config
data/lib/smart_prompt/openai_adapter.rb CHANGED
@@ -13,7 +13,7 @@ module SmartPrompt
         access_token: api_key,
         uri_base: @config['url'],
         request_timeout: 240
-      )
+      )
     rescue OpenAI::ConfigurationError => e
       SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
       raise LLMAPIError, "Invalid OpenAI configuration: #{e.message}"
@@ -31,22 +31,27 @@ module SmartPrompt
       end
     end

-    def send_request(messages, model=nil, send_request=0.7)
+    def send_request(messages, model=nil, temperature=0.7, tools=nil, proc=nil)
       SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
       if model
         model_name = model
       else
-        model_name = @config['model']
+        model_name = @config['model']
       end
       SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
       begin
-        response = @client.chat(
-          parameters: {
-            model: model_name,
-            messages: messages,
-            temperature: @config['temperature'] || send_request
-          }
-        )
+        parameters = {
+          model: model_name,
+          messages: messages,
+          temperature: @config['temperature'] || temperature
+        }
+        if proc
+          parameters[:stream]=proc
+        end
+        if tools
+          parameters[:tools]=tools
+        end
+        response = @client.chat(parameters: parameters)
       rescue OpenAI::Error => e
         SmartPrompt.logger.error "OpenAI API error: #{e.message}"
         raise LLMAPIError, "OpenAI API error: #{e.message}"
@@ -63,7 +68,10 @@ module SmartPrompt
         SmartPrompt.logger.info "Successful send a message"
       end
       SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
-      response.dig("choices", 0, "message", "content")
+      if proc == nil
+        @last_response = response
+        return response.dig("choices", 0, "message", "content")
+      end
     end

     def embeddings(text, model)
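The send_request signature fix matters: the old third parameter was accidentally named send_request and is now temperature, and the optional tools and proc arguments are forwarded to ruby-openai's @client.chat as :tools and :stream. When no stream proc is given, the adapter caches the full response in @last_response (exposed via the new attr_accessor on LLMAdapter), which lets Conversation#send_msg fall back to the raw response when the extracted content comes back as an empty string, as can happen with tool calls. A rough sketch of the parameters hash it builds; the model name and tool definition are illustrative only:

  parameters = {
    model: "gpt-4o-mini",                                        # placeholder model
    messages: [{ role: "user", content: "Weather in Paris?" }],
    temperature: 0.7
  }
  # Added only when a tools array was passed down from the Conversation:
  parameters[:tools] = [{
    type: "function",
    function: {
      name: "get_weather",                                        # hypothetical tool
      description: "Look up current weather for a city",
      parameters: { type: "object", properties: { city: { type: "string" } }, required: ["city"] }
    }
  }]
  # Added only when streaming: parameters[:stream] = proc { |chunk, _bytesize| ... }
  # The hash is then sent as @client.chat(parameters: parameters).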
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
 module SmartPrompt
-  VERSION = "0.1.9"
+  VERSION = "0.2.2"
 end
data/lib/smart_prompt/worker.rb CHANGED
@@ -11,11 +11,17 @@ module SmartPrompt
     end

     def execute(params = {})
-      conversation = Conversation.new(@engine)
+      conversation = Conversation.new(@engine, params[:tools])
       context = WorkerContext.new(conversation, params, @engine)
       context.instance_eval(&@code)
     end

+    def execute_by_stream(params = {}, &proc)
+      conversation = Conversation.new(@engine, params[:tools])
+      context = WorkerContext.new(conversation, params, @engine, proc)
+      context.instance_eval(&@code)
+    end
+
     class << self
       def workers
         @workers ||= {}
@@ -28,15 +34,24 @@ module SmartPrompt
     end
   end
   class WorkerContext
-    def initialize(conversation, params, engine)
+    def initialize(conversation, params, engine, proc=nil)
       @conversation = conversation
       @params = params
       @engine = engine
+      @proc = proc
     end

     def method_missing(method, *args, &block)
       if @conversation.respond_to?(method)
-        @conversation.send(method, *args, &block)
+        if method==:send_msg
+          if @proc==nil
+            @conversation.send_msg
+          else
+            @conversation.send_msg_by_stream(&@proc)
+          end
+        else
+          @conversation.send(method, *args, &block)
+        end
       else
         super
       end
@@ -50,9 +65,18 @@ module SmartPrompt
       @params
     end

+    def proc
+      @proc
+    end
+
     def call_worker(worker_name, params = {})
       worker = Worker.new(worker_name, @engine)
       worker.execute(params)
     end
+
+    def call_worker_by_stream(worker_name, params = {}, proc)
+      worker = Worker.new(worker_name, @engine)
+      worker.execute_by_stream(params, proc)
+    end
   end
 end
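The WorkerContext change is what makes streaming transparent to existing workers: method_missing intercepts :send_msg and reroutes it to send_msg_by_stream whenever a proc was supplied, while Worker#execute now passes params[:tools] through to the Conversation. A sketch of a worker definition that works in both modes, assuming the gem's SmartPrompt.define_worker DSL (the worker name and adapter are placeholders):

  SmartPrompt.define_worker :summary do
    use "openai"
    sys_msg "You are a concise summarizer."
    prompt params[:text].to_s
    send_msg   # blocks under call_worker, streams under call_worker_by_stream
  end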
data/lib/smart_prompt.rb CHANGED
@@ -5,7 +5,6 @@ require File.expand_path('../smart_prompt/conversation', __FILE__)
 require File.expand_path('../smart_prompt/llm_adapter', __FILE__)
 require File.expand_path('../smart_prompt/openai_adapter', __FILE__)
 require File.expand_path('../smart_prompt/llamacpp_adapter', __FILE__)
-require File.expand_path('../smart_prompt/ollama_adapter', __FILE__)
 require File.expand_path('../smart_prompt/prompt_template', __FILE__)
 require File.expand_path('../smart_prompt/worker', __FILE__)

metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: smart_prompt
 version: !ruby/object:Gem::Version
-  version: 0.1.9
+  version: 0.2.2
 platform: ruby
 authors:
 - zhuang biaowei
 bindir: exe
 cert_chain: []
-date: 2025-01-14 00:00:00.000000000 Z
+date: 2025-03-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: yaml
@@ -112,7 +112,6 @@ files:
 - lib/smart_prompt/engine.rb
 - lib/smart_prompt/llamacpp_adapter.rb
 - lib/smart_prompt/llm_adapter.rb
-- lib/smart_prompt/ollama_adapter.rb
 - lib/smart_prompt/openai_adapter.rb
 - lib/smart_prompt/prompt_template.rb
 - lib/smart_prompt/version.rb
@@ -139,7 +138,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.6.2
+rubygems_version: 3.6.4
 specification_version: 4
 summary: A smart prompt management and LLM interaction gem
 test_files: []
data/lib/smart_prompt/ollama_adapter.rb DELETED
@@ -1,79 +0,0 @@
-require 'ollama-ai'
-
-module SmartPrompt
-  class OllamaAdapter < LLMAdapter
-    def initialize(config)
-      super
-      begin
-        @client = Ollama.new(credentials: { address: @config['url'] })
-      rescue Ollama::Errors => e
-        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-        raise LLMAPIError, "Invalid Ollama configuration: #{e.message}"
-      rescue SocketError => e
-        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-        raise LLMAPIError, "Network error: Unable to connect to Ollama API"
-      rescue => e
-        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-        raise Error, "Unexpected error initializing Ollama client: #{e.message}"
-      ensure
-        SmartPrompt.logger.info "Successful creation an Ollama client."
-      end
-    end
-
-    def send_request(messages, model=nil)
-      SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
-      if model
-        model_name = model
-      else
-        model_name = @config['model']
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-      begin
-        response = @client.generate(
-          {
-            model: model_name,
-            prompt: messages.to_s,
-            stream: false
-          }
-        )
-      rescue Ollama::Errors => e
-        SmartPrompt.logger.error "Ollama API error: #{e.message}"
-        raise LLMAPIError, "Ollama API error: #{e.message}"
-      rescue JSON::ParserError => e
-        SmartPrompt.logger.error "Failed to parse Ollama API response"
-        raise LLMAPIError, "Failed to parse Ollama API response"
-      rescue => e
-        SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
-        raise Error, "Unexpected error during Ollama request: #{e.message}"
-      ensure
-        SmartPrompt.logger.info "Successful send a message"
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
-      return response.dig(0,"response")
-    end
-
-    def embeddings(text, model)
-      SmartPrompt.logger.info "OllamaAdapter: get embeddings from Ollama"
-      if model
-        model_name = model
-      else
-        model_name = @config['model']
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-      begin
-        response = @client.embeddings(
-          {
-            model: model_name,
-            prompt: text.to_s
-          }
-        )
-      rescue => e
-        SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
-        raise Error, "Unexpected error during Ollama request: #{e.message}"
-      ensure
-        SmartPrompt.logger.info "Successful send a message"
-      end
-      return response.dig(0,"embedding")
-    end
-  end
-end