smart_prompt 0.1.9 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: dce823cedfccc6a2288e6a37b2a91571d4bc784d26440d11503697c43d5558de
-  data.tar.gz: b8f4cbb2104d640f423f727afcb38962a7cfa663887ada0d32ec3a61d2ce513c
+  metadata.gz: 844d04d23e32f35f10feb9da06389b78f81d9f7616316cc04f0e8987e5bd6bae
+  data.tar.gz: 364703fc100dfad5a3180deb89fcb49d5aafe9686bc4bbe0f900c99a8d1fee1d
 SHA512:
-  metadata.gz: 70c6bd8c45f433cbc4a60bd4d3964f694b836178ff09a84873aeaedc62cc9064e5c4d043d45b7f864d95b5f60f3a8eb6571230abb9c45385a8525014417493db
-  data.tar.gz: 43eca2651ae52dab6e8ca1fff67acbc1099e3adc839c8035111148aae45b3bdcaf64f8b1f5daed8e45c4b2962449eef86c19a16d7e69fe59f874af2e4b66b4d1
+  metadata.gz: 503ade825af0875584fe49d31fba389b482ff60ae7d8deb0c657d778bc6a0be0a037e65efc95dd66bf88c52ae08508248c851828e2185ad231c0ebd78886e564
+  data.tar.gz: 07370b6a99cd6f4146c06a3b4de88d0fd0abcfce144b4e4c333cbdd39383fc601ba1f5fed536a4a05601262d643fa12ff57c9d7cb8cbb46389fa46f96644bbc2
data/lib/smart_prompt/conversation.rb CHANGED
@@ -52,7 +52,7 @@ module SmartPrompt
       self
     end
 
-    def send_msg
+    def send_msg_once
       raise "No LLM selected" if @current_llm.nil?
       @last_response = @current_llm.send_request(@messages, @model_name, @temperature)
       @messages=[]
@@ -60,14 +60,25 @@ module SmartPrompt
       @last_response
     end
 
-    def safe_send_msg
+    def send_msg
       Retriable.retriable(RETRY_OPTIONS) do
         raise ConfigurationError, "No LLM selected" if @current_llm.nil?
-        @last_response = @current_llm.send_request(@messages, @model_name)
+        @last_response = @current_llm.send_request(@messages, @model_name, @temperature)
         @messages=[]
         @messages << { role: 'system', content: @sys_msg }
         @last_response
       end
+    rescue => e
+      return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
+    end
+
+    def send_msg_by_stream(&proc)
+      Retriable.retriable(RETRY_OPTIONS) do
+        raise ConfigurationError, "No LLM selected" if @current_llm.nil?
+        @current_llm.send_request(@messages, @model_name, @temperature, proc)
+        @messages=[]
+        @messages << { role: 'system', content: @sys_msg }
+      end
     rescue => e
       return "Failed to call LLM after #{MAX_RETRIES} attempts: #{e.message}"
     end
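
Net effect of the two hunks above: the retry-wrapped safe_send_msg from 0.1.9 becomes the default send_msg, the old single-shot send_msg is renamed send_msg_once, and a new send_msg_by_stream forwards a block down to the adapter as a streaming callback. A minimal sketch of the three calls, assuming an LLM, model, and system message have already been selected on the conversation (that setup is outside this diff) and assuming ruby-openai-style streamed chunks:

  conv = SmartPrompt::Conversation.new(engine)  # construction as used in worker.rb; LLM/model selection not shown

  conv.send_msg_once   # single attempt; raises on failure
  conv.send_msg        # wrapped in Retriable.retriable(RETRY_OPTIONS); returns an error string after MAX_RETRIES
  conv.send_msg_by_stream do |chunk, _bytesize|
    # chunk format is an assumption based on the ruby-openai client, not this diff
    print chunk.dig("choices", 0, "delta", "content").to_s
  end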
data/lib/smart_prompt/engine.rb CHANGED
@@ -10,11 +10,18 @@ module SmartPrompt
       SmartPrompt.logger.info "Started create the SmartPrompt engine."
     end
 
+    def create_dir(filename)
+      path = File::path(filename).to_s
+      parent_dir = File::dirname(path)
+      Dir.mkdir(parent_dir, 0755) unless File.directory?(parent_dir)
+    end
+
     def load_config(config_file)
       begin
         @config_file = config_file
         @config = YAML.load_file(config_file)
         if @config['logger_file']
+          create_dir(@config['logger_file'])
           SmartPrompt.logger = Logger.new(@config['logger_file'])
         end
         SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
@@ -60,11 +67,10 @@ module SmartPrompt
         return false
       end
     end
-
+
     def call_worker(worker_name, params = {})
       SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
-      worker = get_worker(worker_name)
-
+      worker = get_worker(worker_name)
       begin
         result = worker.execute(params)
         SmartPrompt.logger.info "Worker #{worker_name} executed successfully"
@@ -75,12 +81,23 @@ module SmartPrompt
         raise
       end
     end
-
-    private
-
+
+    def call_worker_by_stream(worker_name, params = {}, &proc)
+      SmartPrompt.logger.info "Calling worker: #{worker_name} with params: #{params}"
+      worker = get_worker(worker_name)
+      begin
+        worker.execute_by_stream(params, &proc)
+        SmartPrompt.logger.info "Worker #{worker_name} executed(stream) successfully"
+      rescue => e
+        SmartPrompt.logger.error "Error executing worker #{worker_name}: #{e.message}"
+        SmartPrompt.logger.debug e.backtrace.join("\n")
+        raise
+      end
+    end
+    private
     def get_worker(worker_name)
       SmartPrompt.logger.info "Creating worker instance for: #{worker_name}"
       Worker.new(worker_name, self)
     end
   end
-end
+end
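
The engine picks up a matching streaming entry point, call_worker_by_stream, alongside the existing call_worker. A rough usage sketch, assuming the class in this file is SmartPrompt::Engine and that configuration is loaded via load_config; the constructor arguments, worker name, and config path below are placeholders, not taken from this diff:

  engine = SmartPrompt::Engine.new             # constructor arguments are not shown in this diff
  engine.load_config("config/llm_config.yml")  # hypothetical path; also creates the logger_file directory via create_dir

  engine.call_worker(:summary, text: "...")    # buffered result, as in 0.1.9

  engine.call_worker_by_stream(:summary, text: "...") do |chunk, _bytesize|
    print chunk.dig("choices", 0, "delta", "content").to_s
  end

Note that create_dir relies on Dir.mkdir, so it creates only the immediate parent directory of logger_file; a deeper missing path would still raise Errno::ENOENT.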
data/lib/smart_prompt/openai_adapter.rb CHANGED
@@ -13,7 +13,7 @@ module SmartPrompt
         access_token: api_key,
         uri_base: @config['url'],
         request_timeout: 240
-      )
+      )
     rescue OpenAI::ConfigurationError => e
       SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
       raise LLMAPIError, "Invalid OpenAI configuration: #{e.message}"
@@ -31,22 +31,33 @@ module SmartPrompt
       end
     end
 
-    def send_request(messages, model=nil, send_request=0.7)
+    def send_request(messages, model=nil, temperature=0.7, proc)
      SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
      if model
        model_name = model
      else
-        model_name = @config['model']
+        model_name = @config['model']
      end
      SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
      begin
-        response = @client.chat(
-          parameters: {
-            model: model_name,
-            messages: messages,
-            temperature: @config['temperature'] || send_request
-          }
-        )
+        if proc == nil
+          response = @client.chat(
+            parameters: {
+              model: model_name,
+              messages: messages,
+              temperature: @config['temperature'] || temperature
+            }
+          )
+        else
+          @client.chat(
+            parameters: {
+              model: model_name,
+              messages: messages,
+              temperature: @config['temperature'] || temperature,
+              stream: proc
+            }
+          )
+        end
      rescue OpenAI::Error => e
        SmartPrompt.logger.error "OpenAI API error: #{e.message}"
        raise LLMAPIError, "OpenAI API error: #{e.message}"
@@ -63,7 +74,9 @@ module SmartPrompt
        SmartPrompt.logger.info "Successful send a message"
      end
      SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
-      response.dig("choices", 0, "message", "content")
+      if proc == nil
+        return response.dig("choices", 0, "message", "content")
+      end
    end
 
    def embeddings(text, model)
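
The 0.1.9 signature misnamed the temperature parameter send_request; 0.2.1 renames it and adds a trailing proc. When the proc is nil the call behaves as before and returns the assistant message content; otherwise the proc is handed to the ruby-openai client as its stream: option and nothing is returned. A sketch of a callback that fits this path, with the chunk format assumed from the ruby-openai client rather than taken from this diff, and placeholder adapter/model names:

  on_chunk = proc do |chunk, _bytesize|
    piece = chunk.dig("choices", 0, "delta", "content")
    print piece if piece
  end

  text = adapter.send_request(messages, "gpt-4o-mini", 0.7, nil)  # blocking; returns the content string
  adapter.send_request(messages, "gpt-4o-mini", 0.7, on_chunk)    # streaming; chunks arrive in on_chunk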
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
 module SmartPrompt
-  VERSION = "0.1.9"
+  VERSION = "0.2.1"
 end
data/lib/smart_prompt/worker.rb CHANGED
@@ -16,6 +16,12 @@ module SmartPrompt
       context.instance_eval(&@code)
     end
 
+    def execute_by_stream(params = {}, &proc)
+      conversation = Conversation.new(@engine)
+      context = WorkerContext.new(conversation, params, @engine, proc)
+      context.instance_eval(&@code)
+    end
+
     class << self
       def workers
         @workers ||= {}
@@ -28,15 +34,24 @@ module SmartPrompt
     end
 
   class WorkerContext
-    def initialize(conversation, params, engine)
+    def initialize(conversation, params, engine, proc=nil)
       @conversation = conversation
       @params = params
       @engine = engine
+      @proc = proc
     end
 
     def method_missing(method, *args, &block)
       if @conversation.respond_to?(method)
-        @conversation.send(method, *args, &block)
+        if method==:send_msg
+          if @proc==nil
+            @conversation.send_msg
+          else
+            @conversation.send_msg_by_stream(&@proc)
+          end
+        else
+          @conversation.send(method, *args, &block)
+        end
       else
         super
       end
@@ -50,9 +65,18 @@ module SmartPrompt
       @params
     end
 
+    def proc
+      @proc
+    end
+
     def call_worker(worker_name, params = {})
       worker = Worker.new(worker_name, @engine)
       worker.execute(params)
     end
+
+    def call_worker_by_stream(worker_name, params = {}, proc)
+      worker = Worker.new(worker_name, @engine)
+      worker.execute_by_stream(params, proc)
+    end
   end
 end
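
The WorkerContext changes mean a worker body keeps calling send_msg and only the invocation side decides whether to stream: method_missing reroutes send_msg to send_msg_by_stream whenever a stream proc was supplied through execute_by_stream. A hedged sketch of that split, assuming a worker-definition DSL (define_worker, use, sys_msg, prompt) that is not part of this diff:

  SmartPrompt.define_worker :chat do     # DSL name assumed, not shown in this diff
    use "openai"
    sys_msg "You are a helpful assistant."
    prompt params[:question]
    send_msg                             # rerouted to send_msg_by_stream when a stream block was given
  end

  engine.call_worker(:chat, question: "hi")    # buffered
  engine.call_worker_by_stream(:chat, question: "hi") do |chunk, _bytesize|
    print chunk.dig("choices", 0, "delta", "content").to_s
  end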
data/lib/smart_prompt.rb CHANGED
@@ -5,7 +5,6 @@ require File.expand_path('../smart_prompt/conversation', __FILE__)
 require File.expand_path('../smart_prompt/llm_adapter', __FILE__)
 require File.expand_path('../smart_prompt/openai_adapter', __FILE__)
 require File.expand_path('../smart_prompt/llamacpp_adapter', __FILE__)
-require File.expand_path('../smart_prompt/ollama_adapter', __FILE__)
 require File.expand_path('../smart_prompt/prompt_template', __FILE__)
 require File.expand_path('../smart_prompt/worker', __FILE__)
 
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: smart_prompt
 version: !ruby/object:Gem::Version
-  version: 0.1.9
+  version: 0.2.1
 platform: ruby
 authors:
 - zhuang biaowei
 bindir: exe
 cert_chain: []
-date: 2025-01-14 00:00:00.000000000 Z
+date: 2025-03-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: yaml
@@ -112,7 +112,6 @@ files:
 - lib/smart_prompt/engine.rb
 - lib/smart_prompt/llamacpp_adapter.rb
 - lib/smart_prompt/llm_adapter.rb
-- lib/smart_prompt/ollama_adapter.rb
 - lib/smart_prompt/openai_adapter.rb
 - lib/smart_prompt/prompt_template.rb
 - lib/smart_prompt/version.rb
@@ -139,7 +138,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.6.2
+rubygems_version: 3.6.4
 specification_version: 4
 summary: A smart prompt management and LLM interaction gem
 test_files: []
data/lib/smart_prompt/ollama_adapter.rb DELETED
@@ -1,79 +0,0 @@
-require 'ollama-ai'
-
-module SmartPrompt
-  class OllamaAdapter < LLMAdapter
-    def initialize(config)
-      super
-      begin
-        @client = Ollama.new(credentials: { address: @config['url'] })
-      rescue Ollama::Errors => e
-        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-        raise LLMAPIError, "Invalid Ollama configuration: #{e.message}"
-      rescue SocketError => e
-        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-        raise LLMAPIError, "Network error: Unable to connect to Ollama API"
-      rescue => e
-        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
-        raise Error, "Unexpected error initializing Ollama client: #{e.message}"
-      ensure
-        SmartPrompt.logger.info "Successful creation an Ollama client."
-      end
-    end
-
-    def send_request(messages, model=nil)
-      SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
-      if model
-        model_name = model
-      else
-        model_name = @config['model']
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-      begin
-        response = @client.generate(
-          {
-            model: model_name,
-            prompt: messages.to_s,
-            stream: false
-          }
-        )
-      rescue Ollama::Errors => e
-        SmartPrompt.logger.error "Ollama API error: #{e.message}"
-        raise LLMAPIError, "Ollama API error: #{e.message}"
-      rescue JSON::ParserError => e
-        SmartPrompt.logger.error "Failed to parse Ollama API response"
-        raise LLMAPIError, "Failed to parse Ollama API response"
-      rescue => e
-        SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
-        raise Error, "Unexpected error during Ollama request: #{e.message}"
-      ensure
-        SmartPrompt.logger.info "Successful send a message"
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
-      return response.dig(0,"response")
-    end
-
-    def embeddings(text, model)
-      SmartPrompt.logger.info "OllamaAdapter: get embeddings from Ollama"
-      if model
-        model_name = model
-      else
-        model_name = @config['model']
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-      begin
-        response = @client.embeddings(
-          {
-            model: model_name,
-            prompt: text.to_s
-          }
-        )
-      rescue => e
-        SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
-        raise Error, "Unexpected error during Ollama request: #{e.message}"
-      ensure
-        SmartPrompt.logger.info "Successful send a message"
-      end
-      return response.dig(0,"embedding")
-    end
-  end
-end