smart_prompt 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 844c87d47dccd945bedcab69c58377ae144238ac9087b49219041382db539146
-  data.tar.gz: 3e70f88fbe8d26a0a15408e73ae9060b9f711a18e6b44eb561910b90156ca14b
+  metadata.gz: 3bf70c6e843930ff7e1a03c9dd8bfc97b3d097786f84613aab65863e1e43a624
+  data.tar.gz: e9ee9d99378453d90e2ac25b88248f3226f241074c7f80e8bf35104279f05202
 SHA512:
-  metadata.gz: 398854ce070f96f794ae944cc656a10d2f4752268cb35b4f9a857fcd8a722489b3ee112de97e5cdfe1cc1efb75cf92e4cd4a9b36cb338cbd8de6e22f1d6cc7dc
-  data.tar.gz: babac8b7d0479eb376df22b00cd693e6e657293e6b28b654aaa6b2ef4f9fcfdccb9709d6f1dd0b066cb6f22c07eed0173426816121061a80c91fc9afdd62ba06
+  metadata.gz: 02fbfcdb0ae2292994404219eaa2f1218d54de132ceab061ca4466603186cdc9db5826b7e6e1fcfaa8a37f2527f42c3f863154dfd4cfe94b5b7df58969183285
+  data.tar.gz: b0034a25a0fa4bc6c197beee4635528c3b4887f36a4c8981ebb9894b7c5db61a245a0ff823f6ce171d629ddc65a61a960a172d4235d319d7220050225b7a8a48
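The published digests can be checked locally before installing. A minimal sketch using Ruby's standard Digest and RubyGems tar reader; the download path is illustrative, and note that the digests above cover the archive members metadata.gz and data.tar.gz, not the .gem file as a whole:

```
require 'digest'
require 'rubygems/package'

# Illustrative path to a locally downloaded gem archive.
gem_path = './smart_prompt-0.1.7.gem'

# A .gem is a plain tar archive whose members include metadata.gz and
# data.tar.gz, the files the SHA256 values above refer to.
expected = {
  'metadata.gz' => '3bf70c6e843930ff7e1a03c9dd8bfc97b3d097786f84613aab65863e1e43a624',
  'data.tar.gz' => 'e9ee9d99378453d90e2ac25b88248f3226f241074c7f80e8bf35104279f05202'
}

Gem::Package::TarReader.new(File.open(gem_path, 'rb')) do |tar|
  tar.each do |entry|
    next unless expected.key?(entry.full_name)
    actual = Digest::SHA256.hexdigest(entry.read)
    status = actual == expected[entry.full_name] ? 'OK' : 'MISMATCH'
    puts "#{entry.full_name}: #{status}"
  end
end
```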
data/README.cn.md CHANGED
@@ -24,90 +24,4 @@ $ gem install smart_prompt
 
 ## Usage
 
-Here are some basic usage examples:
-
-### Configuration file
-
-```
-adapters:
-  openai: OpenAIAdapter
-  ollama: OllamaAdapter
-llms:
-  siliconflow:
-    adapter: openai
-    url: https://api.siliconflow.cn/v1/
-    api_key: ENV["APIKey"]
-    default_model: Qwen/Qwen2.5-7B-Instruct
-  llamacpp:
-    adapter: openai
-    url: http://localhost:8080/
-  ollama:
-    adapter: ollama
-    url: http://localhost:11434/
-    default_model: qwen2.5
-default_llm: siliconflow
-worker_path: "./workers"
-template_path: "./templates"
-```
-
-### Basic usage
-
-```
-require 'smart_prompt'
-engine = SmartPrompt::Engine.new('./config/llm_config.yml')
-result = engine.call_worker(:daily_report, {location: "Shanghai"})
-puts result
-```
-
-### workers/daily_report.rb
-
-```
-SmartPrompt.define_worker :daily_report do
-  use "ollama"
-  model "gemma2"
-  system "You are a helpful report writer."
-  weather = call_worker(:weather_summary, { location: params[:location], date: "today" })
-  prompt :daily_report, { weather: weather, location: params[:location] }
-  send_msg
-end
-```
-
-### workers/weather_summary.rb
-
-```
-SmartPrompt.define_worker :weather_summary do
-  use "ollama"
-  model "gemma2"
-  sys_msg "You are a helpful weather assistant."
-  prompt :weather, { location: params[:location], date: params[:date] }
-  weather_info = send_msg
-  prompt :summarize, { text: weather_info }
-  send_msg
-end
-```
-
-### templates/daily_report.erb
-
-```
-Please create a brief daily report for <%= location %> based on the following weather information:
-
-<%= weather %>
-
-The report should include:
-1. A summary of the weather
-2. Any notable events or conditions
-3. Recommendations for residents
-```
-### templates/weather.erb
-
-```
-What's the weather like in <%= location %> <%= date %>? Please provide a brief description including temperature and general conditions.
-```
-
-### templates/summarize.erb
-
-```
-Please summarize the following text in one sentence:
-
-<%= text %>
-```
+For more examples, see the [SmartPromptDemo](https://github.com/zhuangbiaowei/SmartPromptDemo) project.
data/README.md CHANGED
@@ -26,90 +26,4 @@ $ gem install smart_prompt
 
 ## Usage
 
-The following are some examples of basic usage:
-
-### llm_config.yml
-
-```
-adapters:
-  openai: OpenAIAdapter
-  ollama: OllamaAdapter
-llms:
-  siliconflow:
-    adapter: openai
-    url: https://api.siliconflow.cn/v1/
-    api_key: ENV["APIKey"]
-    default_model: Qwen/Qwen2.5-7B-Instruct
-  llamacpp:
-    adapter: openai
-    url: http://localhost:8080/
-  ollama:
-    adapter: ollama
-    url: http://localhost:11434/
-    default_model: qwen2.5
-default_llm: siliconflow
-worker_path: "./workers"
-template_path: "./templates"
-```
-
-### Basic usage
-
-```
-require 'smart_prompt'
-engine = SmartPrompt::Engine.new('./config/llm_config.yml')
-result = engine.call_worker(:daily_report, {location: "Shanghai"})
-puts result
-```
-
-### workers/daily_report.rb
-
-```
-SmartPrompt.define_worker :daily_report do
-  use "ollama"
-  model "gemma2"
-  system "You are a helpful report writer."
-  weather = call_worker(:weather_summary, { location: params[:location], date: "today" })
-  prompt :daily_report, { weather: weather, location: params[:location] }
-  send_msg
-end
-```
-
-### workers/weather_summary.rb
-
-```
-SmartPrompt.define_worker :weather_summary do
-  use "ollama"
-  model "gemma2"
-  sys_msg "You are a helpful weather assistant."
-  prompt :weather, { location: params[:location], date: params[:date] }
-  weather_info = send_msg
-  prompt :summarize, { text: weather_info }
-  send_msg
-end
-```
-
-### templates/daily_report.erb
-
-```
-Please create a brief daily report for <%= location %> based on the following weather information:
-
-<%= weather %>
-
-The report should include:
-1. A summary of the weather
-2. Any notable events or conditions
-3. Recommendations for residents
-```
-### templates/weather.erb
-
-```
-What's the weather like in <%= location %> <%= date %>? Please provide a brief description including temperature and general conditions.
-```
-
-### templates/summarize.erb
-
-```
-Please summarize the following text in one sentence:
-
-<%= text %>
-```
+See the [SmartPromptDemo](https://github.com/zhuangbiaowei/SmartPromptDemo) project for more examples.
data/lib/smart_prompt/engine.rb CHANGED
@@ -2,32 +2,48 @@ module SmartPrompt
   class Engine
     attr_reader :config_file, :config, :adapters, :current_adapter, :llms, :templates
     def initialize(config_file)
-      SmartPrompt.logger.info "Start create the SmartPrompt engine."
       @config_file = config_file
       @adapters={}
       @llms={}
       @templates={}
       load_config(config_file)
+      SmartPrompt.logger.info "Started create the SmartPrompt engine."
     end
 
     def load_config(config_file)
-      SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
-      @config_file = config_file
-      @config = YAML.load_file(config_file)
-      @config['adapters'].each do |adapter_name, adapter_class|
-        adapter_class = SmartPrompt.const_get(adapter_class)
-        @adapters[adapter_name] = adapter_class
-      end
-      @config['llms'].each do |llm_name,llm_config|
-        adapter_class = @adapters[llm_config['adapter']]
-        @llms[llm_name]=adapter_class.new(llm_config)
-      end
-      @current_llm = @config['default_llm'] if @config['default_llm']
-      Dir.glob(File.join(@config['template_path'], '*.erb')).each do |file|
-        template_name = file.gsub(@config['template_path']+"/","").gsub("\.erb","")
-        @templates[template_name] = PromptTemplate.new(file)
-      end
-      load_workers
+      begin
+        @config_file = config_file
+        @config = YAML.load_file(config_file)
+        if @config['logger_file']
+          SmartPrompt.logger = Logger.new(@config['logger_file'])
+        end
+        SmartPrompt.logger.info "Loading configuration from file: #{config_file}"
+        @config['adapters'].each do |adapter_name, adapter_class|
+          adapter_class = SmartPrompt.const_get(adapter_class)
+          @adapters[adapter_name] = adapter_class
+        end
+        @config['llms'].each do |llm_name,llm_config|
+          adapter_class = @adapters[llm_config['adapter']]
+          @llms[llm_name]=adapter_class.new(llm_config)
+        end
+        @current_llm = @config['default_llm'] if @config['default_llm']
+        Dir.glob(File.join(@config['template_path'], '*.erb')).each do |file|
+          template_name = file.gsub(@config['template_path']+"/","").gsub("\.erb","")
+          @templates[template_name] = PromptTemplate.new(file)
+        end
+        load_workers
+      rescue Psych::SyntaxError => ex
+        SmartPrompt.logger.error "YAML syntax error in config file: #{ex.message}"
+        raise ConfigurationError, "Invalid YAML syntax in config file: #{ex.message}"
+      rescue Errno::ENOENT => ex
+        SmartPrompt.logger.error "Config file not found: #{ex.message}"
+        raise ConfigurationError, "Config file not found: #{ex.message}"
+      rescue StandardError => ex
+        SmartPrompt.logger.error "Error loading configuration: #{ex.message}"
+        raise ConfigurationError, "Error loading configuration: #{ex.message}"
+      ensure
+        SmartPrompt.logger.info "Configuration loaded successfully"
+      end
     end
 
     def load_workers
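With this change, any failure in load_config is logged and re-raised as SmartPrompt::ConfigurationError (defined in lib/smart_prompt.rb below), so callers can rescue one class instead of raw Psych and IO errors. A minimal caller-side sketch; the config path is illustrative:

```
require 'smart_prompt'

begin
  engine = SmartPrompt::Engine.new('./config/llm_config.yml')
rescue SmartPrompt::ConfigurationError => e
  # Covers a missing file, invalid YAML, and any other load-time failure.
  warn "SmartPrompt configuration problem: #{e.message}"
  exit 1
end
```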
data/lib/smart_prompt/llamacpp_adapter.rb ADDED
@@ -0,0 +1,68 @@
+require 'openai'
+
+module SmartPrompt
+  class LlamacppAdapter < LLMAdapter
+    def initialize(config)
+      super
+      begin
+        @client = OpenAI::Client.new(
+          uri_base: @config['url']
+        )
+      rescue OpenAI::ConfigurationError => e
+        SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+        raise LLMAPIError, "Invalid Llamacpp configuration: #{e.message}"
+      rescue OpenAI::AuthenticationError => e
+        SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+        raise LLMAPIError, "Llamacpp authentication failed: #{e.message}"
+      rescue SocketError => e
+        SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+        raise LLMAPIError, "Network error: Unable to connect to Llamacpp API"
+      rescue => e
+        SmartPrompt.logger.error "Failed to initialize Llamacpp client: #{e.message}"
+        raise Error, "Unexpected error initializing Llamacpp client: #{e.message}"
+      ensure
+        SmartPrompt.logger.info "Successful creation an Llamacpp client."
+      end
+    end
+
+    def send_request(messages, model=nil)
+      SmartPrompt.logger.info "LlamacppAdapter: Sending request to Llamacpp"
+      begin
+        response = @client.chat(
+          parameters: {
+            messages: messages,
+            temperature: @config['temperature'] || 0.7
+          }
+        )
+      rescue OpenAI::APIError => e
+        SmartPrompt.logger.error "Llamacpp API error: #{e.message}"
+        raise LLMAPIError, "Llamacpp API error: #{e.message}"
+      rescue OpenAI::APIConnectionError => e
+        SmartPrompt.logger.error "Connection error: Unable to reach Llamacpp API"
+        raise LLMAPIError, "Connection error: Unable to reach Llamacpp API"
+      rescue OpenAI::APITimeoutError => e
+        SmartPrompt.logger.error "Request to Llamacpp API timed out"
+        raise LLMAPIError, "Request to Llamacpp API timed out"
+      rescue OpenAI::InvalidRequestError => e
+        SmartPrompt.logger.error "Invalid request to Llamacpp API: #{e.message}"
+        raise LLMAPIError, "Invalid request to Llamacpp API: #{e.message}"
+      rescue OpenAI::AuthenticationError => e
+        SmartPrompt.logger.error "Authentication error with Llamacpp API: #{e.message}"
+        raise LLMAPIError, "Authentication error with Llamacpp API: #{e.message}"
+      rescue OpenAI::RateLimitError => e
+        SmartPrompt.logger.error "Rate limit exceeded for Llamacpp API"
+        raise LLMAPIError, "Rate limit exceeded for Llamacpp API"
+      rescue JSON::ParserError => e
+        SmartPrompt.logger.error "Failed to parse Llamacpp API response"
+        raise LLMAPIError, "Failed to parse Llamacpp API response"
+      rescue => e
+        SmartPrompt.logger.error "Unexpected error during Llamacpp request: #{e.message}"
+        raise Error, "Unexpected error during Llamacpp request: #{e.message}"
+      ensure
+        SmartPrompt.logger.info "Successful send a message"
+      end
+      SmartPrompt.logger.info "LlamacppAdapter: Received response from Llamacpp"
+      response.dig("choices", 0, "message", "content")
+    end
+  end
+end
data/lib/smart_prompt/llm_adapter.rb CHANGED
@@ -1,8 +1,6 @@
 require 'net/http'
 require 'json'
 require 'uri'
-require 'openai'
-require 'ollama-ai'
 
 module SmartPrompt
   class LLMAdapter
@@ -17,86 +15,6 @@ module SmartPrompt
     end
   end
 
-  class OpenAIAdapter < LLMAdapter
-    def initialize(config)
-      super
-      api_key = @config['api_key']
-      if api_key.is_a?(String) && api_key.start_with?('ENV[') && api_key.end_with?(']')
-        api_key = eval(api_key)
-      end
-      @client = OpenAI::Client.new(
-        access_token: api_key,
-        uri_base: @config['url'],
-        request_timeout: 240
-      )
-    end
-
-    def send_request(messages, model=nil)
-      SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
-      if model
-        model_name = model
-      else
-        model_name = @config['model']
-      end
-      SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
-      response = @client.chat(
-        parameters: {
-          model: model_name,
-          messages: messages,
-          temperature: @config['temperature'] || 0.7
-        }
-      )
-      SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
-      response.dig("choices", 0, "message", "content")
-    end
-  end
-
-  class LlamacppAdapter < LLMAdapter
-    def initialize(config)
-      super
-      @client = OpenAI::Client.new(
-        uri_base: @config['url']
-      )
-    end
-    def send_request(messages, model=nil)
-      SmartPrompt.logger.info "LlamacppAdapter: Sending request to Llamacpp"
-      response = @client.chat(
-        parameters: {
-          messages: messages,
-          temperature: @config['temperature'] || 0.7
-        }
-      )
-      SmartPrompt.logger.info "LlamacppAdapter: Received response from Llamacpp"
-      response.dig("choices", 0, "message", "content")
-    end
-  end
-
-  class OllamaAdapter < LLMAdapter
-    def initialize(config)
-      super
-      @client = Ollama.new(credentials: { address: @config['url'] })
-    end
-
-    def send_request(messages, model=nil)
-      SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
-      if model
-        model_name = model
-      else
-        model_name = @config['model']
-      end
-      SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
-      response = @client.generate(
-        {
-          model: model_name,
-          prompt: messages.to_s,
-          stream: false
-        }
-      )
-      SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
-      return response[0]["response"]
-    end
-  end
-
   class MockAdapter < LLMAdapter
     def send_request(messages)
       puts "Mock adapter received #{messages.length} messages"
data/lib/smart_prompt/ollama_adapter.rb ADDED
@@ -0,0 +1,64 @@
+require 'ollama-ai'
+
+module SmartPrompt
+  class OllamaAdapter < LLMAdapter
+    def initialize(config)
+      super
+      begin
+        @client = Ollama.new(credentials: { address: @config['url'] })
+      rescue Ollama::Error => e
+        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
+        raise LLMAPIError, "Invalid Ollama configuration: #{e.message}"
+      rescue SocketError => e
+        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
+        raise LLMAPIError, "Network error: Unable to connect to Ollama API"
+      rescue => e
+        SmartPrompt.logger.error "Failed to initialize Ollama client: #{e.message}"
+        raise Error, "Unexpected error initializing Ollama client: #{e.message}"
+      ensure
+        SmartPrompt.logger.info "Successful creation an Ollama client."
+      end
+    end
+
+    def send_request(messages, model=nil)
+      SmartPrompt.logger.info "OllamaAdapter: Sending request to Ollama"
+      if model
+        model_name = model
+      else
+        model_name = @config['model']
+      end
+      SmartPrompt.logger.info "OllamaAdapter: Using model #{model_name}"
+      begin
+        response = @client.generate(
+          {
+            model: model_name,
+            prompt: messages.to_s,
+            stream: false
+          }
+        )
+      rescue Ollama::Error => e
+        SmartPrompt.logger.error "Ollama API error: #{e.message}"
+        raise LLMAPIError, "Ollama API error: #{e.message}"
+      rescue Ollama::ConnectionError => e
+        SmartPrompt.logger.error "Connection error: Unable to reach Ollama API"
+        raise LLMAPIError, "Connection error: Unable to reach Ollama API"
+      rescue Ollama::TimeoutError => e
+        SmartPrompt.logger.error "Request to Ollama API timed out"
+        raise LLMAPIError, "Request to Ollama API timed out"
+      rescue Ollama::InvalidRequestError => e
+        SmartPrompt.logger.error "Invalid request to Ollama API: #{e.message}"
+        raise LLMAPIError, "Invalid request to Ollama API: #{e.message}"
+      rescue JSON::ParserError => e
+        SmartPrompt.logger.error "Failed to parse Ollama API response"
+        raise LLMAPIError, "Failed to parse Ollama API response"
+      rescue => e
+        SmartPrompt.logger.error "Unexpected error during Ollama request: #{e.message}"
+        raise Error, "Unexpected error during Ollama request: #{e.message}"
+      ensure
+        SmartPrompt.logger.info "Successful send a message"
+      end
+      SmartPrompt.logger.info "OllamaAdapter: Received response from Ollama"
+      return response[0]["response"]
+    end
+  end
+end
data/lib/smart_prompt/openai_adapter.rb ADDED
@@ -0,0 +1,81 @@
+require 'openai'
+
+module SmartPrompt
+  class OpenAIAdapter < LLMAdapter
+    def initialize(config)
+      super
+      api_key = @config['api_key']
+      if api_key.is_a?(String) && api_key.start_with?('ENV[') && api_key.end_with?(']')
+        api_key = eval(api_key)
+      end
+      begin
+        @client = OpenAI::Client.new(
+          access_token: api_key,
+          uri_base: @config['url'],
+          request_timeout: 240
+        )
+      rescue OpenAI::ConfigurationError => e
+        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+        raise LLMAPIError, "Invalid OpenAI configuration: #{e.message}"
+      rescue OpenAI::AuthenticationError => e
+        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+        raise LLMAPIError, "OpenAI authentication failed: #{e.message}"
+      rescue SocketError => e
+        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+        raise LLMAPIError, "Network error: Unable to connect to OpenAI API"
+      rescue => e
+        SmartPrompt.logger.error "Failed to initialize OpenAI client: #{e.message}"
+        raise Error, "Unexpected error initializing OpenAI client: #{e.message}"
+      ensure
+        SmartPrompt.logger.info "Successful creation an OpenAI client."
+      end
+    end
+
+    def send_request(messages, model=nil)
+      SmartPrompt.logger.info "OpenAIAdapter: Sending request to OpenAI"
+      if model
+        model_name = model
+      else
+        model_name = @config['model']
+      end
+      SmartPrompt.logger.info "OpenAIAdapter: Using model #{model_name}"
+      begin
+        response = @client.chat(
+          parameters: {
+            model: model_name,
+            messages: messages,
+            temperature: @config['temperature'] || 0.7
+          }
+        )
+      rescue OpenAI::APIError => e
+        SmartPrompt.logger.error "OpenAI API error: #{e.message}"
+        raise LLMAPIError, "OpenAI API error: #{e.message}"
+      rescue OpenAI::APIConnectionError => e
+        SmartPrompt.logger.error "Connection error: Unable to reach OpenAI API"
+        raise LLMAPIError, "Connection error: Unable to reach OpenAI API"
+      rescue OpenAI::APITimeoutError => e
+        SmartPrompt.logger.error "Request to OpenAI API timed out"
+        raise LLMAPIError, "Request to OpenAI API timed out"
+      rescue OpenAI::InvalidRequestError => e
+        SmartPrompt.logger.error "Invalid request to OpenAI API: #{e.message}"
+        raise LLMAPIError, "Invalid request to OpenAI API: #{e.message}"
+      rescue OpenAI::AuthenticationError => e
+        SmartPrompt.logger.error "Authentication error with OpenAI API: #{e.message}"
+        raise LLMAPIError, "Authentication error with OpenAI API: #{e.message}"
+      rescue OpenAI::RateLimitError => e
+        SmartPrompt.logger.error "Rate limit exceeded for OpenAI API"
+        raise LLMAPIError, "Rate limit exceeded for OpenAI API"
+      rescue JSON::ParserError => e
+        SmartPrompt.logger.error "Failed to parse OpenAI API response"
+        raise LLMAPIError, "Failed to parse OpenAI API response"
+      rescue => e
+        SmartPrompt.logger.error "Unexpected error during OpenAI request: #{e.message}"
+        raise Error, "Unexpected error during OpenAI request: #{e.message}"
+      ensure
+        SmartPrompt.logger.info "Successful send a message"
+      end
+      SmartPrompt.logger.info "OpenAIAdapter: Received response from OpenAI"
+      response.dig("choices", 0, "message", "content")
+    end
+  end
+end
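The api_key handling above means a YAML value written literally as ENV["..."] is resolved against the process environment when the client is created, keeping secrets out of the config file. A configuration sketch for an OpenAI-compatible endpoint; the endpoint URL, model, and variable name are illustrative:

```
# llm_config.yml (illustrative values):
#
#   adapters:
#     openai: OpenAIAdapter
#   llms:
#     my_endpoint:
#       adapter: openai
#       url: https://api.example.com/v1/
#       api_key: ENV["MY_API_KEY"]   # looked up at client creation
#       default_model: my-model
#   default_llm: my_endpoint
#   worker_path: "./workers"
#   template_path: "./templates"

require 'smart_prompt'

ENV['MY_API_KEY'] ||= 'sk-example'  # normally exported by the shell
engine = SmartPrompt::Engine.new('./llm_config.yml')
```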
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
 module SmartPrompt
-  VERSION = "0.1.5"
+  VERSION = "0.1.7"
 end
data/lib/smart_prompt.rb CHANGED
@@ -2,11 +2,18 @@ require File.expand_path('../smart_prompt/version', __FILE__)
 require File.expand_path('../smart_prompt/engine', __FILE__)
 require File.expand_path('../smart_prompt/conversation', __FILE__)
 require File.expand_path('../smart_prompt/llm_adapter', __FILE__)
+require File.expand_path('../smart_prompt/openai_adapter', __FILE__)
+require File.expand_path('../smart_prompt/llamacpp_adapter', __FILE__)
+require File.expand_path('../smart_prompt/ollama_adapter', __FILE__)
 require File.expand_path('../smart_prompt/prompt_template', __FILE__)
 require File.expand_path('../smart_prompt/worker', __FILE__)
 
 module SmartPrompt
   class Error < StandardError; end
+  class ConfigurationError < Error; end
+  class LLMAPIError < Error; end
+  class CallWorkerError < Error; end
+
   attr_writer :logger
 
   def self.define_worker(name, &block)
@@ -17,6 +24,10 @@ module SmartPrompt
     worker = Worker.new(name, config_file)
     worker.execute(params)
   end
+
+  def self.logger=(logger)
+    @logger = logger
+  end
 
   def self.logger
     @logger ||= Logger.new($stdout).tap do |log|
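The new logger= writer (also used by the logger_file option in engine.rb) lets an application route the gem's logging wherever it wants, and the new error subclasses allow rescuing by failure type. A brief sketch; the log path follows no fixed convention and the worker call mirrors the README example:

```
require 'logger'
require 'smart_prompt'

# Send SmartPrompt's logging to an application log file (path illustrative).
SmartPrompt.logger = Logger.new('./log/smart_prompt.log')

begin
  engine = SmartPrompt::Engine.new('./config/llm_config.yml')
  puts engine.call_worker(:daily_report, { location: 'Shanghai' })
rescue SmartPrompt::ConfigurationError => e
  warn "Configuration problem: #{e.message}"
rescue SmartPrompt::LLMAPIError => e
  warn "LLM request failed: #{e.message}"
end
```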
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: smart_prompt
 version: !ruby/object:Gem::Version
-  version: 0.1.5
+  version: 0.1.7
 platform: ruby
 authors:
 - zhuang biaowei
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-10-07 00:00:00.000000000 Z
+date: 2024-10-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: yaml
@@ -95,7 +95,10 @@ files:
 - lib/smart_prompt.rb
 - lib/smart_prompt/conversation.rb
 - lib/smart_prompt/engine.rb
+- lib/smart_prompt/llamacpp_adapter.rb
 - lib/smart_prompt/llm_adapter.rb
+- lib/smart_prompt/ollama_adapter.rb
+- lib/smart_prompt/openai_adapter.rb
 - lib/smart_prompt/prompt_template.rb
 - lib/smart_prompt/version.rb
 - lib/smart_prompt/worker.rb
@@ -122,7 +125,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.18
+rubygems_version: 3.5.22
 signing_key:
 specification_version: 4
 summary: A smart prompt management and LLM interaction gem