appydave-tools 0.10.1 → 0.10.2
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/lib/appydave/tools/cli_actions/prompt_completion_action.rb +20 -50
- data/lib/appydave/tools/prompt_tools/models/llm_info.rb +15 -0
- data/lib/appydave/tools/prompt_tools/prompt_completion.rb +90 -0
- data/lib/appydave/tools/version.rb +1 -1
- data/lib/appydave/tools.rb +3 -0
- data/package-lock.json +2 -2
- data/package.json +1 -1
- metadata +4 -3
- data/lib/appydave/tools/prompt_tools/models/platform_info.rb +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 63c99bf6126fd82479ca9280e75f8d5c9226b1e8f6df4f6f581476363fd45245
+  data.tar.gz: 2f171fac8f490c259e6d259c848af330f3b7c4c1a35aea456fcc85251224f69e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 06227ef4baf7765404e787a91dd53728032cfcea6718b36d19bb378f35b754eabd365d1a2eea270299f6b2fb2ab98bfa529feff71426fd78b0968768c23131b2
+  data.tar.gz: 6ac8d46c1f8afb609faf5145fe864e24b09de807691295eef2613029c0ebcfb30f92adbb60a437e810b14b7ba841e1e1bb5cb1499ca5fab5b7864907e91b4e53
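For reference, checksums.yaml records the SHA-256 and SHA-512 digests of the metadata.gz and data.tar.gz archives inside the built gem. A minimal verification sketch (not part of the gem), assuming the released package has been unpacked with `tar -xf appydave-tools-0.10.2.gem` so that metadata.gz, data.tar.gz and checksums.yaml.gz sit in the current directory:

```ruby
# Sketch only: compares the SHA-256 digests recorded in checksums.yaml.gz
# against the archives extracted from the .gem file.
require 'digest'
require 'yaml'
require 'zlib'

checksums = YAML.safe_load(Zlib::GzipReader.open('checksums.yaml.gz', &:read))

%w[metadata.gz data.tar.gz].each do |name|
  actual   = Digest::SHA256.file(name).hexdigest
  expected = checksums['SHA256'][name]
  puts "#{name}: #{actual == expected ? 'OK' : 'MISMATCH'}"
end
```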
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,10 @@
+## [0.10.1](https://github.com/klueless-io/appydave-tools/compare/v0.10.0...v0.10.1) (2024-06-13)
+
+
+### Bug Fixes
+
+* add base model ([b2952d6](https://github.com/klueless-io/appydave-tools/commit/b2952d661c48dd7f8b7f384d365010ef89713758))
+
 # [0.10.0](https://github.com/klueless-io/appydave-tools/compare/v0.9.5...v0.10.0) (2024-06-13)
 
 
data/lib/appydave/tools/cli_actions/prompt_completion_action.rb
CHANGED
@@ -1,24 +1,19 @@
 # frozen_string_literal: true
 
-require 'openai'
-
 module Appydave
   module Tools
     module CliActions
-      #
+      # PromptCompletionAction is a CLI action for running a completion prompt against the model
       class PromptCompletionAction < BaseAction
-        DEFAULT_MODEL = 'gpt-4o'
-        DEFAULT_PLATFORM = 'openai'
-
         protected
 
         def define_options(opts, options)
           opts.on('-p', '--prompt PROMPT', 'The prompt text') { |v| options[:prompt] = v }
           opts.on('-f', '--file FILE', 'The prompt file') { |v| options[:file] = v }
           opts.on('-o', '--output FILE', 'Output file') { |v| options[:output] = v }
-          opts.on('-c', '--clipboard', 'Copy result to clipboard') { |
-          opts.on('-m', '--model MODEL',
-          opts.on('-k', '--
+          opts.on('-c', '--clipboard', 'Copy result to clipboard') { |_v| options[:clipboard] = true }
+          opts.on('-m', '--model MODEL', 'Model to use') { |v| options[:model] = v }
+          opts.on('-k', '--placeholders PAIRS', 'Placeholders for interpolation (format: key1=value1,key2=value2)') { |v| options[:placeholders] = v }
         end
 
         def validate_options(options)
@@ -27,52 +22,27 @@ module Appydave
             exit
           end
 
-          if options[:
-
-            options[:
+          if options[:placeholders]
+            placeholders = options[:placeholders].split(',').to_h { |pair| pair.split('=') }
+            options[:placeholders] = placeholders
           else
-            options[:
+            options[:placeholders] = {}
           end
         end
 
         def execute(options)
-
-
-
-
-
-
-
-
-
-
-
-
-
-          # USE Library class to run this type of code
-          # client = OpenAI::Client.new(api_key: ENV['OPENAI_API_KEY'])
-          # response = client.completions(
-          #   engine: model,
-          #   parameters: {
-          #     prompt: prompt_text,
-          #     max_tokens: 100
-          #   }
-          # )
-
-          # result = response['choices'].first['text'].strip
-
-          # if options[:output]
-          #   File.write(options[:output], result)
-          #   puts "Output written to #{options[:output]}"
-          # end
-
-          # if options[:clipboard]
-          #   Clipboard.copy(result)
-          #   puts 'Output copied to clipboard'
-          # end
-
-          puts 'Result:'
-          # puts result unless options[:output]
+          prompt_options = {
+            prompt: options[:prompt],
+            prompt_file: options[:file],
+            platform: options[:platform],
+            model: options[:model],
+            placeholders: options[:placeholders],
+            output_file: options[:output],
+            clipboard: options[:clipboard]
+          }
+
+          completion = Appydave::Tools::PromptTools::PromptCompletion.new(prompt_options)
+          completion.run
         end
       end
     end
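The new `-k/--placeholders` option takes a `key1=value1,key2=value2` string, and `validate_options` turns it into a hash with `split(',').to_h { |pair| pair.split('=') }`. A small sketch of that transformation, with illustrative values only:

```ruby
# Sketch only: mirrors the placeholder parsing done in validate_options above.
raw = 'name=Dave,topic=Ruby'
placeholders = raw.split(',').to_h { |pair| pair.split('=') }
placeholders # => { "name" => "Dave", "topic" => "Ruby" }
```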
data/lib/appydave/tools/prompt_tools/models/llm_info.rb
ADDED
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module Appydave
+  module Tools
+    module PromptTools
+      module Models
+        # What LLM are we using?
+        class LlmInfo < Appydave::Tools::Types::BaseModel
+          attribute :platform, :string, default: 'openai'
+          attribute :model, :string, default: 'gpt-4o'
+        end
+      end
+    end
+  end
+end
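`LlmInfo` is a small value object carrying the platform and model with defaults. A usage sketch, assuming `Appydave::Tools::Types::BaseModel` is backed by ActiveModel attributes (the gem depends on activemodel) so the declared defaults apply when no value is supplied:

```ruby
# Sketch only; defaults come from the attribute declarations above.
llm = Appydave::Tools::PromptTools::Models::LlmInfo.new
llm.platform # => "openai"
llm.model    # => "gpt-4o"

# An explicit value overrides the default (model name is illustrative).
llm = Appydave::Tools::PromptTools::Models::LlmInfo.new(model: 'gpt-3.5-turbo')
llm.model    # => "gpt-3.5-turbo"
```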
data/lib/appydave/tools/prompt_tools/prompt_completion.rb
ADDED
@@ -0,0 +1,90 @@
+# frozen_string_literal: true
+
+module Appydave
+  module Tools
+    module PromptTools
+      # PromptCompletion is a class for running a completion prompt against the model
+      class PromptCompletion
+        include KLog::Logging
+
+        attr_reader :prompt
+        attr_reader :prompt_file
+        attr_reader :expanded_prompt
+        attr_reader :llm
+        attr_reader :placeholders
+        attr_reader :output_file
+        attr_reader :clipboard
+
+        def initialize(options = {})
+          configure(options)
+
+          validate_options
+        end
+
+        def run
+          result = run_prompt(expand_prompt)
+
+          log.subheading 'Expanded Prompt'
+          puts expanded_prompt
+          log.subheading 'Result'
+          puts result
+
+          if output_file
+            File.write(output_file, result)
+            puts "Output written to #{output_file}"
+          end
+
+          if clipboard
+            Clipboard.copy(result)
+            puts 'Output copied to clipboard'
+          end
+
+          puts result unless output_file
+        end
+
+        private
+
+        def configure(options)
+          @prompt = options.delete(:prompt)
+          @prompt_file = options.delete(:prompt_file)
+          @llm = Appydave::Tools::PromptTools::Models::LlmInfo.new(
+            platform: options.delete(:llm_platform),
+            model: options.delete(:llm_model)
+          )
+          @placeholders = options.delete(:placeholders) || {}
+          @output_file = options.delete(:output_file)
+          @clipboard = options.key?(:clipboard) ? options.delete(:clipboard) : false
+        end
+
+        def validate_options
+          return unless prompt.nil? && prompt_file.nil?
+
+          raise ArgumentError, 'Either prompt or prompt_file must be provided.'
+        end
+
+        def expand_prompt
+          @expanded_prompt ||= begin
+            prompt_content = prompt || File.read(prompt_file)
+            placeholders.each do |key, value|
+              prompt_content.gsub!("{#{key}}", value)
+            end
+            prompt_content
+          end
+        end
+
+        def run_prompt(prompt_content)
+          client = OpenAI::Client.new(access_token: ENV.fetch('OPENAI_ACCESS_TOKEN', nil))
+          response = client.completions(
+            engine: llm_model,
+            prompt: prompt_content,
+            max_tokens: 1000
+          )
+          response['choices'][0]['text'].strip
+        rescue StandardError => e
+          puts "Error: Failed to run prompt. #{e.message}"
+          exit 1
+        end
+      end
+    end
+  end
+end
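A usage sketch for the new class, using only the option keys that `configure` reads; it assumes `OPENAI_ACCESS_TOKEN` is set and the prompt, placeholder and file values are illustrative:

```ruby
# Sketch only: exercises PromptCompletion as defined above.
# Requires OPENAI_ACCESS_TOKEN in the environment; values are illustrative.
completion = Appydave::Tools::PromptTools::PromptCompletion.new(
  prompt: 'Write a one-line summary about {topic}',
  placeholders: { 'topic' => 'Ruby gems' },
  output_file: 'summary.txt',
  clipboard: false
)
completion.run
```

Placeholders are interpolated into `{key}` tokens in the prompt by `expand_prompt` before the text is sent to the completion endpoint.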
data/lib/appydave/tools.rb
CHANGED
@@ -46,6 +46,9 @@ require 'appydave/tools/bank_reconciliation/clean/read_transactions'
 require 'appydave/tools/bank_reconciliation/clean/mapper'
 require 'appydave/tools/bank_reconciliation/models/transaction'
 
+require 'appydave/tools/prompt_tools/models/llm_info'
+require 'appydave/tools/prompt_tools/prompt_completion'
+
 require 'appydave/tools/subtitle_master/clean'
 
 require 'appydave/tools/youtube_automation/gpt_agent'
data/package-lock.json
CHANGED
@@ -1,12 +1,12 @@
 {
   "name": "appydave-tools",
-  "version": "0.10.1",
+  "version": "0.10.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "appydave-tools",
-      "version": "0.10.1",
+      "version": "0.10.2",
       "devDependencies": {
         "@klueless-js/semantic-release-rubygem": "github:klueless-js/semantic-release-rubygem",
         "@semantic-release/changelog": "^6.0.3",
data/package.json
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: appydave-tools
 version: !ruby/object:Gem::Version
-  version: 0.10.1
+  version: 0.10.2
 platform: ruby
 authors:
 - David Cruwys
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-06-
+date: 2024-06-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activemodel
@@ -191,7 +191,8 @@ files:
 - lib/appydave/tools/gpt_context/file_collector.rb
 - lib/appydave/tools/name_manager/_doc.md
 - lib/appydave/tools/name_manager/project_name.rb
-- lib/appydave/tools/prompt_tools/models/
+- lib/appydave/tools/prompt_tools/models/llm_info.rb
+- lib/appydave/tools/prompt_tools/prompt_completion.rb
 - lib/appydave/tools/subtitle_master/_doc.md
 - lib/appydave/tools/subtitle_master/clean.rb
 - lib/appydave/tools/types/array_type.rb
data/lib/appydave/tools/prompt_tools/models/platform_info.rb
File without changes