promptcraft 0.1.0

@@ -0,0 +1,30 @@
+ ---
+ system_prompt: I like to solve maths problems.
+ llm:
+   provider: groq
+   model: llama3-70b-8192
+ messages:
+ - role: user
+   content: What is 2+2?
+ - role: assistant
+   content: That's an easy one! The answer is... 4!
+ ---
+ system_prompt: I like to solve maths problems.
+ llm:
+   provider: groq
+   model: llama3-8b-8192
+ messages:
+ - role: user
+   content: What is 2+2?
+ - role: assistant
+   content: The answer to 2+2 is 4.
+ ---
+ system_prompt: I like to solve maths problems.
+ llm:
+   provider: openai
+   model: gpt-4-turbo
+ messages:
+ - role: user
+   content: What is 6 divided by 2?
+ - role: assistant
+   content: 6 divided by 2 is 3.
@@ -0,0 +1,11 @@
+ ---
+ system_prompt: |-
+   I like to solve maths problems.
+
+ messages:
+ - role: "user"
+   content: "What is 2+2?"
+ - role: assistant
+   content: 2 + 2 = 4
+ - role: user
+   content: "What is 3+3?"
@@ -0,0 +1,9 @@
+ ---
+ system_prompt: |-
+   I like to solve maths problems.
+
+ messages:
+ - role: "user"
+   content: "What is 2+2?"
+
+ # The final assistant response is missing, so the next run will add it.
@@ -0,0 +1,51 @@
+ ---
+ system_prompt: I am excellent at limericks.
+ llm:
+   provider: groq
+   model: llama3-70b-8192
+ messages:
+ - role: user
+   content: A limerick about a llama.
+ - role: assistant
+   content: |-
+     Here is a limerick about a llama:
+
+     There once was a llama named Pete,
+     Whose fur was so soft, it was neat.
+     He'd spit with a flair,
+     When his space was not fair,
+     And his hums were so loud, they'd repeat!
+ ---
+ system_prompt: I am excellent at limericks.
+ llm:
+   provider: groq
+   model: llama3-70b-8192
+ messages:
+ - role: user
+   content: A limerick about cats.
+ - role: assistant
+   content: |-
+     Here is a limerick about cats:
+
+     There once was a feline so fine,
+     Whose whiskers were long and divine.
+     She'd purr with delight,
+     As she hunted at night,
+     And her claws were as sharp as a vine.
+ ---
+ system_prompt: I am excellent at limericks.
+ llm:
+   provider: groq
+   model: llama3-70b-8192
+ messages:
+ - role: user
+   content: A limerick about a dog.
+ - role: assistant
+   content: |-
+     Here is a limerick about a dog:
+
+     There once was a pup named Pete,
+     Whose wagging tail couldn't be beat.
+     He'd chase after balls,
+     And fetch them through halls,
+     And always bring back a treat to eat.
@@ -0,0 +1,3 @@
+ ---
+ system_prompt: |-
+   I like to solve maths problems.
@@ -0,0 +1,5 @@
+ ---
+ system_prompt: |-
+   I like to solve maths problems.
+
+ messages: []
data/exe/promptcraft ADDED
@@ -0,0 +1,7 @@
+ #!/usr/bin/env ruby
+
+ require "promptcraft"
+
+ cmd = Promptcraft::Cli::RunCommand.new
+
+ cmd.parse.run(stdin: $stdin)
@@ -0,0 +1,177 @@
+ require "concurrent"
+ require "langchain"
+ require "tty-option"
+
+ # Pick an LLM provider + model:
+ #   promptcraft --provider groq
+ #   promptcraft --provider openai --model gpt-3.5-turbo
+ # Pass in a prompt via the CLI (-p, --prompt expects a string or a filename):
+ #   promptcraft -c tmp/maths/start/basic.yml -p "I'm terrible at maths. If I'm asked a maths question, I reply with a question."
+ #   promptcraft -c tmp/maths/start/basic.yml -p <(echo "I'm terrible at maths. If I'm asked a maths question, I reply with a question.")
+ # The prompt file can also be YAML with a system_prompt: key.
+ class Promptcraft::Cli::RunCommand
+   include TTY::Option
+
+   usage do
+     program "promptcraft"
+
+     command "run"
+
+     desc "Re-run conversation against new system prompt"
+   end
+
+   option :conversation do
+     arity zero_or_more
+     short "-c"
+     long "--conversation filename"
+     desc "Filename of conversation (or use STDIN)"
+   end
+
+   option :prompt do
+     short "-p"
+     long "--prompt prompt"
+     desc "String or filename containing system prompt"
+   end
+
+   flag :help do
+     short "-h"
+     long "--help"
+     desc "Print usage"
+   end
+
+   option :model do
+     short "-m"
+     long "--model model_name"
+     desc "Model name to use for chat completion"
+   end
+
+   option :provider do
+     long "--provider provider_name"
+     desc "Provider name to use for chat completion"
+   end
+
+   option :format do
+     short "-f"
+     long "--format format"
+     desc "Output format (yaml, json)"
+     default "yaml"
+   end
+
+   option :threads do
+     long "--threads threads"
+     desc "Number of threads to use for concurrent processing"
+     convert :int
+     default 5
+   end
+
+   # TODO: --debug
+   # * faraday debugging
+   # * Promptcraft::Llm.new(debug: true)
+   flag :debug do
+     long "--debug"
+     desc "Enable debug mode"
+   end
+
+   def run(stdin: nil)
+     if params[:help]
+       warn help
+     elsif params.errors.any?
+       warn params.errors.summary
+     else
+       # Load conversation files in a thread pool
+       pool = Concurrent::FixedThreadPool.new(params[:threads])
+       conversations = Concurrent::Array.new
+       (params[:conversation] || []).each do |filename|
+         pool.post do
+           # If --conversation=filename is an actual file, load it; otherwise
+           # treat the value as inline YAML and parse it via StringIO
+           if File.exist?(filename)
+             conversations.push(*Promptcraft::Conversation.load_from_file(filename))
+           else
+             conversations.push(*Promptcraft::Conversation.load_from_io(StringIO.new(filename)))
+           end
+         end
+       end
+       pool.shutdown
+       pool.wait_for_termination
+
+       # If STDIN is piped into the command, read a stream of YAML conversations from it
+       if io_ready?(stdin)
+         conversations.push(*Promptcraft::Conversation.load_from_io(stdin))
+       end
+
+       if conversations.empty?
+         conversations << Promptcraft::Conversation.new(system_prompt: "You are helpful. If you're first, then ask a question. You like brevity.")
+       end
+
+       if (prompt = params[:prompt])
+         # If the prompt is a file, load its contents; otherwise use the value itself
+         new_system_prompt = if File.exist?(prompt)
+           File.read(prompt)
+         else
+           prompt
+         end
+
+         # If new_system_prompt parses as a YAML Hash, use its system_prompt: key
+         begin
+           obj = YAML.load(new_system_prompt, symbolize_names: true)
+           if obj.is_a?(Hash) && obj[:system_prompt]
+             new_system_prompt = obj[:system_prompt]
+           end
+         rescue # not valid YAML; use the prompt text as-is
+         end
+       end
+
+       # Process each conversation in a concurrent thread via a thread pool
+       pool = Concurrent::FixedThreadPool.new(params[:threads])
+       mutex = Mutex.new
+
+       updated_conversations = Concurrent::Array.new
+       conversations.each do |conversation|
+         pool.post do
+           # warn "Post processing conversation for #{conversation.messages.inspect}"
+           llm = if params[:provider]
+             Promptcraft::Llm.new(provider: params[:provider], model: params[:model])
+           elsif conversation.llm
+             conversation.llm
+           else
+             Promptcraft::Llm.new
+           end
+           llm.model = params[:model] if params[:model]
+
+           system_prompt = new_system_prompt || conversation.system_prompt
+
+           cmd = Promptcraft::Command::RechatConversationCommand.new(system_prompt:, conversation:, llm:)
+           cmd.execute
+           updated_conversations << cmd.updated_conversation
+
+           mutex.synchronize do
+             dump_conversation(cmd.updated_conversation, format: params[:format])
+           end
+         rescue => e
+           mutex.synchronize do
+             warn "Error: #{e.message}"
+             warn "for conversation: #{conversation.inspect}"
+           end
+         end
+       end
+       pool.shutdown
+       pool.wait_for_termination
+     end
+   end
+
+   # We only stream YAML and JSON objects, so each conversation can be
+   # dumped to STDOUT as soon as it has been processed
+   def dump_conversation(conversation, format:)
+     if format == "json"
+       puts conversation.to_json
+     else
+       puts conversation.to_yaml
+     end
+   end
+
+   def io_ready?(io)
+     return false unless io
+     IO.select([io], nil, nil, 5)
+   end
+ end
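The run command wires the whole pipeline together: load conversations from files, inline YAML, or STDIN; optionally override the system prompt; then re-chat each conversation concurrently. A minimal sketch of driving it programmatically, assuming GROQ_API_KEY is set; the inline YAML is illustrative only:

    require "promptcraft"

    # Inline YAML is accepted wherever -c does not name a real file.
    yaml = <<~YAML
      ---
      system_prompt: I like to solve maths problems.
      messages:
      - role: user
        content: What is 2+2?
    YAML

    cmd = Promptcraft::Cli::RunCommand.new
    cmd.parse(["-c", yaml, "-p", "Reply only with the number.", "--provider", "groq"])
    cmd.run # streams the re-chatted conversation to STDOUT as YAML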
@@ -0,0 +1,3 @@
+ module Promptcraft::Cli
+   autoload :RunCommand, "promptcraft/cli/run_command"
+ end
@@ -0,0 +1,22 @@
+ require "langchain"
+
+ class Promptcraft::Command::LlmChatCommand
+   attr_reader :messages, :llm
+
+   def initialize(messages:, llm:)
+     @messages = messages
+     @llm = llm
+   end
+
+   def execute
+     # cleanse messages of missing content, role, etc
+     messages = @messages.reject { |m| m[:content].nil? || m[:content].empty? || m[:role].nil? || m[:role].empty? }
+     response = @llm.chat(messages:)
+
+     response_text = response.chat_completion
+     {role: "assistant", content: response_text}
+   rescue => e
+     puts e.message
+     raise
+   end
+ end
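LlmChatCommand is the single point of contact with the LLM: it drops incomplete messages, requests a chat completion, and wraps the reply as an assistant message. A sketch of using it standalone, assuming the provider's API key is set in the environment:

    llm = Promptcraft::Llm.new(provider: "groq")
    messages = [
      {role: "system", content: "You are terse."},
      {role: "user", content: "What is 2+2?"}
    ]
    reply = Promptcraft::Command::LlmChatCommand.new(messages: messages, llm: llm).execute
    reply # => {role: "assistant", content: "..."}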
@@ -0,0 +1,37 @@
+ class Promptcraft::Command::RechatConversationCommand
+   include Promptcraft::Helpers
+
+   def initialize(system_prompt:, conversation:, llm:)
+     @system_prompt = system_prompt
+     @conversation = conversation
+     @llm = llm
+   end
+
+   attr_accessor :system_prompt, :conversation, :llm
+   attr_reader :updated_conversation
+
+   # At each point where the assistant replied (or has not yet replied), ask
+   # the LLM to re-chat the preceding messages and generate a new response.
+   def execute
+     @updated_conversation = Promptcraft::Conversation.new(system_prompt:, llm:)
+
+     conversation.messages.each do |message|
+       message = deep_symbolize_keys(message)
+       role = message[:role]
+       if role == "assistant"
+         messages = @updated_conversation.to_messages
+         response_message = Promptcraft::Command::LlmChatCommand.new(messages: messages, llm: @llm).execute
+         @updated_conversation.messages << response_message
+       else
+         @updated_conversation.messages << message
+       end
+     end
+
+     # If the last message is from the user, ask the LLM to generate a response
+     unless @updated_conversation.messages.last&.dig(:role) == "assistant"
+       messages = @updated_conversation.to_messages
+       response_message = Promptcraft::Command::LlmChatCommand.new(messages: messages, llm: @llm).execute
+       @updated_conversation.messages << response_message
+     end
+   end
+ end
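In other words, user messages are copied through unchanged and every assistant turn is regenerated under the new system prompt. A sketch of re-running a single conversation; the prompts are illustrative:

    conversation = Promptcraft::Conversation.new(
      system_prompt: "I like to solve maths problems.",
      messages: [
        {role: "user", content: "What is 2+2?"},
        {role: "assistant", content: "4"}
      ]
    )

    rechat = Promptcraft::Command::RechatConversationCommand.new(
      system_prompt: "Answer every question with a question.",
      conversation: conversation,
      llm: Promptcraft::Llm.new
    )
    rechat.execute
    puts rechat.updated_conversation.to_yaml # same user turns, new assistant replies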
@@ -0,0 +1,4 @@
+ module Promptcraft::Command
+   autoload :LlmChatCommand, "promptcraft/command/llm_chat_command"
+   autoload :RechatConversationCommand, "promptcraft/command/rechat_conversation_command"
+ end
@@ -0,0 +1,105 @@
+ require "yaml"
+
+ class Promptcraft::Conversation
+   include Promptcraft::Helpers
+   extend Promptcraft::Helpers
+
+   attr_accessor :system_prompt, :messages
+   attr_accessor :llm
+
+   def initialize(system_prompt:, messages: [], llm: nil)
+     @system_prompt = system_prompt
+     @messages = messages
+     @llm = llm
+   end
+
+   def add_message(role:, content:)
+     @messages << {role:, content:}
+   end
+
+   class << self
+     def load_from_io(io = $stdin)
+       conversations = []
+       begin
+         YAML.load_stream(io) do |doc|
+           next unless doc
+           conversations << build_from(doc)
+         end
+       rescue Psych::SyntaxError => e
+         warn "Error: #{e.message}"
+         warn "Contents:\n#{io.read}"
+       end
+       conversations
+     end
+
+     def load_from_file(filename)
+       conversations = []
+       File.open(filename, "r") do |file|
+         YAML.parse_stream(file) do |doc|
+           next unless doc
+           conversations << build_from(doc.to_ruby)
+         end
+       end
+       conversations
+     end
+
+     def build_from(doc)
+       if doc.is_a?(Hash)
+         doc = deep_symbolize_keys(doc)
+       elsif doc.is_a?(String)
+         doc = {messages: [{role: "user", content: doc}]}
+       else
+         raise ArgumentError, "Invalid document type: #{doc.class}"
+       end
+
+       system_prompt = doc[:system_prompt]
+       messages = doc[:messages] || []
+       convo = new(system_prompt: system_prompt, messages: messages)
+       if (llm = doc[:llm])
+         convo.llm = Promptcraft::Llm.from_h(llm)
+       end
+       convo
+     end
+
+     # Class method to create a Conversation from an array of messages
+     def from_messages(messages)
+       if messages.empty? || messages.first[:role] != "system"
+         raise ArgumentError, "First message must be from 'system' with the prompt"
+       end
+
+       system_prompt = messages.first[:content]
+       remaining_messages = messages[1..] # all messages after the first
+       new(system_prompt:, messages: remaining_messages)
+     end
+   end
+
+   def save_to_file(filename)
+     File.write(filename, to_yaml)
+   end
+
+   # system_prompt: 'I like to solve maths problems.'
+   # messages:
+   # - role: "user"
+   #   content: "What is 2+2?"
+   # - role: assistant
+   #   content: 2 + 2 = 4
+   def to_yaml
+     YAML.dump(deep_stringify_keys({
+       system_prompt: @system_prompt&.strip,
+       llm: @llm&.to_h,
+       messages: @messages
+     }.compact))
+   end
+
+   def to_json
+     deep_stringify_keys({
+       system_prompt: @system_prompt&.strip,
+       llm: @llm&.to_h,
+       messages: @messages
+     }.compact).to_json
+   end
+
+   def to_messages
+     [{role: "system", content: @system_prompt}] + @messages
+   end
+ end
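Conversations round-trip between Ruby objects and the streamed YAML documents shown in the fixture files above. A small sketch:

    require "stringio"

    convo = Promptcraft::Conversation.new(system_prompt: "Be brief.")
    convo.add_message(role: "user", content: "What is 2+2?")

    yaml = convo.to_yaml
    # A stream may contain many `---` documents; each becomes one Conversation.
    reloaded = Promptcraft::Conversation.load_from_io(StringIO.new(yaml)).first
    reloaded.system_prompt # => "Be brief."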
@@ -0,0 +1,25 @@
+ module Promptcraft::Helpers
+   def deep_symbolize_keys(value)
+     case value
+     when Hash
+       value.each_with_object({}) do |(key, v), result|
+         result[key.to_sym] = deep_symbolize_keys(v) # Convert keys to symbols and recursively handle values
+       end
+     when Array
+       value.map { |v| deep_symbolize_keys(v) } # Apply symbolization to each element in the array
+     else
+       value # Return the value as is if it is neither a hash nor an array
+     end
+   end
+
+   def deep_stringify_keys(value)
+     case value
+     when Hash
+       value.map { |k, v| [k.to_s, deep_stringify_keys(v)] }.to_h
+     when Array
+       value.map { |v| deep_stringify_keys(v) }
+     else
+       value
+     end
+   end
+ end
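These helpers convert keys recursively through nested hashes and arrays of hashes, which is exactly the shape of a messages: list. For example:

    include Promptcraft::Helpers

    deep_symbolize_keys({"messages" => [{"role" => "user"}]})
    # => {messages: [{role: "user"}]}
    deep_stringify_keys({messages: [{role: "user"}]})
    # => {"messages" => [{"role" => "user"}]}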
@@ -0,0 +1,55 @@
+ require "active_support/core_ext/module/delegation"
+
+ class Promptcraft::Llm
+   DEFAULT_PROVIDER = "groq"
+
+   attr_reader :langchain
+   attr_accessor :provider, :model
+
+   delegate_missing_to :langchain
+
+   def initialize(provider: DEFAULT_PROVIDER, model: nil, api_key: nil)
+     @provider = provider
+     @langchain = case provider
+     when "groq"
+       @model = model || "llama3-70b-8192"
+       require "openai"
+       Langchain::LLM::OpenAI.new(
+         api_key: api_key || ENV.fetch("GROQ_API_KEY"),
+         llm_options: {uri_base: "https://api.groq.com/openai/"},
+         default_options: {chat_completion_model_name: @model}
+       )
+     when "openai"
+       @model = model || "gpt-3.5-turbo"
+       require "openai"
+       Langchain::LLM::OpenAI.new(
+         api_key: api_key || ENV.fetch("OPENAI_API_KEY"),
+         default_options: {chat_completion_model_name: @model}
+       )
+     when "openrouter"
+       @model = model || "meta-llama/llama-3-8b-instruct:free"
+       require "openai"
+       Langchain::LLM::OpenAI.new(
+         api_key: api_key || ENV.fetch("OPENROUTER_API_KEY"),
+         llm_options: {uri_base: "https://openrouter.ai/api/"},
+         default_options: {chat_completion_model_name: @model}
+       )
+     when "ollama"
+       @model = model || "llama3"
+       Langchain::LLM::Ollama.new(
+         default_options: {
+           completion_model_name: @model,
+           chat_completion_model_name: @model
+         }
+       )
+     end
+   end
+
+   def to_h
+     {provider: provider, model: model}
+   end
+
+   def self.from_h(hash)
+     new(provider: hash[:provider], model: hash[:model])
+   end
+ end
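Each provider is adapted through Langchain.rb; groq and openrouter reuse the OpenAI client with a different uri_base. A sketch of the to_h/from_h round-trip used by the llm: key in conversation YAML, assuming GROQ_API_KEY is set for the default provider:

    llm = Promptcraft::Llm.new # defaults to groq + llama3-70b-8192
    llm.to_h # => {provider: "groq", model: "llama3-70b-8192"}

    # The llm: key in conversation YAML is rebuilt the same way:
    Promptcraft::Llm.from_h({provider: "ollama", model: "llama3"})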
@@ -0,0 +1,5 @@
+ # frozen_string_literal: true
+
+ module Promptcraft
+   VERSION = "0.1.0"
+ end
@@ -0,0 +1,11 @@
+ # frozen_string_literal: true
+
+ require_relative "promptcraft/version"
+
+ module Promptcraft
+   autoload :Cli, "promptcraft/cli"
+   autoload :Command, "promptcraft/command"
+   autoload :Conversation, "promptcraft/conversation"
+   autoload :Helpers, "promptcraft/helpers"
+   autoload :Llm, "promptcraft/llm"
+ end
@@ -0,0 +1,4 @@
+ module Promptcraft
+   VERSION: String
+   # See the writing guide of rbs: https://github.com/ruby/rbs#guides
+ end