aia 0.5.18 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +4 -4
  2. data/.envrc +1 -0
  3. data/.version +1 -1
  4. data/CHANGELOG.md +39 -5
  5. data/README.md +388 -219
  6. data/Rakefile +16 -5
  7. data/_notes.txt +231 -0
  8. data/bin/aia +3 -2
  9. data/examples/README.md +140 -0
  10. data/examples/headlines +21 -0
  11. data/lib/aia/ai_client_adapter.rb +210 -0
  12. data/lib/aia/chat_processor_service.rb +120 -0
  13. data/lib/aia/config.rb +473 -4
  14. data/lib/aia/context_manager.rb +58 -0
  15. data/lib/aia/directive_processor.rb +267 -0
  16. data/lib/aia/{tools/fzf.rb → fzf.rb} +9 -17
  17. data/lib/aia/history_manager.rb +85 -0
  18. data/lib/aia/prompt_handler.rb +178 -0
  19. data/lib/aia/session.rb +215 -0
  20. data/lib/aia/shell_command_executor.rb +109 -0
  21. data/lib/aia/ui_presenter.rb +110 -0
  22. data/lib/aia/utility.rb +24 -0
  23. data/lib/aia/version.rb +9 -6
  24. data/lib/aia.rb +57 -61
  25. data/lib/extensions/openstruct_merge.rb +44 -0
  26. metadata +29 -43
  27. data/LICENSE.txt +0 -21
  28. data/doc/aia_and_pre_compositional_prompts.md +0 -474
  29. data/lib/aia/clause.rb +0 -7
  30. data/lib/aia/cli.rb +0 -452
  31. data/lib/aia/directives.rb +0 -142
  32. data/lib/aia/dynamic_content.rb +0 -26
  33. data/lib/aia/logging.rb +0 -62
  34. data/lib/aia/main.rb +0 -265
  35. data/lib/aia/prompt.rb +0 -275
  36. data/lib/aia/tools/ai_client_backend.rb +0 -92
  37. data/lib/aia/tools/backend_common.rb +0 -58
  38. data/lib/aia/tools/client.rb +0 -197
  39. data/lib/aia/tools/editor.rb +0 -52
  40. data/lib/aia/tools/glow.rb +0 -90
  41. data/lib/aia/tools/llm.rb +0 -77
  42. data/lib/aia/tools/mods.rb +0 -100
  43. data/lib/aia/tools/sgpt.rb +0 -79
  44. data/lib/aia/tools/subl.rb +0 -68
  45. data/lib/aia/tools/vim.rb +0 -93
  46. data/lib/aia/tools.rb +0 -88
  47. data/lib/aia/user_query.rb +0 -21
  48. data/lib/core_ext/string_wrap.rb +0 -73
  49. data/lib/core_ext/tty-spinner_log.rb +0 -25
  50. data/man/aia.1 +0 -272
  51. data/man/aia.1.md +0 -236
@@ -1,197 +0,0 @@
1
- # lib/aia/tools/client.rb
2
-
3
- require_relative 'backend_common'
4
-
5
- OpenAI.configure do |config|
6
- config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
7
- end
8
-
9
- class AIA::Client < AIA::Tools
10
- include AIA::BackendCommon
11
-
12
- meta(
13
- name: 'client',
14
- role: :backend,
15
- desc: 'Ruby implementation of the OpenAI API',
16
- url: 'https://github.com/alexrudall/ruby-openai',
17
- install: 'gem install ruby-openai',
18
- )
19
-
20
- attr_reader :client, :raw_response
21
-
22
- DEFAULT_PARAMETERS = ''
23
- DIRECTIVES = []
24
-
25
- def initialize(text: "", files: [])
26
- super
27
-
28
- @client = OpenAI::Client.new
29
- end
30
-
31
- def build_command
32
- # No-Op
33
- end
34
-
35
-
36
- def run
37
- handle_model(AIA.config.model)
38
- rescue => e
39
- puts "Error handling model #{AIA.config.model}: #{e.message}"
40
- end
41
-
42
- def speak(what = @text)
43
- print "Speaking ... " if AIA.verbose?
44
- text2audio(what)
45
- puts "Done." if AIA.verbose?
46
- end
47
-
48
-
49
- ###########################################################
50
- private
51
-
52
- # Handling different models more abstractly
53
- def handle_model(model_name)
54
- case model_name
55
- when /vision/
56
- image2text
57
-
58
- when /^gpt.*$/, /^babbage.*$/, /^davinci.*$/
59
- text2text
60
-
61
- when /^dall-e.*$/
62
- text2image
63
-
64
- when /^tts.*$/
65
- text2audio
66
-
67
- when /^whisper.*$/
68
- audio2text
69
-
70
- else
71
- raise "Unsupported model: #{model_name}"
72
- end
73
- end
74
-
75
-
76
- def image2text
77
- # TODO: Implement
78
- end
79
-
80
-
81
- def text2text
82
- @raw_response = client.chat(
83
- parameters: {
84
- model: AIA.config.model, # Required.
85
- messages: [{ role: "user", content: text}], # Required.
86
- temperature: AIA.config.temp,
87
- }
88
- )
89
-
90
- response = raw_response.dig('choices', 0, 'message', 'content')
91
-
92
- response
93
- end
94
-
95
-
96
- def text2image
97
- parameters = {
98
- model: AIA.config.model,
99
- prompt: text
100
- }
101
-
102
- parameters[:size] = AIA.config.image_size unless AIA.config.image_size.empty?
103
- parameters[:quality] = AIA.config.image_quality unless AIA.config.image_quality.empty?
104
-
105
- raw_response = client.images.generate(parameters:)
106
-
107
- response = raw_response.dig("data", 0, "url")
108
-
109
- response
110
- end
111
-
112
-
113
- def text2audio(what = @text, save: false, play: true)
114
- raise "OpenAI's text to speech capability is not available" unless client
115
-
116
- player = select_audio_player
117
-
118
- response = client.audio.speech(
119
- parameters: {
120
- model: AIA.config.speech_model,
121
- input: what,
122
- voice: AIA.config.voice
123
- }
124
- )
125
-
126
- handle_audio_response(response, player, save, play)
127
- end
128
-
129
-
130
- def audio2text(path_to_audio_file = @files.first)
131
- response = client.audio.transcribe(
132
- parameters: {
133
- model: AIA.config.model,
134
- file: File.open(path_to_audio_file, "rb")
135
- }
136
- )
137
-
138
- response["text"]
139
- rescue => e
140
- "An error occurred: #{e.message}"
141
- end
142
-
143
-
144
- # Helper methods
145
- def select_audio_player
146
- case OS.host_os
147
- when /mac|darwin/
148
- 'afplay'
149
- when /linux/
150
- 'mpg123'
151
- when /mswin|mingw|cygwin/
152
- 'cmdmp3'
153
- else
154
- raise "No MP3 player available"
155
- end
156
- end
157
-
158
-
159
- def handle_audio_response(response, player, save, play)
160
- Tempfile.create(['speech', '.mp3']) do |f|
161
- f.binmode
162
- f.write(response)
163
- f.close
164
- `cp #{f.path} #{Pathname.pwd + "speech.mp3"}` if save
165
- `#{player} #{f.path}` if play
166
- end
167
- end
168
-
169
-
170
- ###########################################################
171
- public
172
-
173
- class << self
174
-
175
- def list_models
176
- new.client.model.list
177
- end
178
-
179
-
180
- def speak(what)
181
- save_model = AIA.config.model
182
- AIA.config.model = AIA.config.speech_model
183
-
184
- new(text: what).speak
185
-
186
- AIA.config.model = save_model
187
- end
188
-
189
- end
190
-
191
- end
192
-
193
-
194
- __END__
195
-
196
-
197
- ##########################################################
@@ -1,52 +0,0 @@
1
- # lib/aia/tools/editor.rb
2
- # This is the default editor setup in the
3
- # system environment variable EDITOR
4
-
5
-
6
- class AIA::Editor < AIA::Tools
7
-
8
- meta(
9
- name: 'editor',
10
- role: :editor,
11
- desc: "Your default system $EDITOR",
12
- url: "unknown",
13
- install: "should already be installed",
14
- )
15
-
16
- DEFAULT_PARAMETERS = ""
17
-
18
- attr_accessor :command
19
-
20
-
21
- def initialize(file: "")
22
- @file = file
23
-
24
- discover_editor
25
-
26
- build_command
27
- end
28
-
29
-
30
- def discover_editor
31
- editor = ENV['EDITOR'] # This might be nil
32
-
33
- if editor.nil?
34
- @name = "echo"
35
- @description = "You have no default editor"
36
- @install = "Set your system environment variable EDITOR"
37
- else
38
- @name = editor
39
- end
40
- end
41
-
42
-
43
- def build_command
44
- @command = "#{meta.name} #{DEFAULT_PARAMETERS} #{@file}"
45
- end
46
-
47
-
48
- def run
49
- `#{command}`
50
- end
51
- end
52
-
@@ -1,90 +0,0 @@
1
- # aia/lib/aia/tools/glow.rb
2
-
3
- require 'tempfile'
4
- require 'tty-screen'
5
- require 'shellwords'
6
-
7
-
8
- =begin
9
-
10
- This class supports two use cases:
11
- 1) rendering markdown from an existing file
12
- 2) rendering markdown from a String object via a temporary file
13
-
14
- In both cases a String object is created and returned that contains the
15
- rendered version of the content so that it can be written to STDOUT
16
- by the caller.
17
-
18
- =end
19
-
20
- class AIA::Glow < AIA::Tools
21
-
22
- meta(
23
- name: 'glow',
24
- role: :markdown_renderer,
25
- desc: "A markdown renderer utility",
26
- url: "https://github.com/charmbracelet/glow",
27
- install: "brew install glow",
28
- )
29
-
30
- DEFAULT_PARAMETERS = "--width #{TTY::Screen.width-2}" # Magic: -2 just because I want it
31
-
32
- attr_accessor :content, :file_path
33
-
34
-
35
- def initialize(content: nil, file_path: nil)
36
- @content = content
37
- @file_path = file_path
38
- end
39
-
40
-
41
- def build_command(file_path)
42
- "#{self.class.meta[:name]} #{DEFAULT_PARAMETERS} #{Shellwords.escape(file_path)}"
43
- end
44
-
45
-
46
- def run
47
- return unless content || file_path
48
-
49
- if @file_path && File.exist?(@file_path)
50
- command = build_command(@file_path)
51
- system(command)
52
- else
53
- Tempfile.create(['glow', '.md']) do |file|
54
- file.write(@content)
55
- file.close
56
- command = build_command(file.path)
57
- system(command)
58
- end
59
- end
60
- end
61
- end
62
-
63
- __END__
64
-
65
- $ glow --help
66
-
67
- Render markdown on the CLI, with pizzazz!
68
-
69
- Usage:
70
- glow [SOURCE|DIR] [flags]
71
- glow [command]
72
-
73
- Available Commands:
74
- completion Generate the autocompletion script for the specified shell
75
- config Edit the glow config file
76
- help Help about any command
77
- stash Stash a markdown
78
-
79
- Flags:
80
- -a, --all show system files and directories (TUI-mode only)
81
- --config string config file (default /Users/dewayne/Library/Preferences/glow/glow.yml)
82
- -h, --help help for glow
83
- -l, --local show local files only; no network (TUI-mode only)
84
- -p, --pager display with pager
85
- -s, --style string style name or JSON path (default "auto")
86
- -v, --version version for glow
87
- -w, --width uint word-wrap at width
88
-
89
- Use "glow [command] --help" for more information about a command.
90
-
data/lib/aia/tools/llm.rb DELETED
@@ -1,77 +0,0 @@
1
- # lib/aia/tools/llm.rb
2
-
3
- require_relative 'backend_common'
4
-
5
- class AIA::Llm < AIA::Tools
6
- include AIA::BackendCommon
7
-
8
- meta(
9
- name: 'llm',
10
- role: :backend,
11
- desc: "llm on the command line using local and remote models",
12
- url: "https://llm.datasette.io/",
13
- install: "brew install llm",
14
- )
15
-
16
-
17
- DEFAULT_PARAMETERS = [
18
- # "--verbose", # enable verbose logging (if applicable)
19
- # Add default parameters here
20
- ].join(' ').freeze
21
-
22
- DIRECTIVES = %w[
23
- api_key
24
- frequency_penalty
25
- max_tokens
26
- model
27
- presence_penalty
28
- stop_sequence
29
- temperature
30
- top_p
31
- ]
32
- end
33
-
34
- __END__
35
-
36
- #########################################################
37
-
38
- llm, version 0.13.1
39
-
40
- Usage: llm [OPTIONS] COMMAND [ARGS]...
41
-
42
- Access large language models from the command-line
43
-
44
- Documentation: https://llm.datasette.io/
45
-
46
- To get started, obtain an OpenAI key and set it like this:
47
-
48
- $ llm keys set openai
49
- Enter key: ...
50
-
51
- Then execute a prompt like this:
52
-
53
- llm 'Five outrageous names for a pet pelican'
54
-
55
- Options:
56
- --version Show the version and exit.
57
- --help Show this message and exit.
58
-
59
- Commands:
60
- prompt* Execute a prompt
61
- aliases Manage model aliases
62
- chat Hold an ongoing chat with a model.
63
- collections View and manage collections of embeddings
64
- embed Embed text and store or return the result
65
- embed-models Manage available embedding models
66
- embed-multi Store embeddings for multiple strings at once
67
- install Install packages from PyPI into the same environment as LLM
68
- keys Manage stored API keys for different models
69
- logs Tools for exploring logged prompts and responses
70
- models Manage available models
71
- openai Commands for working directly with the OpenAI API
72
- plugins List installed plugins
73
- similar Return top N similar IDs from a collection
74
- templates Manage stored prompt templates
75
- uninstall Uninstall Python packages from the LLM environment
76
-
77
-
@@ -1,100 +0,0 @@
1
- # lib/aia/tools/mods.rb
2
-
3
- require_relative 'backend_common'
4
-
5
- class AIA::Mods < AIA::Tools
6
- include AIA::BackendCommon
7
-
8
- meta(
9
- name: 'mods',
10
- role: :backend,
11
- desc: 'GPT on the command line. Built for pipelines.',
12
- url: 'https://github.com/charmbracelet/mods',
13
- install: 'brew install mods',
14
- )
15
-
16
-
17
- DEFAULT_PARAMETERS = [
18
- # "--no-cache", # do not save prompt and response
19
- "--no-limit", # no limit on input context
20
- "--quiet", # Quiet mode (hide the spinner while loading and stderr messages for success).
21
- ].join(' ').freeze
22
-
23
-
24
- DIRECTIVES = %w[
25
- api
26
- ask-model
27
- continue
28
- continue-last
29
- fanciness
30
- format-as
31
- http-proxy
32
- max-retries
33
- max-retries
34
- max-tokens
35
- max-tokens
36
- model
37
- no-cache
38
- no-limit
39
- prompt
40
- prompt-args
41
- quiet
42
- raw
43
- status-text
44
- temp
45
- title
46
- topp
47
- word-wrap
48
- ]
49
- end
50
-
51
- __END__
52
-
53
-
54
- ##########################################################
55
-
56
- mods version 1.2.1 (Homebre)
57
-
58
- GPT on the command line. Built for pipelines.
59
-
60
- Usage:
61
- mods [OPTIONS] [PREFIX TERM]
62
-
63
- Options:
64
- -m, --model Default model (gpt-3.5-turbo, gpt-4, ggml-gpt4all-j...).
65
- -M, --ask-model Ask which model to use with an interactive prompt.
66
- -a, --api OpenAI compatible REST API (openai, localai).
67
- -x, --http-proxy HTTP proxy to use for API requests.
68
- -f, --format Ask for the response to be formatted as markdown unless otherwise set.
69
- --format-as
70
- -r, --raw Render output as raw text when connected to a TTY.
71
- -P, --prompt Include the prompt from the arguments and stdin, truncate stdin to specified number of lines.
72
- -p, --prompt-args Include the prompt from the arguments in the response.
73
- -c, --continue Continue from the last response or a given save title.
74
- -C, --continue-last Continue from the last response.
75
- -l, --list Lists saved conversations.
76
- -t, --title Saves the current conversation with the given title.
77
- -d, --delete Deletes a saved conversation with the given title or ID.
78
- --delete-older-than Deletes all saved conversations older than the specified duration. Valid units are: ns, us, µs, μs, ms, s, m, h, d, w, mo, and y.
79
- -s, --show Show a saved conversation with the given title or ID.
80
- -S, --show-last Show the last saved conversation.
81
- -q, --quiet Quiet mode (hide the spinner while loading and stderr messages for success).
82
- -h, --help Show help and exit.
83
- -v, --version Show version and exit.
84
- --max-retries Maximum number of times to retry API calls.
85
- --no-limit Turn off the client-side limit on the size of the input into the model.
86
- --max-tokens Maximum number of tokens in response.
87
- --word-wrap Wrap formatted output at specific width (default is 80)
88
- --temp Temperature (randomness) of results, from 0.0 to 2.0.
89
- --topp TopP, an alternative to temperature that narrows response, from 0.0 to 1.0.
90
- --fanciness Your desired level of fanciness.
91
- --status-text Text to show while generating.
92
- --no-cache Disables caching of the prompt/response.
93
- --reset-settings Backup your old settings file and reset everything to the defaults.
94
- --settings Open settings in your $EDITOR.
95
- --dirs Print the directories in which mods store its data
96
-
97
- Example:
98
- # Editorialize your video files
99
- ls ~/vids | mods -f "summarize each of these titles, group them by decade" | glow
100
-
@@ -1,79 +0,0 @@
1
- # lib/aia/tools/sgpt.rb
2
-
3
- require_relative 'backend_common'
4
-
5
- class AIA::Sgpt < AIA::Tools
6
- include AIA::BackendCommon
7
-
8
- meta(
9
- name: 'sgpt',
10
- role: :backend,
11
- desc: "shell-gpt",
12
- url: "https://github.com/TheR1D/shell_gpt",
13
- install: "pip install shell-gpt",
14
- )
15
-
16
-
17
- DEFAULT_PARAMETERS = [
18
- # "--verbose", # enable verbose logging (if applicable)
19
- # Add default parameters here
20
- ].join(' ').freeze
21
-
22
- DIRECTIVES = %w[
23
- model
24
- temperature
25
- max_tokens
26
- top_p
27
- frequency_penalty
28
- presence_penalty
29
- stop_sequence
30
- api_key
31
- ]
32
- end
33
-
34
- __END__
35
-
36
- Usage: sgpt [OPTIONS] [PROMPT]
37
-
38
- ╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────╮
39
- │ prompt [PROMPT] The prompt to generate completions for. │
40
- ╰─────────────────────────────────────────────────────────────────────────────────────────────╯
41
- ╭─ Options ───────────────────────────────────────────────────────────────────────────────────╮
42
- │ --model TEXT Large language model to use. │
43
- │ [default: gpt-3.5-turbo] │
44
- │ --temperature FLOAT RANGE [0.0<=x<=2.0] Randomness of generated │
45
- │ output. │
46
- │ [default: 0.1] │
47
- │ --top-probability FLOAT RANGE [0.1<=x<=1.0] Limits highest probable │
48
- │ tokens (words). │
49
- │ [default: 1.0] │
50
- │ --editor --no-editor Open $EDITOR to provide a │
51
- │ prompt. │
52
- │ [default: no-editor] │
53
- │ --cache --no-cache Cache completion results. │
54
- │ [default: cache] │
55
- │ --help Show this message and exit. │
56
- ╰─────────────────────────────────────────────────────────────────────────────────────────────╯
57
- ╭─ Assistance Options ────────────────────────────────────────────────────────────────────────╮
58
- │ --shell -s Generate and execute shell commands. │
59
- │ --describe-shell -d Describe a shell command. │
60
- │ --code --no-code Generate only code. [default: no-code] │
61
- ╰─────────────────────────────────────────────────────────────────────────────────────────────╯
62
- ╭─ Chat Options ──────────────────────────────────────────────────────────────────────────────╮
63
- │ --chat TEXT Follow conversation with id, use "temp" for quick │
64
- │ session. │
65
- │ [default: None] │
66
- │ --repl TEXT Start a REPL (Read–eval–print loop) session. │
67
- │ [default: None] │
68
- │ --show-chat TEXT Show all messages from provided chat id. │
69
- │ [default: None] │
70
- │ --list-chats --no-list-chats List all existing chat ids. │
71
- │ [default: no-list-chats] │
72
- ╰─────────────────────────────────────────────────────────────────────────────────────────────╯
73
- ╭─ Role Options ──────────────────────────────────────────────────────────────────────────────╮
74
- │ --role TEXT System role for GPT model. [default: None] │
75
- │ --create-role TEXT Create role. [default: None] │
76
- │ --show-role TEXT Show role. [default: None] │
77
- │ --list-roles --no-list-roles List roles. [default: no-list-roles] │
78
- ╰─────────────────────────────────────────────────────────────────────────────────────────────╯
79
-
@@ -1,68 +0,0 @@
1
- # lib/aia/tools/subl.rb
2
-
3
- class AIA::Subl < AIA::Tools
4
-
5
- meta(
6
- name: 'subl',
7
- role: :editor,
8
- desc: "Sublime Text Editor",
9
- url: "https://www.sublimetext.com/",
10
- install: "echo 'Download from website'",
11
- )
12
-
13
-
14
- DEFAULT_PARAMETERS = [
15
- "--new-window", # Open a new window
16
- "--wait", # Wait for the files to be closed before returning
17
- ].join(' ')
18
-
19
- attr_accessor :command
20
-
21
-
22
- def initialize(file: "")
23
- @file = file
24
-
25
- build_command
26
- end
27
-
28
-
29
- def build_command
30
- @command = "#{meta.name} #{DEFAULT_PARAMETERS} #{@file}"
31
- end
32
-
33
-
34
- def run
35
- `#{command}`
36
- end
37
- end
38
-
39
- __END__
40
-
41
- $ subl --help
42
- Sublime Text build 4166
43
-
44
- Usage: subl [arguments] [files] Edit the given files
45
- or: subl [arguments] [directories] Open the given directories
46
- or: subl [arguments] -- [files] Edit files that may start with '-'
47
- or: subl [arguments] - Edit stdin
48
- or: subl [arguments] - >out Edit stdin and write the edit to stdout
49
-
50
- Arguments:
51
- --project <project>: Load the given project
52
- --command <command>: Run the given command
53
- -n or --new-window: Open a new window
54
- --launch-or-new-window: Only open a new window if the application is open
55
- -a or --add: Add folders to the current window
56
- -w or --wait: Wait for the files to be closed before returning
57
- -b or --background: Don't activate the application
58
- -s or --stay: Keep the application activated after closing the file
59
- --safe-mode: Launch using a sandboxed (clean) environment
60
- -h or --help: Show help (this message) and exit
61
- -v or --version: Show version and exit
62
-
63
- --wait is implied if reading from stdin. Use --stay to not switch back
64
- to the terminal when a file is closed (only relevant if waiting for a file).
65
-
66
- Filenames may be given a :line or :line:column suffix
67
-
68
-