llm_meta_client 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +29 -0
  3. data/LICENSE +21 -0
  4. data/README.md +88 -0
  5. data/Rakefile +6 -0
  6. data/app/assets/stylesheets/llm_meta_client/application.css +15 -0
  7. data/app/controllers/llm_meta_client/application_controller.rb +4 -0
  8. data/app/helpers/llm_meta_client/application_helper.rb +4 -0
  9. data/app/jobs/llm_meta_client/application_job.rb +4 -0
  10. data/app/mailers/llm_meta_client/application_mailer.rb +6 -0
  11. data/app/models/llm_meta_client/application_record.rb +5 -0
  12. data/app/views/layouts/llm_meta_client/application.html.erb +17 -0
  13. data/config/routes.rb +2 -0
  14. data/lib/generators/llm_meta_client/authentication/authentication_generator.rb +74 -0
  15. data/lib/generators/llm_meta_client/authentication/templates/app/controllers/users/omniauth_callbacks_controller.rb +26 -0
  16. data/lib/generators/llm_meta_client/authentication/templates/app/controllers/users/sessions_controller.rb +13 -0
  17. data/lib/generators/llm_meta_client/authentication/templates/app/models/user.rb +50 -0
  18. data/lib/generators/llm_meta_client/authentication/templates/config/initializers/devise.rb +330 -0
  19. data/lib/generators/llm_meta_client/authentication/templates/config/initializers/omniauth.rb +4 -0
  20. data/lib/generators/llm_meta_client/authentication/templates/config/locales/devise.en.yml +65 -0
  21. data/lib/generators/llm_meta_client/authentication/templates/db/migrate/create_users.rb +14 -0
  22. data/lib/generators/llm_meta_client/scaffold/scaffold_generator.rb +97 -0
  23. data/lib/generators/llm_meta_client/scaffold/templates/app/controllers/chats_controller.rb +188 -0
  24. data/lib/generators/llm_meta_client/scaffold/templates/app/controllers/prompts_controller.rb +33 -0
  25. data/lib/generators/llm_meta_client/scaffold/templates/app/javascript/controllers/chat_title_edit_controller.js +99 -0
  26. data/lib/generators/llm_meta_client/scaffold/templates/app/javascript/controllers/chats_form_controller.js +95 -0
  27. data/lib/generators/llm_meta_client/scaffold/templates/app/javascript/controllers/llm_selector_controller.js +236 -0
  28. data/lib/generators/llm_meta_client/scaffold/templates/app/javascript/popover.js +34 -0
  29. data/lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb +154 -0
  30. data/lib/generators/llm_meta_client/scaffold/templates/app/models/message.rb +8 -0
  31. data/lib/generators/llm_meta_client/scaffold/templates/app/views/chats/_message.html.erb +16 -0
  32. data/lib/generators/llm_meta_client/scaffold/templates/app/views/chats/_messages_list.html.erb +9 -0
  33. data/lib/generators/llm_meta_client/scaffold/templates/app/views/chats/create.turbo_stream.erb +84 -0
  34. data/lib/generators/llm_meta_client/scaffold/templates/app/views/chats/edit.html.erb +74 -0
  35. data/lib/generators/llm_meta_client/scaffold/templates/app/views/chats/new.html.erb +62 -0
  36. data/lib/generators/llm_meta_client/scaffold/templates/app/views/chats/update.turbo_stream.erb +78 -0
  37. data/lib/generators/llm_meta_client/scaffold/templates/app/views/layouts/_header.html.erb +35 -0
  38. data/lib/generators/llm_meta_client/scaffold/templates/app/views/layouts/_sidebar.html.erb +27 -0
  39. data/lib/generators/llm_meta_client/scaffold/templates/app/views/layouts/application.html.erb +59 -0
  40. data/lib/generators/llm_meta_client/scaffold/templates/app/views/shared/_api_key_field.html.erb +15 -0
  41. data/lib/generators/llm_meta_client/scaffold/templates/app/views/shared/_family_field.html.erb +18 -0
  42. data/lib/generators/llm_meta_client/scaffold/templates/app/views/shared/_model_field.html.erb +12 -0
  43. data/lib/generators/llm_meta_client/scaffold/templates/config/initializers/llm_service.rb +10 -0
  44. data/lib/generators/llm_meta_client/scaffold/templates/db/migrate/create_chats.rb +13 -0
  45. data/lib/generators/llm_meta_client/scaffold/templates/db/migrate/create_messages.rb +12 -0
  46. data/lib/llm_meta_client/chat_manageable.rb +11 -0
  47. data/lib/llm_meta_client/engine.rb +11 -0
  48. data/lib/llm_meta_client/exceptions.rb +13 -0
  49. data/lib/llm_meta_client/helpers.rb +6 -0
  50. data/lib/llm_meta_client/history_manageable.rb +11 -0
  51. data/lib/llm_meta_client/server_query.rb +47 -0
  52. data/lib/llm_meta_client/server_resource.rb +127 -0
  53. data/lib/llm_meta_client/version.rb +3 -0
  54. data/lib/llm_meta_client.rb +13 -0
  55. data/lib/tasks/llm_meta_client_tasks.rake +4 -0
  56. metadata +162 -0
@@ -0,0 +1,10 @@
1
# frozen_string_literal: true

# LLM Service Configuration
# External LLM service base URL for API key and model management
Rails.application.configure do
  # Base URL for the LLM service.
  # Retrieved from environment variable LLM_SERVICE_BASE_URL; uses a
  # local-development default if not set.
  config.llm_service_base_url = ENV.fetch("LLM_SERVICE_BASE_URL", "http://localhost:3000")

  # Message count threshold for conversation summarization.
  # The default is a String so both branches of ENV.fetch yield the same type
  # before the to_i conversion (previously a String env value was mixed with
  # an Integer default).
  config.summarize_conversation_count = ENV.fetch("SUMMARIZE_CONVERSATION_COUNT", "10").to_i
end
@@ -0,0 +1,13 @@
1
# Migration template copied into the host app by the scaffold generator.
# Creates the chats table that backs the generated Chat model.
class CreateChats < ActiveRecord::Migration[8.1]
  def change
    create_table :chats do |t|
      # Nullable — presumably so unauthenticated (guest) chats can exist
      # without an owner; verify against the generated controllers.
      t.references :user, null: true, foreign_key: true
      # Public identifier of the chat.
      # NOTE(review): if uuid is used for lookups, consider adding a unique
      # index — TODO confirm against the Chat model template.
      t.string :uuid, null: false
      t.string :title
      # UUID of the selected LLM / API key on the external LLM service.
      t.string :llm_uuid
      # Identifier of the model chosen for this chat.
      t.string :model

      t.timestamps
    end
  end
end
@@ -0,0 +1,12 @@
1
# Migration template copied into the host app by the scaffold generator.
# Creates the messages table; each message belongs to a chat.
class CreateMessages < ActiveRecord::Migration[8.1]
  def change
    create_table :messages do |t|
      t.references :chat, null: false, foreign_key: true
      # t.references :parent, foreign_key: { to_table: :messages }, index: true, null: true
      # Optional link to the prompt execution (from the prompt_manager tables)
      # that produced this message.
      t.references :prompt_manager_prompt_execution, null: true, foreign_key: { to_table: :prompt_manager_prompt_executions }
      # Conversation role — presumably "user" / "assistant"; verify against
      # the generated Message model.
      t.string :role

      t.timestamps
    end
  end
end
@@ -0,0 +1,11 @@
1
require "chat_manager"

module LlmMetaClient
  # Thin adapter concern: exposes ChatManager's ChatManageable mixin under
  # this gem's namespace so host-app models depend only on LlmMetaClient.
  module ChatManageable
    extend ActiveSupport::Concern

    included do
      # Delegate all behavior to the companion gem's concern.
      include ChatManager::ChatManageable
    end
  end
end
@@ -0,0 +1,11 @@
1
module LlmMetaClient
  # Rails engine entry point. Isolates the LlmMetaClient namespace so the
  # engine's models/controllers/routes do not collide with the host app.
  class Engine < ::Rails::Engine
    isolate_namespace LlmMetaClient

    # Mix the gem's aggregated view helpers into ActionView once it loads,
    # making them available in all host-application views.
    initializer "llm_meta_client.helpers" do
      ActiveSupport.on_load(:action_view) do
        include LlmMetaClient::Helpers
      end
    end
  end
end
@@ -0,0 +1,13 @@
1
module LlmMetaClient
  # Error types raised when talking to the external LLM service.
  module Exceptions
    # Raised when the LLM service reports no Ollama models; carries an
    # operator-facing default message.
    class OllamaUnavailableError < StandardError
      def initialize(msg = "Ollama is not available in LLM service. Please contact the administrator.")
        super
      end
    end

    # Raised when the LLM server responds with a non-success HTTP status.
    class ServerError < StandardError
    end

    # Raised when the LLM server body is not the expected JSON object.
    class InvalidResponseError < StandardError
    end

    # Raised when the LLM server returns a success response with no content.
    class EmptyResponseError < StandardError
    end
  end
end
@@ -0,0 +1,6 @@
1
module LlmMetaClient
  # Aggregates the view helpers of the companion gems (prompt_navigator and
  # chat_manager) into one module; the engine includes this into ActionView.
  module Helpers
    include PromptNavigator::Helpers
    include ChatManager::Helpers
  end
end
@@ -0,0 +1,11 @@
1
require "prompt_navigator"

module LlmMetaClient
  # Thin adapter concern: exposes PromptNavigator's HistoryManageable mixin
  # under this gem's namespace so host-app models depend only on LlmMetaClient.
  module HistoryManageable
    extend ActiveSupport::Concern

    included do
      # Delegate all behavior to the companion gem's concern.
      include PromptNavigator::HistoryManageable
    end
  end
end
@@ -0,0 +1,47 @@
1
module LlmMetaClient
  # Sends a single chat request to the external LLM service and extracts the
  # generated message from the JSON response. The endpoint is built from
  # `Rails.application.config.llm_service_base_url`.
  class ServerQuery
    # Performs the request and returns the LLM's message text.
    #
    # id_token     - Bearer token for the current user; blank for guests, in
    #                which case no Authorization header is sent.
    # api_key_uuid - UUID of the API key resource on the LLM service.
    # model_id     - Identifier of the model to query.
    # context      - Conversation context prepended to the prompt.
    # user_content - The user's prompt text.
    #
    # Raises Exceptions::ServerError on a non-success HTTP status,
    # Exceptions::InvalidResponseError when the body is not a JSON object, and
    # Exceptions::EmptyResponseError when the extracted message is blank.
    def call(id_token, api_key_uuid, model_id, context, user_content)
      debug_log "Context: #{context}"
      context_and_user_content = "Context:#{context}, User Prompt: #{user_content}"
      debug_log "Request to LLM: \n===>\n#{context_and_user_content}\n===>"

      response = request(api_key_uuid, id_token, model_id, context_and_user_content)

      raise Exceptions::ServerError, "LLM server returned HTTP #{response.code}" unless response.success?

      response_body = response.parsed_response

      raise Exceptions::InvalidResponseError, "LLM server returned non-JSON response" unless response_body.is_a?(Hash)

      content = response_body.dig("response", "message") || ""

      raise Exceptions::EmptyResponseError, "LLM server returned empty response" if content.blank?

      debug_log "Response from LLM: \n<===\n#{content}\n<==>"

      content
    end

    private

    # Logs prompt/response payloads in development only, keeping conversation
    # content out of production logs.
    def debug_log(message)
      Rails.logger.info(message) if Rails.env.development?
    end

    # POSTs the prompt to the LLM service and returns the raw HTTParty
    # response object.
    def request(api_key_uuid, id_token, model_id, user_content)
      headers = { "Content-Type" => "application/json" }
      headers["Authorization"] = "Bearer #{id_token}" if id_token.present?

      HTTParty.post(
        url(api_key_uuid, model_id),
        headers: headers,
        # to_s keeps the original implicit string coercion (was a redundant
        # "#{user_content}" interpolation).
        body: { prompt: user_content.to_s }.to_json,
        timeout: 300 # 5 minute timeout setting (both read and connect)
      )
    end

    # Builds the chats endpoint URL for the given API key and model.
    def url(api_key_uuid, model_id)
      "#{Rails.application.config.llm_service_base_url}/api/llm_api_keys/#{api_key_uuid}/models/#{model_id}/chats"
    end
  end
end
@@ -0,0 +1,127 @@
1
module LlmMetaClient
  # Non-persisted model for fetching LLM and API-key resources from the
  # external LLM service over HTTP.
  class ServerResource
    # Human-friendly display names for the known provider families.
    FAMILY_DISPLAY_NAMES = {
      "openai" => "OpenAI",
      "anthropic" => "Anthropic",
      "google" => "Google",
      "ollama" => "Ollama"
    }.freeze

    # Resource keys exposed to callers; shared by `format` and
    # `format_api_key` (previously duplicated in both methods).
    COMMON_KEYS = %w[uuid description llm_type available_models].freeze

    class << self
      # Retrieve LLM options available for user selection (API Keys + Ollama).
      # For guest users (no jwt_token), only Ollama is returned.
      #
      # Raises Exceptions::OllamaUnavailableError when no option at all is
      # available (guest with no Ollama, or user with neither keys nor Ollama).
      def available_llm_options(jwt_token)
        # Guest users: Ollama is required — return only Ollama.
        return format ollama_options if jwt_token.blank?

        # Logged-in user: return API Keys + Ollama (if available).
        options = llm_api_keys jwt_token

        # Try to add Ollama, but don't fail if unavailable.
        begin
          options.concat ollama_options
        rescue LlmMetaClient::Exceptions::OllamaUnavailableError => e
          Rails.logger.warn "Ollama unavailable: #{e.message}"
          # Continue with API Keys only if at least one is available.
          raise e if options.empty?
        end

        format options
      end

      # Retrieve LLM families with their API keys grouped by llm_type.
      # Returns: [{name:, llm_type:, api_keys: [{uuid:, description:, available_models:}]}]
      def available_llm_families(jwt_token)
        if jwt_token.blank?
          # Guest users: only Ollama (ollama_options raises if unavailable).
          return build_families(ollama_options, [])
        end

        api_keys = llm_api_keys(jwt_token)

        ollama_opts = begin
          ollama_options
        rescue LlmMetaClient::Exceptions::OllamaUnavailableError => e
          Rails.logger.warn "Ollama unavailable: #{e.message}"
          raise e if api_keys.empty?
          []
        end

        build_families(ollama_opts, api_keys)
      end

      private

      # Assembles family hashes from the user's API keys (grouped by llm_type)
      # plus an Ollama family when any Ollama model is available.
      def build_families(ollama_opts, api_keys)
        # Group user API keys by llm_type.
        families = api_keys.group_by { it["llm_type"] }.map do |llm_type, keys|
          {
            # to_s guards against resources missing llm_type (capitalize on
            # nil would raise NoMethodError).
            name: FAMILY_DISPLAY_NAMES[llm_type] || llm_type.to_s.capitalize,
            llm_type: llm_type,
            api_keys: keys.map { format_api_key(it) }
          }
        end

        # Add Ollama family if available.
        if ollama_opts.present?
          families << {
            name: FAMILY_DISPLAY_NAMES["ollama"],
            llm_type: "ollama",
            api_keys: ollama_opts.map { format_api_key(it) }
          }
        end

        families
      end

      # Normalizes a single resource hash to the shared key set.
      def format_api_key(resource)
        resource.slice(*COMMON_KEYS).symbolize_keys
      end

      # Returns the Ollama entries from the service's LLM list.
      # Raises Exceptions::OllamaUnavailableError when none exist.
      def ollama_options
        ollama_list = llms.filter { it["family"] == "ollama" }
        raise LlmMetaClient::Exceptions::OllamaUnavailableError if ollama_list.empty?
        ollama_list
      end

      # Builds normalized option hashes from an array of resources by slicing
      # the shared keys. Accepts only arrays.
      # NOTE: intentionally shadows Kernel#format within this singleton class.
      def format(resources)
        resources.map { it.slice(*COMMON_KEYS).symbolize_keys }
      end

      # Fetches the service-wide LLM list; returns [] (and logs) on failure.
      def llms
        api_url = "#{Rails.configuration.llm_service_base_url}/api/llms"
        headers = { "Content-Type" => "application/json" }

        response = HTTParty.get api_url, headers: headers

        if response.success?
          response.parsed_response["llms"] || []
        else
          Rails.logger.error "Failed to fetch LLMs: HTTP #{response.code}"
          []
        end
      end

      # Fetches the authenticated user's API keys; returns [] (and logs) on
      # failure.
      def llm_api_keys(jwt_token)
        api_url = "#{Rails.configuration.llm_service_base_url}/api/llm_api_keys"
        headers = { "Content-Type" => "application/json", "Authorization" => "Bearer #{jwt_token}" }

        response = HTTParty.get api_url, headers: headers

        if response.success?
          response.parsed_response["llm_api_keys"] || []
        else
          Rails.logger.error "Failed to fetch LLM API keys: HTTP #{response.code}"
          []
        end
      end
    end
  end
end
@@ -0,0 +1,3 @@
1
module LlmMetaClient
  # Gem version, referenced by the gemspec.
  VERSION = "0.1.0"
end
@@ -0,0 +1,13 @@
1
# Gem entry point. Load order matters: the companion gems (prompt_navigator,
# chat_manager) must be available before the mixins that delegate to them.
%w[
  llm_meta_client/version
  llm_meta_client/engine
  prompt_navigator
  chat_manager
  llm_meta_client/helpers
  llm_meta_client/history_manageable
  llm_meta_client/chat_manageable
  llm_meta_client/exceptions
  llm_meta_client/server_resource
  llm_meta_client/server_query
].each { |feature| require feature }

# Root namespace for the gem.
module LlmMetaClient
end
@@ -0,0 +1,4 @@
1
+ # desc "Explaining what the task does"
2
+ # task :llm_meta_client do
3
+ # # Task goes here
4
+ # end
metadata ADDED
@@ -0,0 +1,162 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: llm_meta_client
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - dhq_boiler
8
+ bindir: bin
9
+ cert_chain: []
10
+ date: 1980-01-02 00:00:00.000000000 Z
11
+ dependencies:
12
+ - !ruby/object:Gem::Dependency
13
+ name: rails
14
+ requirement: !ruby/object:Gem::Requirement
15
+ requirements:
16
+ - - "~>"
17
+ - !ruby/object:Gem::Version
18
+ version: '8.1'
19
+ - - ">="
20
+ - !ruby/object:Gem::Version
21
+ version: 8.1.1
22
+ type: :runtime
23
+ prerelease: false
24
+ version_requirements: !ruby/object:Gem::Requirement
25
+ requirements:
26
+ - - "~>"
27
+ - !ruby/object:Gem::Version
28
+ version: '8.1'
29
+ - - ">="
30
+ - !ruby/object:Gem::Version
31
+ version: 8.1.1
32
+ - !ruby/object:Gem::Dependency
33
+ name: httparty
34
+ requirement: !ruby/object:Gem::Requirement
35
+ requirements:
36
+ - - "~>"
37
+ - !ruby/object:Gem::Version
38
+ version: '0.22'
39
+ type: :runtime
40
+ prerelease: false
41
+ version_requirements: !ruby/object:Gem::Requirement
42
+ requirements:
43
+ - - "~>"
44
+ - !ruby/object:Gem::Version
45
+ version: '0.22'
46
+ - !ruby/object:Gem::Dependency
47
+ name: prompt_navigator
48
+ requirement: !ruby/object:Gem::Requirement
49
+ requirements:
50
+ - - "~>"
51
+ - !ruby/object:Gem::Version
52
+ version: '0.1'
53
+ type: :runtime
54
+ prerelease: false
55
+ version_requirements: !ruby/object:Gem::Requirement
56
+ requirements:
57
+ - - "~>"
58
+ - !ruby/object:Gem::Version
59
+ version: '0.1'
60
+ - !ruby/object:Gem::Dependency
61
+ name: chat_manager
62
+ requirement: !ruby/object:Gem::Requirement
63
+ requirements:
64
+ - - "~>"
65
+ - !ruby/object:Gem::Version
66
+ version: '0.1'
67
+ type: :runtime
68
+ prerelease: false
69
+ version_requirements: !ruby/object:Gem::Requirement
70
+ requirements:
71
+ - - "~>"
72
+ - !ruby/object:Gem::Version
73
+ version: '0.1'
74
+ description: llm_meta_client provides a Rails Engine with scaffold and authentication
75
+ generators for building LLM-powered chat applications. Supports OpenAI, Anthropic,
76
+ Google, and Ollama providers.
77
+ email:
78
+ - dhq_boiler@live.jp
79
+ executables: []
80
+ extensions: []
81
+ extra_rdoc_files: []
82
+ files:
83
+ - CHANGELOG.md
84
+ - LICENSE
85
+ - README.md
86
+ - Rakefile
87
+ - app/assets/stylesheets/llm_meta_client/application.css
88
+ - app/controllers/llm_meta_client/application_controller.rb
89
+ - app/helpers/llm_meta_client/application_helper.rb
90
+ - app/jobs/llm_meta_client/application_job.rb
91
+ - app/mailers/llm_meta_client/application_mailer.rb
92
+ - app/models/llm_meta_client/application_record.rb
93
+ - app/views/layouts/llm_meta_client/application.html.erb
94
+ - config/routes.rb
95
+ - lib/generators/llm_meta_client/authentication/authentication_generator.rb
96
+ - lib/generators/llm_meta_client/authentication/templates/app/controllers/users/omniauth_callbacks_controller.rb
97
+ - lib/generators/llm_meta_client/authentication/templates/app/controllers/users/sessions_controller.rb
98
+ - lib/generators/llm_meta_client/authentication/templates/app/models/user.rb
99
+ - lib/generators/llm_meta_client/authentication/templates/config/initializers/devise.rb
100
+ - lib/generators/llm_meta_client/authentication/templates/config/initializers/omniauth.rb
101
+ - lib/generators/llm_meta_client/authentication/templates/config/locales/devise.en.yml
102
+ - lib/generators/llm_meta_client/authentication/templates/db/migrate/create_users.rb
103
+ - lib/generators/llm_meta_client/scaffold/scaffold_generator.rb
104
+ - lib/generators/llm_meta_client/scaffold/templates/app/controllers/chats_controller.rb
105
+ - lib/generators/llm_meta_client/scaffold/templates/app/controllers/prompts_controller.rb
106
+ - lib/generators/llm_meta_client/scaffold/templates/app/javascript/controllers/chat_title_edit_controller.js
107
+ - lib/generators/llm_meta_client/scaffold/templates/app/javascript/controllers/chats_form_controller.js
108
+ - lib/generators/llm_meta_client/scaffold/templates/app/javascript/controllers/llm_selector_controller.js
109
+ - lib/generators/llm_meta_client/scaffold/templates/app/javascript/popover.js
110
+ - lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb
111
+ - lib/generators/llm_meta_client/scaffold/templates/app/models/message.rb
112
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/chats/_message.html.erb
113
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/chats/_messages_list.html.erb
114
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/chats/create.turbo_stream.erb
115
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/chats/edit.html.erb
116
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/chats/new.html.erb
117
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/chats/update.turbo_stream.erb
118
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/layouts/_header.html.erb
119
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/layouts/_sidebar.html.erb
120
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/layouts/application.html.erb
121
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/shared/_api_key_field.html.erb
122
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/shared/_family_field.html.erb
123
+ - lib/generators/llm_meta_client/scaffold/templates/app/views/shared/_model_field.html.erb
124
+ - lib/generators/llm_meta_client/scaffold/templates/config/initializers/llm_service.rb
125
+ - lib/generators/llm_meta_client/scaffold/templates/db/migrate/create_chats.rb
126
+ - lib/generators/llm_meta_client/scaffold/templates/db/migrate/create_messages.rb
127
+ - lib/llm_meta_client.rb
128
+ - lib/llm_meta_client/chat_manageable.rb
129
+ - lib/llm_meta_client/engine.rb
130
+ - lib/llm_meta_client/exceptions.rb
131
+ - lib/llm_meta_client/helpers.rb
132
+ - lib/llm_meta_client/history_manageable.rb
133
+ - lib/llm_meta_client/server_query.rb
134
+ - lib/llm_meta_client/server_resource.rb
135
+ - lib/llm_meta_client/version.rb
136
+ - lib/tasks/llm_meta_client_tasks.rake
137
+ homepage: https://github.com/dhq-boiler/llm_meta_client
138
+ licenses:
139
+ - MIT
140
+ metadata:
141
+ allowed_push_host: https://rubygems.org
142
+ homepage_uri: https://github.com/dhq-boiler/llm_meta_client
143
+ source_code_uri: https://github.com/dhq-boiler/llm_meta_client/tree/main
144
+ changelog_uri: https://github.com/dhq-boiler/llm_meta_client/blob/main/CHANGELOG.md
145
+ rdoc_options: []
146
+ require_paths:
147
+ - lib
148
+ required_ruby_version: !ruby/object:Gem::Requirement
149
+ requirements:
150
+ - - ">="
151
+ - !ruby/object:Gem::Version
152
+ version: '3.4'
153
+ required_rubygems_version: !ruby/object:Gem::Requirement
154
+ requirements:
155
+ - - ">="
156
+ - !ruby/object:Gem::Version
157
+ version: '0'
158
+ requirements: []
159
+ rubygems_version: 3.6.9
160
+ specification_version: 4
161
+ summary: A Rails Engine for integrating multiple LLM providers into your application.
162
+ test_files: []