rubyllm-observ 0.6.5 → 0.6.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +319 -2
  3. data/app/assets/javascripts/observ/controllers/config_editor_controller.js +178 -0
  4. data/app/assets/javascripts/observ/controllers/index.js +29 -0
  5. data/app/assets/javascripts/observ/controllers/message_form_controller.js +24 -2
  6. data/app/assets/stylesheets/observ/_chat.scss +199 -0
  7. data/app/assets/stylesheets/observ/_config_editor.scss +119 -0
  8. data/app/assets/stylesheets/observ/application.scss +1 -0
  9. data/app/controllers/observ/dataset_items_controller.rb +2 -2
  10. data/app/controllers/observ/dataset_runs_controller.rb +1 -1
  11. data/app/controllers/observ/datasets_controller.rb +2 -2
  12. data/app/controllers/observ/messages_controller.rb +5 -1
  13. data/app/controllers/observ/prompts_controller.rb +11 -3
  14. data/app/controllers/observ/scores_controller.rb +1 -1
  15. data/app/controllers/observ/traces_controller.rb +1 -1
  16. data/app/helpers/observ/application_helper.rb +1 -0
  17. data/app/helpers/observ/markdown_helper.rb +29 -0
  18. data/app/helpers/observ/prompts_helper.rb +48 -0
  19. data/app/jobs/observ/moderation_guardrail_job.rb +115 -0
  20. data/app/models/observ/embedding.rb +45 -0
  21. data/app/models/observ/image_generation.rb +38 -0
  22. data/app/models/observ/moderation.rb +40 -0
  23. data/app/models/observ/null_prompt.rb +49 -2
  24. data/app/models/observ/observation.rb +3 -1
  25. data/app/models/observ/session.rb +33 -0
  26. data/app/models/observ/trace.rb +90 -4
  27. data/app/models/observ/transcription.rb +38 -0
  28. data/app/services/observ/chat_instrumenter.rb +96 -6
  29. data/app/services/observ/concerns/observable_service.rb +108 -3
  30. data/app/services/observ/embedding_instrumenter.rb +193 -0
  31. data/app/services/observ/guardrail_service.rb +9 -0
  32. data/app/services/observ/image_generation_instrumenter.rb +243 -0
  33. data/app/services/observ/moderation_guardrail_service.rb +235 -0
  34. data/app/services/observ/moderation_instrumenter.rb +141 -0
  35. data/app/services/observ/transcription_instrumenter.rb +187 -0
  36. data/app/views/layouts/observ/application.html.erb +1 -1
  37. data/app/views/observ/chats/show.html.erb +9 -0
  38. data/app/views/observ/messages/_message.html.erb +1 -1
  39. data/app/views/observ/messages/create.turbo_stream.erb +1 -3
  40. data/app/views/observ/prompts/_config_editor.html.erb +115 -0
  41. data/app/views/observ/prompts/_form.html.erb +2 -13
  42. data/app/views/observ/prompts/_new_form.html.erb +2 -12
  43. data/lib/generators/observ/install_chat/templates/jobs/chat_response_job.rb.tt +9 -3
  44. data/lib/observ/configuration.rb +0 -2
  45. data/lib/observ/engine.rb +7 -0
  46. data/lib/observ/version.rb +1 -1
  47. metadata +31 -1
@@ -0,0 +1,141 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Observ
4
+ class ModerationInstrumenter
5
+ attr_reader :session, :context
6
+
7
+ def initialize(session, context: {})
8
+ @session = session
9
+ @context = context
10
+ @original_moderate_method = nil
11
+ @instrumented = false
12
+ end
13
+
14
+ def instrument!
15
+ return if @instrumented
16
+
17
+ wrap_moderate_method
18
+ @instrumented = true
19
+
20
+ Rails.logger.info "[Observability] Instrumented RubyLLM.moderate for session #{session.session_id}"
21
+ end
22
+
23
+ def uninstrument!
24
+ return unless @instrumented
25
+ return unless @original_moderate_method
26
+
27
+ RubyLLM.define_singleton_method(:moderate, @original_moderate_method)
28
+ @instrumented = false
29
+
30
+ Rails.logger.info "[Observability] Uninstrumented RubyLLM.moderate"
31
+ end
32
+
33
+ private
34
+
35
+ def wrap_moderate_method
36
+ return if @original_moderate_method
37
+
38
+ @original_moderate_method = RubyLLM.method(:moderate)
39
+ instrumenter = self
40
+
41
+ RubyLLM.define_singleton_method(:moderate) do |*args, **kwargs|
42
+ instrumenter.send(:handle_moderate_call, args, kwargs)
43
+ end
44
+ end
45
+
46
+ def handle_moderate_call(args, kwargs)
47
+ text = args[0]
48
+ model_id = kwargs[:model] || default_moderation_model
49
+
50
+ trace = session.create_trace(
51
+ name: "moderation",
52
+ input: { text: text&.truncate(500) },
53
+ metadata: @context.merge(
54
+ model: model_id
55
+ ).compact
56
+ )
57
+
58
+ moderation_obs = trace.create_moderation(
59
+ name: "moderate",
60
+ model: model_id,
61
+ metadata: {}
62
+ )
63
+
64
+ result = @original_moderate_method.call(*args, **kwargs)
65
+
66
+ finalize_moderation(moderation_obs, result, text)
67
+ trace.finalize(
68
+ output: format_output(result),
69
+ metadata: extract_trace_metadata(result)
70
+ )
71
+
72
+ result
73
+ rescue StandardError => e
74
+ handle_error(e, trace, moderation_obs)
75
+ raise
76
+ end
77
+
78
+ def finalize_moderation(moderation_obs, result, text)
79
+ moderation_obs.finalize(
80
+ output: format_output(result),
81
+ usage: {},
82
+ cost_usd: 0.0 # Moderation is typically free
83
+ )
84
+
85
+ moderation_obs.update!(
86
+ input: text&.truncate(1000),
87
+ metadata: moderation_obs.metadata.merge(
88
+ flagged: result.flagged?,
89
+ categories: result.categories,
90
+ category_scores: result.category_scores,
91
+ flagged_categories: result.flagged_categories
92
+ ).compact
93
+ )
94
+ end
95
+
96
+ def format_output(result)
97
+ {
98
+ model: result.model,
99
+ flagged: result.flagged?,
100
+ flagged_categories: result.flagged_categories,
101
+ id: result.respond_to?(:id) ? result.id : nil
102
+ }.compact
103
+ end
104
+
105
+ def extract_trace_metadata(result)
106
+ {
107
+ flagged: result.flagged?,
108
+ flagged_categories_count: result.flagged_categories&.count || 0
109
+ }.compact
110
+ end
111
+
112
+ def default_moderation_model
113
+ if RubyLLM.config.respond_to?(:default_moderation_model)
114
+ RubyLLM.config.default_moderation_model
115
+ else
116
+ "omni-moderation-latest"
117
+ end
118
+ end
119
+
120
+ def handle_error(error, trace, moderation_obs)
121
+ return unless trace
122
+
123
+ error_span = trace.create_span(
124
+ name: "error",
125
+ metadata: {
126
+ error_type: error.class.name,
127
+ level: "ERROR"
128
+ },
129
+ input: {
130
+ error_message: error.message,
131
+ backtrace: error.backtrace&.first(10)
132
+ }.to_json
133
+ )
134
+ error_span.finalize(output: { error_captured: true }.to_json)
135
+
136
+ moderation_obs&.update(status_message: "FAILED") rescue nil
137
+
138
+ Rails.logger.error "[Observability] Moderation error captured: #{error.class.name} - #{error.message}"
139
+ end
140
+ end
141
+ end
@@ -0,0 +1,187 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Observ
4
+ class TranscriptionInstrumenter
5
+ attr_reader :session, :context
6
+
7
+ def initialize(session, context: {})
8
+ @session = session
9
+ @context = context
10
+ @original_transcribe_method = nil
11
+ @instrumented = false
12
+ end
13
+
14
+ def instrument!
15
+ return if @instrumented
16
+
17
+ wrap_transcribe_method
18
+ @instrumented = true
19
+
20
+ Rails.logger.info "[Observability] Instrumented RubyLLM.transcribe for session #{session.session_id}"
21
+ end
22
+
23
+ def uninstrument!
24
+ return unless @instrumented
25
+ return unless @original_transcribe_method
26
+
27
+ RubyLLM.define_singleton_method(:transcribe, @original_transcribe_method)
28
+ @instrumented = false
29
+
30
+ Rails.logger.info "[Observability] Uninstrumented RubyLLM.transcribe"
31
+ end
32
+
33
+ private
34
+
35
+ def wrap_transcribe_method
36
+ return if @original_transcribe_method
37
+
38
+ @original_transcribe_method = RubyLLM.method(:transcribe)
39
+ instrumenter = self
40
+
41
+ RubyLLM.define_singleton_method(:transcribe) do |*args, **kwargs|
42
+ instrumenter.send(:handle_transcribe_call, args, kwargs)
43
+ end
44
+ end
45
+
46
+ def handle_transcribe_call(args, kwargs)
47
+ audio_path = args[0]
48
+ model_id = kwargs[:model] || default_transcription_model
49
+ language = kwargs[:language]
50
+
51
+ trace = session.create_trace(
52
+ name: "transcription",
53
+ input: { audio_path: audio_path.to_s },
54
+ metadata: @context.merge(
55
+ model: model_id,
56
+ language: language
57
+ ).compact
58
+ )
59
+
60
+ transcription_obs = trace.create_transcription(
61
+ name: "transcribe",
62
+ model: model_id,
63
+ metadata: {
64
+ language: language,
65
+ has_diarization: kwargs[:speaker_names].present?
66
+ }.compact
67
+ )
68
+
69
+ result = @original_transcribe_method.call(*args, **kwargs)
70
+
71
+ finalize_transcription(transcription_obs, result)
72
+ trace.finalize(
73
+ output: format_output(result),
74
+ metadata: extract_trace_metadata(result)
75
+ )
76
+
77
+ result
78
+ rescue StandardError => e
79
+ handle_error(e, trace, transcription_obs)
80
+ raise
81
+ end
82
+
83
+ def finalize_transcription(transcription_obs, result)
84
+ cost = calculate_cost(result)
85
+
86
+ transcription_obs.finalize(
87
+ output: format_output(result),
88
+ usage: {},
89
+ cost_usd: cost
90
+ )
91
+
92
+ transcription_obs.update!(
93
+ input: result.text&.truncate(1000),
94
+ metadata: transcription_obs.metadata.merge(
95
+ audio_duration_s: result.duration,
96
+ language: result.respond_to?(:language) ? result.language : nil,
97
+ segments_count: result.segments&.count || 0,
98
+ speakers_count: extract_speakers_count(result),
99
+ has_diarization: has_diarization?(result)
100
+ ).compact
101
+ )
102
+ end
103
+
104
+ def calculate_cost(result)
105
+ model_id = result.model
106
+ return 0.0 unless model_id
107
+
108
+ model_info = RubyLLM.models.find(model_id)
109
+ return 0.0 unless model_info
110
+
111
+ duration_minutes = (result.duration || 0) / 60.0
112
+
113
+ # Transcription models typically use per-minute pricing
114
+ if model_info.respond_to?(:audio_price_per_minute) && model_info.audio_price_per_minute
115
+ (duration_minutes * model_info.audio_price_per_minute).round(6)
116
+ elsif model_info.respond_to?(:input_price_per_million) && model_info.input_price_per_million
117
+ # Fallback: some models might use token-based pricing
118
+ # Estimate ~150 tokens per minute of audio
119
+ estimated_tokens = duration_minutes * 150
120
+ (estimated_tokens * model_info.input_price_per_million / 1_000_000.0).round(6)
121
+ else
122
+ 0.0
123
+ end
124
+ rescue StandardError => e
125
+ Rails.logger.warn "[Observability] Failed to calculate transcription cost: #{e.message}"
126
+ 0.0
127
+ end
128
+
129
+ def extract_speakers_count(result)
130
+ return nil unless has_diarization?(result)
131
+ return nil unless result.segments
132
+
133
+ result.segments.map { |s| s.respond_to?(:speaker) ? s.speaker : nil }.compact.uniq.count
134
+ end
135
+
136
+ def has_diarization?(result)
137
+ return false unless result.segments&.any?
138
+
139
+ result.segments.first.respond_to?(:speaker)
140
+ end
141
+
142
+ def format_output(result)
143
+ {
144
+ model: result.model,
145
+ text_length: result.text&.length || 0,
146
+ duration_s: result.duration,
147
+ segments_count: result.segments&.count || 0
148
+ }.compact
149
+ end
150
+
151
+ def extract_trace_metadata(result)
152
+ {
153
+ audio_duration_s: result.duration,
154
+ language: result.respond_to?(:language) ? result.language : nil
155
+ }.compact
156
+ end
157
+
158
+ def default_transcription_model
159
+ if RubyLLM.config.respond_to?(:default_transcription_model)
160
+ RubyLLM.config.default_transcription_model
161
+ else
162
+ "whisper-1"
163
+ end
164
+ end
165
+
166
+ def handle_error(error, trace, transcription_obs)
167
+ return unless trace
168
+
169
+ error_span = trace.create_span(
170
+ name: "error",
171
+ metadata: {
172
+ error_type: error.class.name,
173
+ level: "ERROR"
174
+ },
175
+ input: {
176
+ error_message: error.message,
177
+ backtrace: error.backtrace&.first(10)
178
+ }.to_json
179
+ )
180
+ error_span.finalize(output: { error_captured: true }.to_json)
181
+
182
+ transcription_obs&.update(status_message: "FAILED") rescue nil
183
+
184
+ Rails.logger.error "[Observability] Transcription error captured: #{error.class.name} - #{error.message}"
185
+ end
186
+ end
187
+ end
@@ -86,7 +86,7 @@
86
86
  <div class="observ-sidebar__footer">
87
87
  <%= link_to main_app.instance_exec(&Observ.config.back_to_app_path), class: "observ-sidebar__back-link" do %>
88
88
  <span>&larr;</span>
89
- <span><%= Observ.config.back_to_app_label %></span>
89
+ <span>Back to App</span>
90
90
  <% end %>
91
91
  </div>
92
92
  </aside>
@@ -19,6 +19,15 @@
19
19
  <%= render message %>
20
20
  <% end %>
21
21
  </div>
22
+
23
+ <div id="typing-indicator" class="observ-typing-indicator" style="display: none;">
24
+ <div class="observ-typing-indicator__dots">
25
+ <span></span>
26
+ <span></span>
27
+ <span></span>
28
+ </div>
29
+ <span class="observ-typing-indicator__text">AI is thinking...</span>
30
+ </div>
22
31
 
23
32
  <div class="observ-form-separator">
24
33
  <%= render "observ/messages/form", chat: @chat, message: @message %>
@@ -5,7 +5,7 @@
5
5
  </div>
6
6
 
7
7
  <div id="message_<%= message.id %>_content" class="observ-chat-message__content">
8
- <%= simple_format(message.content) %>
8
+ <%= render_markdown(message.content) %>
9
9
  </div>
10
10
 
11
11
  <% if message.tool_call? %>
@@ -1,7 +1,5 @@
1
1
  <%= turbo_stream.append "messages" do %>
2
- <% @chat.messages.last(2).each do |message| %>
3
- <%= render message %>
4
- <% end %>
2
+ <%= render @message %>
5
3
  <% end %>
6
4
 
7
5
  <%= turbo_stream.replace "new_message" do %>
@@ -0,0 +1,115 @@
1
+ <%#
2
+ Config Editor Partial
3
+
4
+ Provides a hybrid interface for editing prompt configuration:
5
+ - Structured fields for common settings (model, temperature, max_tokens)
6
+ - Collapsible advanced section for raw JSON editing
7
+
8
+ Local variables:
9
+ - prompt: The Observ::Prompt record or PromptForm being edited
10
+ - f: The form builder
11
+ %>
12
+
13
+ <% config = prompt_config_hash(prompt) %>
14
+ <% config_json = config.present? ? JSON.pretty_generate(config) : "" %>
15
+
16
+ <div class="observ-config-editor"
17
+ data-controller="observ--config-editor"
18
+ data-observ--config-editor-known-keys-value='["model", "temperature", "max_tokens"]'>
19
+
20
+ <!-- Common Settings -->
21
+ <fieldset class="observ-config-editor__fieldset">
22
+ <legend class="observ-config-editor__legend">Model Settings</legend>
23
+
24
+ <!-- Model Select -->
25
+ <div class="observ-config-editor__row">
26
+ <label class="observ-form__label" for="config_model">Model</label>
27
+ <select id="config_model"
28
+ class="observ-form__select"
29
+ data-observ--config-editor-target="model"
30
+ data-action="change->observ--config-editor#syncToJson">
31
+ <option value="">-- Select a model (optional) --</option>
32
+ <% chat_model_options_grouped.each do |provider, models| %>
33
+ <optgroup label="<%= provider %>">
34
+ <% models.each do |display_name, model_id| %>
35
+ <option value="<%= model_id %>" <%= 'selected' if config_value(prompt, :model) == model_id %>>
36
+ <%= display_name %>
37
+ </option>
38
+ <% end %>
39
+ </optgroup>
40
+ <% end %>
41
+ </select>
42
+ <p class="observ-form__hint">The LLM model to use for this prompt</p>
43
+ </div>
44
+
45
+ <!-- Temperature -->
46
+ <div class="observ-config-editor__row">
47
+ <label class="observ-form__label" for="config_temperature">Temperature</label>
48
+ <div class="observ-config-editor__input-group">
49
+ <input type="number"
50
+ id="config_temperature"
51
+ class="observ-form__input observ-config-editor__number-input"
52
+ min="0"
53
+ max="2"
54
+ step="0.1"
55
+ placeholder="0.7"
56
+ value="<%= config_value(prompt, :temperature) %>"
57
+ data-observ--config-editor-target="temperature"
58
+ data-action="input->observ--config-editor#syncToJson">
59
+ <span class="observ-config-editor__range-hint">0.0 - 2.0</span>
60
+ </div>
61
+ <p class="observ-form__hint">
62
+ Controls randomness: 0.0 = deterministic, 1.0 = balanced, 2.0 = creative
63
+ </p>
64
+ </div>
65
+
66
+ <!-- Max Tokens -->
67
+ <div class="observ-config-editor__row">
68
+ <label class="observ-form__label" for="config_max_tokens">Max Tokens</label>
69
+ <div class="observ-config-editor__input-group">
70
+ <input type="number"
71
+ id="config_max_tokens"
72
+ class="observ-form__input observ-config-editor__number-input"
73
+ min="1"
74
+ max="128000"
75
+ placeholder="2000"
76
+ value="<%= config_value(prompt, :max_tokens) %>"
77
+ data-observ--config-editor-target="maxTokens"
78
+ data-action="input->observ--config-editor#syncToJson">
79
+ </div>
80
+ <p class="observ-form__hint">
81
+ Maximum number of tokens in the response
82
+ </p>
83
+ </div>
84
+ </fieldset>
85
+
86
+ <!-- Advanced JSON Section (Collapsible) -->
87
+ <details class="observ-config-editor__advanced">
88
+ <summary class="observ-config-editor__advanced-summary">
89
+ Advanced Configuration (JSON)
90
+ </summary>
91
+
92
+ <div class="observ-config-editor__advanced-content">
93
+ <p class="observ-form__hint">
94
+ Edit the raw JSON configuration. Changes here will sync with the fields above.
95
+ You can add custom parameters not available in the structured fields.
96
+ </p>
97
+
98
+ <textarea class="observ-form__textarea observ-form__textarea--code"
99
+ rows="8"
100
+ placeholder='{"model": "gpt-4o", "temperature": 0.7, "max_tokens": 2000}'
101
+ data-observ--config-editor-target="jsonInput"
102
+ data-action="input->observ--config-editor#syncFromJson"><%= config_json %></textarea>
103
+
104
+ <!-- Validation Status -->
105
+ <div class="observ-config-editor__status"
106
+ data-observ--config-editor-target="status">
107
+ </div>
108
+ </div>
109
+ </details>
110
+
111
+ <!-- Hidden field for form submission -->
112
+ <%= f.hidden_field :config,
113
+ value: config_json,
114
+ data: { "observ--config-editor-target": "hiddenField" } %>
115
+ </div>
@@ -66,19 +66,8 @@
66
66
  </div>
67
67
  </div>
68
68
 
69
- <!-- Configuration (JSON) -->
70
- <div class="observ-form__group">
71
- <%= f.label :config, "Configuration (JSON)", class: "observ-form__label" %>
72
- <%= f.text_area :config,
73
- value: prompt.config.present? ? JSON.pretty_generate(prompt.config) : "",
74
- rows: 8,
75
- placeholder: '{\n "model": "gpt-4o",\n "temperature": 0.7,\n "max_tokens": 2000\n}',
76
- class: "observ-form__textarea observ-form__textarea--code",
77
- data: { controller: "json-editor" } %>
78
- <p class="observ-form__hint">
79
- Optional JSON configuration for model parameters and metadata
80
- </p>
81
- </div>
69
+ <!-- Configuration -->
70
+ <%= render 'observ/prompts/config_editor', prompt: prompt, f: f %>
82
71
 
83
72
  <!-- Actions -->
84
73
  <div class="observ-form__actions observ-form__actions--between">
@@ -60,18 +60,8 @@
60
60
  </div>
61
61
  </div>
62
62
 
63
- <!-- Configuration (JSON) -->
64
- <div class="observ-form__group">
65
- <%= f.label :config, "Configuration (JSON)", class: "observ-form__label" %>
66
- <%= f.text_area :config,
67
- rows: 8,
68
- placeholder: '{\n "model": "gpt-4o",\n "temperature": 0.7,\n "max_tokens": 2000\n}',
69
- class: "observ-form__textarea observ-form__textarea--code",
70
- data: { controller: "json-editor" } %>
71
- <p class="observ-form__hint">
72
- Optional JSON configuration for model parameters and metadata
73
- </p>
74
- </div>
63
+ <!-- Configuration -->
64
+ <%= render 'observ/prompts/config_editor', prompt: form, f: f %>
75
65
 
76
66
  <!-- Hidden field for from_version -->
77
67
  <%= f.hidden_field :from_version %>
@@ -1,8 +1,11 @@
1
1
  class ChatResponseJob < ApplicationJob
2
2
  retry_on RubyLLM::BadRequestError, wait: 2.seconds, attempts: 1
3
3
 
4
- def perform(chat_id, content)
4
+ # @param chat_id [Integer] The chat ID
5
+ # @param user_message_id [Integer] The ID of the user message (already created by the controller)
6
+ def perform(chat_id, user_message_id)
5
7
  chat = Chat.find(chat_id)
8
+ user_message = chat.messages.find(user_message_id)
6
9
 
7
10
  # Observability is automatically enabled via after_find callback
8
11
  # All LLM calls, tool calls, and metrics are tracked automatically
@@ -12,7 +15,10 @@ class ChatResponseJob < ApplicationJob
12
15
  begin
13
16
  # Model parameters (temperature, max_tokens, etc.) are automatically configured
14
17
  # via the initialize_agent callback when the chat is created
15
- chat.ask(content) do |chunk|
18
+ #
19
+ # Use complete instead of ask to avoid creating a duplicate user message.
20
+ # The user message was already created by the controller for immediate UI feedback.
21
+ chat.complete do |chunk|
16
22
  if chunk.content && !chunk.content.blank?
17
23
  message = chat.messages.last
18
24
  message.broadcast_append_chunk(chunk.content)
@@ -23,7 +29,7 @@ class ChatResponseJob < ApplicationJob
23
29
 
24
30
  error_message = chat.messages.create!(
25
31
  role: :assistant,
26
- content: "I apologize, but I encountered an error while processing your request. This might be due to a tool call issue. Please try rephrasing your question or try again."
32
+ content: "**Error:** #{e.message}"
27
33
  )
28
34
 
29
35
  error_message.broadcast_replace_to(
@@ -17,7 +17,6 @@ module Observ
17
17
  :prompt_config_schema,
18
18
  :prompt_config_schema_strict,
19
19
  :back_to_app_path,
20
- :back_to_app_label,
21
20
  :chat_ui_enabled,
22
21
  :agent_path,
23
22
  :pagination_per_page
@@ -38,7 +37,6 @@ module Observ
38
37
  @prompt_config_schema = default_prompt_config_schema
39
38
  @prompt_config_schema_strict = false
40
39
  @back_to_app_path = -> { "/" }
41
- @back_to_app_label = "← Back to App"
42
40
  @chat_ui_enabled = -> { defined?(::Chat) && ::Chat.respond_to?(:acts_as_chat) }
43
41
  @agent_path = nil # Defaults to Rails.root.join("app", "agents")
44
42
  @pagination_per_page = 25
data/lib/observ/engine.rb CHANGED
@@ -8,6 +8,13 @@ module Observ
8
8
  g.factory_bot dir: "spec/factories"
9
9
  end
10
10
 
11
+ # Make helpers available to host app for Turbo broadcasts
12
+ initializer "observ.helpers" do
13
+ ActiveSupport.on_load(:action_controller_base) do
14
+ helper Observ::MarkdownHelper
15
+ end
16
+ end
17
+
11
18
  # Make concerns available to host app
12
19
  initializer "observ.load_concerns" do
13
20
  config.to_prepare do
@@ -1,3 +1,3 @@
1
1
  module Observ
2
- VERSION = "0.6.5"
2
+ VERSION = "0.6.7"
3
3
  end