langchainrb 0.15.6 → 0.16.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/lib/langchain/assistants/assistant.rb +66 -49
- data/lib/langchain/contextual_logger.rb +2 -0
- data/lib/langchain/llm/aws_bedrock.rb +42 -4
- data/lib/langchain/llm/google_gemini.rb +1 -1
- data/lib/langchain/llm/google_vertex_ai.rb +1 -1
- data/lib/langchain/llm/ollama.rb +1 -1
- data/lib/langchain/version.rb +1 -1
- metadata +2 -3
- data/lib/langchain/assistants/thread.rb +0 -46
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3f685910b0f3f1816c3822debc2ec470d72d85203b2695ab8ab780b5d0f1cb09
+  data.tar.gz: 7ec406ad7980e12739aa70e9710b21e1f7df0a1e46f66820d7003026e3bbc877
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1145ffbab814f09acb539df3662f0c4c5536ded25252a4db3e640d29cc550930b11ac9310b11436d591e1ec13449af59f125bdab3fe463f9a59683ea9d8f38ed
+  data.tar.gz: d9e76d70f24f964b3f17addda08ace46695149af48c7bd4aff1936a10888640f9cb222bef15bdb205ad7997028f88c81142dcb3ef49d96fcd37682c56409f4c6
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,9 @@
 ## [Unreleased]
 
+## [0.16.0] - 2024-09-19
+- Remove `Langchain::Thread` class as it was not needed.
+- Support `cohere` provider for `Langchain::LLM::AwsBedrock#embed`
+
 ## [0.15.6] - 2024-09-16
 - Throw an error when `Langchain::Assistant#add_message_callback` is not a callable proc.
 - Resetting instructions on Langchain::Assistant with Google Gemini no longer throws an error.
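The headline change: `Langchain::Assistant` now owns its conversation history directly, and the standalone `Langchain::Thread` class is gone. A minimal before/after sketch (the OpenAI LLM and the `OPENAI_API_KEY` variable are illustrative; any supported LLM behaves the same way):

```ruby
require "langchain"

llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# 0.15.x (removed): history lived in a separate thread object
#   assistant = Langchain::Assistant.new(llm: llm, thread: Langchain::Thread.new)

# 0.16.0: history lives on the Assistant; prior messages can be seeded
# via the new messages: keyword argument
assistant = Langchain::Assistant.new(
  llm: llm,
  instructions: "You are a helpful assistant"
)
assistant.add_message(content: "Hello!")
assistant.messages # => full history, exposed by the new attr_reader
```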
data/lib/langchain/assistants/assistant.rb
CHANGED
@@ -12,27 +12,23 @@ module Langchain
   #   tools: [Langchain::Tool::NewsRetriever.new(api_key: ENV["NEWS_API_KEY"])]
   # )
   class Assistant
-
-
-
-    attr_reader :llm, :thread, :instructions, :state, :llm_adapter, :tool_choice
-    attr_reader :total_prompt_tokens, :total_completion_tokens, :total_tokens
+    attr_reader :llm, :instructions, :state, :llm_adapter, :tool_choice
+    attr_reader :total_prompt_tokens, :total_completion_tokens, :total_tokens, :messages
     attr_accessor :tools, :add_message_callback

     # Create a new assistant
     #
     # @param llm [Langchain::LLM::Base] LLM instance that the assistant will use
-    # @param thread [Langchain::Thread] The thread that'll keep track of the conversation
     # @param tools [Array<Langchain::Tool::Base>] Tools that the assistant has access to
-    # @param instructions [String] The system instructions
+    # @param instructions [String] The system instructions
     # @param tool_choice [String] Specify how tools should be selected. Options: "auto", "any", "none", or <specific function name>
     # @params add_message_callback [Proc] A callback function (Proc or lambda) that is called when any message is added to the conversation
     def initialize(
       llm:,
-      thread: nil,
       tools: [],
       instructions: nil,
       tool_choice: "auto",
+      messages: [],
       add_message_callback: nil
     )
       unless tools.is_a?(Array) && tools.all? { |tool| tool.class.singleton_class.included_modules.include?(Langchain::ToolDefinition) }
@@ -42,14 +38,13 @@ module Langchain
       @llm = llm
       @llm_adapter = LLM::Adapter.build(llm)

-      @thread = thread || Langchain::Thread.new
-
       # TODO: Validate that it is, indeed, a Proc or lambda
       if !add_message_callback.nil? && !add_message_callback.respond_to?(:call)
         raise ArgumentError, "add_message_callback must be a callable object, like Proc or lambda"
       end
-      @
+      @add_message_callback = add_message_callback

+      self.messages = messages
       @tools = tools
       self.tool_choice = tool_choice
       @instructions = instructions
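The callback contract is unchanged but is now stored and enforced on the Assistant itself rather than on a thread. A short sketch reusing the `llm` from the example above; the lambda body is illustrative:

```ruby
# Anything that responds to #call is accepted
on_message = ->(message) { puts "[#{message.role}] #{message.content}" }

assistant = Langchain::Assistant.new(llm: llm, add_message_callback: on_message)

Langchain::Assistant.new(llm: llm, add_message_callback: "not callable")
# => ArgumentError: add_message_callback must be a callable object, like Proc or lambda
```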
@@ -59,41 +54,60 @@ module Langchain
       @total_completion_tokens = 0
       @total_tokens = 0

-
-
-      # The first message in the thread should be the system instructions
+      # The first message in the messages array should be the system instructions
       # For Google Gemini, and Anthropic system instructions are added to the `system:` param in the `chat` method
       initialize_instructions
     end

-    # Add a user message to the
+    # Add a user message to the messages array
     #
     # @param content [String] The content of the message
     # @param role [String] The role attribute of the message. Default: "user"
     # @param tool_calls [Array<Hash>] The tool calls to include in the message
     # @param tool_call_id [String] The ID of the tool call to include in the message
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def add_message(content: nil, role: "user", tool_calls: [], tool_call_id: nil)
       message = build_message(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-
+
+      # Call the callback with the message
+      add_message_callback.call(message) if add_message_callback # rubocop:disable Style/SafeNavigation
+
+      # Prepend the message to the messages array
+      messages << message
+
       @state = :ready

       messages
     end

-    #
+    # Convert messages to an LLM APIs-compatible array of hashes
     #
-    # @
-
+    # @return [Array<Hash>] Messages as an OpenAI API-compatible array of hashes
+    def array_of_message_hashes
+      messages
+        .map(&:to_hash)
+        .compact
+    end
+
+    # Only used by the Assistant when it calls the LLM#complete() method
+    def prompt_of_concatenated_messages
+      messages.map(&:to_s).join
+    end
+
+    # Set multiple messages
+    #
+    # @param messages [Array<Langchain::Message>] The messages to set
+    # @return [Array<Langchain::Message>] The messages
     def messages=(messages)
-
-
+      raise ArgumentError, "messages array must only contain Langchain::Message instance(s)" unless messages.is_a?(Array) && messages.all? { |m| m.is_a?(Langchain::Messages::Base) }
+
+      @messages = messages
     end

-    # Add multiple messages
+    # Add multiple messages
     #
     # @param messages [Array<Hash>] The messages to add
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def add_messages(messages:)
       messages.each do |message_hash|
         add_message(**message_hash.slice(:content, :role, :tool_calls, :tool_call_id))
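Two entry points for history now live side by side: the validating `messages=` setter for already-built message objects (e.g. restored from storage), and `add_messages` for plain hashes, where the `slice` silently drops any extra keys. Continuing the sketch above:

```ruby
# Plain hashes are converted into LLM-specific message objects
assistant.add_messages(
  messages: [
    {role: "user", content: "What is 2 + 2?"},
    {role: "assistant", content: "4"}
  ]
)

# The setter insists on Langchain::Messages::Base instances
assistant.messages = ["not a message"]
# => ArgumentError: messages array must only contain Langchain::Message instance(s)
```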
@@ -103,10 +117,10 @@ module Langchain
     # Run the assistant
     #
     # @param auto_tool_execution [Boolean] Whether or not to automatically run tools
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def run(auto_tool_execution: false)
-      if
-        Langchain.logger.warn("No messages
+      if messages.empty?
+        Langchain.logger.warn("No messages to process")
         @state = :completed
         return
       end
@@ -114,39 +128,39 @@ module Langchain
       @state = :in_progress
       @state = handle_state until run_finished?(auto_tool_execution)

-
+      messages
     end

     # Run the assistant with automatic tool execution
     #
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def run!
       run(auto_tool_execution: true)
     end

-    # Add a user message
+    # Add a user message and run the assistant
     #
     # @param content [String] The content of the message
     # @param auto_tool_execution [Boolean] Whether or not to automatically run tools
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def add_message_and_run(content:, auto_tool_execution: false)
       add_message(content: content, role: "user")
       run(auto_tool_execution: auto_tool_execution)
     end

-    # Add a user message
+    # Add a user message and run the assistant with automatic tool execution
     #
     # @param content [String] The content of the message
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def add_message_and_run!(content:)
       add_message_and_run(content: content, auto_tool_execution: true)
     end

-    # Submit tool output
+    # Submit tool output
     #
     # @param tool_call_id [String] The ID of the tool call to submit output for
     # @param output [String] The output of the tool
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def submit_tool_output(tool_call_id:, output:)
       tool_role = determine_tool_role

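The run helpers compose exactly as their docstrings describe. Roughly, with the tool call id below being a placeholder:

```ruby
assistant.add_message(content: "What's in the news today?") # append only, no LLM call
assistant.run                   # chat with the LLM; pauses at :requires_action on tool calls
assistant.run!                  # same, but executes tool calls automatically
assistant.add_message_and_run!(content: "Summarize that")   # add + run! in one step

# Without auto execution, feed tool results back by hand:
assistant.submit_tool_output(tool_call_id: "call_123", output: "42") # "call_123" is a placeholder id
```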
@@ -154,18 +168,21 @@ module Langchain
       add_message(role: tool_role, content: output, tool_call_id: tool_call_id)
     end

-    # Delete all messages
+    # Delete all messages
     #
     # @return [Array] Empty messages array
-    def clear_thread!
+    def clear_messages!
       # TODO: If this a bug? Should we keep the "system" message?
-
+      @messages = []
     end

+    # TODO: Remove in the next major release
+    alias_method :clear_thread!, :clear_messages!
+
     # Set new instructions
     #
     # @param new_instructions [String] New instructions that will be set as a system message
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def instructions=(new_instructions)
       @instructions = new_instructions

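`clear_thread!` survives as a deprecated alias, so existing callers keep working until the next major release:

```ruby
assistant.clear_messages! # => []
assistant.clear_thread!   # same effect, deprecated alias
```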
@@ -173,7 +190,7 @@ module Langchain
       if !llm.is_a?(Langchain::LLM::GoogleGemini) &&
           !llm.is_a?(Langchain::LLM::GoogleVertexAI) &&
           !llm.is_a?(Langchain::LLM::Anthropic)
-        # Find message with role: "system" in
+        # Find message with role: "system" in messages and delete it from the messages array
         replace_system_message!(content: new_instructions)
       end
     end
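For OpenAI-style LLMs the setter swaps the stored system message in place; for Google Gemini, Google Vertex AI, and Anthropic the instructions travel through the `system:` chat parameter instead, so no stored message is touched. A sketch:

```ruby
assistant.instructions = "Respond only in French"
assistant.messages.first.role # => "system" (replaced, not appended) for OpenAI-style LLMs
```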
@@ -192,12 +209,12 @@ module Langchain
     # Replace old system message with new one
     #
     # @param content [String] New system message content
-    # @return [Array<Langchain::Message>] The messages
+    # @return [Array<Langchain::Message>] The messages
     def replace_system_message!(content:)
-
+      messages.delete_if(&:system?)

       message = build_message(role: "system", content: content)
-
+      messages.unshift(message)
     end

     # TODO: If tool_choice = "tool_function_name" and then tool is removed from the assistant, should we set tool_choice back to "auto"?
@@ -231,11 +248,11 @@ module Langchain
       end
     end

-    # Process the latest message
+    # Process the latest message
     #
     # @return [Symbol] The next state
     def process_latest_message
-      last_message =
+      last_message = messages.last

       case last_message.standard_role
       when :system
@@ -261,14 +278,14 @@ module Langchain
     #
     # @return [Symbol] The next state
     def handle_llm_message
-
+      messages.last.tool_calls.any? ? :requires_action : :completed
     end

     # Handle unexpected message scenario
     #
     # @return [Symbol] The failed state
     def handle_unexpected_message
-      Langchain.logger.error("Unexpected message role encountered: #{
+      Langchain.logger.error("Unexpected message role encountered: #{messages.last.standard_role}")
       :failed
     end

@@ -301,7 +318,7 @@ module Langchain
     #
     # @return [Symbol] The next state
     def execute_tools
-      run_tools(
+      run_tools(messages.last.tool_calls)
       :in_progress
     rescue => e
       Langchain.logger.error("Error running tools: #{e.message}; #{e.backtrace.join('\n')}")
@@ -340,7 +357,7 @@ module Langchain

       params = @llm_adapter.build_chat_params(
         instructions: @instructions,
-        messages:
+        messages: array_of_message_hashes,
         tools: @tools,
         tool_choice: tool_choice
       )
data/lib/langchain/contextual_logger.rb
CHANGED
@@ -49,6 +49,8 @@ module Langchain
         "[#{for_class_name}]:"
       end
       log_line_parts << colorize(args.first, MESSAGE_COLOR_OPTIONS[method])
+      log_line_parts << kwargs if !!kwargs && kwargs.any?
+      log_line_parts << block.call if block
       log_line = log_line_parts.compact.join(" ")

       @logger.send(
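With these two lines, extra keyword arguments and a block's return value are appended to the log line instead of being dropped. The keyword and payload below are illustrative, and the exact rendering of the appended hash may vary:

```ruby
payload = {texts: ["hello"]}

Langchain.logger.info("Embedding requested", input_size: payload[:texts].size) # kwargs appended
Langchain.logger.debug("Payload") { payload.inspect } # block evaluated and appended
```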
data/lib/langchain/llm/aws_bedrock.rb
CHANGED
@@ -50,7 +50,7 @@ module Langchain::LLM

     SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic ai21 cohere meta].freeze
     SUPPORTED_CHAT_COMPLETION_PROVIDERS = %i[anthropic].freeze
-    SUPPORTED_EMBEDDING_PROVIDERS = %i[amazon].freeze
+    SUPPORTED_EMBEDDING_PROVIDERS = %i[amazon cohere].freeze

     def initialize(completion_model: DEFAULTS[:completion_model_name], embedding_model: DEFAULTS[:embedding_model_name], aws_client_options: {}, default_options: {})
       depends_on "aws-sdk-bedrockruntime", req: "aws-sdk-bedrockruntime"
@@ -82,8 +82,7 @@ module Langchain::LLM
     def embed(text:, **params)
       raise "Completion provider #{embedding_provider} is not supported." unless SUPPORTED_EMBEDDING_PROVIDERS.include?(embedding_provider)

-      parameters =
-      parameters = parameters.merge(params)
+      parameters = compose_embedding_parameters params.merge(text:)

       response = client.invoke_model({
         model_id: @defaults[:embedding_model_name],
@@ -92,7 +91,7 @@ module Langchain::LLM
         accept: "application/json"
       })

-
+      parse_embedding_response response
     end

     #
@@ -214,6 +213,14 @@ module Langchain::LLM
       end
     end

+    def compose_embedding_parameters(params)
+      if embedding_provider == :amazon
+        compose_embedding_parameters_amazon params
+      elsif embedding_provider == :cohere
+        compose_embedding_parameters_cohere params
+      end
+    end
+
     def parse_response(response)
       if completion_provider == :anthropic
         Langchain::LLM::AnthropicResponse.new(JSON.parse(response.body.string))
@@ -226,6 +233,37 @@ module Langchain::LLM
       end
     end

+    def parse_embedding_response(response)
+      json_response = JSON.parse(response.body.string)
+
+      if embedding_provider == :amazon
+        Langchain::LLM::AwsTitanResponse.new(json_response)
+      elsif embedding_provider == :cohere
+        Langchain::LLM::CohereResponse.new(json_response)
+      end
+    end
+
+    def compose_embedding_parameters_amazon(params)
+      default_params = @defaults.merge(params)
+
+      {
+        inputText: default_params[:text],
+        dimensions: default_params[:dimensions],
+        normalize: default_params[:normalize]
+      }.compact
+    end
+
+    def compose_embedding_parameters_cohere(params)
+      default_params = @defaults.merge(params)
+
+      {
+        texts: [default_params[:text]],
+        truncate: default_params[:truncate],
+        input_type: default_params[:input_type],
+        embedding_types: default_params[:embedding_types]
+      }.compact
+    end
+
     def compose_parameters_cohere(params)
       default_params = @defaults.merge(params)

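Putting the new embedding path together: with a Cohere model id, `#embed` routes through `compose_embedding_parameters_cohere` and parses the result as a `Langchain::LLM::CohereResponse`. A sketch (the model id, region, and option values are illustrative; `input_type` is a Cohere-specific option that its v3 embed models on Bedrock expect):

```ruby
llm = Langchain::LLM::AwsBedrock.new(
  embedding_model: "cohere.embed-english-v3",
  aws_client_options: {region: "us-east-1"}
)

response = llm.embed(
  text: "Ruby is a programmer's best friend",
  input_type: "search_document", # passed through by compose_embedding_parameters_cohere
  truncate: "END"
)
response.embedding # => [0.012, -0.034, ...]
```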
data/lib/langchain/llm/ollama.rb
CHANGED
@@ -270,7 +270,7 @@ module Langchain::LLM
         conn.request :json
         conn.response :json
         conn.response :raise_error
-        conn.response :logger,
+        conn.response :logger, Langchain.logger, {headers: true, bodies: true, errors: true}
       end
     end

data/lib/langchain/version.rb
CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module Langchain
-  VERSION = "0.15.6"
+  VERSION = "0.16.0"
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.15.6
+  version: 0.16.0
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-09-16 00:00:00.000000000 Z
+date: 2024-09-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -672,7 +672,6 @@ files:
 - lib/langchain/assistants/messages/mistral_ai_message.rb
 - lib/langchain/assistants/messages/ollama_message.rb
 - lib/langchain/assistants/messages/openai_message.rb
-- lib/langchain/assistants/thread.rb
 - lib/langchain/chunk.rb
 - lib/langchain/chunker/base.rb
 - lib/langchain/chunker/markdown.rb
data/lib/langchain/assistants/thread.rb
DELETED
@@ -1,46 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  # Langchain::Thread keeps track of messages in a conversation.
-  # TODO: Add functionality to persist to the thread to disk, DB, storage, etc.
-  class Thread
-    attr_accessor :messages, :add_message_callback
-
-    # @param messages [Array<Langchain::Message>]
-    # @param add_message_callback [Proc] A callback to call when a message is added to the thread
-    def initialize(messages: [], add_message_callback: nil)
-      raise ArgumentError, "messages array must only contain Langchain::Message instance(s)" unless messages.is_a?(Array) && messages.all? { |m| m.is_a?(Langchain::Messages::Base) }
-
-      @add_message_callback = add_message_callback
-      @messages = messages
-    end
-
-    # Convert the thread to an LLM APIs-compatible array of hashes
-    #
-    # @return [Array<Hash>] The thread as an OpenAI API-compatible array of hashes
-    def array_of_message_hashes
-      messages
-        .map(&:to_hash)
-        .compact
-    end
-
-    # Only used by the Assistant when it calls the LLM#complete() method
-    def prompt_of_concatenated_messages
-      messages.map(&:to_s).join
-    end
-
-    # Add a message to the thread
-    #
-    # @param message [Langchain::Message] The message to add
-    # @return [Array<Langchain::Message>] The updated messages array
-    def add_message(message)
-      raise ArgumentError, "message must be a Langchain::Message instance" unless message.is_a?(Langchain::Messages::Base)
-
-      # Call the callback with the message
-      add_message_callback.call(message) if add_message_callback # rubocop:disable Style/SafeNavigation
-
-      # Prepend the message to the thread
-      messages << message
-    end
-  end
-end