langchainrb 0.6.11 → 0.6.12
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/README.md +1 -7
- data/lib/langchain/agent/base.rb +1 -0
- data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} +12 -11
- data/lib/langchain/ai_message.rb +9 -0
- data/lib/langchain/conversation.rb +11 -11
- data/lib/langchain/conversation_memory.rb +3 -7
- data/lib/langchain/human_message.rb +9 -0
- data/lib/langchain/llm/cohere.rb +2 -1
- data/lib/langchain/llm/google_palm.rb +15 -10
- data/lib/langchain/llm/llama_cpp.rb +5 -5
- data/lib/langchain/llm/openai.rb +24 -25
- data/lib/langchain/llm/replicate.rb +2 -1
- data/lib/langchain/message.rb +35 -0
- data/lib/langchain/output_parsers/base.rb +5 -4
- data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} +3 -1
- data/lib/langchain/prompt/loading.rb +73 -67
- data/lib/langchain/prompt.rb +5 -0
- data/lib/langchain/system_message.rb +9 -0
- data/lib/langchain/tool/base.rb +14 -14
- data/lib/langchain/vectorsearch/pgvector.rb +7 -5
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +19 -97
- metadata +37 -38
- data/.env.example +0 -21
- data/.rspec +0 -3
- data/.rubocop.yml +0 -11
- data/.tool-versions +0 -1
- data/Gemfile +0 -14
- data/Gemfile.lock +0 -360
- data/Rakefile +0 -17
- data/examples/conversation_with_openai.rb +0 -52
- data/examples/create_and_manage_few_shot_prompt_templates.rb +0 -36
- data/examples/create_and_manage_prompt_templates.rb +0 -25
- data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb +0 -116
- data/examples/llama_cpp.rb +0 -24
- data/examples/open_ai_function_calls.rb +0 -41
- data/examples/open_ai_qdrant_function_calls.rb +0 -39
- data/examples/pdf_store_and_query_with_chroma.rb +0 -40
- data/examples/store_and_query_with_pinecone.rb +0 -46
- data/examples/store_and_query_with_qdrant.rb +0 -37
- data/examples/store_and_query_with_weaviate.rb +0 -32
- data/lefthook.yml +0 -5
- data/sig/langchain.rbs +0 -4
- /data/lib/langchain/agent/{sql_query_agent/sql_query_agent.rb → sql_query_agent.rb} +0 -0
- /data/lib/langchain/output_parsers/{structured.rb → structured_output_parser.rb} +0 -0
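
The file list above shows that conversation messages were split into their own classes (data/lib/langchain/message.rb plus ai_message.rb, human_message.rb and system_message.rb). The sketch below is only an illustration of how such a hierarchy is typically wired up in Ruby; the class internals and usage shown are assumptions for illustration, not taken from this diff.

# Illustrative sketch only -- internals below are assumed, not read from the gem source.
module Langchain
  class Message
    attr_reader :content

    def initialize(content = "")
      @content = content
    end

    def to_s
      content
    end
  end

  class AIMessage < Message; end
  class HumanMessage < Message; end
  class SystemMessage < Message; end
end

# Possible usage when building a conversation history:
history = [
  Langchain::SystemMessage.new("You are a chatbot from the future"),
  Langchain::HumanMessage.new("What's the weather in NYC?")
]
history.each { |message| puts "#{message.class.name.split("::").last}: #{message}" }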
data/Gemfile.lock
DELETED
@@ -1,360 +0,0 @@
-PATH
-  remote: .
-  specs:
-    langchainrb (0.6.11)
-      baran (~> 0.1.6)
-      colorize (~> 0.8.1)
-      json-schema (~> 4.0.0)
-      tiktoken_ruby (~> 0.0.5)
-
-GEM
-  remote: https://rubygems.org/
-  specs:
-    Ascii85 (1.0.3)
-    actionpack (7.0.4.3)
-      actionview (= 7.0.4.3)
-      activesupport (= 7.0.4.3)
-      rack (~> 2.0, >= 2.2.0)
-      rack-test (>= 0.6.3)
-      rails-dom-testing (~> 2.0)
-      rails-html-sanitizer (~> 1.0, >= 1.2.0)
-    actionview (7.0.4.3)
-      activesupport (= 7.0.4.3)
-      builder (~> 3.1)
-      erubi (~> 1.4)
-      rails-dom-testing (~> 2.0)
-      rails-html-sanitizer (~> 1.1, >= 1.2.0)
-    activesupport (7.0.4.3)
-      concurrent-ruby (~> 1.0, >= 1.0.2)
-      i18n (>= 1.6, < 2)
-      minitest (>= 5.1)
-      tzinfo (~> 2.0)
-    addressable (2.8.4)
-      public_suffix (>= 2.0.2, < 6.0)
-    afm (0.2.2)
-    ai21 (0.2.1)
-    anthropic (0.1.0)
-      faraday (>= 1)
-      faraday-multipart (>= 1)
-    ast (2.4.2)
-    baran (0.1.7)
-    builder (3.2.4)
-    byebug (11.1.3)
-    childprocess (4.1.0)
-    chroma-db (0.3.0)
-      dry-monads (~> 1.6)
-      ruby-next-core (>= 0.15.0)
-    coderay (1.1.3)
-    cohere-ruby (0.9.5)
-      faraday (>= 1.0.0)
-      faraday_middleware (>= 1.0.0)
-    colorize (0.8.1)
-    concurrent-ruby (1.2.2)
-    crass (1.0.6)
-    diff-lcs (1.5.0)
-    docx (0.8.0)
-      nokogiri (~> 1.13, >= 1.13.0)
-      rubyzip (~> 2.0)
-    dotenv (2.7.6)
-    dotenv-rails (2.7.6)
-      dotenv (= 2.7.6)
-      railties (>= 3.2)
-    dry-configurable (1.0.1)
-      dry-core (~> 1.0, < 2)
-      zeitwerk (~> 2.6)
-    dry-core (1.0.0)
-      concurrent-ruby (~> 1.0)
-      zeitwerk (~> 2.6)
-    dry-inflector (1.0.0)
-    dry-initializer (3.1.1)
-    dry-logic (1.5.0)
-      concurrent-ruby (~> 1.0)
-      dry-core (~> 1.0, < 2)
-      zeitwerk (~> 2.6)
-    dry-monads (1.6.0)
-      concurrent-ruby (~> 1.0)
-      dry-core (~> 1.0, < 2)
-      zeitwerk (~> 2.6)
-    dry-schema (1.13.1)
-      concurrent-ruby (~> 1.0)
-      dry-configurable (~> 1.0, >= 1.0.1)
-      dry-core (~> 1.0, < 2)
-      dry-initializer (~> 3.0)
-      dry-logic (>= 1.4, < 2)
-      dry-types (>= 1.7, < 2)
-      zeitwerk (~> 2.6)
-    dry-struct (1.6.0)
-      dry-core (~> 1.0, < 2)
-      dry-types (>= 1.7, < 2)
-      ice_nine (~> 0.11)
-      zeitwerk (~> 2.6)
-    dry-types (1.7.1)
-      concurrent-ruby (~> 1.0)
-      dry-core (~> 1.0)
-      dry-inflector (~> 1.0)
-      dry-logic (~> 1.4)
-      zeitwerk (~> 2.6)
-    dry-validation (1.10.0)
-      concurrent-ruby (~> 1.0)
-      dry-core (~> 1.0, < 2)
-      dry-initializer (~> 3.0)
-      dry-schema (>= 1.12, < 2)
-      zeitwerk (~> 2.6)
-    eqn (1.6.5)
-      treetop (>= 1.2.0)
-    erubi (1.12.0)
-    faraday (1.10.3)
-      faraday-em_http (~> 1.0)
-      faraday-em_synchrony (~> 1.0)
-      faraday-excon (~> 1.1)
-      faraday-httpclient (~> 1.0)
-      faraday-multipart (~> 1.0)
-      faraday-net_http (~> 1.0)
-      faraday-net_http_persistent (~> 1.0)
-      faraday-patron (~> 1.0)
-      faraday-rack (~> 1.0)
-      faraday-retry (~> 1.0)
-      ruby2_keywords (>= 0.0.4)
-    faraday-em_http (1.0.0)
-    faraday-em_synchrony (1.0.0)
-    faraday-excon (1.1.0)
-    faraday-httpclient (1.0.1)
-    faraday-multipart (1.0.4)
-      multipart-post (~> 2)
-    faraday-net_http (1.0.1)
-    faraday-net_http_persistent (1.2.0)
-    faraday-patron (1.0.0)
-    faraday-rack (1.0.0)
-    faraday-retry (1.0.3)
-    faraday_middleware (1.2.0)
-      faraday (~> 1.0)
-    google_palm_api (0.1.2)
-      faraday (>= 1.0.0)
-      faraday_middleware (>= 1.0.0)
-    google_search_results (2.0.1)
-    graphlient (0.6.0)
-      faraday (>= 1.0)
-      faraday_middleware
-      graphql-client
-    graphql (2.0.24)
-    graphql-client (0.18.0)
-      activesupport (>= 3.0)
-      graphql
-    hashery (2.1.2)
-    hashie (5.0.0)
-    hnswlib (0.8.1)
-    httparty (0.21.0)
-      mini_mime (>= 1.0.0)
-      multi_xml (>= 0.5.2)
-    hugging-face (0.3.4)
-      faraday (>= 1.0)
-    i18n (1.13.0)
-      concurrent-ruby (~> 1.0)
-    ice_nine (0.11.2)
-    json (2.6.3)
-    json-schema (4.0.0)
-      addressable (>= 2.8)
-    language_server-protocol (3.17.0.3)
-    lint_roller (1.0.0)
-    llama_cpp (0.3.0)
-    loofah (2.21.1)
-      crass (~> 1.0.2)
-      nokogiri (>= 1.5.9)
-    method_source (1.0.0)
-    milvus (0.9.1)
-      faraday (~> 1)
-    mini_mime (1.1.2)
-    mini_portile2 (2.8.2)
-    minitest (5.18.0)
-    multi_xml (0.6.0)
-    multipart-post (2.3.0)
-    nokogiri (1.14.3)
-      mini_portile2 (~> 2.8.0)
-      racc (~> 1.4)
-    nokogiri (1.14.3-arm64-darwin)
-      racc (~> 1.4)
-    nokogiri (1.14.3-x86_64-darwin)
-      racc (~> 1.4)
-    nokogiri (1.14.3-x86_64-linux)
-      racc (~> 1.4)
-    open-weather-ruby-client (0.3.0)
-      activesupport
-      faraday (>= 1.0.0)
-      faraday_middleware
-      hashie
-    parallel (1.23.0)
-    parser (3.2.2.1)
-      ast (~> 2.4.1)
-    pdf-reader (1.4.1)
-      Ascii85 (~> 1.0.0)
-      afm (~> 0.2.1)
-      hashery (~> 2.0)
-      ruby-rc4
-      ttfunk
-    pg (1.5.3)
-    pgvector (0.2.1)
-    pinecone (0.1.71)
-      dry-struct (~> 1.6.0)
-      dry-validation (~> 1.10.0)
-      httparty (~> 0.21.0)
-    polyglot (0.3.5)
-    pry (0.14.2)
-      coderay (~> 1.1)
-      method_source (~> 1.0)
-    pry-byebug (3.10.1)
-      byebug (~> 11.0)
-      pry (>= 0.13, < 0.15)
-    public_suffix (5.0.1)
-    qdrant-ruby (0.9.2)
-      faraday (~> 1)
-      faraday_middleware (~> 1)
-    racc (1.6.2)
-    rack (2.2.7)
-    rack-test (2.1.0)
-      rack (>= 1.3)
-    rails-dom-testing (2.0.3)
-      activesupport (>= 4.2.0)
-      nokogiri (>= 1.6)
-    rails-html-sanitizer (1.5.0)
-      loofah (~> 2.19, >= 2.19.1)
-    railties (7.0.4.3)
-      actionpack (= 7.0.4.3)
-      activesupport (= 7.0.4.3)
-      method_source
-      rake (>= 12.2)
-      thor (~> 1.0)
-      zeitwerk (~> 2.5)
-    rainbow (3.1.1)
-    rake (13.0.6)
-    rb_sys (0.9.81)
-    rdiscount (2.2.7)
-    regexp_parser (2.8.0)
-    replicate-ruby (0.2.2)
-      addressable
-      faraday (>= 1.0)
-      faraday-multipart
-      faraday-retry
-    rexml (3.2.5)
-    roo (2.10.0)
-      nokogiri (~> 1)
-      rubyzip (>= 1.3.0, < 3.0.0)
-    rspec (3.12.0)
-      rspec-core (~> 3.12.0)
-      rspec-expectations (~> 3.12.0)
-      rspec-mocks (~> 3.12.0)
-    rspec-core (3.12.2)
-      rspec-support (~> 3.12.0)
-    rspec-expectations (3.12.3)
-      diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.12.0)
-    rspec-mocks (3.12.5)
-      diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.12.0)
-    rspec-support (3.12.0)
-    rubocop (1.50.2)
-      json (~> 2.3)
-      parallel (~> 1.10)
-      parser (>= 3.2.0.0)
-      rainbow (>= 2.2.2, < 4.0)
-      regexp_parser (>= 1.8, < 3.0)
-      rexml (>= 3.2.5, < 4.0)
-      rubocop-ast (>= 1.28.0, < 2.0)
-      ruby-progressbar (~> 1.7)
-      unicode-display_width (>= 2.4.0, < 3.0)
-    rubocop-ast (1.28.1)
-      parser (>= 3.2.1.0)
-    rubocop-performance (1.16.0)
-      rubocop (>= 1.7.0, < 2.0)
-      rubocop-ast (>= 0.4.0)
-    ruby-next-core (0.15.3)
-    ruby-openai (4.1.0)
-      faraday (>= 1)
-      faraday-multipart (>= 1)
-    ruby-progressbar (1.13.0)
-    ruby-rc4 (0.1.5)
-    ruby2_keywords (0.0.5)
-    rubyzip (2.3.2)
-    safe_ruby (1.0.4)
-      childprocess (>= 0.3.9)
-    sequel (5.68.0)
-    standard (1.28.2)
-      language_server-protocol (~> 3.17.0.2)
-      lint_roller (~> 1.0)
-      rubocop (~> 1.50.2)
-      standard-custom (~> 1.0.0)
-      standard-performance (~> 1.0.1)
-    standard-custom (1.0.0)
-      lint_roller (~> 1.0)
-    standard-performance (1.0.1)
-      lint_roller (~> 1.0)
-      rubocop-performance (~> 1.16.0)
-    standardrb (1.0.1)
-      standard
-    thor (1.2.1)
-    tiktoken_ruby (0.0.5)
-      rb_sys (~> 0.9.68)
-    tiktoken_ruby (0.0.5-arm64-darwin)
-    tiktoken_ruby (0.0.5-x86_64-darwin)
-    tiktoken_ruby (0.0.5-x86_64-linux)
-    treetop (1.6.12)
-      polyglot (~> 0.3)
-    ttfunk (1.7.0)
-    tzinfo (2.0.6)
-      concurrent-ruby (~> 1.0)
-    unicode-display_width (2.4.2)
-    weaviate-ruby (0.8.4)
-      faraday (~> 1)
-      faraday_middleware (~> 1)
-      graphlient (~> 0.6.0)
-    wikipedia-client (1.17.0)
-      addressable (~> 2.7)
-    yard (0.9.34)
-    zeitwerk (2.6.8)
-
-PLATFORMS
-  arm64-darwin-21
-  arm64-darwin-22
-  ruby
-  x86_64-darwin-19
-  x86_64-darwin-22
-  x86_64-linux
-
-DEPENDENCIES
-  ai21 (~> 0.2.1)
-  anthropic (~> 0.1.0)
-  chroma-db (~> 0.3.0)
-  cohere-ruby (~> 0.9.5)
-  docx (~> 0.8.0)
-  dotenv-rails (~> 2.7.6)
-  eqn (~> 1.6.5)
-  google_palm_api (~> 0.1.2)
-  google_search_results (~> 2.0.0)
-  hnswlib (~> 0.8.1)
-  hugging-face (~> 0.3.4)
-  langchainrb!
-  llama_cpp
-  milvus (~> 0.9.0)
-  nokogiri (~> 1.13)
-  open-weather-ruby-client (~> 0.3.0)
-  pdf-reader (~> 1.4)
-  pg (~> 1.5)
-  pgvector (~> 0.2.1)
-  pinecone (~> 0.1.6)
-  pry-byebug (~> 3.10.0)
-  qdrant-ruby (~> 0.9.0)
-  rake (~> 13.0)
-  rdiscount
-  replicate-ruby (~> 0.2.2)
-  roo (~> 2.10.0)
-  rspec (~> 3.0)
-  rubocop
-  ruby-openai (~> 4.1.0)
-  safe_ruby (~> 1.0.4)
-  sequel (~> 5.68.0)
-  standardrb
-  weaviate-ruby (~> 0.8.3)
-  wikipedia-client (~> 1.17.0)
-  yard
-
-BUNDLED WITH
-   2.3.22
data/Rakefile
DELETED
@@ -1,17 +0,0 @@
-# frozen_string_literal: true
-
-require "bundler/gem_tasks"
-require "rspec/core/rake_task"
-require "standard/rake"
-require "yard"
-
-RSpec::Core::RakeTask.new(:spec)
-
-task default: :spec
-
-Rake::Task["spec"].enhance do
-  Rake::Task["standard:fix"].invoke
-end
-
-YARD::Rake::YardocTask.new do |t|
-end
data/examples/conversation_with_openai.rb
DELETED
@@ -1,52 +0,0 @@
-require "langchain"
-require "reline"
-
-# gem install reline
-# or add `gem "reline"` to your Gemfile
-
-openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
-
-chat = Langchain::Conversation.new(llm: openai)
-chat.set_context("You are a chatbot from the future")
-
-DONE = %w[done end eof exit].freeze
-
-puts "Welcome to the chatbot from the future!"
-
-def prompt_for_message
-  puts "(multiline input; type 'end' on its own line when done. or exit to exit)"
-
-  user_message = Reline.readmultiline("Question: ", true) do |multiline_input|
-    last = multiline_input.split.last
-    DONE.include?(last)
-  end
-
-  return :noop unless user_message
-
-  lines = user_message.split("\n")
-  if lines.size > 1 && DONE.include?(lines.last)
-    # remove the "done" from the message
-    user_message = lines[0..-2].join("\n")
-  end
-
-  return :exit if DONE.include?(user_message.downcase)
-
-  user_message
-end
-
-begin
-  loop do
-    user_message = prompt_for_message
-
-    case user_message
-    when :noop
-      next
-    when :exit
-      break
-    end
-
-    puts chat.message(user_message)
-  end
-rescue Interrupt
-  exit 0
-end
data/examples/create_and_manage_few_shot_prompt_templates.rb
DELETED
@@ -1,36 +0,0 @@
-require "langchain"
-
-# Create a prompt with a few shot examples
-prompt = Langchain::Prompt::FewShotPromptTemplate.new(
-  prefix: "Write antonyms for the following words.",
-  suffix: "Input: {adjective}\nOutput:",
-  example_prompt: Langchain::Prompt::PromptTemplate.new(
-    input_variables: ["input", "output"],
-    template: "Input: {input}\nOutput: {output}"
-  ),
-  examples: [
-    {input: "happy", output: "sad"},
-    {input: "tall", output: "short"}
-  ],
-  input_variables: ["adjective"]
-)
-
-prompt.format(adjective: "good")
-
-# Write antonyms for the following words.
-#
-# Input: happy
-# Output: sad
-#
-# Input: tall
-# Output: short
-#
-# Input: good
-# Output:
-
-# Save prompt template to JSON file
-prompt.save(file_path: "spec/fixtures/prompt/few_shot_prompt_template.json")
-
-# Loading a new prompt template using a JSON file
-prompt = Langchain::Prompt.load_from_path(file_path: "spec/fixtures/prompt/few_shot_prompt_template.json")
-prompt.prefix # "Write antonyms for the following words."
data/examples/create_and_manage_prompt_templates.rb
DELETED
@@ -1,25 +0,0 @@
-require "langchain"
-
-# Create a prompt with one input variable
-prompt = Langchain::Prompt::PromptTemplate.new(template: "Tell me a {adjective} joke.", input_variables: ["adjective"])
-prompt.format(adjective: "funny") # "Tell me a funny joke."
-
-# Create a prompt with multiple input variables
-prompt = Langchain::Prompt::PromptTemplate.new(template: "Tell me a {adjective} joke about {content}.", input_variables: ["adjective", "content"])
-prompt.format(adjective: "funny", content: "chickens") # "Tell me a funny joke about chickens."
-
-# Creating a PromptTemplate using just a prompt and no input_variables
-prompt = Langchain::Prompt::PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
-prompt.input_variables # ["adjective", "content"]
-prompt.format(adjective: "funny", content: "chickens") # "Tell me a funny joke about chickens."
-
-# Save prompt template to JSON file
-prompt.save(file_path: "spec/fixtures/prompt/prompt_template.json")
-
-# Loading a new prompt template using a JSON file
-prompt = Langchain::Prompt.load_from_path(file_path: "spec/fixtures/prompt/prompt_template.json")
-prompt.input_variables # ["adjective", "content"]
-
-# Loading a new prompt template using a YAML file
-prompt = Langchain::Prompt.load_from_path(file_path: "spec/fixtures/prompt/prompt_template.yaml")
-prompt.input_variables # ["adjective", "content"]
data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb
DELETED
@@ -1,116 +0,0 @@
-require "langchain"
-
-# Generate a prompt that directs the LLM to provide a JSON response that adheres to a specific JSON schema.
-json_schema = {
-  type: "object",
-  properties: {
-    name: {
-      type: "string",
-      description: "Persons name"
-    },
-    age: {
-      type: "number",
-      description: "Persons age"
-    },
-    interests: {
-      type: "array",
-      items: {
-        type: "object",
-        properties: {
-          interest: {
-            type: "string",
-            description: "A topic of interest"
-          },
-          levelOfInterest: {
-            type: "number",
-            description: "A value between 0 and 100 of how interested the person is in this interest"
-          }
-        },
-        required: ["interest", "levelOfInterest"],
-        additionalProperties: false
-      },
-      minItems: 1,
-      maxItems: 3,
-      description: "A list of the person's interests"
-    }
-  },
-  required: ["name", "age", "interests"],
-  additionalProperties: false
-}
-parser = Langchain::OutputParsers::StructuredOutputParser.from_json_schema(json_schema)
-prompt = Langchain::Prompt::PromptTemplate.new(template: "Generate details of a fictional character.\n{format_instructions}\nCharacter description: {description}", input_variables: ["description", "format_instructions"])
-prompt.format(description: "Korean chemistry student", format_instructions: parser.get_format_instructions)
-# Generate details of a fictional character.
-# You must format your output as a JSON value that adheres to a given "JSON Schema" instance.
-
-# "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.
-
-# For example, the example "JSON Schema" instance {"properties": {"foo": {"description": "a list of test words", "type": "array", "items": {"type": "string"}}, "required": ["foo"]}
-# would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings.
-# Thus, the object {"foo": ["bar", "baz"]} is a well-formatted instance of this example "JSON Schema". The object {"properties": {"foo": ["bar", "baz"]}} is not well-formatted.
-
-# Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!
-
-# Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
-# ```json
-# {"type":"object","properties":{"name":{"type":"string","description":"Persons name"},"age":{"type":"number","description":"Persons age"},"interests":{"type":"array","items":{"type":"object","properties":{"interest":{"type":"string","description":"A topic of interest"},"levelOfInterest":{"type":"number","description":"A value between 0 and 100 of how interested the person is in this interest"},"required":["interest","levelOfInterest"],"additionalProperties":false},"minItems":1,"maxItems":3,"description":"A list of the person's interests"},"required":["name","age","interests"],"additionalProperties":false}
-# ```
-
-# Character description: Korean chemistry student
-
-llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
-# llm_response = llm.chat(
-#   prompt: prompt.format(description: "Korean chemistry student", format_instructions: parser.get_format_instructions)
-# )
-
-# LLM example response:
-llm_example_response = <<~RESPONSE
-  Here is your character:
-  ```json
-  {
-    "name": "Kim Ji-hyun",
-    "age": 22,
-    "interests": [
-      {
-        "interest": "Organic Chemistry",
-        "levelOfInterest": 85
-      },
-      {
-        "interest": "Biochemistry",
-        "levelOfInterest": 70
-      },
-      {
-        "interest": "Analytical Chemistry",
-        "levelOfInterest": 60
-      }
-    ]
-  }
-  ```
-RESPONSE
-
-fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
-  llm: llm,
-  parser: parser
-)
-# The OutputFixingParser wraps the StructuredOutputParser such that if initial
-# LLM response does not conform to the schema, will call out the LLM to fix
-# the error
-fix_parser.parse(llm_example_response)
-# {
-#   "name" => "Kim Ji-hyun",
-#   "age" => 22,
-#   "interests" => [
-#     {
-#       "interest" => "Organic Chemistry",
-#       "levelOfInterest" => 85
-#     },
-#     {
-#       "interest" => "Biochemistry",
-#       "levelOfInterest" => 70
-#     },
-#     {
-#       "interest" => "Analytical Chemistry",
-#       "levelOfInterest" => 60
-#     }
-#   ]
-# }
data/examples/llama_cpp.rb
DELETED
@@ -1,24 +0,0 @@
-require "langchain"
-
-llm = Langchain::LLM::LlamaCpp.new(
-  model_path: ENV["LLAMACPP_MODEL_PATH"],
-  n_gpu_layers: Integer(ENV["LLAMACPP_N_GPU_LAYERS"]),
-  n_threads: Integer(ENV["LLAMACPP_N_THREADS"])
-)
-
-instructions = [
-  "Tell me about the creator of Ruby",
-  "Write a story about a pony who goes to the store to buy some apples."
-]
-
-template = Langchain::Prompt::PromptTemplate.new(
-  template: "{instruction}\n\n### Response:",
-  input_variables: %w[instruction]
-)
-
-instructions.each do |instruction|
-  puts "USER: #{instruction}"
-  prompt = template.format(instruction: instruction)
-  response = llm.complete prompt: prompt, n_predict: 1024
-  puts "ASSISTANT: #{response}"
-end
data/examples/open_ai_function_calls.rb
DELETED
@@ -1,41 +0,0 @@
-require "langchain"
-require "dotenv/load"
-
-functions = [
-  {
-    name: "get_current_weather",
-    description: "Get the current weather in a given location",
-    parameters: {
-      type: :object,
-      properties: {
-        location: {
-          type: :string,
-          description: "The city and state, e.g. San Francisco, CA"
-        },
-        unit: {
-          type: "string",
-          enum: %w[celsius fahrenheit]
-        }
-      },
-      required: ["location"]
-    }
-  }
-]
-
-openai = Langchain::LLM::OpenAI.new(
-  api_key: ENV["OPENAI_API_KEY"],
-  default_options: {
-    chat_completion_model_name: "gpt-3.5-turbo-16k"
-  }
-)
-
-chat = Langchain::Conversation.new(llm: openai)
-
-chat.set_context("You are the climate bot")
-chat.set_functions(functions)
-
-DONE = %w[done end eof exit].freeze
-
-user_message = "what's the weather in NYC?"
-
-puts chat.message(user_message)