langchainrb 0.6.4 → 0.6.5
- checksums.yaml +4 -4
- data/.env.example +4 -1
- data/Gemfile.lock +3 -1
- data/README.md +100 -0
- data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb +13 -1
- data/examples/llama_cpp.rb +24 -0
- data/lib/langchain/llm/base.rb +1 -0
- data/lib/langchain/llm/llama_cpp.rb +102 -0
- data/lib/langchain/output_parsers/fix.rb +84 -0
- data/lib/langchain/output_parsers/prompts/naive_fix_prompt.yaml +22 -0
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +2 -0
- metadata +20 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3404535e036c3efe68fd12706d2ebb269caed87b562fc38434122b1be01a356d
+  data.tar.gz: e3be77b32cf754235e8895fb1af60edca54cb5acb84278bfa2e39b6ed7c2abbe
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b3fae04c73176c758c2d2d32c3ac538f3e094eb10f378b9a8befbbdcc62b60e55941a1bfefcb61eac7daca43ef91d0e57306dbc26bd59afbdad6ab4efff2ba89
+  data.tar.gz: 626bb4a226112ee6fe709077a6d49ba91c0483fee657848153e9cff61693183709aede5844237c24cc02c561f59be82ea1fd296fe1c3f4ee4d971494ee4dcd75
data/.env.example
CHANGED
@@ -1,10 +1,13 @@
 AI21_API_KEY=
 CHROMA_URL=
 COHERE_API_KEY=
+GOOGLE_PALM_API_KEY=
 HUGGING_FACE_API_KEY=
+LLAMACPP_MODEL_PATH=
+LLAMACPP_N_THREADS=
+LLAMACPP_N_GPU_LAYERS=
 MILVUS_URL=
 OPENAI_API_KEY=
-GOOGLE_PALM_API_KEY=
 OPEN_WEATHER_API_KEY=
 PINECONE_API_KEY=
 PINECONE_ENVIRONMENT=
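The three new LLAMACPP_* variables feed the LlamaCpp wrapper added later in this release. A minimal sketch of wiring them up (assuming the variables are set and the optional llama_cpp gem is installed):

```ruby
require "langchain"

# Sketch only: Integer() raises if either numeric variable is unset.
llm = Langchain::LLM::LlamaCpp.new(
  model_path: ENV["LLAMACPP_MODEL_PATH"],
  n_gpu_layers: Integer(ENV["LLAMACPP_N_GPU_LAYERS"]),
  n_threads: Integer(ENV["LLAMACPP_N_THREADS"])
)
```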
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    langchainrb (0.6.4)
+    langchainrb (0.6.5)
       baran (~> 0.1.6)
       colorize (~> 0.8.1)
       json-schema (~> 4.0.0)
@@ -153,6 +153,7 @@ GEM
       addressable (>= 2.8)
     language_server-protocol (3.17.0.3)
     lint_roller (1.0.0)
+    llama_cpp (0.3.0)
     loofah (2.21.1)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
@@ -327,6 +328,7 @@ DEPENDENCIES
   hnswlib (~> 0.8.1)
   hugging-face (~> 0.3.4)
   langchainrb!
+  llama_cpp
   milvus (~> 0.9.0)
   nokogiri (~> 1.13)
   open-weather-ruby-client (~> 0.3.0)
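Because llama_cpp is only a development dependency of the gem (see the metadata changes below), an application that wants the local-model backend declares it in its own Gemfile. A sketch, with illustrative version pins:

```ruby
# Application Gemfile; the pins mirror the lockfile above and are illustrative only.
gem "langchainrb", "~> 0.6.5"
gem "llama_cpp", "~> 0.3.0"
```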
data/README.md
CHANGED
@@ -274,6 +274,106 @@ prompt = Langchain::Prompt.load_from_path(file_path: "spec/fixtures/prompt/promp
 prompt.input_variables #=> ["adjective", "content"]
 ```
 
+### Using Output Parsers
+
+Parse LLM text responses into structured output, such as JSON.
+
+#### Structured Output Parser
+
+You can use the `StructuredOutputParser` to generate a prompt that instructs the LLM to provide a JSON response adhering to a specific JSON schema:
+
+```ruby
+json_schema = {
+  type: "object",
+  properties: {
+    name: {
+      type: "string",
+      description: "Persons name"
+    },
+    age: {
+      type: "number",
+      description: "Persons age"
+    },
+    interests: {
+      type: "array",
+      items: {
+        type: "object",
+        properties: {
+          interest: {
+            type: "string",
+            description: "A topic of interest"
+          },
+          levelOfInterest: {
+            type: "number",
+            description: "A value between 0 and 100 of how interested the person is in this interest"
+          }
+        },
+        required: ["interest", "levelOfInterest"],
+        additionalProperties: false
+      },
+      minItems: 1,
+      maxItems: 3,
+      description: "A list of the person's interests"
+    }
+  },
+  required: ["name", "age", "interests"],
+  additionalProperties: false
+}
+parser = Langchain::OutputParsers::StructuredOutputParser.from_json_schema(json_schema)
+prompt = Langchain::Prompt::PromptTemplate.new(template: "Generate details of a fictional character.\n{format_instructions}\nCharacter description: {description}", input_variables: ["description", "format_instructions"])
+prompt_text = prompt.format(description: "Korean chemistry student", format_instructions: parser.get_format_instructions)
+# Generate details of a fictional character.
+# You must format your output as a JSON value that adheres to a given "JSON Schema" instance.
+# ...
+```
+
+Then parse the llm response:
+
+```ruby
+llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
+llm_response = llm.chat(prompt: prompt_text)
+parser.parse(llm_response)
+# {
+#   "name" => "Kim Ji-hyun",
+#   "age" => 22,
+#   "interests" => [
+#     {
+#       "interest" => "Organic Chemistry",
+#       "levelOfInterest" => 85
+#     },
+#     ...
+#   ]
+# }
+```
+
+If the parser fails to parse the LLM response, you can use the `OutputFixingParser`. It sends an error message, prior output, and the original prompt text to the LLM, asking for a "fixed" response:
+
+```ruby
+begin
+  parser.parse(llm_response)
+rescue Langchain::OutputParsers::OutputParserException => e
+  fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
+    llm: llm,
+    parser: parser
+  )
+  fix_parser.parse(llm_response)
+end
+```
+
+Alternatively, if you don't need to handle the `OutputParserException`, you can simplify the code:
+
+```ruby
+# we already have the `OutputFixingParser`:
+# parser = Langchain::OutputParsers::StructuredOutputParser.from_json_schema(json_schema)
+fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
+  llm: llm,
+  parser: parser
+)
+fix_parser.parse(llm_response)
+```
+
+See [here](https://github.com/andreibondarev/langchainrb/tree/main/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb) for a concrete example
+
 ### Using Agents 🤖
 Agents are semi-autonomous bots that can respond to user questions and use available to them Tools to provide informed replies. They break down problems into series of steps and define Actions (and Action Inputs) along the way that are executed and fed back to them as additional information. Once an Agent decides that it has the Final Answer it responds with it.
 
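The new README section relies on the gem's existing json-schema dependency for validation. As a hypothetical illustration (not part of this diff), the same json_schema can be checked directly against a parsed response:

```ruby
require "json"
require "json-schema"

# Normalize the symbol-keyed Ruby hash into plain JSON data for the validator.
schema = JSON.parse(json_schema.to_json)
candidate = {
  "name" => "Kim Ji-hyun",
  "age" => 22,
  "interests" => [{"interest" => "Organic Chemistry", "levelOfInterest" => 85}]
}
errors = JSON::Validator.fully_validate(schema, candidate)
puts errors.empty? ? "valid" : errors
```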
data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb
CHANGED
@@ -58,6 +58,11 @@ prompt.format(description: "Korean chemistry student", format_instructions: pars
 
 # Character description: Korean chemistry student
 
+llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
+# llm_response = llm.chat(
+#   prompt: prompt.format(description: "Korean chemistry student", format_instructions: parser.get_format_instructions)
+# )
+
 # LLM example response:
 llm_example_response = <<~RESPONSE
   Here is your character:
@@ -83,7 +88,14 @@ llm_example_response = <<~RESPONSE
   ```
 RESPONSE
 
-
+fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
+  llm: llm,
+  parser: parser
+)
+# The OutputFixingParser wraps the StructuredOutputParser such that if initial
+# LLM response does not conform to the schema, will call out the LLM to fix
+# the error
+fix_parser.parse(llm_example_response)
 # {
 #   "name" => "Kim Ji-hyun",
 #   "age" => 22,
data/examples/llama_cpp.rb
ADDED
@@ -0,0 +1,24 @@
+require "langchain"
+
+llm = Langchain::LLM::LlamaCpp.new(
+  model_path: ENV["LLAMACPP_MODEL_PATH"],
+  n_gpu_layers: Integer(ENV["LLAMACPP_N_GPU_LAYERS"]),
+  n_threads: Integer(ENV["LLAMACPP_N_THREADS"])
+)
+
+instructions = [
+  "Tell me about the creator of Ruby",
+  "Write a story about a pony who goes to the store to buy some apples."
+]
+
+template = Langchain::Prompt::PromptTemplate.new(
+  template: "{instruction}\n\n### Response:",
+  input_variables: %w[instruction]
+)
+
+instructions.each do |instruction|
+  puts "USER: #{instruction}"
+  prompt = template.format(instruction: instruction)
+  response = llm.complete prompt: prompt, n_predict: 1024
+  puts "ASSISTANT: #{response}"
+end
data/lib/langchain/llm/base.rb
CHANGED
data/lib/langchain/llm/llama_cpp.rb
ADDED
@@ -0,0 +1,102 @@
+# frozen_string_literal: true
+
+module Langchain::LLM
+  # A wrapper around the LlamaCpp.rb library
+  #
+  # Gem requirements:
+  #     gem "llama_cpp"
+  #
+  # Usage:
+  #     llama = Langchain::LLM::LlamaCpp.new(
+  #       model_path: ENV["LLAMACPP_MODEL_PATH"],
+  #       n_gpu_layers: Integer(ENV["LLAMACPP_N_GPU_LAYERS"]),
+  #       n_threads: Integer(ENV["LLAMACPP_N_THREADS"])
+  #     )
+  #
+  class LlamaCpp < Base
+    attr_accessor :model_path, :n_gpu_layers, :n_ctx, :seed
+    attr_writer :n_threads
+
+    # @param model_path [String] The path to the model to use
+    # @param n_gpu_layers [Integer] The number of GPU layers to use
+    # @param n_ctx [Integer] The number of context tokens to use
+    # @param n_threads [Integer] The CPU number of threads to use
+    # @param seed [Integer] The seed to use
+    def initialize(model_path:, n_gpu_layers: 1, n_ctx: 2048, n_threads: 1, seed: -1)
+      depends_on "llama_cpp"
+      require "llama_cpp"
+
+      @model_path = model_path
+      @n_gpu_layers = n_gpu_layers
+      @n_ctx = n_ctx
+      @n_threads = n_threads
+      @seed = seed
+    end
+
+    # @params text [String] The text to embed
+    # @params n_threads [Integer] The number of CPU threads to use
+    # @return [Array] The embedding
+    def embed(text:, n_threads: nil)
+      # contexts are kinda stateful when it comes to embeddings, so allocate one each time
+      context = embedding_context
+
+      embedding_input = context.tokenize(text: text, add_bos: true)
+      return unless embedding_input.size.positive?
+
+      n_threads ||= self.n_threads
+
+      context.eval(tokens: embedding_input, n_past: 0, n_threads: n_threads)
+      context.embeddings
+    end
+
+    # @params prompt [String] The prompt to complete
+    # @params n_predict [Integer] The number of tokens to predict
+    # @params n_threads [Integer] The number of CPU threads to use
+    # @return [String] The completed prompt
+    def complete(prompt:, n_predict: 128, n_threads: nil)
+      n_threads ||= self.n_threads
+      # contexts do not appear to be stateful when it comes to completion, so re-use the same one
+      context = completion_context
+      ::LLaMACpp.generate(context, prompt, n_threads: n_threads, n_predict: n_predict)
+    end
+
+    private
+
+    def n_threads
+      # Use the maximum number of CPU threads available, if not configured
+      @n_threads ||= `sysctl -n hw.ncpu`.strip.to_i
+    end
+
+    def build_context_params(embeddings: false)
+      context_params = ::LLaMACpp::ContextParams.new
+
+      context_params.seed = seed
+      context_params.n_ctx = n_ctx
+      context_params.n_gpu_layers = n_gpu_layers
+      context_params.embedding = embeddings
+
+      context_params
+    end
+
+    def build_model(embeddings: false)
+      return @model if defined?(@model)
+      @model = ::LLaMACpp::Model.new(model_path: model_path, params: build_context_params(embeddings: embeddings))
+    end
+
+    def build_completion_context
+      ::LLaMACpp::Context.new(model: build_model)
+    end
+
+    def build_embedding_context
+      ::LLaMACpp::Context.new(model: build_model(embeddings: true))
+    end
+
+    def completion_context
+      @completion_context ||= build_completion_context
+    end
+
+    def embedding_context
+      @embedding_context ||= build_embedding_context
+    end
+  end
+end
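For orientation, the two public calls the new class exposes; the model path is a placeholder and the output depends entirely on the local model file:

```ruby
llm = Langchain::LLM::LlamaCpp.new(model_path: "/path/to/ggml-model.bin")
llm.complete(prompt: "Say hi.\n\n### Response:", n_predict: 16) # => String
llm.embed(text: "hello world")                                  # => Array of floats (nil if tokenization yields nothing)
```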
data/lib/langchain/output_parsers/fix.rb
ADDED
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+module Langchain::OutputParsers
+  # = Output Fixing Parser
+  #
+  class OutputFixingParser < Base
+    attr_reader :llm, :parser, :prompt
+
+    #
+    # Initializes a new instance of the class.
+    #
+    # @param llm [Langchain::LLM] The LLM used in the fixing process
+    # @param parser [Langchain::OutputParsers] The parser originally used which resulted in parsing error
+    # @param prompt [Langchain::Prompt::PromptTemplate]
+    #
+    def initialize(llm:, parser:, prompt:)
+      raise ArgumentError.new("llm must be an instance of Langchain::LLM got: #{llm.class}") unless llm.is_a?(Langchain::LLM::Base)
+      raise ArgumentError.new("parser must be an instance of Langchain::OutputParsers got #{parser.class}") unless parser.is_a?(Langchain::OutputParsers::Base)
+      raise ArgumentError.new("prompt must be an instance of Langchain::Prompt::PromptTemplate got #{prompt.class}") unless prompt.is_a?(Langchain::Prompt::PromptTemplate)
+      @llm = llm
+      @parser = parser
+      @prompt = prompt
+    end
+
+    def to_h
+      {
+        _type: "OutputFixingParser",
+        parser: parser.to_h,
+        prompt: prompt.to_h
+      }
+    end
+
+    #
+    # calls get_format_instructions on the @parser
+    #
+    # @return [String] Instructions for how the output of a language model should be formatted
+    # according to the @schema.
+    #
+    def get_format_instructions
+      parser.get_format_instructions
+    end
+
+    #
+    # Parse the output of an LLM call, if fails with OutputParserException
+    # then call the LLM with a fix prompt in an attempt to get the correctly
+    # formatted response
+    #
+    # @param completion [String] Text output from the LLM call
+    #
+    # @return [Object] object that is succesfully parsed by @parser.parse
+    #
+    def parse(completion)
+      parser.parse(completion)
+    rescue OutputParserException => e
+      new_completion = llm.chat(
+        prompt: prompt.format(
+          instructions: parser.get_format_instructions,
+          completion: completion,
+          error: e
+        )
+      )
+      parser.parse(new_completion)
+    end
+
+    #
+    # Creates a new instance of the class using the given JSON::Schema.
+    #
+    # @param schema [JSON::Schema] The JSON::Schema to use
+    #
+    # @return [Object] A new instance of the class
+    #
+    def self.from_llm(llm:, parser:, prompt: nil)
+      new(llm: llm, parser: parser, prompt: prompt || naive_fix_prompt)
+    end
+
+    private
+
+    private_class_method def self.naive_fix_prompt
+      Langchain::Prompt.load_from_path(
+        file_path: Langchain.root.join("langchain/output_parsers/prompts/naive_fix_prompt.yaml")
+      )
+    end
+  end
+end
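from_llm falls back to the naive_fix_prompt template added below when no prompt is given. A sketch of supplying a custom template instead; the template text here is made up, but it must accept the same three input variables that #parse passes to prompt.format:

```ruby
custom_prompt = Langchain::Prompt::PromptTemplate.new(
  template: "Instructions:\n{instructions}\nYour previous answer:\n{completion}\nIt failed with:\n{error}\nAnswer again, correctly:",
  input_variables: %w[instructions completion error]
)
fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
  llm: llm, parser: parser, prompt: custom_prompt
)
```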
data/lib/langchain/output_parsers/prompts/naive_fix_prompt.yaml
ADDED
@@ -0,0 +1,22 @@
+_type: prompt
+input_variables:
+  - instructions
+  - completion
+  - error
+template: |
+  Instructions:
+  --------------
+  {instructions}
+  --------------
+  Completion:
+  --------------
+  {completion}
+  --------------
+
+  Above, the Completion did not satisfy the constraints given in the Instructions.
+  Error:
+  --------------
+  {error}
+  --------------
+
+  Please try again. Please only respond with an answer that satisfies the constraints laid out in the Instructions:
data/lib/langchain/version.rb
CHANGED
data/lib/langchain.rb
CHANGED
@@ -134,6 +134,7 @@ module Langchain
     autoload :Cohere, "langchain/llm/cohere"
     autoload :GooglePalm, "langchain/llm/google_palm"
     autoload :HuggingFace, "langchain/llm/hugging_face"
+    autoload :LlamaCpp, "langchain/llm/llama_cpp"
     autoload :OpenAI, "langchain/llm/openai"
     autoload :Replicate, "langchain/llm/replicate"
   end
@@ -153,6 +154,7 @@ module Langchain
   module OutputParsers
     autoload :Base, "langchain/output_parsers/base"
     autoload :StructuredOutputParser, "langchain/output_parsers/structured"
+    autoload :OutputFixingParser, "langchain/output_parsers/fix"
   end
 
   module Errors
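With these autoload entries the new constants load lazily on first reference, so the llama_cpp gem is only required when the wrapper is actually constructed:

```ruby
Langchain::LLM::LlamaCpp                       # loads lib/langchain/llm/llama_cpp.rb on first use
Langchain::OutputParsers::OutputFixingParser   # loads lib/langchain/output_parsers/fix.rb on first use
```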
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.6.4
+  version: 0.6.5
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-07-
+date: 2023-07-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -262,6 +262,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 0.9.0
+- !ruby/object:Gem::Dependency
+  name: llama_cpp
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: nokogiri
   requirement: !ruby/object:Gem::Requirement
@@ -478,6 +492,7 @@ files:
 - examples/create_and_manage_few_shot_prompt_templates.rb
 - examples/create_and_manage_prompt_templates.rb
 - examples/create_and_manage_prompt_templates_using_structured_output_parser.rb
+- examples/llama_cpp.rb
 - examples/pdf_store_and_query_with_chroma.rb
 - examples/store_and_query_with_pinecone.rb
 - examples/store_and_query_with_qdrant.rb
@@ -503,11 +518,14 @@ files:
 - lib/langchain/llm/cohere.rb
 - lib/langchain/llm/google_palm.rb
 - lib/langchain/llm/hugging_face.rb
+- lib/langchain/llm/llama_cpp.rb
 - lib/langchain/llm/openai.rb
 - lib/langchain/llm/prompts/summarize_template.yaml
 - lib/langchain/llm/replicate.rb
 - lib/langchain/loader.rb
 - lib/langchain/output_parsers/base.rb
+- lib/langchain/output_parsers/fix.rb
+- lib/langchain/output_parsers/prompts/naive_fix_prompt.yaml
 - lib/langchain/output_parsers/structured.rb
 - lib/langchain/processors/base.rb
 - lib/langchain/processors/csv.rb