langchainrb 0.5.5 → 0.5.7

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +12 -0
  3. data/Gemfile.lock +3 -1
  4. data/README.md +7 -5
  5. data/examples/store_and_query_with_pinecone.rb +5 -4
  6. data/lib/langchain/agent/base.rb +5 -0
  7. data/lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent.rb +22 -10
  8. data/lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.yaml +26 -0
  9. data/lib/langchain/agent/sql_query_agent/sql_query_agent.rb +8 -8
  10. data/lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml +11 -0
  11. data/lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml +21 -0
  12. data/lib/langchain/chunker/base.rb +15 -0
  13. data/lib/langchain/chunker/text.rb +38 -0
  14. data/lib/langchain/contextual_logger.rb +60 -0
  15. data/lib/langchain/conversation.rb +35 -4
  16. data/lib/langchain/data.rb +4 -0
  17. data/lib/langchain/llm/ai21.rb +16 -2
  18. data/lib/langchain/llm/cohere.rb +5 -4
  19. data/lib/langchain/llm/google_palm.rb +15 -7
  20. data/lib/langchain/llm/openai.rb +67 -17
  21. data/lib/langchain/llm/prompts/summarize_template.yaml +9 -0
  22. data/lib/langchain/llm/replicate.rb +6 -5
  23. data/lib/langchain/prompt/base.rb +2 -2
  24. data/lib/langchain/tool/base.rb +9 -3
  25. data/lib/langchain/tool/calculator.rb +7 -9
  26. data/lib/langchain/tool/database.rb +29 -8
  27. data/lib/langchain/tool/{serp_api.rb → google_search.rb} +9 -9
  28. data/lib/langchain/tool/ruby_code_interpreter.rb +1 -1
  29. data/lib/langchain/tool/weather.rb +2 -2
  30. data/lib/langchain/tool/wikipedia.rb +1 -1
  31. data/lib/langchain/utils/token_length/base_validator.rb +38 -0
  32. data/lib/langchain/utils/token_length/google_palm_validator.rb +9 -29
  33. data/lib/langchain/utils/token_length/openai_validator.rb +10 -27
  34. data/lib/langchain/utils/token_length/token_limit_exceeded.rb +17 -0
  35. data/lib/langchain/vectorsearch/base.rb +6 -0
  36. data/lib/langchain/vectorsearch/chroma.rb +1 -1
  37. data/lib/langchain/vectorsearch/hnswlib.rb +2 -2
  38. data/lib/langchain/vectorsearch/milvus.rb +1 -14
  39. data/lib/langchain/vectorsearch/pgvector.rb +1 -5
  40. data/lib/langchain/vectorsearch/pinecone.rb +1 -4
  41. data/lib/langchain/vectorsearch/qdrant.rb +1 -4
  42. data/lib/langchain/vectorsearch/weaviate.rb +1 -4
  43. data/lib/langchain/version.rb +1 -1
  44. data/lib/langchain.rb +28 -12
  45. metadata +30 -11
  46. data/lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json +0 -10
  47. data/lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.json +0 -10
  48. data/lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.json +0 -10
  49. data/lib/langchain/llm/prompts/summarize_template.json +0 -5
data/lib/langchain/utils/token_length/openai_validator.rb CHANGED
@@ -9,7 +9,7 @@ module Langchain
       # This class is meant to validate the length of the text passed in to OpenAI's API.
       # It is used to validate the token length before the API call is made
       #
-      class OpenAIValidator
+      class OpenAIValidator < BaseValidator
         TOKEN_LIMITS = {
           # Source:
           # https://platform.openai.com/docs/api-reference/embeddings
@@ -17,6 +17,9 @@ module Langchain
           "text-embedding-ada-002" => 8191,
           "gpt-3.5-turbo" => 4096,
           "gpt-3.5-turbo-0301" => 4096,
+          "gpt-3.5-turbo-0613" => 4096,
+          "gpt-3.5-turbo-16k" => 16384,
+          "gpt-3.5-turbo-16k-0613" => 16384,
           "text-davinci-003" => 4097,
           "text-davinci-002" => 4097,
           "code-davinci-002" => 8001,
@@ -24,6 +27,7 @@ module Langchain
           "gpt-4-0314" => 8192,
           "gpt-4-32k" => 32768,
           "gpt-4-32k-0314" => 32768,
+          "gpt-4-32k-0613" => 32768,
           "text-curie-001" => 2049,
           "text-babbage-001" => 2049,
           "text-ada-001" => 2049,
@@ -33,31 +37,6 @@ module Langchain
           "ada" => 2049
         }.freeze
 
-        #
-        # Calculate the `max_tokens:` parameter to be set by calculating the context length of the text minus the prompt length
-        #
-        # @param content [String | Array<String>] The text or array of texts to validate
-        # @param model_name [String] The model name to validate against
-        # @return [Integer] Whether the text is valid or not
-        # @raise [TokenLimitExceeded] If the text is too long
-        #
-        def self.validate_max_tokens!(content, model_name)
-          text_token_length = if content.is_a?(Array)
-            content.sum { |item| token_length(item.to_json, model_name) }
-          else
-            token_length(content, model_name)
-          end
-
-          max_tokens = TOKEN_LIMITS[model_name] - text_token_length
-
-          # Raise an error even if whole prompt is equal to the model's token limit (max_tokens == 0) since not response will be returned
-          if max_tokens <= 0
-            raise TokenLimitExceeded, "This model's maximum context length is #{TOKEN_LIMITS[model_name]} tokens, but the given text is #{text_token_length} tokens long."
-          end
-
-          max_tokens
-        end
-
         #
         # Calculate token length for a given text and model name
         #
@@ -65,10 +44,14 @@ module Langchain
         # @param model_name [String] The model name to validate against
         # @return [Integer] The token length of the text
         #
-        def self.token_length(text, model_name)
+        def self.token_length(text, model_name, options = {})
           encoder = Tiktoken.encoding_for_model(model_name)
           encoder.encode(text).length
         end
+
+        def self.token_limit(model_name)
+          TOKEN_LIMITS[model_name]
+        end
       end
     end
   end
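For orientation, here is a minimal sketch of the 0.5.7 validator API shown in this hunk. The text and model name are illustrative, and it is only an assumption that the removed `validate_max_tokens!` now lives in the new `BaseValidator` that `OpenAIValidator` inherits from:

```ruby
require "langchain" # langchainrb; the OpenAI validator relies on tiktoken_ruby

validator = Langchain::Utils::TokenLength::OpenAIValidator

text  = "Write a one-sentence summary of the Ruby programming language."
model = "gpt-3.5-turbo-16k"

tokens = validator.token_length(text, model) # encodes the text with Tiktoken and counts tokens
limit  = validator.token_limit(model)        # => 16384, looked up in TOKEN_LIMITS

puts "#{tokens} of #{limit} tokens used"
```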
data/lib/langchain/utils/token_length/token_limit_exceeded.rb ADDED
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module Langchain
+  module Utils
+    module TokenLength
+      class TokenLimitExceeded < StandardError
+        attr_reader :token_overflow
+
+        def initialize(message = "", token_overflow = 0)
+          super message
+
+          @token_overflow = token_overflow
+        end
+      end
+    end
+  end
+end
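A hypothetical rescue, only to illustrate the `token_overflow` attribute the new error class adds:

```ruby
begin
  # Both arguments are optional; token_overflow defaults to 0
  raise Langchain::Utils::TokenLength::TokenLimitExceeded.new("Prompt exceeds the model's context window", 128)
rescue Langchain::Utils::TokenLength::TokenLimitExceeded => e
  puts e.message        # => "Prompt exceeds the model's context window"
  puts e.token_overflow # => 128
end
```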
data/lib/langchain/vectorsearch/base.rb CHANGED
@@ -155,5 +155,11 @@ module Langchain::Vectorsearch
 
       add_texts(texts: texts)
     end
+
+    def self.logger_options
+      {
+        color: :blue
+      }
+    end
   end
 end
data/lib/langchain/vectorsearch/chroma.rb CHANGED
@@ -8,7 +8,7 @@ module Langchain::Vectorsearch
     # Gem requirements: gem "chroma-db", "~> 0.3.0"
     #
     # Usage:
-    # chroma = Langchain::Vectorsearch::Chroma.new(url:, index_name:, llm:, api_key: nil)
+    # chroma = Langchain::Vectorsearch::Chroma.new(url:, index_name:, llm:, llm_api_key:, api_key: nil)
     #
 
     # Initialize the Chroma client
data/lib/langchain/vectorsearch/hnswlib.rb CHANGED
@@ -110,12 +110,12 @@ module Langchain::Vectorsearch
       if File.exist?(path_to_index)
         client.load_index(path_to_index)
 
-        Langchain.logger.info("[#{self.class.name}]".blue + ": Successfully loaded the index at \"#{path_to_index}\"")
+        Langchain.logger.info("Successfully loaded the index at \"#{path_to_index}\"", for: self.class)
       else
         # Default max_elements: 100, but we constantly resize the index as new data is written to it
         client.init_index(max_elements: 100)
 
-        Langchain.logger.info("[#{self.class.name}]".blue + ": Creating a new index at \"#{path_to_index}\"")
+        Langchain.logger.info("Creating a new index at \"#{path_to_index}\"", for: self.class)
       end
     end
   end
data/lib/langchain/vectorsearch/milvus.rb CHANGED
@@ -8,17 +8,9 @@ module Langchain::Vectorsearch
     # Gem requirements: gem "milvus", "~> 0.9.0"
     #
     # Usage:
-    # milvus = Langchain::Vectorsearch::Milvus.new(url:, index_name:, llm:)
+    # milvus = Langchain::Vectorsearch::Milvus.new(url:, index_name:, llm:, llm_api_key:)
     #
 
-    #
-    # Initialize the Milvus client
-    #
-    # @param url [String] The URL of the Milvus server
-    # @param api_key [String] The API key to use
-    # @param index_name [String] The name of the index to use
-    # @param llm [Object] The LLM client to use
-    #
     def initialize(url:, index_name:, llm:, api_key: nil)
       depends_on "milvus"
       require "milvus"
@@ -29,11 +21,6 @@ module Langchain::Vectorsearch
       super(llm: llm)
     end
 
-    #
-    # Add a list of texts to the index
-    #
-    # @param texts [Array] The list of texts to add
-    #
     def add_texts(texts:)
       client.entities.insert(
         collection_name: index_name,
data/lib/langchain/vectorsearch/pgvector.rb CHANGED
@@ -8,7 +8,7 @@ module Langchain::Vectorsearch
     # Gem requirements: gem "pgvector", "~> 0.2"
     #
     # Usage:
-    # pgvector = Langchain::Vectorsearch::Pgvector.new(url:, index_name:, llm:)
+    # pgvector = Langchain::Vectorsearch::Pgvector.new(url:, index_name:, llm:, llm_api_key:)
     #
 
     # The operators supported by the PostgreSQL vector search adapter
@@ -20,14 +20,10 @@ module Langchain::Vectorsearch
 
     attr_reader :operator, :quoted_table_name
 
-    #
-    # Initialize the PostgreSQL client
-    #
     # @param url [String] The URL of the PostgreSQL database
     # @param index_name [String] The name of the table to use for the index
     # @param llm [Object] The LLM client to use
     # @param api_key [String] The API key for the Vectorsearch DB (not used for PostgreSQL)
-    #
     def initialize(url:, index_name:, llm:, api_key: nil)
       require "pg"
       require "pgvector"
data/lib/langchain/vectorsearch/pinecone.rb CHANGED
@@ -8,17 +8,14 @@ module Langchain::Vectorsearch
     # Gem requirements: gem "pinecone", "~> 0.1.6"
     #
     # Usage:
-    # pinecone = Langchain::Vectorsearch::Pinecone.new(environment:, api_key:, index_name:, llm:)
+    # pinecone = Langchain::Vectorsearch::Pinecone.new(environment:, api_key:, index_name:, llm:, llm_api_key:)
     #
 
-    #
     # Initialize the Pinecone client
-    #
     # @param environment [String] The environment to use
     # @param api_key [String] The API key to use
     # @param index_name [String] The name of the index to use
     # @param llm [Object] The LLM client to use
-    #
     def initialize(environment:, api_key:, index_name:, llm:)
       depends_on "pinecone"
       require "pinecone"
data/lib/langchain/vectorsearch/qdrant.rb CHANGED
@@ -8,17 +8,14 @@ module Langchain::Vectorsearch
     # Gem requirements: gem "qdrant-ruby", "~> 0.9.0"
     #
     # Usage:
-    # qdrant = Langchain::Vectorsearch::Qdrant.new(url:, api_key:, index_name:, llm:)
+    # qdrant = Langchain::Vectorsearch::Qdrant.new(url:, api_key:, index_name:, llm:, llm_api_key:)
     #
 
-    #
     # Initialize the Qdrant client
-    #
     # @param url [String] The URL of the Qdrant server
     # @param api_key [String] The API key to use
     # @param index_name [String] The name of the index to use
     # @param llm [Object] The LLM client to use
-    #
     def initialize(url:, api_key:, index_name:, llm:)
       depends_on "qdrant-ruby"
       require "qdrant"
data/lib/langchain/vectorsearch/weaviate.rb CHANGED
@@ -8,17 +8,14 @@ module Langchain::Vectorsearch
     # Gem requirements: gem "weaviate-ruby", "~> 0.8.0"
     #
     # Usage:
-    # weaviate = Langchain::Vectorsearch::Weaviate.new(url:, api_key:, index_name:, llm:)
+    # weaviate = Langchain::Vectorsearch::Weaviate.new(url:, api_key:, index_name:, llm:, llm_api_key:)
     #
 
-    #
     # Initialize the Weaviate adapter
-    #
     # @param url [String] The URL of the Weaviate instance
     # @param api_key [String] The API key to use
     # @param index_name [String] The name of the index to use
     # @param llm [Object] The LLM client to use
-    #
     def initialize(url:, api_key:, index_name:, llm:)
       depends_on "weaviate-ruby"
       require "weaviate"
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Langchain
-  VERSION = "0.5.5"
+  VERSION = "0.5.7"
 end
data/lib/langchain.rb CHANGED
@@ -46,36 +46,48 @@ require_relative "./langchain/version"
 #
 # LangChain.rb uses standard logging mechanisms and defaults to :debug level. Most messages are at info level, but we will add debug or warn statements as needed. To show all log messages:
 #
-#     Langchain.logger.level = :info
+#     Langchain.logger.level = :info
 module Langchain
+  autoload :Loader, "langchain/loader"
+  autoload :Data, "langchain/data"
+  autoload :Conversation, "langchain/conversation"
+  autoload :DependencyHelper, "langchain/dependency_helper"
+  autoload :ContextualLogger, "langchain/contextual_logger"
+
   class << self
-    # @return [Logger]
-    attr_accessor :logger
+    # @return [ContextualLogger]
+    attr_reader :logger
+
+    # @param logger [Logger]
+    # @return [ContextualLogger]
+    def logger=(logger)
+      @logger = ContextualLogger.new(logger)
+    end
 
     # @return [Pathname]
     attr_reader :root
   end
 
-  @logger ||= ::Logger.new($stdout, level: :warn, formatter: ->(severity, datetime, progname, msg) { "[LangChain.rb]".yellow + " #{msg}\n" })
+  self.logger ||= ::Logger.new($stdout, level: :warn)
 
   @root = Pathname.new(__dir__)
 
-  autoload :Loader, "langchain/loader"
-  autoload :Data, "langchain/data"
-  autoload :Conversation, "langchain/conversation"
-  autoload :DependencyHelper, "langchain/dependency_helper"
-
   module Agent
     autoload :Base, "langchain/agent/base"
     autoload :ChainOfThoughtAgent, "langchain/agent/chain_of_thought_agent/chain_of_thought_agent.rb"
     autoload :SQLQueryAgent, "langchain/agent/sql_query_agent/sql_query_agent.rb"
   end
 
+  module Chunker
+    autoload :Base, "langchain/chunker/base"
+    autoload :Text, "langchain/chunker/text"
+  end
+
   module Tool
     autoload :Base, "langchain/tool/base"
     autoload :Calculator, "langchain/tool/calculator"
     autoload :RubyCodeInterpreter, "langchain/tool/ruby_code_interpreter"
-    autoload :SerpApi, "langchain/tool/serp_api"
+    autoload :GoogleSearch, "langchain/tool/google_search"
     autoload :Weather, "langchain/tool/weather"
     autoload :Wikipedia, "langchain/tool/wikipedia"
     autoload :Database, "langchain/tool/database"
@@ -95,8 +107,8 @@ module Langchain
 
   module Utils
     module TokenLength
-      class TokenLimitExceeded < StandardError; end
-
+      autoload :BaseValidator, "langchain/utils/token_length/base_validator"
+      autoload :TokenLimitExceeded, "langchain/utils/token_length/token_limit_exceeded"
       autoload :OpenAIValidator, "langchain/utils/token_length/openai_validator"
      autoload :GooglePalmValidator, "langchain/utils/token_length/google_palm_validator"
     end
@@ -130,4 +142,8 @@ module Langchain
     autoload :PromptTemplate, "langchain/prompt/prompt_template"
     autoload :FewShotPromptTemplate, "langchain/prompt/few_shot_prompt_template"
   end
+
+  module Errors
+    class BaseError < StandardError; end
+  end
 end
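A minimal sketch of the logging change above: assigning a plain Logger now goes through the custom writer, which wraps it in ContextualLogger, and call sites can pass for: to attribute a message to a class (as the hnswlib adapter does earlier in this diff). The logger destination and level are illustrative, and it is an assumption that ContextualLogger consumes a class's logger_options (e.g. the color: :blue added to Vectorsearch::Base) when formatting.

```ruby
require "logger"
require "langchain"

# The logger= writer wraps the plain Logger in Langchain::ContextualLogger
Langchain.logger = Logger.new($stdout, level: :info)

# Tag the message with the emitting class, mirroring the updated call sites
Langchain.logger.info("Loading the index", for: Langchain::Vectorsearch::Base)
```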
metadata CHANGED
@@ -1,29 +1,29 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.5.5
+  version: 0.5.7
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-06-12 00:00:00.000000000 Z
+date: 2023-06-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
-  name: tiktoken_ruby
+  name: baran
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.0.5
+        version: 0.1.6
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.0.5
+        version: 0.1.6
 - !ruby/object:Gem::Dependency
   name: colorize
   requirement: !ruby/object:Gem::Requirement
@@ -38,6 +38,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 0.8.1
+- !ruby/object:Gem::Dependency
+  name: tiktoken_ruby
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.0.5
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.0.5
 - !ruby/object:Gem::Dependency
   name: dotenv-rails
   requirement: !ruby/object:Gem::Requirement
@@ -456,10 +470,13 @@ files:
 - lib/langchain.rb
 - lib/langchain/agent/base.rb
 - lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent.rb
-- lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json
+- lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.yaml
 - lib/langchain/agent/sql_query_agent/sql_query_agent.rb
-- lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.json
-- lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.json
+- lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml
+- lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml
+- lib/langchain/chunker/base.rb
+- lib/langchain/chunker/text.rb
+- lib/langchain/contextual_logger.rb
 - lib/langchain/conversation.rb
 - lib/langchain/data.rb
 - lib/langchain/dependency_helper.rb
@@ -469,7 +486,7 @@ files:
 - lib/langchain/llm/google_palm.rb
 - lib/langchain/llm/hugging_face.rb
 - lib/langchain/llm/openai.rb
-- lib/langchain/llm/prompts/summarize_template.json
+- lib/langchain/llm/prompts/summarize_template.yaml
 - lib/langchain/llm/replicate.rb
 - lib/langchain/loader.rb
 - lib/langchain/processors/base.rb
@@ -488,12 +505,14 @@ files:
 - lib/langchain/tool/base.rb
 - lib/langchain/tool/calculator.rb
 - lib/langchain/tool/database.rb
+- lib/langchain/tool/google_search.rb
 - lib/langchain/tool/ruby_code_interpreter.rb
-- lib/langchain/tool/serp_api.rb
 - lib/langchain/tool/weather.rb
 - lib/langchain/tool/wikipedia.rb
+- lib/langchain/utils/token_length/base_validator.rb
 - lib/langchain/utils/token_length/google_palm_validator.rb
 - lib/langchain/utils/token_length/openai_validator.rb
+- lib/langchain/utils/token_length/token_limit_exceeded.rb
 - lib/langchain/vectorsearch/base.rb
 - lib/langchain/vectorsearch/chroma.rb
 - lib/langchain/vectorsearch/hnswlib.rb
@@ -528,7 +547,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.3.7
+rubygems_version: 3.2.3
 signing_key:
 specification_version: 4
 summary: Build LLM-backed Ruby applications with Ruby's LangChain
data/lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json DELETED
@@ -1,10 +0,0 @@
-{
-  "_type": "prompt",
-  "template": "Today is {date} and you can use tools to get new information. Answer the following questions as best you can using the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of {tool_names}\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nBegin!\n\nQuestion: {question}\nThought:",
-  "input_variables": [
-    "date",
-    "question",
-    "tools",
-    "tool_names"
-  ]
-}
data/lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.json DELETED
@@ -1,10 +0,0 @@
-{
-  "_type": "prompt",
-  "template":
-    "Given an input question and results of a SQL query, look at the results and return the answer. Use the following format:\nQuestion: {question}\nThe SQL query: {sql_query}\nResult of the SQLQuery: {results}\nFinal answer: Final answer here",
-  "input_variables": [
-    "question",
-    "sql_query",
-    "results"
-  ]
-}
data/lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.json DELETED
@@ -1,10 +0,0 @@
-{
-  "_type": "prompt",
-  "template":
-    "Given an input question, create a syntactically correct {dialect} query to run, then return the query in valid SQL.\nNever query for all the columns from a specific table, only ask for a the few relevant columns given the question.\nPay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Pay attention to which column is in which table. Also, qualify column names with the table name when needed.\nOnly use the tables listed below.\n{schema}\nUse the following format:\nQuestion: {question}\nSQLQuery:",
-  "input_variables": [
-    "dialect",
-    "schema",
-    "question"
-  ]
-}
data/lib/langchain/llm/prompts/summarize_template.json DELETED
@@ -1,5 +0,0 @@
-{
-  "_type": "prompt",
-  "input_variables": ["text"],
-  "template": "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY:"
-}
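The JSON prompt templates removed above were converted to the YAML files listed at the top of this diff. As a hedged sketch (the load_from_path method and its file_path: keyword come from the gem's prompt-serialization support and are not shown in this diff), loading the YAML replacement for summarize_template.json might look like:

```ruby
require "langchain"

prompt = Langchain::Prompt.load_from_path(
  file_path: Langchain.root.join("langchain/llm/prompts/summarize_template.yaml")
)

# The template declares a single input variable, "text"
puts prompt.format(text: "LangChain.rb wraps LLMs, prompts, and vector search backends in Ruby.")
```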