vanna 0.7.5__py3-none-any.whl → 0.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vanna/base/base.py +5 -2
- vanna/deepseek/__init__.py +1 -0
- vanna/deepseek/deepseek_chat.py +60 -0
- vanna/google/gemini_chat.py +23 -1
- vanna/ollama/ollama.py +1 -1
- vanna/opensearch/__init__.py +1 -0
- vanna/opensearch/opensearch_vector_semantic.py +175 -0
- vanna/oracle/__init__.py +1 -0
- vanna/oracle/oracle_vector.py +585 -0
- vanna/remote.py +1 -1
- {vanna-0.7.5.dist-info → vanna-0.7.6.dist-info}/METADATA +52 -5
- {vanna-0.7.5.dist-info → vanna-0.7.6.dist-info}/RECORD +13 -8
- {vanna-0.7.5.dist-info → vanna-0.7.6.dist-info}/WHEEL +1 -1
vanna/base/base.py
CHANGED

@@ -306,7 +306,7 @@ class VannaBase(ABC):
 
         message_log = [
             self.system_message(
-                f"You are a helpful data assistant. The user asked the question: '{question}'\n\nThe SQL query for this question was: {sql}\n\nThe following is a pandas DataFrame with the results of the query: \n{df.to_markdown()}\n\n"
+                f"You are a helpful data assistant. The user asked the question: '{question}'\n\nThe SQL query for this question was: {sql}\n\nThe following is a pandas DataFrame with the results of the query: \n{df.head(25).to_markdown()}\n\n"
             ),
             self.user_message(
                 f"Generate a list of {n_questions} followup questions that the user might ask about this data. Respond with a list of questions, one per line. Do not answer with any explanations -- just the questions. Remember that there should be an unambiguous SQL query that can be generated from the question. Prefer questions that are answerable outside of the context of this conversation. Prefer questions that are slight modifications of the SQL query that was generated that allow digging deeper into the data. Each question will be turned into a button that the user can click to generate a new SQL query so don't use 'example' type questions. Each question must have a one-to-one correspondence with an instantiated SQL query." +

@@ -689,6 +689,9 @@ class VannaBase(ABC):
         return response
 
     def _extract_python_code(self, markdown_string: str) -> str:
+        # Strip whitespace to avoid indentation errors in LLM-generated code
+        markdown_string = markdown_string.strip()
+
         # Regex pattern to match Python code blocks
         pattern = r"```[\w\s]*python\n([\s\S]*?)```|```([\s\S]*?)```"
 

@@ -1167,7 +1170,7 @@ class VannaBase(ABC):
         vn.connect_to_oracle(
             user="username",
             password="password",
-
+            dsn="host:port/sid",
         )
         ```
         Args:
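
Two behavior changes land in base.py: generate_followup_questions now sends only the first 25 rows of the result DataFrame to the LLM, keeping the prompt bounded for large query results, and _extract_python_code strips the model's markdown before running the regex. A minimal sketch of the failure mode the new strip() guards against (illustrative, not taken from the diff):

    # LLM output sometimes arrives with stray leading whitespace; exec() then
    # fails on the first line before code extraction even matters.
    code = "    print('hello')"
    try:
        exec(code)            # IndentationError: unexpected indent
    except IndentationError as e:
        print(e)
    exec(code.strip())        # runs once the whitespace is stripped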
vanna/deepseek/__init__.py
ADDED

@@ -0,0 +1 @@
+from .deepseek_chat import DeepSeekChat
vanna/deepseek/deepseek_chat.py
ADDED

@@ -0,0 +1,60 @@
+import os
+
+from openai import OpenAI
+
+from ..base import VannaBase
+
+
+
+# from vanna.chromadb import ChromaDB_VectorStore
+
+# class DeepSeekVanna(ChromaDB_VectorStore, DeepSeekChat):
+#     def __init__(self, config=None):
+#         ChromaDB_VectorStore.__init__(self, config=config)
+#         DeepSeekChat.__init__(self, config=config)
+
+# vn = DeepSeekVanna(config={"api_key": "sk-************", "model": "deepseek-chat"})
+
+
+class DeepSeekChat(VannaBase):
+    def __init__(self, config=None):
+        if config is None:
+            raise ValueError(
+                "For DeepSeek, config must be provided with an api_key and model"
+            )
+        if "api_key" not in config:
+            raise ValueError("config must contain a DeepSeek api_key")
+
+        if "model" not in config:
+            raise ValueError("config must contain a DeepSeek model")
+
+        api_key = config["api_key"]
+        model = config["model"]
+        self.model = model
+        self.client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com/v1")
+
+    def system_message(self, message: str) -> any:
+        return {"role": "system", "content": message}
+
+    def user_message(self, message: str) -> any:
+        return {"role": "user", "content": message}
+
+    def assistant_message(self, message: str) -> any:
+        return {"role": "assistant", "content": message}
+
+    def generate_sql(self, question: str, **kwargs) -> str:
+        # Use the parent class's generate_sql
+        sql = super().generate_sql(question, **kwargs)
+
+        # Replace "\_" with "_"
+        sql = sql.replace("\\_", "_")
+
+        return sql
+
+    def submit_prompt(self, prompt, **kwargs) -> str:
+        chat_response = self.client.chat.completions.create(
+            model=self.model,
+            messages=prompt,
+        )
+
+        return chat_response.choices[0].message.content
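
The commented-out block at the top of the new module doubles as its intended usage; expanded into a runnable sketch (the ChromaDB store and the API key are placeholders, as in the module's own comment):

    from vanna.chromadb import ChromaDB_VectorStore
    from vanna.deepseek import DeepSeekChat

    class DeepSeekVanna(ChromaDB_VectorStore, DeepSeekChat):
        def __init__(self, config=None):
            ChromaDB_VectorStore.__init__(self, config=config)
            DeepSeekChat.__init__(self, config=config)

    vn = DeepSeekVanna(config={"api_key": "sk-...", "model": "deepseek-chat"})

Since DeepSeekChat drives DeepSeek's OpenAI-compatible endpoint through the openai client, no DeepSeek-specific SDK is required.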
vanna/google/gemini_chat.py
CHANGED

@@ -1,4 +1,5 @@
 import os
+
 from ..base import VannaBase
 
 

@@ -30,8 +31,29 @@ class GoogleGeminiChat(VannaBase):
             self.chat_model = genai.GenerativeModel(model_name)
         else:
             # Authenticate using VertexAI
+            import google.auth
+            import vertexai
             from vertexai.generative_models import GenerativeModel
-
+
+            json_file_path = config.get("google_credentials")  # Assuming the JSON file path is provided in the config
+
+            if not json_file_path or not os.path.exists(json_file_path):
+                raise FileNotFoundError(f"JSON credentials file not found at: {json_file_path}")
+
+            try:
+                # Validate and set the JSON file path for GOOGLE_APPLICATION_CREDENTIALS
+                os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = json_file_path
+
+                # Initialize VertexAI with the credentials
+                credentials, _ = google.auth.default()
+                vertexai.init(credentials=credentials)
+                self.chat_model = GenerativeModel(model_name)
+            except google.auth.exceptions.DefaultCredentialsError as e:
+                raise RuntimeError(f"Default credentials error: {e}")
+            except google.auth.exceptions.TransportError as e:
+                raise RuntimeError(f"Transport error during authentication: {e}")
+            except Exception as e:
+                raise RuntimeError(f"Failed to authenticate using JSON file: {e}")
 
     def system_message(self, message: str) -> any:
         return message
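
The VertexAI branch now authenticates from a service-account JSON file passed under the "google_credentials" config key. A hedged configuration sketch (only "google_credentials" comes from the diff; the import path and the other keys are assumptions about the surrounding class):

    from vanna.google import GoogleGeminiChat

    vn = GoogleGeminiChat(config={
        "model_name": "gemini-1.5-pro",                         # illustrative
        "google_credentials": "/path/to/service-account.json",  # key read by the new code
    })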
vanna/ollama/ollama.py
CHANGED

@@ -91,7 +91,7 @@ class Ollama(VannaBase):
                  f"model={self.model},\n"
                  f"options={self.ollama_options},\n"
                  f"keep_alive={self.keep_alive}")
-        self.log(f"Prompt Content:\n{json.dumps(prompt)}")
+        self.log(f"Prompt Content:\n{json.dumps(prompt, ensure_ascii=False)}")
         response_dict = self.ollama_client.chat(model=self.model,
                                                 messages=prompt,
                                                 stream=False,
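
This change, like the remote.py change further down, passes ensure_ascii=False so that non-ASCII prompt content is logged verbatim rather than as \uXXXX escapes:

    import json

    prompt = [{"role": "user", "content": "你好"}]
    print(json.dumps(prompt))                      # [..."\u4f60\u597d"...]
    print(json.dumps(prompt, ensure_ascii=False))  # [..."你好"...]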
vanna/opensearch/opensearch_vector_semantic.py
ADDED
@@ -0,0 +1,175 @@
+import json
+
+import pandas as pd
+from langchain_community.vectorstores import OpenSearchVectorSearch
+
+from ..base import VannaBase
+from ..utils import deterministic_uuid
+
+
+class OpenSearch_Semantic_VectorStore(VannaBase):
+    def __init__(self, config=None):
+        VannaBase.__init__(self, config=config)
+        if config is None:
+            config = {}
+
+        if "embedding_function" in config:
+            self.embedding_function = config.get("embedding_function")
+        else:
+            from langchain_huggingface import HuggingFaceEmbeddings
+            self.embedding_function = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
+
+        self.n_results_sql = config.get("n_results_sql", config.get("n_results", 10))
+        self.n_results_documentation = config.get("n_results_documentation", config.get("n_results", 10))
+        self.n_results_ddl = config.get("n_results_ddl", config.get("n_results", 10))
+
+        self.document_index = config.get("es_document_index", "vanna_document_index")
+        self.ddl_index = config.get("es_ddl_index", "vanna_ddl_index")
+        self.question_sql_index = config.get("es_question_sql_index", "vanna_questions_sql_index")
+
+        self.log(f"OpenSearch_Semantic_VectorStore initialized with document_index: {self.document_index}, ddl_index: {self.ddl_index}, question_sql_index: {self.question_sql_index}")
+
+        es_urls = config.get("es_urls", "https://localhost:9200")
+        ssl = config.get("es_ssl", True)
+        verify_certs = config.get("es_verify_certs", True)
+
+        if "es_user" in config:
+            auth = (config["es_user"], config["es_password"])
+        else:
+            auth = None
+
+        headers = config.get("es_headers", None)
+        timeout = config.get("es_timeout", 60)
+        max_retries = config.get("es_max_retries", 10)
+
+        common_args = {
+            "opensearch_url": es_urls,
+            "embedding_function": self.embedding_function,
+            "engine": "faiss",
+            "http_auth": auth,
+            "use_ssl": ssl,
+            "verify_certs": verify_certs,
+            "timeout": timeout,
+            "max_retries": max_retries,
+            "retry_on_timeout": True,
+            "headers": headers,
+        }
+
+        self.documentation_store = OpenSearchVectorSearch(index_name=self.document_index, **common_args)
+        self.ddl_store = OpenSearchVectorSearch(index_name=self.ddl_index, **common_args)
+        self.sql_store = OpenSearchVectorSearch(index_name=self.question_sql_index, **common_args)
+
+    def add_ddl(self, ddl: str, **kwargs) -> str:
+        _id = deterministic_uuid(ddl) + "-ddl"
+        self.ddl_store.add_texts(texts=[ddl], ids=[_id], **kwargs)
+        return _id
+
+    def add_documentation(self, documentation: str, **kwargs) -> str:
+        _id = deterministic_uuid(documentation) + "-doc"
+        self.documentation_store.add_texts(texts=[documentation], ids=[_id], **kwargs)
+        return _id
+
+    def add_question_sql(self, question: str, sql: str, **kwargs) -> str:
+        question_sql_json = json.dumps(
+            {
+                "question": question,
+                "sql": sql,
+            },
+            ensure_ascii=False,
+        )
+
+        _id = deterministic_uuid(question_sql_json) + "-sql"
+        self.sql_store.add_texts(texts=[question_sql_json], ids=[_id], **kwargs)
+        return _id
+
+    def get_related_ddl(self, question: str, **kwargs) -> list:
+        documents = self.ddl_store.similarity_search(query=question, k=self.n_results_ddl)
+        return [document.page_content for document in documents]
+
+    def get_related_documentation(self, question: str, **kwargs) -> list:
+        documents = self.documentation_store.similarity_search(query=question, k=self.n_results_documentation)
+        return [document.page_content for document in documents]
+
+    def get_similar_question_sql(self, question: str, **kwargs) -> list:
+        documents = self.sql_store.similarity_search(query=question, k=self.n_results_sql)
+        return [json.loads(document.page_content) for document in documents]
+
+    def get_training_data(self, **kwargs) -> pd.DataFrame:
+        data = []
+        query = {
+            "query": {
+                "match_all": {}
+            }
+        }
+
+        indices = [
+            {"index": self.document_index, "type": "documentation"},
+            {"index": self.question_sql_index, "type": "sql"},
+            {"index": self.ddl_index, "type": "ddl"},
+        ]
+
+        # Use documentation_store.client consistently for search on all indices
+        opensearch_client = self.documentation_store.client
+
+        for index_info in indices:
+            index_name = index_info["index"]
+            training_data_type = index_info["type"]
+            scroll = '1m'  # keep scroll context for 1 minute
+            response = opensearch_client.search(
+                index=index_name,
+                ignore_unavailable=True,
+                body=query,
+                scroll=scroll,
+                size=1000
+            )
+
+            scroll_id = response.get('_scroll_id')
+
+            while scroll_id:
+                hits = response['hits']['hits']
+                if not hits:
+                    break  # No more hits, exit loop
+
+                for hit in hits:
+                    source = hit['_source']
+                    if training_data_type == "sql":
+                        try:
+                            doc_dict = json.loads(source['text'])
+                            content = doc_dict.get("sql")
+                            question = doc_dict.get("question")
+                        except json.JSONDecodeError as e:
+                            self.log(f"Skipping row with custom_id {hit['_id']} due to JSON parsing error: {e}", "Error")
+                            continue
+                    else:  # documentation or ddl
+                        content = source['text']
+                        question = None
+
+                    data.append({
+                        "id": hit["_id"],
+                        "training_data_type": training_data_type,
+                        "question": question,
+                        "content": content,
+                    })
+
+                # Get next batch of results, using documentation_store.client.scroll
+                response = opensearch_client.scroll(scroll_id=scroll_id, scroll=scroll)
+                scroll_id = response.get('_scroll_id')
+
+        return pd.DataFrame(data)
+
+    def remove_training_data(self, id: str, **kwargs) -> bool:
+        try:
+            if id.endswith("-sql"):
+                return self.sql_store.delete(ids=[id], **kwargs)
+            elif id.endswith("-ddl"):
+                return self.ddl_store.delete(ids=[id], **kwargs)
+            elif id.endswith("-doc"):
+                return self.documentation_store.delete(ids=[id], **kwargs)
+            else:
+                return False
+        except Exception as e:
+            self.log(f"Error deleting training data: {e}", "Error")
+            return False
+
+    def generate_embedding(self, data: str, **kwargs) -> list[float]:
+        pass
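
Unlike the existing keyword-oriented OpenSearch_VectorStore, this class retrieves by embedding similarity via LangChain's OpenSearchVectorSearch. Because VannaBase is abstract, it is meant to be mixed with an LLM class; a hedged usage sketch, assuming the one-line opensearch/__init__.py change exports the class (the OpenAI_Chat mixin and all credential values are illustrative, while the es_* keys are the ones read in __init__ above):

    from vanna.openai import OpenAI_Chat
    from vanna.opensearch import OpenSearch_Semantic_VectorStore

    class MyVanna(OpenSearch_Semantic_VectorStore, OpenAI_Chat):
        def __init__(self, config=None):
            OpenSearch_Semantic_VectorStore.__init__(self, config=config)
            OpenAI_Chat.__init__(self, config=config)

    vn = MyVanna(config={
        "es_urls": "https://localhost:9200",   # store default
        "es_user": "admin",
        "es_password": "admin",
        "es_verify_certs": False,              # e.g. a self-signed dev cluster
        "api_key": "sk-...",                   # placeholder LLM credentials
        "model": "gpt-4o-mini",
    })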
vanna/oracle/__init__.py
ADDED

@@ -0,0 +1 @@
+from .oracle_vector import Oracle_VectorStore
vanna/oracle/oracle_vector.py
ADDED

@@ -0,0 +1,585 @@
+import json
+import uuid
+from typing import List, Optional, Tuple
+
+import oracledb
+import pandas as pd
+from chromadb.utils import embedding_functions
+
+from ..base import VannaBase
+
+default_ef = embedding_functions.DefaultEmbeddingFunction()
+
+
+class Oracle_VectorStore(VannaBase):
+    def __init__(self, config=None):
+        VannaBase.__init__(self, config=config)
+
+        if config is not None:
+            self.embedding_function = config.get(
+                "embedding_function",
+                default_ef
+            )
+            self.pre_delete_collection = config.get("pre_delete_collection",
+                                                    False)
+            self.cmetadata = config.get("cmetadata", {"created_by": "oracle"})
+        else:
+            self.embedding_function = default_ef
+            self.pre_delete_collection = False
+            self.cmetadata = {"created_by": "oracle"}
+
+        self.oracle_conn = oracledb.connect(dsn=config.get("dsn"))
+        self.oracle_conn.call_timeout = 30000
+        self.documentation_collection = "documentation"
+        self.ddl_collection = "ddl"
+        self.sql_collection = "sql"
+        self.n_results = config.get("n_results", 10)
+        self.n_results_ddl = config.get("n_results_ddl", self.n_results)
+        self.n_results_sql = config.get("n_results_sql", self.n_results)
+        self.n_results_documentation = config.get("n_results_documentation",
+                                                  self.n_results)
+        self.create_tables_if_not_exists()
+        self.create_collections_if_not_exists(self.documentation_collection)
+        self.create_collections_if_not_exists(self.ddl_collection)
+        self.create_collections_if_not_exists(self.sql_collection)
+
+    def generate_embedding(self, data: str, **kwargs) -> List[float]:
+        embeddings = self.embedding_function([data])
+        if len(embeddings) == 1:
+            return list(embeddings[0].astype(float))
+        return list(embeddings.astype(float))
+
+    def add_question_sql(self, question: str, sql: str, **kwargs) -> str:
+        cmetadata = self.cmetadata.copy()
+        collection = self.get_collection(self.sql_collection)
+        question_sql_json = json.dumps(
+            {
+                "question": question,
+                "sql": sql,
+            },
+            ensure_ascii=False,
+        )
+        id = str(uuid.uuid4())
+        embeddings = self.generate_embedding(question)
+        custom_id = id + "-sql"
+
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR)
+        cursor.execute(
+            """
+            INSERT INTO oracle_embedding (
+                collection_id,
+                embedding,
+                document,
+                cmetadata,
+                custom_id,
+                uuid
+            ) VALUES (
+                :1,
+                TO_VECTOR(:2),
+                :3,
+                :4,
+                :5,
+                :6
+            )
+            """, [
+                collection["uuid"],
+                embeddings,
+                question_sql_json,
+                json.dumps(cmetadata),
+                custom_id,
+                id
+            ]
+        )
+
+        self.oracle_conn.commit()
+        cursor.close()
+        return id
+
+    def add_ddl(self, ddl: str, **kwargs) -> str:
+        collection = self.get_collection(self.ddl_collection)
+        question_ddl_json = json.dumps(
+            {
+                "question": None,
+                "ddl": ddl,
+            },
+            ensure_ascii=False,
+        )
+        id = str(uuid.uuid4())
+        custom_id = id + "-ddl"
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR)
+        cursor.execute(
+            """
+            INSERT INTO oracle_embedding (
+                collection_id,
+                embedding,
+                document,
+                cmetadata,
+                custom_id,
+                uuid
+            ) VALUES (
+                :1,
+                TO_VECTOR(:2),
+                :3,
+                :4,
+                :5,
+                :6
+            )
+            """, [
+                collection["uuid"],
+                self.generate_embedding(ddl),
+                question_ddl_json,
+                json.dumps(self.cmetadata),
+                custom_id,
+                id
+            ]
+        )
+        self.oracle_conn.commit()
+        cursor.close()
+        return id
+
+    def add_documentation(self, documentation: str, **kwargs) -> str:
+        collection = self.get_collection(self.documentation_collection)
+        question_documentation_json = json.dumps(
+            {
+                "question": None,
+                "documentation": documentation,
+            },
+            ensure_ascii=False,
+        )
+        id = str(uuid.uuid4())
+        custom_id = id + "-doc"
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR)
+        cursor.execute(
+            """
+            INSERT INTO oracle_embedding (
+                collection_id,
+                embedding,
+                document,
+                cmetadata,
+                custom_id,
+                uuid
+            ) VALUES (
+                :1,
+                TO_VECTOR(:2),
+                :3,
+                :4,
+                :5,
+                :6
+            )
+            """, [
+                collection["uuid"],
+                self.generate_embedding(documentation),
+                question_documentation_json,
+                json.dumps(self.cmetadata),
+                custom_id,
+                id
+            ]
+        )
+        self.oracle_conn.commit()
+        cursor.close()
+        return id
+
+    def get_training_data(self, **kwargs) -> pd.DataFrame:
+        df = pd.DataFrame()
+
+        cursor = self.oracle_conn.cursor()
+        sql_collection = self.get_collection(self.sql_collection)
+        cursor.execute(
+            """
+            SELECT
+                document,
+                uuid
+            FROM oracle_embedding
+            WHERE
+                collection_id = :1
+            """, [
+                sql_collection["uuid"]
+            ]
+        )
+        sql_data = cursor.fetchall()
+
+        if sql_data is not None:
+            # Extract the documents and ids
+            documents = [row_data[0] for row_data in sql_data]
+            ids = [row_data[1] for row_data in sql_data]
+
+            # Create a DataFrame
+            df_sql = pd.DataFrame(
+                {
+                    "id": ids,
+                    "question": [
+                        json.loads(doc)["question"] if isinstance(doc, str)
+                        else doc["question"] for doc in documents],
+                    "content": [
+                        json.loads(doc)["sql"] if isinstance(doc, str)
+                        else doc["sql"] for doc in documents],
+                }
+            )
+            df_sql["training_data_type"] = "sql"
+            df = pd.concat([df, df_sql])
+
+        ddl_collection = self.get_collection(self.ddl_collection)
+        cursor.execute(
+            """
+            SELECT
+                document,
+                uuid
+            FROM oracle_embedding
+            WHERE
+                collection_id = :1
+            """, [ddl_collection["uuid"]])
+        ddl_data = cursor.fetchall()
+
+        if ddl_data is not None:
+            # Extract the documents and ids
+            documents = [row_data[0] for row_data in ddl_data]
+            ids = [row_data[1] for row_data in ddl_data]
+
+            # Create a DataFrame
+            df_ddl = pd.DataFrame(
+                {
+                    "id": ids,
+                    "question": [None for _ in documents],
+                    "content": [
+                        json.loads(doc)["ddl"] if isinstance(doc, str)
+                        else doc["ddl"] for doc in documents],
+                }
+            )
+            df_ddl["training_data_type"] = "ddl"
+            df = pd.concat([df, df_ddl])
+
+        doc_collection = self.get_collection(self.documentation_collection)
+        cursor.execute(
+            """
+            SELECT
+                document,
+                uuid
+            FROM oracle_embedding
+            WHERE
+                collection_id = :1
+            """, [doc_collection["uuid"]])
+        doc_data = cursor.fetchall()
+
+        if doc_data is not None:
+            # Extract the documents and ids
+            documents = [row_data[0] for row_data in doc_data]
+            ids = [row_data[1] for row_data in doc_data]
+
+            # Create a DataFrame
+            df_doc = pd.DataFrame(
+                {
+                    "id": ids,
+                    "question": [None for _ in documents],
+                    "content": [
+                        json.loads(doc)["documentation"] if isinstance(doc, str)
+                        else doc["documentation"] for doc in documents],
+                }
+            )
+            df_doc["training_data_type"] = "documentation"
+            df = pd.concat([df, df_doc])
+
+        self.oracle_conn.commit()
+        cursor.close()
+        return df
+
+    def remove_training_data(self, id: str, **kwargs) -> bool:
+        cursor = self.oracle_conn.cursor()
+        cursor.execute(
+            """
+            DELETE
+            FROM
+                oracle_embedding
+            WHERE
+                uuid = :1
+            """, [id])
+
+        self.oracle_conn.commit()
+        cursor.close()
+        return True
+
+    def update_training_data(self, id: str, train_type: str, question: str,
+                             **kwargs) -> bool:
+        print(f"{train_type=}")
+        update_content = kwargs["content"]
+        if train_type == 'sql':
+            update_json = json.dumps(
+                {
+                    "question": question,
+                    "sql": update_content,
+                }
+            )
+        elif train_type == 'ddl':
+            update_json = json.dumps(
+                {
+                    "question": None,
+                    "ddl": update_content,
+                }
+            )
+        elif train_type == 'documentation':
+            update_json = json.dumps(
+                {
+                    "question": None,
+                    "documentation": update_content,
+                }
+            )
+        else:
+            update_json = json.dumps(
+                {
+                    "question": question,
+                    "sql": update_content,
+                }
+            )
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(oracledb.DB_TYPE_VECTOR, oracledb.DB_TYPE_JSON)
+        cursor.execute(
+            """
+            UPDATE
+                oracle_embedding
+            SET
+                embedding = TO_VECTOR(:1),
+                document = JSON_MERGEPATCH(document, :2)
+            WHERE
+                uuid = :3
+            """, [
+                self.generate_embedding(update_content),
+                update_json,
+                id
+            ]
+        )
+
+        self.oracle_conn.commit()
+        cursor.close()
+        return True
+
+    @staticmethod
+    def _extract_documents(query_results) -> list:
+        """
+        Static method to extract the documents from the results of a query.
+
+        Args:
+            query_results (pd.DataFrame): The dataframe to use.
+
+        Returns:
+            List[str] or None: The extracted documents, or an empty list or single document if an error occurred.
+        """
+        if query_results is None or len(query_results) == 0:
+            return []
+
+        documents = [
+            json.loads(row_data[0]) if isinstance(row_data[0], str) else
+            row_data[0]
+            for row_data in query_results]
+
+        return documents
+
+    def get_similar_question_sql(self, question: str, **kwargs) -> list:
+        embeddings = self.generate_embedding(question)
+        collection = self.get_collection(self.sql_collection)
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR,
+                             oracledb.DB_TYPE_VECTOR)
+        cursor.execute(
+            """
+            SELECT document
+            FROM oracle_embedding
+            WHERE collection_id = :1
+            ORDER BY VECTOR_DISTANCE(embedding, TO_VECTOR(:2), COSINE)
+            FETCH FIRST :3 ROWS ONLY
+            """, [
+                collection["uuid"],
+                embeddings,
+                self.n_results_sql
+            ]
+        )
+        results = cursor.fetchall()
+        cursor.close()
+        return self._extract_documents(results)
+
+    def get_related_ddl(self, question: str, **kwargs) -> list:
+        collection = self.get_collection(self.ddl_collection)
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR)
+        cursor.execute(
+            """
+            SELECT
+                document
+            FROM oracle_embedding
+            WHERE
+                collection_id = :1
+            ORDER BY VECTOR_DISTANCE(embedding, TO_VECTOR(:2), COSINE)
+            FETCH FIRST :top_k ROWS ONLY
+            """, [
+                collection["uuid"],
+                self.generate_embedding(question),
+                100
+            ]
+        )
+        results = cursor.fetchall()
+
+        self.oracle_conn.commit()
+        cursor.close()
+        return Oracle_VectorStore._extract_documents(results)
+
+    def search_tables_metadata(self,
+                               engine: str = None,
+                               catalog: str = None,
+                               schema: str = None,
+                               table_name: str = None,
+                               ddl: str = None,
+                               size: int = 10,
+                               **kwargs) -> list:
+        pass
+
+    def get_related_documentation(self, question: str, **kwargs) -> list:
+        collection = self.get_collection(self.documentation_collection)
+        cursor = self.oracle_conn.cursor()
+        cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR)
+        cursor.execute(
+            """
+            SELECT
+                document
+            FROM oracle_embedding
+            WHERE
+                collection_id = :1
+            ORDER BY VECTOR_DISTANCE(embedding, TO_VECTOR(:2), DOT)
+            FETCH FIRST :top_k ROWS ONLY
+            """, [
+                collection["uuid"],
+                self.generate_embedding(question),
+                100
+            ]
+        )
+        results = cursor.fetchall()
+
+        self.oracle_conn.commit()
+        cursor.close()
+
+        return Oracle_VectorStore._extract_documents(results)
+
+    def create_tables_if_not_exists(self) -> None:
+        cursor = self.oracle_conn.cursor()
+        cursor.execute(
+            """
+            CREATE TABLE IF NOT EXISTS oracle_collection (
+                name VARCHAR2(200) NOT NULL,
+                cmetadata json NOT NULL,
+                uuid VARCHAR2(200) NOT NULL,
+                CONSTRAINT oc_key_uuid PRIMARY KEY ( uuid )
+            )
+            """
+        )
+
+        cursor.execute(
+            """
+            CREATE TABLE IF NOT EXISTS oracle_embedding (
+                collection_id VARCHAR2(200) NOT NULL,
+                embedding vector NOT NULL,
+                document json NOT NULL,
+                cmetadata json NOT NULL,
+                custom_id VARCHAR2(200) NOT NULL,
+                uuid VARCHAR2(200) NOT NULL,
+                CONSTRAINT oe_key_uuid PRIMARY KEY ( uuid )
+            )
+            """
+        )
+
+        self.oracle_conn.commit()
+        cursor.close()
+
+    def create_collections_if_not_exists(
+        self,
+        name: str,
+        cmetadata: Optional[dict] = None,
+    ) -> Tuple[dict, bool]:
+        """
+        Get or create a collection.
+        Returns [Collection, bool] where the bool is True if the collection was created.
+        """
+        if self.pre_delete_collection:
+            self.delete_collection(name)
+        created = False
+        collection = self.get_collection(name)
+        if collection:
+            return collection, created
+
+        cmetadata = json.dumps(
+            self.cmetadata) if cmetadata is None else json.dumps(cmetadata)
+        collection_id = str(uuid.uuid4())
+        cursor = self.oracle_conn.cursor()
+        cursor.execute(
+            """
+            INSERT INTO oracle_collection(name, cmetadata, uuid)
+            VALUES (:1, :2, :3)
+            """, [
+                name,
+                cmetadata,
+                str(collection_id)
+            ]
+        )
+
+        self.oracle_conn.commit()
+        cursor.close()
+
+        collection = {"name": name, "cmetadata": cmetadata,
+                      "uuid": collection_id}
+        created = True
+        return collection, created
+
+    def get_collection(self, name) -> Optional[dict]:
+        return self.get_by_name(name)
+
+    def get_by_name(self, name: str) -> Optional[dict]:
+        cursor = self.oracle_conn.cursor()
+        cursor.execute(
+            """
+            SELECT
+                name,
+                cmetadata,
+                uuid
+            FROM
+                oracle_collection
+            WHERE
+                name = :1
+            FETCH FIRST 1 ROWS ONLY
+            """, [name])
+
+        for row in cursor:
+            return {"name": row[0], "cmetadata": row[1], "uuid": row[2]}
+
+        return  # type: ignore
+
+    def delete_collection(self, name) -> None:
+        collection = self.get_collection(name)
+        if not collection:
+            return
+
+        cursor = self.oracle_conn.cursor()
+        cursor.execute(
+            """
+            DELETE
+            FROM
+                oracle_embedding
+            WHERE
+                collection_id = ( SELECT uuid FROM oracle_collection WHERE name = :1 )
+            """, [name])
+        cursor.execute(
+            """
+            DELETE
+            FROM
+                oracle_collection
+            WHERE
+                name = :1
+            """, [name])
+
+        self.oracle_conn.commit()
+        cursor.close()
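
The Oracle store keeps collections and embeddings in two plain tables (oracle_collection, oracle_embedding) and ranks matches with VECTOR_DISTANCE over a native VECTOR column, so it needs a database release that has the VECTOR type (Oracle Database 23ai). A hedged usage sketch (the LLM mixin, credentials, and service name are illustrative; "dsn" is the only config key the constructor requires above):

    from vanna.openai import OpenAI_Chat
    from vanna.oracle import Oracle_VectorStore

    class MyVanna(Oracle_VectorStore, OpenAI_Chat):
        def __init__(self, config=None):
            Oracle_VectorStore.__init__(self, config=config)
            OpenAI_Chat.__init__(self, config=config)

    vn = MyVanna(config={
        "dsn": "scott/tiger@dbhost:1521/FREEPDB1",  # python-oracledb connect string
        "api_key": "sk-...",                        # placeholder LLM credentials
        "model": "gpt-4o-mini",
    })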
vanna/remote.py
CHANGED

@@ -62,7 +62,7 @@ class VannaDefault(VannaDB_VectorStore):
 
     def submit_prompt(self, prompt, **kwargs) -> str:
         # JSON-ify the prompt
-        json_prompt = json.dumps(prompt)
+        json_prompt = json.dumps(prompt, ensure_ascii=False)
 
         params = [StringData(data=json_prompt)]
 
{vanna-0.7.5.dist-info → vanna-0.7.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.3
 Name: vanna
-Version: 0.7.5
+Version: 0.7.6
 Summary: Generate SQL queries from natural language
 Author-email: Zain Hoda <zain@vanna.ai>
 Requires-Python: >=3.9

@@ -52,6 +52,8 @@ Requires-Dist: boto3 ; extra == "all"
 Requires-Dist: botocore ; extra == "all"
 Requires-Dist: langchain_core ; extra == "all"
 Requires-Dist: langchain_postgres ; extra == "all"
+Requires-Dist: langchain-community ; extra == "all"
+Requires-Dist: langchain-huggingface ; extra == "all"
 Requires-Dist: xinference-client ; extra == "all"
 Requires-Dist: anthropic ; extra == "anthropic"
 Requires-Dist: azure-search-documents ; extra == "azuresearch"

@@ -79,6 +81,10 @@ Requires-Dist: httpx ; extra == "ollama"
 Requires-Dist: openai ; extra == "openai"
 Requires-Dist: opensearch-py ; extra == "opensearch"
 Requires-Dist: opensearch-dsl ; extra == "opensearch"
+Requires-Dist: langchain-community ; extra == "opensearch"
+Requires-Dist: langchain-huggingface ; extra == "opensearch"
+Requires-Dist: oracledb ; extra == "oracle"
+Requires-Dist: chromadb ; extra == "oracle"
 Requires-Dist: langchain-postgres>=0.0.12 ; extra == "pgvector"
 Requires-Dist: pinecone-client ; extra == "pinecone"
 Requires-Dist: fastembed ; extra == "pinecone"

@@ -115,6 +121,7 @@ Provides-Extra: mysql
 Provides-Extra: ollama
 Provides-Extra: openai
 Provides-Extra: opensearch
+Provides-Extra: oracle
 Provides-Extra: pgvector
 Provides-Extra: pinecone
 Provides-Extra: postgres

@@ -129,9 +136,9 @@ Provides-Extra: zhipuai
 
 
 
-| GitHub | PyPI | Documentation |
-| ------ | ---- | ------------- |
-| [](https://github.com/vanna-ai/vanna) | [](https://pypi.org/project/vanna/) | [](https://vanna.ai/docs/) |
+| GitHub | PyPI | Documentation | Gurubase |
+| ------ | ---- | ------------- | -------- |
+| [](https://github.com/vanna-ai/vanna) | [](https://pypi.org/project/vanna/) | [](https://vanna.ai/docs/) | [](https://gurubase.io/g/vanna) |
 
 # Vanna
 Vanna is an MIT-licensed open-source Python RAG (Retrieval-Augmented Generation) framework for SQL generation and related functionality.

@@ -164,6 +171,46 @@ These are some of the user interfaces that we've built using Vanna. You can use
 - [vanna-ai/vanna-flask](https://github.com/vanna-ai/vanna-flask)
 - [vanna-ai/vanna-slack](https://github.com/vanna-ai/vanna-slack)
 
+## Supported LLMs
+
+- [OpenAI](https://github.com/vanna-ai/vanna/tree/main/src/vanna/openai)
+- [Anthropic](https://github.com/vanna-ai/vanna/tree/main/src/vanna/anthropic)
+- [Gemini](https://github.com/vanna-ai/vanna/blob/main/src/vanna/google/gemini_chat.py)
+- [HuggingFace](https://github.com/vanna-ai/vanna/blob/main/src/vanna/hf/hf.py)
+- [AWS Bedrock](https://github.com/vanna-ai/vanna/tree/main/src/vanna/bedrock)
+- [Ollama](https://github.com/vanna-ai/vanna/tree/main/src/vanna/ollama)
+- [Qianwen](https://github.com/vanna-ai/vanna/tree/main/src/vanna/qianwen)
+- [Qianfan](https://github.com/vanna-ai/vanna/tree/main/src/vanna/qianfan)
+- [Zhipu](https://github.com/vanna-ai/vanna/tree/main/src/vanna/ZhipuAI)
+
+## Supported VectorStores
+
+- [AzureSearch](https://github.com/vanna-ai/vanna/tree/main/src/vanna/azuresearch)
+- [Opensearch](https://github.com/vanna-ai/vanna/tree/main/src/vanna/opensearch)
+- [PgVector](https://github.com/vanna-ai/vanna/tree/main/src/vanna/pgvector)
+- [PineCone](https://github.com/vanna-ai/vanna/tree/main/src/vanna/pinecone)
+- [ChromaDB](https://github.com/vanna-ai/vanna/tree/main/src/vanna/chromadb)
+- [FAISS](https://github.com/vanna-ai/vanna/tree/main/src/vanna/faiss)
+- [Marqo](https://github.com/vanna-ai/vanna/tree/main/src/vanna/marqo)
+- [Milvus](https://github.com/vanna-ai/vanna/tree/main/src/vanna/milvus)
+- [Qdrant](https://github.com/vanna-ai/vanna/tree/main/src/vanna/qdrant)
+- [Weaviate](https://github.com/vanna-ai/vanna/tree/main/src/vanna/weaviate)
+- [Oracle](https://github.com/vanna-ai/vanna/tree/main/src/vanna/oracle)
+
+## Supported Databases
+
+- [PostgreSQL](https://www.postgresql.org/)
+- [MySQL](https://www.mysql.com/)
+- [PrestoDB](https://prestodb.io/)
+- [Apache Hive](https://hive.apache.org/)
+- [ClickHouse](https://clickhouse.com/)
+- [Snowflake](https://www.snowflake.com/en/)
+- [Oracle](https://www.oracle.com/)
+- [Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server/sql-server-downloads)
+- [BigQuery](https://cloud.google.com/bigquery)
+- [SQLite](https://www.sqlite.org/)
+- [DuckDB](https://duckdb.org/)
+
 
 ## Getting started
 See the [documentation](https://vanna.ai/docs/) for specifics on your desired database, LLM, etc.
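
The dependency additions surface as two installable extras; assuming standard pip extras syntax:

    pip install "vanna[oracle]"      # pulls oracledb and chromadb
    pip install "vanna[opensearch]"  # now also pulls langchain-community and langchain-huggingface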
{vanna-0.7.5.dist-info → vanna-0.7.6.dist-info}/RECORD
CHANGED

@@ -1,6 +1,6 @@
 vanna/__init__.py,sha256=4zz2kSkVZenjwJQg-ETWsIVYdz3gio275i9DMa_aHxM,9248
 vanna/local.py,sha256=U5s8ybCRQhBUizi8I69o3jqOpTeu_6KGYY6DMwZxjG4,313
-vanna/remote.py,sha256=
+vanna/remote.py,sha256=3SGyXBmNWofkZL3vGevypvMAhqAYru0KzoUCpX2N6Vc,1876
 vanna/utils.py,sha256=cs0B_0MwhmPI2nWjVHifDYCmCR0kkddylQ2vloaPDSw,2247
 vanna/ZhipuAI/ZhipuAI_Chat.py,sha256=WtZKUBIwlNH0BGbb4lZbVR7pTWIrn7b4RLIk-7u0SuQ,8725
 vanna/ZhipuAI/ZhipuAI_embeddings.py,sha256=lUqzJg9fOx7rVFhjdkFjXcDeVGV4aAB5Ss0oERsa8pE,2849

@@ -11,11 +11,13 @@ vanna/anthropic/anthropic_chat.py,sha256=7X3x8SYwDY28aGyBnt0YNRMG8YY1p_t-foMfKGj
 vanna/azuresearch/__init__.py,sha256=tZfvsrCJESiL3EnxA4PrOc5NoO8MXEzCfHX_hnj8n-c,58
 vanna/azuresearch/azuresearch_vector.py,sha256=_-t53PUnJM914GYbTYlyee06ocfu7l2NkZerBQtlJcs,9566
 vanna/base/__init__.py,sha256=Sl-HM1RRYzAZoSqmL1CZQmF3ZF-byYTCFQP3JZ2A5MU,28
-vanna/base/base.py,sha256=
+vanna/base/base.py,sha256=dGSOuidPBCUOuKxCJ9U_B9qSQ7w5Y4XIM0olox9RCYY,73816
 vanna/bedrock/__init__.py,sha256=hRT2bgJbHEqViLdL-t9hfjSfFdIOkPU2ADBt-B1En-8,46
 vanna/bedrock/bedrock_converse.py,sha256=Nx5kYm-diAfYmsWAnTP5xnv7V84Og69-AP9b3seIe0E,2869
 vanna/chromadb/__init__.py,sha256=-iL0nW_g4uM8nWKMuWnNePfN4nb9uk8P3WzGvezOqRg,50
 vanna/chromadb/chromadb_vector.py,sha256=eKyPck99Y6Jt-BNWojvxLG-zvAERzLSm-3zY-bKXvaA,8792
+vanna/deepseek/__init__.py,sha256=7SVY3DGJcNH7GTk7Uq922QM8yZKu3-5IO33WQ_-bgCM,40
+vanna/deepseek/deepseek_chat.py,sha256=dbTIfVSNmPKYJVI8YeJu3a2Du8U6VqDHdT0gOeqISTc,1878
 vanna/exceptions/__init__.py,sha256=dJ65xxxZh1lqBeg6nz6Tq_r34jLVmjvBvPO9Q6hFaQ8,685
 vanna/faiss/__init__.py,sha256=MXuojmLPt4kUtkES9XKWJcCDHVa4L5a6YF5gebhmKLw,24
 vanna/faiss/faiss.py,sha256=HLUO5PQdnJio9OXJiJcgmRuxVWXvg_XRBnnohS21Z0w,8304

@@ -24,7 +26,7 @@ vanna/flask/assets.py,sha256=af-vact_5HSftltugBpPxzLkAI14Z0lVWcObyVe6eKE,453462
 vanna/flask/auth.py,sha256=UpKxh7W5cd43W0LGch0VqhncKwB78L6dtOQkl1JY5T0,1246
 vanna/google/__init__.py,sha256=6D8rDBjKJJm_jpVn9b4Vc2NR-R779ed_bnHhWmxCJXE,92
 vanna/google/bigquery_vector.py,sha256=mHggjvCsWMt4HK6Y4dAZUPgHi1uytxp2AEQ696TSsJA,9315
-vanna/google/gemini_chat.py,sha256=
+vanna/google/gemini_chat.py,sha256=Tm4S0uywQNuZ5y0eQsE0-rv0NkAw_IhlyMiQqiqn8ro,2683
 vanna/hf/__init__.py,sha256=vD0bIhfLkA1UsvVSF4MAz3Da8aQunkQo3wlDztmMuj0,19
 vanna/hf/hf.py,sha256=N8N5g3xvKDBt3dez2r_U0qATxbl2pN8SVLTZK9CSRA0,3020
 vanna/marqo/__init__.py,sha256=GaAWtJ0B-H5rTY607iLCCrLD7T0zMYM5qWIomEB9gLk,37

@@ -38,12 +40,15 @@ vanna/mock/embedding.py,sha256=ggnP7KuPh6dlqeUFtoN8t0J0P7_yRNtn9rIq6h8g8-w,250
 vanna/mock/llm.py,sha256=WpG9f1pKZftPBHqgIYdARKB2Z9DZhOALYOJWoOjjFEc,518
 vanna/mock/vectordb.py,sha256=h45znfYMUnttE2BBC8v6TKeMaA58pFJL-5B3OGeRNFI,2681
 vanna/ollama/__init__.py,sha256=4xyu8aHPdnEHg5a-QAMwr5o0ns5wevsp_zkI-ndMO2k,27
-vanna/ollama/ollama.py,sha256=
+vanna/ollama/ollama.py,sha256=pqHkh2UEIAwBqxRebsLVmmkpiF30yRwCwO_92WY4p0E,3891
 vanna/openai/__init__.py,sha256=tGkeQ7wTIPsando7QhoSHehtoQVdYLwFbKNlSmCmNeQ,86
 vanna/openai/openai_chat.py,sha256=KU6ynOQ5v7vwrQQ13phXoUXeQUrH6_vmhfiPvWddTrQ,4427
 vanna/openai/openai_embeddings.py,sha256=g4pNh9LVcYP9wOoO8ecaccDFWmCUYMInebfHucAa2Gc,1260
-vanna/opensearch/__init__.py,sha256=
+vanna/opensearch/__init__.py,sha256=dc9fNtIrOOpkSGp_JKOhGOk26ffyK6W1bm_Cdn9X09I,126
 vanna/opensearch/opensearch_vector.py,sha256=VhIcrSyNzWR9ZrqrJnyGFOyuQZs3swfbhr8QyVGI0eI,12226
+vanna/opensearch/opensearch_vector_semantic.py,sha256=XV0ApIMXTj_dc3tnmTg4vkQXMaUxsh2Npk_JBEGIj1Q,6325
+vanna/oracle/__init__.py,sha256=lE9IB9nK4wsAQ0KdAqoidMoLH80QBsB1HRbI0GQJh8c,46
+vanna/oracle/oracle_vector.py,sha256=uWcDFs5uhdKdjdEhFXy4RouTOiS-XMFmaUFuuOLtqho,15974
 vanna/pgvector/__init__.py,sha256=7Wvu9qcNdNvZu26Dn53jhO9YXELm0_YsrwBab4BdgVM,37
 vanna/pgvector/pgvector.py,sha256=dJfm8rswYZvbaIbnjmyRjL071iw4siE0INibsZtaLXY,9919
 vanna/pinecone/__init__.py,sha256=eO5l8aX8vKL6aIUMgAXGPt1jdqKxB_Hic6cmoVAUrD0,90

@@ -65,6 +70,6 @@ vanna/weaviate/__init__.py,sha256=HL6PAl7ePBAkeG8uln-BmM7IUtWohyTPvDfcPzSGSCg,46
 vanna/weaviate/weaviate_vector.py,sha256=tUJIZjEy2mda8CB6C8zeN2SKkEO-UJdLsIqy69skuF0,7584
 vanna/xinference/__init__.py,sha256=EFW_sz-BSB2XgmjACOTZmneeIk3I2EiWgue-VVJpnB0,35
 vanna/xinference/xinference.py,sha256=2PI-f7XoBUyL_jfuXPqxCsd0W72h8j6CtEDneFw1AtI,1876
-vanna-0.7.
-vanna-0.7.
-vanna-0.7.
+vanna-0.7.6.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
+vanna-0.7.6.dist-info/METADATA,sha256=hXqEvvTbOUOs9ZkGYNSkxvPYHb4kZE7JDfDyGGZt5q4,15600
+vanna-0.7.6.dist-info/RECORD,,