vanna 0.6.5__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vanna/azuresearch/__init__.py +1 -0
- vanna/azuresearch/azuresearch_vector.py +236 -0
- vanna/base/base.py +66 -26
- vanna/flask/__init__.py +28 -2
- vanna/flask/assets.py +35 -35
- vanna/google/__init__.py +2 -1
- vanna/google/bigquery_vector.py +230 -0
- vanna/mistral/mistral.py +8 -6
- vanna/ollama/ollama.py +3 -1
- vanna/qianwen/QianwenAI_chat.py +133 -0
- vanna/qianwen/QianwenAI_embeddings.py +46 -0
- vanna/qianwen/__init__.py +2 -0
- {vanna-0.6.5.dist-info → vanna-0.7.0.dist-info}/METADATA +11 -3
- {vanna-0.6.5.dist-info → vanna-0.7.0.dist-info}/RECORD +15 -9
- {vanna-0.6.5.dist-info → vanna-0.7.0.dist-info}/WHEEL +0 -0
vanna/google/__init__.py
CHANGED
|
@@ -1 +1,2 @@
|
|
|
1
|
-
from .
|
|
1
|
+
from .bigquery_vector import BigQuery_VectorStore
|
|
2
|
+
from .gemini_chat import GoogleGeminiChat
|
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
import datetime
|
|
2
|
+
import os
|
|
3
|
+
import uuid
|
|
4
|
+
from typing import List, Optional
|
|
5
|
+
|
|
6
|
+
import pandas as pd
|
|
7
|
+
from google.cloud import bigquery
|
|
8
|
+
|
|
9
|
+
from ..base import VannaBase
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class BigQuery_VectorStore(VannaBase):
    """Vector store backed by a BigQuery table.

    Training data (question/SQL pairs, DDL statements and documentation) is
    stored together with its embedding in a single ``training_data`` table in
    a managed dataset, and retrieved with BigQuery's ``VECTOR_SEARCH``
    function.
    """

    def __init__(self, config: dict, **kwargs):
        self.config = config

        # Per-type result limits; "n_results" acts as the shared fallback.
        self.n_results_sql = config.get("n_results_sql", config.get("n_results", 10))
        self.n_results_documentation = config.get("n_results_documentation", config.get("n_results", 10))
        self.n_results_ddl = config.get("n_results_ddl", config.get("n_results", 10))

        if "api_key" in config or os.getenv("GOOGLE_API_KEY"):
            """
            If Google api_key is provided through config
            or set as an environment variable, assign it.
            """
            print("Configuring genai")
            import google.generativeai as genai

            # Fall back to the environment variable so that setting only
            # GOOGLE_API_KEY (without config["api_key"]) also works; the
            # original raised KeyError in that case.
            genai.configure(api_key=config.get("api_key", os.getenv("GOOGLE_API_KEY")))

            self.genai = genai
        else:
            # Authenticate using VertexAI.
            # NOTE(review): this branch only imports the VertexAI classes and
            # never assigns self.genai, so the embedding helpers below will
            # raise AttributeError -- confirm VertexAI support is wired up.
            from vertexai.language_models import (
                TextEmbeddingInput,
                TextEmbeddingModel,
            )

        if self.config.get("project_id"):
            self.project_id = self.config.get("project_id")
        else:
            self.project_id = os.getenv("GOOGLE_CLOUD_PROJECT")

        if self.project_id is None:
            raise ValueError("Project ID is not set")

        self.conn = bigquery.Client(project=self.project_id)

        dataset_name = self.config.get('bigquery_dataset_name', 'vanna_managed')
        self.dataset_id = f"{self.project_id}.{dataset_name}"
        self._ensure_dataset()

        # The training_data table holds the columns:
        # id, training_data_type, question, content, embedding, created_at
        self.table_id = f"{self.dataset_id}.training_data"
        self._ensure_table()

        # TODO: create a VECTOR INDEX once the table holds the 5000 rows
        # BigQuery requires; until then VECTOR_SEARCH uses brute force.

    def _ensure_dataset(self) -> None:
        """Create the managed dataset if it does not already exist."""
        dataset = bigquery.Dataset(self.dataset_id)

        try:
            self.conn.get_dataset(self.dataset_id)  # Make an API request.
            print(f"Dataset {self.dataset_id} already exists")
        except Exception:
            # Dataset does not exist, create it
            dataset.location = "US"
            self.conn.create_dataset(dataset, timeout=30)  # Make an API request.
            print(f"Created dataset {self.dataset_id}")

    def _ensure_table(self) -> None:
        """Create the training_data table if it does not already exist."""
        schema = [
            bigquery.SchemaField("id", "STRING", mode="REQUIRED"),
            bigquery.SchemaField("training_data_type", "STRING", mode="REQUIRED"),
            bigquery.SchemaField("question", "STRING", mode="REQUIRED"),
            bigquery.SchemaField("content", "STRING", mode="REQUIRED"),
            bigquery.SchemaField("embedding", "FLOAT64", mode="REPEATED"),
            bigquery.SchemaField("created_at", "TIMESTAMP", mode="REQUIRED"),
        ]

        table = bigquery.Table(self.table_id, schema=schema)

        try:
            self.conn.get_table(self.table_id)  # Make an API request.
            print(f"Table {self.table_id} already exists")
        except Exception:
            # Table does not exist, create it
            self.conn.create_table(table, timeout=30)  # Make an API request.
            print(f"Created table {self.table_id}")

    def store_training_data(self, training_data_type: str, question: str, content: str, embedding: List[float], **kwargs) -> str:
        """Insert one training-data row and return its generated id.

        Raises:
            RuntimeError: if BigQuery reports per-row insert errors.
        """
        id = str(uuid.uuid4())
        created_at = datetime.datetime.now(datetime.timezone.utc)
        errors = self.conn.insert_rows_json(self.table_id, [{
            "id": id,
            "training_data_type": training_data_type,
            "question": question,
            "content": content,
            "embedding": embedding,
            "created_at": created_at.isoformat()
        }])

        # insert_rows_json reports failures via its return value instead of
        # raising; surface them rather than silently losing training data.
        if errors:
            raise RuntimeError(f"Failed to insert training data: {errors}")

        return id

    def fetch_similar_training_data(self, training_data_type: str, question: str, n_results, **kwargs) -> pd.DataFrame:
        """Return up to ``n_results`` rows of the given type nearest to ``question``.

        The embedding literal is interpolated (it is generated locally), but
        ``training_data_type`` is passed as a query parameter to avoid SQL
        injection through caller-supplied strings.
        """
        question_embedding = self.generate_question_embedding(question)

        query = f"""
        SELECT
          base.id as id,
          base.question as question,
          base.training_data_type as training_data_type,
          base.content as content,
          distance
        FROM
          VECTOR_SEARCH(
            TABLE `{self.table_id}`,
            'embedding',
            (SELECT * FROM UNNEST([STRUCT({question_embedding})])),
            top_k => {int(n_results)},
            distance_type => 'COSINE',
            options => '{{"use_brute_force":true}}'
          )
        WHERE
          base.training_data_type = @training_data_type
        """

        job_config = bigquery.QueryJobConfig(
            query_parameters=[
                bigquery.ScalarQueryParameter(
                    "training_data_type", "STRING", training_data_type
                ),
            ]
        )

        return self.conn.query(query, job_config=job_config).result().to_dataframe()

    def _embed(self, data: str, task_type: str) -> List[float]:
        """Embed ``data`` with text-embedding-004 for the given task type."""
        result = self.genai.embed_content(
            model="models/text-embedding-004",
            content=data,
            task_type=task_type)

        if 'embedding' in result:
            return result['embedding']

        raise ValueError("No embeddings returned")

    def generate_question_embedding(self, data: str, **kwargs) -> List[float]:
        """Embed a question for the query side of retrieval."""
        return self._embed(data, "retrieval_query")

    def generate_storage_embedding(self, data: str, **kwargs) -> List[float]:
        """Embed content for the document (storage) side of retrieval."""
        return self._embed(data, "retrieval_document")

    def generate_embedding(self, data: str, **kwargs) -> List[float]:
        """Alias used by the add_* methods; stores use document embeddings."""
        return self.generate_storage_embedding(data, **kwargs)

    def get_similar_question_sql(self, question: str, **kwargs) -> list:
        df = self.fetch_similar_training_data(training_data_type="sql", question=question, n_results=self.n_results_sql)

        # Return a list of dictionaries with only question, sql fields. The content field needs to be renamed to sql
        return df.rename(columns={"content": "sql"})[["question", "sql"]].to_dict(orient="records")

    def get_related_ddl(self, question: str, **kwargs) -> list:
        df = self.fetch_similar_training_data(training_data_type="ddl", question=question, n_results=self.n_results_ddl)

        # Return a list of strings of the content
        return df["content"].tolist()

    def get_related_documentation(self, question: str, **kwargs) -> list:
        df = self.fetch_similar_training_data(training_data_type="documentation", question=question, n_results=self.n_results_documentation)

        # Return a list of strings of the content
        return df["content"].tolist()

    def add_question_sql(self, question: str, sql: str, **kwargs) -> str:
        # Embed question and SQL together so retrieval can match on either.
        doc = {
            "question": question,
            "sql": sql
        }

        embedding = self.generate_embedding(str(doc))

        return self.store_training_data(training_data_type="sql", question=question, content=sql, embedding=embedding)

    def add_ddl(self, ddl: str, **kwargs) -> str:
        embedding = self.generate_embedding(ddl)

        return self.store_training_data(training_data_type="ddl", question="", content=ddl, embedding=embedding)

    def add_documentation(self, documentation: str, **kwargs) -> str:
        embedding = self.generate_embedding(documentation)

        return self.store_training_data(training_data_type="documentation", question="", content=documentation, embedding=embedding)

    def get_training_data(self, **kwargs) -> pd.DataFrame:
        """Return every stored row (without embeddings) as a DataFrame."""
        query = f"SELECT id, training_data_type, question, content FROM `{self.table_id}`"

        return self.conn.query(query).result().to_dataframe()

    def remove_training_data(self, id: str, **kwargs) -> bool:
        """Delete the row with the given id; returns False on failure."""
        # Parameterized to avoid SQL injection through the id string.
        query = f"DELETE FROM `{self.table_id}` WHERE id = @id"
        job_config = bigquery.QueryJobConfig(
            query_parameters=[bigquery.ScalarQueryParameter("id", "STRING", id)]
        )

        try:
            self.conn.query(query, job_config=job_config).result()
            return True

        except Exception as e:
            print(f"Failed to remove training data: {e}")
            return False
|
vanna/mistral/mistral.py
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
from mistralai import Mistral as MistralClient
|
|
4
|
+
from mistralai import UserMessage
|
|
3
5
|
|
|
4
6
|
from ..base import VannaBase
|
|
5
7
|
|
|
@@ -23,13 +25,13 @@ class Mistral(VannaBase):
|
|
|
23
25
|
self.model = model
|
|
24
26
|
|
|
25
27
|
def system_message(self, message: str) -> any:
|
|
26
|
-
return
|
|
28
|
+
return {"role": "system", "content": message}
|
|
27
29
|
|
|
28
30
|
def user_message(self, message: str) -> any:
|
|
29
|
-
return
|
|
31
|
+
return {"role": "user", "content": message}
|
|
30
32
|
|
|
31
33
|
def assistant_message(self, message: str) -> any:
|
|
32
|
-
return
|
|
34
|
+
return {"role": "assistant", "content": message}
|
|
33
35
|
|
|
34
36
|
def generate_sql(self, question: str, **kwargs) -> str:
|
|
35
37
|
# Use the super generate_sql
|
|
@@ -41,7 +43,7 @@ class Mistral(VannaBase):
|
|
|
41
43
|
return sql
|
|
42
44
|
|
|
43
45
|
def submit_prompt(self, prompt, **kwargs) -> str:
|
|
44
|
-
chat_response = self.client.chat(
|
|
46
|
+
chat_response = self.client.chat.complete(
|
|
45
47
|
model=self.model,
|
|
46
48
|
messages=prompt,
|
|
47
49
|
)
|
vanna/ollama/ollama.py
CHANGED
|
@@ -27,7 +27,9 @@ class Ollama(VannaBase):
|
|
|
27
27
|
if ":" not in self.model:
|
|
28
28
|
self.model += ":latest"
|
|
29
29
|
|
|
30
|
-
self.
|
|
30
|
+
self.ollama_timeout = config.get("ollama_timeout", 240.0)
|
|
31
|
+
|
|
32
|
+
self.ollama_client = ollama.Client(self.host, timeout=Timeout(self.ollama_timeout))
|
|
31
33
|
self.keep_alive = config.get('keep_alive', None)
|
|
32
34
|
self.ollama_options = config.get('options', {})
|
|
33
35
|
self.num_ctx = self.ollama_options.get('num_ctx', 2048)
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
from openai import OpenAI
|
|
4
|
+
|
|
5
|
+
from ..base import VannaBase
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class QianWenAI_Chat(VannaBase):
    """Chat LLM backend for Alibaba QianWen, accessed through the
    OpenAI-compatible DashScope endpoint.
    """

    def __init__(self, client=None, config=None):
        VannaBase.__init__(self, config=config)

        # default parameters - can be overrided using config
        self.temperature = 0.7

        # Guard config before indexing into it: the original read
        # config["temperature"] first, so QianWenAI_Chat(config=None)
        # crashed and the later `config is None` branch was unreachable.
        if config is not None:
            if "temperature" in config:
                self.temperature = config["temperature"]

            # These options now belong on the OpenAI client itself.
            for deprecated_key in ("api_type", "api_base", "api_version"):
                if deprecated_key in config:
                    raise Exception(
                        f"Passing {deprecated_key} is now deprecated. Please pass an OpenAI client instead."
                    )

        if client is not None:
            self.client = client
            return

        if config is None:
            self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
            return

        if "api_key" in config:
            # Default to DashScope's OpenAI-compatible endpoint unless a
            # base_url is explicitly configured.
            base_url = config.get(
                "base_url", "https://dashscope.aliyuncs.com/compatible-mode/v1"
            )
            self.client = OpenAI(api_key=config["api_key"], base_url=base_url)

    def system_message(self, message: str) -> any:
        return {"role": "system", "content": message}

    def user_message(self, message: str) -> any:
        return {"role": "user", "content": message}

    def assistant_message(self, message: str) -> any:
        return {"role": "assistant", "content": message}

    def _create_completion(self, messages, **model_kwargs):
        """Single call site for chat.completions.create (dedupes 5 copies)."""
        return self.client.chat.completions.create(
            messages=messages,
            stop=None,
            temperature=self.temperature,
            **model_kwargs,
        )

    def submit_prompt(self, prompt, **kwargs) -> str:
        """Send a message-list prompt to the chat API and return the reply.

        Model selection precedence: kwargs["model"], kwargs["engine"],
        config["engine"], config["model"], then a size-based default
        (qwen-long for long prompts, otherwise qwen-plus).

        Raises:
            Exception: if the prompt is None or empty.
        """
        if prompt is None:
            raise Exception("Prompt is None")

        if len(prompt) == 0:
            raise Exception("Prompt is empty")

        # Count the number of tokens in the message log
        # Use 4 as an approximation for the number of characters per token
        num_tokens = sum(len(message["content"]) / 4 for message in prompt)

        if kwargs.get("model", None) is not None:
            model = kwargs.get("model", None)
            print(
                f"Using model {model} for {num_tokens} tokens (approx)"
            )
            response = self._create_completion(model=model, messages=prompt)
        elif kwargs.get("engine", None) is not None:
            engine = kwargs.get("engine", None)
            print(
                f"Using model {engine} for {num_tokens} tokens (approx)"
            )
            response = self._create_completion(engine=engine, messages=prompt)
        elif self.config is not None and "engine" in self.config:
            print(
                f"Using engine {self.config['engine']} for {num_tokens} tokens (approx)"
            )
            response = self._create_completion(
                engine=self.config["engine"], messages=prompt
            )
        elif self.config is not None and "model" in self.config:
            print(
                f"Using model {self.config['model']} for {num_tokens} tokens (approx)"
            )
            response = self._create_completion(
                model=self.config["model"], messages=prompt
            )
        else:
            model = "qwen-long" if num_tokens > 3500 else "qwen-plus"
            print(f"Using model {model} for {num_tokens} tokens (approx)")
            response = self._create_completion(model=model, messages=prompt)

        # Find the first response from the chatbot that has text in it (some responses may not have text)
        # NOTE(review): with openai>=1.0 the choices are pydantic objects, so
        # this membership test never matches and the fallback below is used.
        for choice in response.choices:
            if "text" in choice:
                return choice.text

        # If no response with text is found, return the first response's content (which may be empty)
        return response.choices[0].message.content
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from openai import OpenAI
|
|
2
|
+
|
|
3
|
+
from ..base import VannaBase
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class QianWenAI_Embeddings(VannaBase):
    """Embedding generator using QianWen's OpenAI-compatible endpoint."""

    def __init__(self, client=None, config=None):
        VannaBase.__init__(self, config=config)

        if client is not None:
            self.client = client
            return

        # A cooperating base class (mixin) may already have set self.client;
        # use getattr because this class has not assigned the attribute yet
        # (the original `self.client is not None` raised AttributeError here).
        if getattr(self, "client", None) is not None:
            return

        self.client = OpenAI()

        if config is None:
            return

        if "api_type" in config:
            self.client.api_type = config["api_type"]

        if "api_base" in config:
            self.client.api_base = config["api_base"]

        if "api_version" in config:
            self.client.api_version = config["api_version"]

        if "api_key" in config:
            self.client.api_key = config["api_key"]

    def generate_embedding(self, data: str, **kwargs) -> list[float]:
        """Embed ``data`` using config["engine"] when set, else bge-large-zh."""
        if self.config is not None and "engine" in self.config:
            embedding = self.client.embeddings.create(
                engine=self.config["engine"],
                input=data,
            )
        else:
            embedding = self.client.embeddings.create(
                model="bge-large-zh",
                input=data,
            )

        # openai>=1.0 returns a CreateEmbeddingResponse object, not a dict,
        # so access attributes instead of .get() (which does not exist there).
        return embedding.data[0].embedding
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: vanna
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.7.0
|
|
4
4
|
Summary: Generate SQL queries from natural language
|
|
5
5
|
Author-email: Zain Hoda <zain@vanna.ai>
|
|
6
6
|
Requires-Python: >=3.9
|
|
@@ -26,7 +26,7 @@ Requires-Dist: snowflake-connector-python ; extra == "all"
|
|
|
26
26
|
Requires-Dist: duckdb ; extra == "all"
|
|
27
27
|
Requires-Dist: openai ; extra == "all"
|
|
28
28
|
Requires-Dist: qianfan ; extra == "all"
|
|
29
|
-
Requires-Dist: mistralai ; extra == "all"
|
|
29
|
+
Requires-Dist: mistralai>=1.0.0 ; extra == "all"
|
|
30
30
|
Requires-Dist: chromadb ; extra == "all"
|
|
31
31
|
Requires-Dist: anthropic ; extra == "all"
|
|
32
32
|
Requires-Dist: zhipuai ; extra == "all"
|
|
@@ -43,7 +43,14 @@ Requires-Dist: transformers ; extra == "all"
|
|
|
43
43
|
Requires-Dist: pinecone-client ; extra == "all"
|
|
44
44
|
Requires-Dist: pymilvus[model] ; extra == "all"
|
|
45
45
|
Requires-Dist: weaviate-client ; extra == "all"
|
|
46
|
+
Requires-Dist: azure-search-documents ; extra == "all"
|
|
47
|
+
Requires-Dist: azure-identity ; extra == "all"
|
|
48
|
+
Requires-Dist: azure-common ; extra == "all"
|
|
46
49
|
Requires-Dist: anthropic ; extra == "anthropic"
|
|
50
|
+
Requires-Dist: azure-search-documents ; extra == "azuresearch"
|
|
51
|
+
Requires-Dist: azure-identity ; extra == "azuresearch"
|
|
52
|
+
Requires-Dist: azure-common ; extra == "azuresearch"
|
|
53
|
+
Requires-Dist: fastembed ; extra == "azuresearch"
|
|
47
54
|
Requires-Dist: boto3 ; extra == "bedrock"
|
|
48
55
|
Requires-Dist: botocore ; extra == "bedrock"
|
|
49
56
|
Requires-Dist: google-cloud-bigquery ; extra == "bigquery"
|
|
@@ -56,7 +63,7 @@ Requires-Dist: google-cloud-aiplatform ; extra == "google"
|
|
|
56
63
|
Requires-Dist: transformers ; extra == "hf"
|
|
57
64
|
Requires-Dist: marqo ; extra == "marqo"
|
|
58
65
|
Requires-Dist: pymilvus[model] ; extra == "milvus"
|
|
59
|
-
Requires-Dist: mistralai ; extra == "mistralai"
|
|
66
|
+
Requires-Dist: mistralai>=1.0.0 ; extra == "mistralai"
|
|
60
67
|
Requires-Dist: PyMySQL ; extra == "mysql"
|
|
61
68
|
Requires-Dist: ollama ; extra == "ollama"
|
|
62
69
|
Requires-Dist: httpx ; extra == "ollama"
|
|
@@ -79,6 +86,7 @@ Project-URL: Bug Tracker, https://github.com/vanna-ai/vanna/issues
|
|
|
79
86
|
Project-URL: Homepage, https://github.com/vanna-ai/vanna
|
|
80
87
|
Provides-Extra: all
|
|
81
88
|
Provides-Extra: anthropic
|
|
89
|
+
Provides-Extra: azuresearch
|
|
82
90
|
Provides-Extra: bedrock
|
|
83
91
|
Provides-Extra: bigquery
|
|
84
92
|
Provides-Extra: chromadb
|
|
@@ -8,17 +8,20 @@ vanna/ZhipuAI/__init__.py,sha256=NlsijtcZp5Tj9jkOe9fNcOQND_QsGgu7otODsCLBPr0,116
|
|
|
8
8
|
vanna/advanced/__init__.py,sha256=oDj9g1JbrbCfp4WWdlr_bhgdMqNleyHgr6VXX6DcEbo,658
|
|
9
9
|
vanna/anthropic/__init__.py,sha256=85s_2mAyyPxc0T_0JEvYeAkEKWJwkwqoyUwSC5dw9Gk,43
|
|
10
10
|
vanna/anthropic/anthropic_chat.py,sha256=7X3x8SYwDY28aGyBnt0YNRMG8YY1p_t-foMfKGj8_Oo,2627
|
|
11
|
+
vanna/azuresearch/__init__.py,sha256=tZfvsrCJESiL3EnxA4PrOc5NoO8MXEzCfHX_hnj8n-c,58
|
|
12
|
+
vanna/azuresearch/azuresearch_vector.py,sha256=fc7w_bE6IgOsWb1vkA8t8k-AtuzLyH6zlW4ej4d2lE8,9558
|
|
11
13
|
vanna/base/__init__.py,sha256=Sl-HM1RRYzAZoSqmL1CZQmF3ZF-byYTCFQP3JZ2A5MU,28
|
|
12
|
-
vanna/base/base.py,sha256=
|
|
14
|
+
vanna/base/base.py,sha256=j5xQmK-MeFKAuPjgYLSl1ThCHZieG-ab-RFFSkDlbiw,73679
|
|
13
15
|
vanna/bedrock/__init__.py,sha256=hRT2bgJbHEqViLdL-t9hfjSfFdIOkPU2ADBt-B1En-8,46
|
|
14
16
|
vanna/bedrock/bedrock_converse.py,sha256=Nx5kYm-diAfYmsWAnTP5xnv7V84Og69-AP9b3seIe0E,2869
|
|
15
17
|
vanna/chromadb/__init__.py,sha256=-iL0nW_g4uM8nWKMuWnNePfN4nb9uk8P3WzGvezOqRg,50
|
|
16
18
|
vanna/chromadb/chromadb_vector.py,sha256=eKyPck99Y6Jt-BNWojvxLG-zvAERzLSm-3zY-bKXvaA,8792
|
|
17
19
|
vanna/exceptions/__init__.py,sha256=dJ65xxxZh1lqBeg6nz6Tq_r34jLVmjvBvPO9Q6hFaQ8,685
|
|
18
|
-
vanna/flask/__init__.py,sha256=
|
|
19
|
-
vanna/flask/assets.py,sha256=
|
|
20
|
+
vanna/flask/__init__.py,sha256=jcdaau1tQ142nL1ZsDklk0ilMkEyRxgQZdmsl1IN4LQ,43866
|
|
21
|
+
vanna/flask/assets.py,sha256=af-vact_5HSftltugBpPxzLkAI14Z0lVWcObyVe6eKE,453462
|
|
20
22
|
vanna/flask/auth.py,sha256=UpKxh7W5cd43W0LGch0VqhncKwB78L6dtOQkl1JY5T0,1246
|
|
21
|
-
vanna/google/__init__.py,sha256=
|
|
23
|
+
vanna/google/__init__.py,sha256=6D8rDBjKJJm_jpVn9b4Vc2NR-R779ed_bnHhWmxCJXE,92
|
|
24
|
+
vanna/google/bigquery_vector.py,sha256=rkP94Xd1lNYjU1x3MDLvqmGSPUYtDfQwvlqVmX44jyM,8839
|
|
22
25
|
vanna/google/gemini_chat.py,sha256=j1szC2PamMLFrs0Z4lYPS69i017FYICe-mNObNYFBPQ,1576
|
|
23
26
|
vanna/hf/__init__.py,sha256=vD0bIhfLkA1UsvVSF4MAz3Da8aQunkQo3wlDztmMuj0,19
|
|
24
27
|
vanna/hf/hf.py,sha256=N8N5g3xvKDBt3dez2r_U0qATxbl2pN8SVLTZK9CSRA0,3020
|
|
@@ -27,13 +30,13 @@ vanna/marqo/marqo.py,sha256=W7WTtzWp4RJjZVy6OaXHqncUBIPdI4Q7qH7BRCxZ1_A,5242
|
|
|
27
30
|
vanna/milvus/__init__.py,sha256=VBasJG2eTKbJI6CEand7kPLNBrqYrn0QCAhSYVz814s,46
|
|
28
31
|
vanna/milvus/milvus_vector.py,sha256=Mq0eaSh0UcTYhgh8mTm0fvS6rbfL6tQONVnDZGemWoM,11268
|
|
29
32
|
vanna/mistral/__init__.py,sha256=70rTY-69Z2ehkkMj84dNMCukPo6AWdflBGvIB_pztS0,29
|
|
30
|
-
vanna/mistral/mistral.py,sha256=
|
|
33
|
+
vanna/mistral/mistral.py,sha256=rcdgmUSQniLkah2VL23VGYRa9WXpOy_dZN4S0kc__V8,1494
|
|
31
34
|
vanna/mock/__init__.py,sha256=nYR2WfcV5NdwpK3V64QGOWHBGc3ESN9uV68JLS76aRw,97
|
|
32
35
|
vanna/mock/embedding.py,sha256=ggnP7KuPh6dlqeUFtoN8t0J0P7_yRNtn9rIq6h8g8-w,250
|
|
33
36
|
vanna/mock/llm.py,sha256=WpG9f1pKZftPBHqgIYdARKB2Z9DZhOALYOJWoOjjFEc,518
|
|
34
37
|
vanna/mock/vectordb.py,sha256=h45znfYMUnttE2BBC8v6TKeMaA58pFJL-5B3OGeRNFI,2681
|
|
35
38
|
vanna/ollama/__init__.py,sha256=4xyu8aHPdnEHg5a-QAMwr5o0ns5wevsp_zkI-ndMO2k,27
|
|
36
|
-
vanna/ollama/ollama.py,sha256=
|
|
39
|
+
vanna/ollama/ollama.py,sha256=yD7UHn4GNzWfQMi2OHlfWwIEJ_sTDzpPcgv_MCGRp6E,3871
|
|
37
40
|
vanna/openai/__init__.py,sha256=tGkeQ7wTIPsando7QhoSHehtoQVdYLwFbKNlSmCmNeQ,86
|
|
38
41
|
vanna/openai/openai_chat.py,sha256=KU6ynOQ5v7vwrQQ13phXoUXeQUrH6_vmhfiPvWddTrQ,4427
|
|
39
42
|
vanna/openai/openai_embeddings.py,sha256=g4pNh9LVcYP9wOoO8ecaccDFWmCUYMInebfHucAa2Gc,1260
|
|
@@ -46,6 +49,9 @@ vanna/qdrant/qdrant.py,sha256=qkTWhGrVSAngJZkrcRQ8YFVHcI9j_ZoOGbF6ZVUUdsU,12567
|
|
|
46
49
|
vanna/qianfan/Qianfan_Chat.py,sha256=Z-s9MwH22T4KMR8AViAjms6qoj67pHeQkMsbK-aXf1M,5273
|
|
47
50
|
vanna/qianfan/Qianfan_embeddings.py,sha256=TYynAJXlyuZfmoj49h8nU6bXu_GjlXREp3tgfQUca04,954
|
|
48
51
|
vanna/qianfan/__init__.py,sha256=QpR43BjZQZcrcDRkyYcYiS-kyqtYmu23AHDzK0Wy1D0,90
|
|
52
|
+
vanna/qianwen/QianwenAI_chat.py,sha256=c4stx4QzX-Af28c0H4h2ZDDKJknWcun0L9LevMTSHSE,4076
|
|
53
|
+
vanna/qianwen/QianwenAI_embeddings.py,sha256=55cwKpB_N3OVgXkC8uSGQCaIAK8vojz2UnlANtiXWS8,1253
|
|
54
|
+
vanna/qianwen/__init__.py,sha256=fBl4zQTpvObGRNJV6EVNjIUQ9aKDDYq-zLPsEZrRpwg,98
|
|
49
55
|
vanna/types/__init__.py,sha256=Qhn_YscKtJh7mFPCyCDLa2K8a4ORLMGVnPpTbv9uB2U,4957
|
|
50
56
|
vanna/vannadb/__init__.py,sha256=C6UkYocmO6dmzfPKZaWojN0mI5YlZZ9VIbdcquBE58A,48
|
|
51
57
|
vanna/vannadb/vannadb_vector.py,sha256=N8poMYvAojoaOF5gI4STD5pZWK9lBKPvyIjbh9dPBa0,14189
|
|
@@ -53,6 +59,6 @@ vanna/vllm/__init__.py,sha256=aNlUkF9tbURdeXAJ8ytuaaF1gYwcG3ny1MfNl_cwQYg,23
|
|
|
53
59
|
vanna/vllm/vllm.py,sha256=oM_aA-1Chyl7T_Qc_yRKlL6oSX1etsijY9zQdjeMGMQ,2827
|
|
54
60
|
vanna/weaviate/__init__.py,sha256=HL6PAl7ePBAkeG8uln-BmM7IUtWohyTPvDfcPzSGSCg,46
|
|
55
61
|
vanna/weaviate/weaviate_vector.py,sha256=GEiu4Vd9w-7j10aB-zTxJ8gefqe_F-LUUGvttFs1vlg,7539
|
|
56
|
-
vanna-0.
|
|
57
|
-
vanna-0.
|
|
58
|
-
vanna-0.
|
|
62
|
+
vanna-0.7.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
|
|
63
|
+
vanna-0.7.0.dist-info/METADATA,sha256=O-4tGHTmtlFTk-JMoYMNw0S3R6rfcu8geWDIUojQ36U,12407
|
|
64
|
+
vanna-0.7.0.dist-info/RECORD,,
|
|
File without changes
|