vanna 0.6.6__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vanna/ollama/ollama.py CHANGED
@@ -27,7 +27,9 @@ class Ollama(VannaBase):
         if ":" not in self.model:
             self.model += ":latest"
 
-        self.ollama_client = ollama.Client(self.host, timeout=Timeout(240.0))
+        self.ollama_timeout = config.get("ollama_timeout", 240.0)
+
+        self.ollama_client = ollama.Client(self.host, timeout=Timeout(self.ollama_timeout))
         self.keep_alive = config.get('keep_alive', None)
         self.ollama_options = config.get('options', {})
         self.num_ctx = self.ollama_options.get('num_ctx', 2048)
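
The change above makes the Ollama request timeout configurable through the config dict instead of being hard-coded at 240 seconds. A minimal usage sketch (pairing Ollama with the package's ChromaDB_VectorStore, the MyVanna name, and the config values are illustrative assumptions, not part of the diff):

from vanna.chromadb import ChromaDB_VectorStore
from vanna.ollama import Ollama


class MyVanna(ChromaDB_VectorStore, Ollama):
    def __init__(self, config=None):
        ChromaDB_VectorStore.__init__(self, config=config)
        Ollama.__init__(self, config=config)


# "ollama_timeout" is the new key read via config.get("ollama_timeout", 240.0);
# omitting it keeps the previous 240-second default.
vn = MyVanna(config={"model": "llama3", "ollama_timeout": 480.0})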
vanna/qianwen/QianwenAI_chat.py ADDED
@@ -0,0 +1,133 @@
+import os
+
+from openai import OpenAI
+
+from ..base import VannaBase
+
+
+class QianWenAI_Chat(VannaBase):
+    def __init__(self, client=None, config=None):
+        VannaBase.__init__(self, config=config)
+
+        # default parameters - can be overrided using config
+        self.temperature = 0.7
+
+        if "temperature" in config:
+            self.temperature = config["temperature"]
+
+        if "api_type" in config:
+            raise Exception(
+                "Passing api_type is now deprecated. Please pass an OpenAI client instead."
+            )
+
+        if "api_base" in config:
+            raise Exception(
+                "Passing api_base is now deprecated. Please pass an OpenAI client instead."
+            )
+
+        if "api_version" in config:
+            raise Exception(
+                "Passing api_version is now deprecated. Please pass an OpenAI client instead."
+            )
+
+        if client is not None:
+            self.client = client
+            return
+
+        if config is None and client is None:
+            self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+            return
+
+        if "api_key" in config:
+            if "base_url" not in config:
+                self.client = OpenAI(api_key=config["api_key"],
+                                     base_url="https://dashscope.aliyuncs.com/compatible-mode/v1")
+            else:
+                self.client = OpenAI(api_key=config["api_key"],
+                                     base_url=config["base_url"])
+
+    def system_message(self, message: str) -> any:
+        return {"role": "system", "content": message}
+
+    def user_message(self, message: str) -> any:
+        return {"role": "user", "content": message}
+
+    def assistant_message(self, message: str) -> any:
+        return {"role": "assistant", "content": message}
+
+    def submit_prompt(self, prompt, **kwargs) -> str:
+        if prompt is None:
+            raise Exception("Prompt is None")
+
+        if len(prompt) == 0:
+            raise Exception("Prompt is empty")
+
+        # Count the number of tokens in the message log
+        # Use 4 as an approximation for the number of characters per token
+        num_tokens = 0
+        for message in prompt:
+            num_tokens += len(message["content"]) / 4
+
+        if kwargs.get("model", None) is not None:
+            model = kwargs.get("model", None)
+            print(
+                f"Using model {model} for {num_tokens} tokens (approx)"
+            )
+            response = self.client.chat.completions.create(
+                model=model,
+                messages=prompt,
+                stop=None,
+                temperature=self.temperature,
+            )
+        elif kwargs.get("engine", None) is not None:
+            engine = kwargs.get("engine", None)
+            print(
+                f"Using model {engine} for {num_tokens} tokens (approx)"
+            )
+            response = self.client.chat.completions.create(
+                engine=engine,
+                messages=prompt,
+                stop=None,
+                temperature=self.temperature,
+            )
+        elif self.config is not None and "engine" in self.config:
+            print(
+                f"Using engine {self.config['engine']} for {num_tokens} tokens (approx)"
+            )
+            response = self.client.chat.completions.create(
+                engine=self.config["engine"],
+                messages=prompt,
+                stop=None,
+                temperature=self.temperature,
+            )
+        elif self.config is not None and "model" in self.config:
+            print(
+                f"Using model {self.config['model']} for {num_tokens} tokens (approx)"
+            )
+            response = self.client.chat.completions.create(
+                model=self.config["model"],
+                messages=prompt,
+                stop=None,
+                temperature=self.temperature,
+            )
+        else:
+            if num_tokens > 3500:
+                model = "qwen-long"
+            else:
+                model = "qwen-plus"
+
+            print(f"Using model {model} for {num_tokens} tokens (approx)")
+            response = self.client.chat.completions.create(
+                model=model,
+                messages=prompt,
+                stop=None,
+                temperature=self.temperature,
+            )
+
+        # Find the first response from the chatbot that has text in it (some responses may not have text)
+        for choice in response.choices:
+            if "text" in choice:
+                return choice.text
+
+        # If no response with text is found, return the first response's content (which may be empty)
+        return response.choices[0].message.content
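
QianWenAI_Chat mirrors the existing OpenAI_Chat wrapper: with only an api_key in the config it points the client at DashScope's OpenAI-compatible endpoint, and submit_prompt falls back to qwen-long or qwen-plus based on a rough characters/4 token estimate unless a model or engine is supplied. A hedged usage sketch (pairing it with ChromaDB_VectorStore, the MyVanna name, and the config values are illustrative, not taken from the diff):

from vanna.chromadb import ChromaDB_VectorStore
from vanna.qianwen import QianWenAI_Chat


class MyVanna(ChromaDB_VectorStore, QianWenAI_Chat):
    def __init__(self, config=None):
        ChromaDB_VectorStore.__init__(self, config=config)
        QianWenAI_Chat.__init__(self, config=config)


# Without "base_url" the client defaults to DashScope's compatible-mode URL;
# passing "model" skips the qwen-plus/qwen-long fallback in submit_prompt.
vn = MyVanna(config={"api_key": "sk-...", "model": "qwen-plus", "temperature": 0.7})
vn.ask("How many customers are there?")  # ask() and generate_sql() come from VannaBase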
vanna/qianwen/QianwenAI_embeddings.py ADDED
@@ -0,0 +1,46 @@
+from openai import OpenAI
+
+from ..base import VannaBase
+
+
+class QianWenAI_Embeddings(VannaBase):
+    def __init__(self, client=None, config=None):
+        VannaBase.__init__(self, config=config)
+
+        if client is not None:
+            self.client = client
+            return
+
+        if self.client is not None:
+            return
+
+        self.client = OpenAI()
+
+        if config is None:
+            return
+
+        if "api_type" in config:
+            self.client.api_type = config["api_type"]
+
+        if "api_base" in config:
+            self.client.api_base = config["api_base"]
+
+        if "api_version" in config:
+            self.client.api_version = config["api_version"]
+
+        if "api_key" in config:
+            self.client.api_key = config["api_key"]
+
+    def generate_embedding(self, data: str, **kwargs) -> list[float]:
+        if self.config is not None and "engine" in self.config:
+            embedding = self.client.embeddings.create(
+                engine=self.config["engine"],
+                input=data,
+            )
+        else:
+            embedding = self.client.embeddings.create(
+                model="bge-large-zh",
+                input=data,
+            )
+
+        return embedding.get("data")[0]["embedding"]
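
QianWenAI_Embeddings expects self.client to already exist (set by a chat mixin) or an OpenAI-compatible client to be injected through the client argument; constructed on its own with neither, the self.client check would fail. When no engine is configured, generate_embedding uses the bge-large-zh model shown above. One possible composition, as a sketch (the mixin order, reuse of the chat client for embeddings, and the config values are assumptions):

from vanna.chromadb import ChromaDB_VectorStore
from vanna.qianwen import QianWenAI_Chat, QianWenAI_Embeddings


class MyVanna(QianWenAI_Embeddings, ChromaDB_VectorStore, QianWenAI_Chat):
    def __init__(self, config=None):
        QianWenAI_Chat.__init__(self, config=config)  # sets self.client from api_key/base_url
        QianWenAI_Embeddings.__init__(self, client=self.client, config=config)
        ChromaDB_VectorStore.__init__(self, config=config)


vn = MyVanna(config={"api_key": "sk-...", "model": "qwen-plus"})
# vn.generate_embedding("some text") now resolves to QianWenAI_Embeddings.generate_embedding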
vanna/qianwen/__init__.py ADDED
@@ -0,0 +1,2 @@
+from .QianwenAI_chat import QianWenAI_Chat
+from .QianwenAI_embeddings import QianWenAI_Embeddings
{vanna-0.6.6.dist-info → vanna-0.7.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vanna
-Version: 0.6.6
+Version: 0.7.0
 Summary: Generate SQL queries from natural language
 Author-email: Zain Hoda <zain@vanna.ai>
 Requires-Python: >=3.9
@@ -43,7 +43,14 @@ Requires-Dist: transformers ; extra == "all"
 Requires-Dist: pinecone-client ; extra == "all"
 Requires-Dist: pymilvus[model] ; extra == "all"
 Requires-Dist: weaviate-client ; extra == "all"
+Requires-Dist: azure-search-documents ; extra == "all"
+Requires-Dist: azure-identity ; extra == "all"
+Requires-Dist: azure-common ; extra == "all"
 Requires-Dist: anthropic ; extra == "anthropic"
+Requires-Dist: azure-search-documents ; extra == "azuresearch"
+Requires-Dist: azure-identity ; extra == "azuresearch"
+Requires-Dist: azure-common ; extra == "azuresearch"
+Requires-Dist: fastembed ; extra == "azuresearch"
 Requires-Dist: boto3 ; extra == "bedrock"
 Requires-Dist: botocore ; extra == "bedrock"
 Requires-Dist: google-cloud-bigquery ; extra == "bigquery"
@@ -79,6 +86,7 @@ Project-URL: Bug Tracker, https://github.com/vanna-ai/vanna/issues
 Project-URL: Homepage, https://github.com/vanna-ai/vanna
 Provides-Extra: all
 Provides-Extra: anthropic
+Provides-Extra: azuresearch
 Provides-Extra: bedrock
 Provides-Extra: bigquery
 Provides-Extra: chromadb
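
The new azuresearch extra above pulls in azure-search-documents, azure-identity, azure-common and fastembed, matching the vanna/azuresearch/azuresearch_vector.py module listed in the RECORD below; it would be installed with pip install "vanna[azuresearch]", and the all extra now also carries the three Azure packages.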
{vanna-0.6.6.dist-info → vanna-0.7.0.dist-info}/RECORD RENAMED
@@ -8,15 +8,17 @@ vanna/ZhipuAI/__init__.py,sha256=NlsijtcZp5Tj9jkOe9fNcOQND_QsGgu7otODsCLBPr0,116
 vanna/advanced/__init__.py,sha256=oDj9g1JbrbCfp4WWdlr_bhgdMqNleyHgr6VXX6DcEbo,658
 vanna/anthropic/__init__.py,sha256=85s_2mAyyPxc0T_0JEvYeAkEKWJwkwqoyUwSC5dw9Gk,43
 vanna/anthropic/anthropic_chat.py,sha256=7X3x8SYwDY28aGyBnt0YNRMG8YY1p_t-foMfKGj8_Oo,2627
+vanna/azuresearch/__init__.py,sha256=tZfvsrCJESiL3EnxA4PrOc5NoO8MXEzCfHX_hnj8n-c,58
+vanna/azuresearch/azuresearch_vector.py,sha256=fc7w_bE6IgOsWb1vkA8t8k-AtuzLyH6zlW4ej4d2lE8,9558
 vanna/base/__init__.py,sha256=Sl-HM1RRYzAZoSqmL1CZQmF3ZF-byYTCFQP3JZ2A5MU,28
-vanna/base/base.py,sha256=DrXaJcMhIjD6BEqLu4JNZaZZ8nTM4SppjdvueJjEcko,71463
+vanna/base/base.py,sha256=j5xQmK-MeFKAuPjgYLSl1ThCHZieG-ab-RFFSkDlbiw,73679
 vanna/bedrock/__init__.py,sha256=hRT2bgJbHEqViLdL-t9hfjSfFdIOkPU2ADBt-B1En-8,46
 vanna/bedrock/bedrock_converse.py,sha256=Nx5kYm-diAfYmsWAnTP5xnv7V84Og69-AP9b3seIe0E,2869
 vanna/chromadb/__init__.py,sha256=-iL0nW_g4uM8nWKMuWnNePfN4nb9uk8P3WzGvezOqRg,50
 vanna/chromadb/chromadb_vector.py,sha256=eKyPck99Y6Jt-BNWojvxLG-zvAERzLSm-3zY-bKXvaA,8792
 vanna/exceptions/__init__.py,sha256=dJ65xxxZh1lqBeg6nz6Tq_r34jLVmjvBvPO9Q6hFaQ8,685
-vanna/flask/__init__.py,sha256=cllLWqJ2SYVdvF4CQ-8cipoOdjgkoe0rChWnawtzMyA,42921
-vanna/flask/assets.py,sha256=_UoUr57sS0QL2BuTxAOe9k4yy8T7-fp2NpbRSVtW3IM,451769
+vanna/flask/__init__.py,sha256=jcdaau1tQ142nL1ZsDklk0ilMkEyRxgQZdmsl1IN4LQ,43866
+vanna/flask/assets.py,sha256=af-vact_5HSftltugBpPxzLkAI14Z0lVWcObyVe6eKE,453462
 vanna/flask/auth.py,sha256=UpKxh7W5cd43W0LGch0VqhncKwB78L6dtOQkl1JY5T0,1246
 vanna/google/__init__.py,sha256=6D8rDBjKJJm_jpVn9b4Vc2NR-R779ed_bnHhWmxCJXE,92
 vanna/google/bigquery_vector.py,sha256=rkP94Xd1lNYjU1x3MDLvqmGSPUYtDfQwvlqVmX44jyM,8839
@@ -34,7 +36,7 @@ vanna/mock/embedding.py,sha256=ggnP7KuPh6dlqeUFtoN8t0J0P7_yRNtn9rIq6h8g8-w,250
 vanna/mock/llm.py,sha256=WpG9f1pKZftPBHqgIYdARKB2Z9DZhOALYOJWoOjjFEc,518
 vanna/mock/vectordb.py,sha256=h45znfYMUnttE2BBC8v6TKeMaA58pFJL-5B3OGeRNFI,2681
 vanna/ollama/__init__.py,sha256=4xyu8aHPdnEHg5a-QAMwr5o0ns5wevsp_zkI-ndMO2k,27
-vanna/ollama/ollama.py,sha256=rXa7cfvdlO1E5SLysXIl3IZpIaA2r0RBvV5jX2-upiE,3794
+vanna/ollama/ollama.py,sha256=yD7UHn4GNzWfQMi2OHlfWwIEJ_sTDzpPcgv_MCGRp6E,3871
 vanna/openai/__init__.py,sha256=tGkeQ7wTIPsando7QhoSHehtoQVdYLwFbKNlSmCmNeQ,86
 vanna/openai/openai_chat.py,sha256=KU6ynOQ5v7vwrQQ13phXoUXeQUrH6_vmhfiPvWddTrQ,4427
 vanna/openai/openai_embeddings.py,sha256=g4pNh9LVcYP9wOoO8ecaccDFWmCUYMInebfHucAa2Gc,1260
@@ -47,6 +49,9 @@ vanna/qdrant/qdrant.py,sha256=qkTWhGrVSAngJZkrcRQ8YFVHcI9j_ZoOGbF6ZVUUdsU,12567
 vanna/qianfan/Qianfan_Chat.py,sha256=Z-s9MwH22T4KMR8AViAjms6qoj67pHeQkMsbK-aXf1M,5273
 vanna/qianfan/Qianfan_embeddings.py,sha256=TYynAJXlyuZfmoj49h8nU6bXu_GjlXREp3tgfQUca04,954
 vanna/qianfan/__init__.py,sha256=QpR43BjZQZcrcDRkyYcYiS-kyqtYmu23AHDzK0Wy1D0,90
+vanna/qianwen/QianwenAI_chat.py,sha256=c4stx4QzX-Af28c0H4h2ZDDKJknWcun0L9LevMTSHSE,4076
+vanna/qianwen/QianwenAI_embeddings.py,sha256=55cwKpB_N3OVgXkC8uSGQCaIAK8vojz2UnlANtiXWS8,1253
+vanna/qianwen/__init__.py,sha256=fBl4zQTpvObGRNJV6EVNjIUQ9aKDDYq-zLPsEZrRpwg,98
 vanna/types/__init__.py,sha256=Qhn_YscKtJh7mFPCyCDLa2K8a4ORLMGVnPpTbv9uB2U,4957
 vanna/vannadb/__init__.py,sha256=C6UkYocmO6dmzfPKZaWojN0mI5YlZZ9VIbdcquBE58A,48
 vanna/vannadb/vannadb_vector.py,sha256=N8poMYvAojoaOF5gI4STD5pZWK9lBKPvyIjbh9dPBa0,14189
@@ -54,6 +59,6 @@ vanna/vllm/__init__.py,sha256=aNlUkF9tbURdeXAJ8ytuaaF1gYwcG3ny1MfNl_cwQYg,23
 vanna/vllm/vllm.py,sha256=oM_aA-1Chyl7T_Qc_yRKlL6oSX1etsijY9zQdjeMGMQ,2827
 vanna/weaviate/__init__.py,sha256=HL6PAl7ePBAkeG8uln-BmM7IUtWohyTPvDfcPzSGSCg,46
 vanna/weaviate/weaviate_vector.py,sha256=GEiu4Vd9w-7j10aB-zTxJ8gefqe_F-LUUGvttFs1vlg,7539
-vanna-0.6.6.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
-vanna-0.6.6.dist-info/METADATA,sha256=_qy1wVZqQOLplCxZ43KqCIEgL0Wq2X48ekJEACq-0Ng,12011
-vanna-0.6.6.dist-info/RECORD,,
+vanna-0.7.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+vanna-0.7.0.dist-info/METADATA,sha256=O-4tGHTmtlFTk-JMoYMNw0S3R6rfcu8geWDIUojQ36U,12407
+vanna-0.7.0.dist-info/RECORD,,
{vanna-0.6.6.dist-info → vanna-0.7.0.dist-info}/WHEEL RENAMED
File without changes