kobai-sdk 0.3.0rc1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kobai-sdk might be problematic.

kobai/ai_query.py CHANGED
@@ -1,8 +1,6 @@
  from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate
  from langchain_core.output_parsers import StrOutputParser

- from sentence_transformers import SentenceTransformer, util
-
  from langchain_core.language_models.chat_models import BaseChatModel
  from langchain_core.embeddings import Embeddings
  from langchain_core.documents import Document
@@ -10,8 +8,9 @@ from langchain_core.retrievers import BaseRetriever
  from langchain_core.callbacks import CallbackManagerForRetrieverRun
  from langchain_core.runnables import RunnablePassthrough, RunnableLambda
  from langchain_core.vectorstores import InMemoryVectorStore
+ import numpy as np

- from typing import Union, List
+ from typing import List


  MESSAGE_SYSTEM_TEMPLATE = """
@@ -73,7 +72,7 @@ def format_docs(docs):
  def input_only(inpt):
      return inpt["question"]

- def followup_question(user_question, question_results, question_name, question_def, embedding_model: Union[SentenceTransformer, Embeddings], chat_model: BaseChatModel, use_inmem_vectors=False, k=50):
+ def followup_question(user_question, question_results, question_name, question_def, embedding_model: Embeddings, chat_model: BaseChatModel, use_inmem_vectors=False, k=50):

      row_texts = process_question_results(question_def, question_results)
      question_documents = [Document(page_content=r, metadata={"source": "kobai"}) for r in row_texts]
@@ -118,22 +117,13 @@ def init_question_search_index(tenant_questions, emb_model):

      q_ids = [q["id"] for q in tenant_questions]
      q_descs = [q["description"] for q in tenant_questions]
-
-     if isinstance(emb_model, SentenceTransformer):
-         q_vectors = emb_model.encode(q_descs)
-     else:
-         q_vectors = emb_model.embed_documents(q_descs)
-
+     q_vectors = emb_model.embed_documents(q_descs)
      return {"ids": q_ids, "descs": q_descs, "vectors": q_vectors}


  def question_search(search_text: str, search_index, emb_model, k: int):
-     if isinstance(emb_model, SentenceTransformer):
-         search_vec = emb_model.encode(search_text)
-     else:
-         search_vec = emb_model.embed_query(search_text)
+     search_vec = emb_model.embed_query(search_text)
      #search_vec = emb_model.encode(search_text)
-
      matches = __top_vector_matches(search_vec, search_index["vectors"], top=k)

      for mi, m in enumerate(matches):
@@ -142,13 +132,25 @@ def question_search(search_text: str, search_index, emb_model, k: int):
      return matches

  def __top_vector_matches(test_vec, options_list_vec, top=1):
-     scores_t = util.cos_sim(test_vec, options_list_vec)[0]
-     scores_l = scores_t.tolist()
-     scores_d = [{"index": i, "value": v} for i, v in enumerate(scores_l)]
-     sorted_d = sorted(scores_d, key=lambda i: i["value"], reverse=True)
-     top_d = sorted_d[0:top]
+     # Normalize the test vector
+     test_vec_norm = test_vec / np.linalg.norm(test_vec)
+     # Normalize the option vectors
+     options_norm = options_list_vec / np.linalg.norm(options_list_vec, axis=1, keepdims=True)
+
+     # Compute cosine similarity (dot product of normalized vectors)
+     cosine_similarities = np.dot(options_norm, test_vec_norm)
+
+     # Get indexes and similarity scores as dict
+     scores_d = [{"index": i, "value": float(v)} for i, v in enumerate(cosine_similarities)]
+
+     # Sort dict by similarity score descending
+     sorted_d = sorted(scores_d, key=lambda x: x["value"], reverse=True)
+
+     # Return top results
+     top_d = sorted_d[:top]
      return top_d

+
  def process_question_results(question_def, question_results):

      """
@@ -211,8 +213,9 @@ def process_question_results(question_def, question_results):


      concept_order = [max_src]
-     for t in concept_rels[max_src]["edges"]:
-         concept_order.append(t["dst"])
+     if max_src != "":
+         for t in concept_rels[max_src]["edges"]:
+             concept_order.append(t["dst"])

      for c in concept_props:
          if c not in concept_order:
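
The new `__top_vector_matches` replaces `sentence_transformers.util.cos_sim` with plain numpy: cosine similarity is just the dot product of L2-normalized vectors. A minimal standalone sketch of the same math (the `top_vector_matches` name and toy vectors here are illustrative, not part of the package):

```python
import numpy as np

def top_vector_matches(test_vec, options_list_vec, top=1):
    # Same technique as the new __top_vector_matches above:
    # normalize both sides, then take dot products.
    test_vec = np.asarray(test_vec, dtype=float)
    options = np.asarray(options_list_vec, dtype=float)
    test_norm = test_vec / np.linalg.norm(test_vec)
    options_norm = options / np.linalg.norm(options, axis=1, keepdims=True)
    scores = options_norm @ test_norm
    # Rank option indices by descending similarity and keep the top results.
    order = np.argsort(scores)[::-1][:top]
    return [{"index": int(i), "value": float(scores[i])} for i in order]

# A query along the x-axis matches the x-axis option exactly (1.0)
# and the diagonal option at cos(45°) ≈ 0.707.
print(top_vector_matches([1.0, 0.0], [[0.0, 1.0], [2.0, 0.0], [1.0, 1.0]], top=2))
# [{'index': 1, 'value': 1.0}, {'index': 2, 'value': 0.7071...}]
```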
kobai/ai_rag.py CHANGED
@@ -3,9 +3,7 @@ from pyspark.sql import SparkSession

  from pyspark.sql.types import StructType, StructField, StringType, ArrayType, FloatType, IntegerType
  from pyspark.sql import functions as F
- from sentence_transformers import SentenceTransformer
  from delta import DeltaTable
- from typing import Union
  from langchain_core.language_models.chat_models import BaseChatModel
  from langchain_core.embeddings import Embeddings
  from langchain_community.document_loaders import PySparkDataFrameLoader
@@ -145,13 +143,13 @@ def __generate_sentences_from_questions(tc: AIContext, debug):
      ss.sql(full_sql)


- def encode_to_delta_local(tc: AIContext, st_model: Union[SentenceTransformer, Embeddings], replica_schema=None, batch_size=100000):
+ def encode_to_delta_local(tc: AIContext, st_model: Embeddings, replica_schema=None, batch_size=100000):
      """
      Encode Semantic Data to Vectors in Delta Table

      Parameters:
      tc (TenantClient): The Kobai tenant_client instance instantiated via the SDK.
-     st_model (SentenceTransformer): A sentence_transformers model to use for encoding.
+     st_model (Embeddings): A langchain embedding model to use for encoding.
      replica_schema (str) OPTIONAL: An alternate schema (catalog.database) to create the Delta table. Useful when the base Kobai schema is not on a Unity Catalog.
      """

@@ -174,12 +172,8 @@ def encode_to_delta_local(tc: AIContext, st_model: Union[SentenceTransformer, Em
      content_list = [r["content"] for r in sentences_df.collect()]
      id_list = [r["id"] for r in sentences_df.collect()]

-     if isinstance(st_model, SentenceTransformer):
-         vector_list = st_model.encode(
-             content_list, normalize_embeddings=True, show_progress_bar=True).tolist()
-     else:
-         vector_list = st_model.embed_documents(content_list)
-         for i, v in enumerate(vector_list):
+     vector_list = st_model.embed_documents(content_list)
+     for i, v in enumerate(vector_list):
          vector_list[i] = [float(x) for x in v]
      #vector_list = st_model.encode(
      #    content_list, normalize_embeddings=True, show_progress_bar=True)
@@ -214,13 +208,13 @@ def encode_to_delta_local(tc: AIContext, st_model: Union[SentenceTransformer, Em
      # """)


- def rag_delta(tc: AIContext, emb_model: Union[SentenceTransformer, Embeddings], chat_model: BaseChatModel, question, k=5, replica_schema=None):
+ def rag_delta(tc: AIContext, emb_model: Embeddings, chat_model: BaseChatModel, question, k=5, replica_schema=None):
      """
      Run a RAG query using vectors in Delta table.

      Parameters:
      tc (TenantClient): The Kobai tenant_client instance instantiated via the SDK.
-     emb_model (UNION[SentenceTransformer, Embeddings]): A sentence_transformers or langchain embedding model to use for encoding the query.
+     emb_model (Embeddings): A langchain embedding model to use for encoding the query.
      chat_model (BaseChatModel): A langchain chat model to use in the RAG pipeline.
      question (str): The user's query.
      k (int) OPTIONAL: The number of RAG documents to retrieve.
@@ -233,10 +227,7 @@ def rag_delta(tc: AIContext, emb_model: Union[SentenceTransformer, Embeddings],

      ss = tc.spark_session

-     if isinstance(emb_model, SentenceTransformer):
-         vector_list = emb_model.encode(
-             question, normalize_embeddings=True).tolist()
-     elif isinstance(emb_model, Embeddings):
+     if isinstance(emb_model, Embeddings):
          vector_list = emb_model.embed_query(question)
      else:
          print("Invalid Embedding Model Type")
kobai/tenant_client.py CHANGED
@@ -7,10 +7,8 @@ from pyspark.sql import SparkSession

  from langchain_community.chat_models import ChatDatabricks
  from databricks_langchain import DatabricksEmbeddings
- from sentence_transformers import SentenceTransformer
  from langchain_core.language_models.chat_models import BaseChatModel
  from langchain_core.embeddings import Embeddings
- from typing import Union

  from . import spark_client, databricks_client, ai_query, tenant_api, ai_rag

@@ -63,7 +61,7 @@ class TenantClient:
      # MS Entra Auth
      ########################################

-     def use_browser_token(self, access_token, run_ai_init: bool = True):
+     def use_browser_token(self, access_token):

          """
          Authenticate the TenantClient with the Kobai instance. Returns nothing, but stores bearer token in client.
@@ -72,9 +70,9 @@ class TenantClient:
          Parameters:
          access_token (str): Bearer token for Kobai app session.
          """
-         self._init_post_auth_success(access_token, run_ai_init)
+         self._init_post_auth_success(access_token)

-     def use_access_token(self, access_token: str, id_token: str = None, tenant_id: str = None, run_ai_init: bool = True):
+     def use_access_token(self, access_token: str, id_token: str = None, tenant_id: str = None):

          """
          Authenticate the TenantClient with the Kobai instance. Returns nothing, but stores bearer token in client.
@@ -98,7 +96,7 @@ class TenantClient:
          )

          kb_access_token = response.headers.get('Authorization')
-         self.use_browser_token(kb_access_token, run_ai_init)
+         self.use_browser_token(kb_access_token)

      def get_tenants(self, id_token: str = None):

@@ -125,12 +123,10 @@ class TenantClient:
      def __api_init_session(self):
          self.api_client = tenant_api.TenantAPI(self.token, self.uri, verify=self.ssl_verify, proxies=self.proxies )

-     def _init_post_auth_success(self, access_token, run_ai_init: bool = True):
+     def _init_post_auth_success(self, access_token):
          self.token = access_token
          self.__api_init_session()
          self.__set_tenant_solutionid()
-         if run_ai_init:
-             self.init_ai_components()
          print("Authentication Successful.")

      ########################################
@@ -443,7 +439,7 @@ class TenantClient:
          """
          ai_rag.generate_sentences(self.get_ai_context(), replica_schema=replica_schema, concept_white_list=concept_white_list, use_questions=use_questions, debug=debug)

-     def rag_encode_to_delta_local(self, st_model: Union[SentenceTransformer, Embeddings], replica_schema=None, batch_size=100000):
+     def rag_encode_to_delta_local(self, st_model: Embeddings, replica_schema=None, batch_size=100000):
          """
          Encode Semantic Data to Vectors in Delta Table

@@ -453,7 +449,7 @@ class TenantClient:
          """
          ai_rag.encode_to_delta_local(self.get_ai_context(), st_model=st_model, replica_schema=replica_schema, batch_size=batch_size)

-     def rag_delta(self, emb_model: Union[SentenceTransformer, Embeddings], chat_model: BaseChatModel, question, k=5, replica_schema=None):
+     def rag_delta(self, emb_model: Embeddings, chat_model: BaseChatModel, question, k=5, replica_schema=None):
          """
          Run a RAG query using vectors in Delta table.

@@ -481,9 +477,7 @@ class TenantClient:
          """

          if question_id is None:
-
              suggestions = self.question_search(user_question, k=1)
-
              question_id = suggestions[0]["id"]

          question_results = self.run_question_remote(question_id, dynamic_filters=dynamic_filters)
@@ -493,26 +487,16 @@

          return ai_query.followup_question(user_question, question_results, question_name, question_def, self.embedding_model, self.chat_model, use_inmem_vectors=use_inmem_vectors, k=k)

-     def init_ai_components(self, embedding_model: Union[SentenceTransformer, Embeddings] = None, chat_model: BaseChatModel = None):
+     def init_ai_components(self, embedding_model: Embeddings, chat_model: BaseChatModel):
          """
          Set Chat and Embedding models for AI functions to use. If no arguments provided, Databricks hosted services are used.

          Parameters:
-         embedding_model (Union[SentenceTransformer, Embeddings]) OPTIONAL: A sentence_transformer or Langchain Embedding model.
-         chat_model (BaseChatModel) OPTIONAL: A Langchain BaseChatModel chat model.
+         embedding_model (Embeddings): A Langchain Embedding model.
+         chat_model (BaseChatModel): A Langchain BaseChatModel chat model.
          """
-
-         if embedding_model is not None:
-             self.embedding_model = embedding_model
-         else:
-             #self.embedding_model = SentenceTransformer("baai/bge-large-en-v1.5")
-             self.embedding_model = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
-
-         if chat_model is not None:
-             self.chat_model = chat_model
-         else:
-             self.chat_model = ChatDatabricks(endpoint="databricks-dbrx-instruct")
-
+         self.embedding_model = embedding_model
+         self.chat_model = chat_model
          self.question_search_index = ai_query.init_question_search_index(self.list_questions(), self.embedding_model)

      def question_search(self, search_text, k: int = 1):
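
Because `_init_post_auth_success` no longer calls `init_ai_components` (the `run_ai_init` flag and Databricks defaults are gone), 0.3.1 requires wiring models explicitly after authenticating and before any AI helper such as `question_search` or `followup_question`. A sketch of the new call order, assuming the Databricks endpoints shown in the README below:

```python
from databricks_langchain import DatabricksEmbeddings
from langchain_community.chat_models import ChatDatabricks
from kobai import tenant_client

k = tenant_client.TenantClient('My Demo Tenant', 'https://demo.kobai.io', 'main.demo')
k.use_browser_token(access_token="KOBAI_ACCESS_TOKEN_FROM_BROWSER")  # run_ai_init no longer exists

# Must now be called explicitly; it also builds the question search index.
k.init_ai_components(
    embedding_model=DatabricksEmbeddings(endpoint="databricks-bge-large-en"),
    chat_model=ChatDatabricks(endpoint="databricks-gpt-oss-20b"),
)
suggestions = k.question_search("Which owner owns the most sets?")
```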
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kobai-sdk
- Version: 0.3.0rc1
+ Version: 0.3.1
  Summary: A package that enables interaction with a Kobai tenant.
  Author-email: Ryan Oattes <ryan@kobai.io>
  License: Apache License
@@ -223,7 +223,6 @@ Requires-Dist: langchain-core
  Requires-Dist: langchain-community
  Requires-Dist: langchain_openai
  Requires-Dist: databricks_langchain
- Requires-Dist: sentence-transformers
  Provides-Extra: dev
  Requires-Dist: black; extra == "dev"
  Requires-Dist: bumpver; extra == "dev"
@@ -249,22 +248,53 @@ from kobai import tenant_client, spark_client, databricks_client

  schema = 'main.demo'
  uri = 'https://demo.kobai.io'
- tenant_id = '1'
  tenant_name = 'My Demo Tenant'
-
- k = tenant_client.TenantClient(tenant_name, tenant_id, uri, schema)
+ k = tenant_client.TenantClient(tenant_name, uri, schema)
  ```

  2. Authenticate with the Kobai instance:
+ Authentication can be performed using several methods, such as the device code flow, the on-behalf-of flow, or a browser token.
+
+ #### Authentication via device code
+ Step 1: Obtain an access token from IDM (Identity and Access Management).

  ```python
- client_id = 'your_Entra_app_id_here'
+ from kobai import ms_authenticate
+
  tenant_id = 'your_Entra_directory_id_here'
+ client_id = 'your_Entra_app_id_here'
+
+ access_token = ms_authenticate.device_code(tenant_id, client_id)
+ ```
+
+ Step 2: Use the token to retrieve the list of Kobai tenants (unless the tenant ID is already known).
+
+ ```python
+ tenants = k.get_tenants(id_token=access_token)
+ print(tenants)
+ ```
+
+ Step 3: Authenticate with Kobai for the specific tenant using the IDM access token.

- k.authenticate(client_id, tenant_id)
+ ```python
+ kobai_tenant_id = "5c1ba715-3961-4835-8a10-6f6f963b53ff"
+ k.use_access_token(access_token=access_token, tenant_id=kobai_tenant_id)
+ ```
+
+ At this point, authentication to the Kobai tenant is complete.
+
+ #### Authentication via browser token
+
+ ```python
+ k.use_browser_token(access_token="KOBAI_ACCESS_TOKEN_FROM_BROWSER")
  ```

- 3. Initialize a Spark client using your current `SparkSession`, and generate semantically-rich SQL views describing this Kobai tenant:
+ #### Authentication via on-behalf-of flow
+ Sample code demonstrating authentication via the on-behalf-of flow is available on request.
+
+ 3. Initialize a Spark client using your current `SparkSession`, and generate semantically-rich SQL views describing this Kobai tenant.
+
+ Please note that generation of these semantically-rich SQL views is currently broken and will be fixed in the next release.

  ```python
  k.spark_init_session(spark)
@@ -305,68 +335,41 @@ kobai_query_name = "Set ownership"
  question_json = k.run_question_remote(k.get_question_id(kobai_query_name)) # By questionName
  ```

- 3. Ask a Follow-Up Question: Based on the initial results, you can ask a follow-up question using either Azure OpenAI, Databricks or a user-provided chat model.
-
- #### Using Azure OpenAI
+ 3. Ask a Follow-Up Question: Based on the initial results, you can ask a follow-up question using user-provided chat and embedding models.

- ###### Authentication Methods:
-
- 1. ApiKey
+ #### Using Databricks Embeddings and Chat Models in a Databricks Notebook
+ Initialize the AI components by specifying the embedding and chat models, then ask follow-up questions interactively.

  ```python
- from kobai import ai_query, llm_config
+ from databricks_langchain import DatabricksEmbeddings
+ from langchain_community.chat_models import ChatDatabricks
  import json

- followup_question = "Which owner owns the most sets?"
-
- llm_config = llm_config.LLMConfig(endpoint="https://kobaipoc.openai.azure.com/", api_key="YOUR_API_KEY", deployment="gpt-4o-mini", llm_provider="azure_openai")
-
- output = ai_query.followup_question(followup_question, json.dumps(question_json), kobai_query_name, llm_config=llm_config)
- print(output)
- ```
-
- 2. Azure Active Directory Authentication
-
- Ensure that the logged-in tenant has access to Azure OpenAI.
- In case of databricks notebook, the logged in service principal should have access to Azure OpenAI.
-
- ```python
- from kobai import ai_query, llm_config
- import json
+ # Choose embedding and chat models from Databricks model serving and initialize.
+ embedding_model = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
+ chat_model = ChatDatabricks(endpoint="databricks-gpt-oss-20b")
+ k.init_ai_components(embedding_model=embedding_model, chat_model=chat_model)

  followup_question = "Which owner owns the most sets?"
-
- llm_config = llm_config.LLMConfig(endpoint="https://kobaipoc.openai.azure.com/", deployment="gpt-4o-mini", llm_provider="azure_openai")
- llm_config.get_azure_ad_token()
-
- output = ai_query.followup_question(followup_question, json.dumps(question_json), kobai_query_name, llm_config=llm_config)
+ output = k.followup_question(followup_question, question_id=k.get_question_id(kobai_query_name))
  print(output)
  ```

- #### Using Databricks (Default Configuration)
+ #### Using Azure OpenAI Embeddings and Chat Models

  ```python
- from kobai import ai_query, llm_config
- import json
-
- followup_question = "Which owner owns the most sets?"
-
- llm_config = llm_config.LLMConfig()
-
- output = ai_query.followup_question(followup_question, json.dumps(question_json), kobai_query_name, llm_config=llm_config)
- print(output)
- ```
-
- #### User Provided Chat Model
-
- ```python
- from kobai import ai_query, llm_config
- import json
  from langchain_openai import AzureChatOpenAI
+ from langchain_openai import AzureOpenAIEmbeddings
+ import json

  followup_question = "Which owner owns the most sets?"

- llm_config = llm_config.LLMConfig(debug=True)
+ embedding_model = AzureOpenAIEmbeddings(
+ model="text-embedding-3-small",
+ azure_endpoint="https://kobaipoc.openai.azure.com/",
+ api_key="YOUR_API_KEY",
+ openai_api_version="2023-05-15"
+ )

  chat_model = AzureChatOpenAI(
  azure_endpoint="https://kobaipoc.openai.azure.com/", azure_deployment="gpt-4o-mini",
@@ -375,7 +378,10 @@ openai_api_version="2024-02-15-preview",
  temperature=0.5,
  max_tokens=150,)

- output = ai_query.followup_question(followup_question, json.dumps(question_json), kobai_query_name, override_model=chat_model, llm_config=llm_config)
+ k.init_ai_components(embedding_model=embedding_model, chat_model=chat_model)
+
+ followup_question = "Which theme has the most sets?"
+ output = k.followup_question(followup_question, question_id=k.get_question_id(kobai_query_name))
  print(output)
  ```
@@ -0,0 +1,14 @@
+ kobai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ kobai/ai_query.py,sha256=FnXn2pabJpXfTUcJvieVkAgMAjSTH9u5SFR9SJUJ-Lk,9556
+ kobai/ai_rag.py,sha256=XUq_SnJw17P53Zk75hHJgTryGjHEAyYPwC0r2WtuNp4,14627
+ kobai/databricks_client.py,sha256=fyqqMly2Qm0r1AHWsQjkYeNsDdH0G1JSgTkF9KJ55qA,2118
+ kobai/demo_tenant_client.py,sha256=wlNc-bdI2wotRXo8ppUOalv4hYdBlek_WzJNARZV-AE,9293
+ kobai/ms_authenticate.py,sha256=rlmhtvAaSRBlYmvIBy5epMVa4MBGBLPaMwawu1T_xDQ,2252
+ kobai/spark_client.py,sha256=opM_F-4Ut5Hq5zZjWMuLvUps9sDULvyPNZHXGL8dW1k,776
+ kobai/tenant_api.py,sha256=Q5yuFd9_V4lo3LWzvYEEO3LpDRWFgQD4TlRPXDTGbiE,4368
+ kobai/tenant_client.py,sha256=w83NmLuOEyJjOVUuLva2vbq0zpGFzhi9LdSq1pKClA8,38613
+ kobai_sdk-0.3.1.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ kobai_sdk-0.3.1.dist-info/METADATA,sha256=kZEU4YFcGVr5rxGeHXk6FhXtS2h5RTPcoPE_oSaWjJc,20022
+ kobai_sdk-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ kobai_sdk-0.3.1.dist-info/top_level.txt,sha256=ns1El3BrTTHKvoAgU1XtiSaVIudYeCXbEEUVY8HFDZ4,6
+ kobai_sdk-0.3.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.8.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,14 +0,0 @@
- kobai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kobai/ai_query.py,sha256=xZh_OyakU01gIrnzaW4v_TdfzG51nPu0ntXJw1WEnvw,9424
- kobai/ai_rag.py,sha256=8B3HM4GoGVrgxJG678NN4vGaDwZRYnQiK5SCGiMIYkM,15186
- kobai/databricks_client.py,sha256=fyqqMly2Qm0r1AHWsQjkYeNsDdH0G1JSgTkF9KJ55qA,2118
- kobai/demo_tenant_client.py,sha256=wlNc-bdI2wotRXo8ppUOalv4hYdBlek_WzJNARZV-AE,9293
- kobai/ms_authenticate.py,sha256=rlmhtvAaSRBlYmvIBy5epMVa4MBGBLPaMwawu1T_xDQ,2252
- kobai/spark_client.py,sha256=opM_F-4Ut5Hq5zZjWMuLvUps9sDULvyPNZHXGL8dW1k,776
- kobai/tenant_api.py,sha256=Q5yuFd9_V4lo3LWzvYEEO3LpDRWFgQD4TlRPXDTGbiE,4368
- kobai/tenant_client.py,sha256=o2bifvmYwxL3_gpRMJaVRXrt9CN8Kfcx7p5r3jR9aGg,39415
- kobai_sdk-0.3.0rc1.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- kobai_sdk-0.3.0rc1.dist-info/METADATA,sha256=9yGBoPdo2wWB_wYmAGG00dEY-4xqP7p9Grm1wUWcWfg,19263
- kobai_sdk-0.3.0rc1.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
- kobai_sdk-0.3.0rc1.dist-info/top_level.txt,sha256=ns1El3BrTTHKvoAgU1XtiSaVIudYeCXbEEUVY8HFDZ4,6
- kobai_sdk-0.3.0rc1.dist-info/RECORD,,