langchain-timbr 1.5.2__py3-none-any.whl → 1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '1.5.2'
- __version_tuple__ = version_tuple = (1, 5, 2)
+ __version__ = version = '1.5.3'
+ __version_tuple__ = version_tuple = (1, 5, 3)

  __commit_id__ = commit_id = None
@@ -12,13 +12,14 @@ class LlmTypes(Enum):
  Google = 'chat-google-generative-ai'
  AzureOpenAI = 'azure-openai-chat'
  Snowflake = 'snowflake-cortex'
+ Databricks = 'chat-databricks'
  Timbr = 'timbr'


  class LlmWrapper(LLM):
  """
  LlmWrapper is a unified interface for connecting to various Large Language Model (LLM) providers
- (OpenAI, Anthropic, Google, Azure OpenAI, Snowflake Cortex, etc.) using LangChain. It abstracts
+ (OpenAI, Anthropic, Google, Azure OpenAI, Snowflake Cortex, Databricks, etc.) using LangChain. It abstracts
  the initialization and connection logic for each provider, allowing you to switch between them
  with a consistent API.
  """
@@ -95,12 +96,14 @@ class LlmWrapper(LLM):
  **params,
  )
  elif is_llm_type(llm_type, LlmTypes.Snowflake):
- from langchain_community.chat_models import ChatSnowflakeCortex
+ from langchain_community.chat_models import ChatSnowflakeCortex
  llm_model = model or "openai-gpt-4.1"
  params = self._add_temperature(LlmTypes.Snowflake.name, llm_model, **llm_params)
+ snowflake_password = params.pop('snowflake_api_key', params.pop('snowflake_password', api_key))

  return ChatSnowflakeCortex(
  model=llm_model,
+ snowflake_password=snowflake_password,
  **params,
  )
  elif is_llm_type(llm_type, LlmTypes.AzureOpenAI):
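
Note: in the snowflake_password expression above, Python evaluates the inner pop before the outer call, so both keys are always removed from params; precedence is snowflake_api_key, then snowflake_password, then the api_key fallback. A standalone illustration:

    # The inner pop runs eagerly, so both keys leave params in one expression.
    params = {'snowflake_api_key': 'k1', 'snowflake_password': 'k2'}
    api_key = 'fallback'
    password = params.pop('snowflake_api_key', params.pop('snowflake_password', api_key))
    print(password)  # k1 -- 'snowflake_api_key' wins when present
    print(params)    # {} -- 'snowflake_password' was popped as a side effect
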
@@ -116,6 +119,19 @@ class LlmWrapper(LLM):
  openai_api_version=azure_api_version,
  **params,
  )
+ elif is_llm_type(llm_type, LlmTypes.Databricks):
+ from databricks.sdk import WorkspaceClient
+ from databricks_langchain import ChatDatabricks
+ llm_model = model or "databricks-claude-sonnet-4"
+ params = self._add_temperature(LlmTypes.Databricks.name, llm_model, **llm_params)
+
+ host = params.pop('databricks_host', params.pop('host', None))
+ w = WorkspaceClient(host=host, token=api_key)
+ return ChatDatabricks(
+ endpoint=llm_model,
+ workspace_client=w, # Using authenticated client
+ **params,
+ )
  else:
  raise ValueError(f"Unsupported LLM type: {llm_type}")

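Note: the Databricks branch authenticates once via WorkspaceClient and hands the client to ChatDatabricks (the workspace_client keyword is shown in the diff itself). The same pattern outside the wrapper, with placeholder host and token:

    from databricks.sdk import WorkspaceClient
    from databricks_langchain import ChatDatabricks

    # Placeholder credentials; ChatDatabricks reuses the pre-authenticated client.
    w = WorkspaceClient(host="https://<workspace>.cloud.databricks.com", token="<pat>")
    llm = ChatDatabricks(endpoint="databricks-claude-sonnet-4", workspace_client=w)
    print(llm.invoke("ping").content)
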
@@ -163,12 +179,16 @@ class LlmWrapper(LLM):
  "llama3.1-70b",
  "llama3.1-405b"
  ]
+ elif is_llm_type(self._llm_type, LlmTypes.Databricks):
+ w = self.client.workspace_client
+ models = [ep.name for ep in w.serving_endpoints.list()]
+
  # elif self._is_llm_type(self._llm_type, LlmTypes.Timbr):

  except Exception as e:
  models = []

- return models
+ return sorted(models)


  def _call(self, prompt, **kwargs):
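
Note: model discovery for Databricks enumerates serving endpoints through the SDK, and the method now returns a sorted list for every provider. A standalone sketch of the same call, assuming ambient Databricks credentials:

    from databricks.sdk import WorkspaceClient

    # Assumes DATABRICKS_HOST / DATABRICKS_TOKEN are set in the environment.
    w = WorkspaceClient()
    print(sorted(ep.name for ep in w.serving_endpoints.list()))
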
@@ -44,7 +44,8 @@ def is_llm_type(llm_type, enum_value):
  llm_type_lower == enum_name_lower or
  llm_type_lower == enum_value_lower or
  llm_type_lower.startswith(enum_name_lower) or # Usecase for snowflake which its type is the provider name + the model name
- llm_type_lower.startswith(enum_value_lower)
+ llm_type_lower.startswith(enum_value_lower) or
+ llm_type_lower in enum_value_lower # Check if the enum value includes the llm type - when providing partial name
  )

  return False
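
Note: the added `in` check lets a partial name match an enum value that merely contains it. A simplified restatement of the comparison chain for one entry:

    # "cortex" matches only through the new partial-name path.
    enum_name, enum_value = "snowflake", "snowflake-cortex"
    for llm_type in ("Snowflake", "snowflake-cortex-llama3.1-70b", "cortex"):
        t = llm_type.lower()
        matched = (t == enum_name or t == enum_value or
                   t.startswith(enum_name) or t.startswith(enum_value) or
                   t in enum_value)  # new: enum value contains the given type
        print(llm_type, matched)  # all three print True
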
@@ -22,7 +22,7 @@ class PromptService:
  jwt_tenant_id: Optional[str] = default_jwt_tenant_id,
  timeout: Optional[int] = llm_timeout,
  ):
- self.base_url = base_url.rstrip('/')
+ self.base_url = base_url.rstrip('/') if base_url else ''
  self.token = token
  self.is_jwt = is_jwt
  self.jwt_tenant_id = jwt_tenant_id
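
Note: the guard makes construction safe when no base_url is configured. A two-line illustration:

    # None-safe normalization: empty string instead of AttributeError when unset.
    for base_url in ("https://my-timbr-server/", None):
        print(repr(base_url.rstrip('/') if base_url else ''))
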
@@ -2,61 +2,69 @@
  "OpenAI": [
  "gpt-4",
  "gpt-4-turbo",
- "gpt-4o"
+ "gpt-4o",
+ "gpt-5",
+ "gpt-5-chat-latest",
+ "gpt-5-mini"
  ],
  "Anthropic": [
- "claude-opus-4-20250514",
- "claude-sonnet-4-20250514",
- "claude-3-7-sonnet-20250219",
- "claude-3-5-sonnet-20241022",
  "claude-3-5-haiku-20241022",
  "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-7-sonnet-20250219",
  "claude-3-haiku-20240307",
  "claude-3-opus-20240229",
- "claude-3-sonnet-20240229",
- "claude-2.1",
- "claude-2.0"
+ "claude-opus-4-20250514",
+ "claude-sonnet-4-20250514"
  ],
  "Google": [
- "gemini-1.5-flash-latest",
  "gemini-1.5-flash",
  "gemini-1.5-flash-002",
  "gemini-1.5-flash-8b",
  "gemini-1.5-flash-8b-001",
  "gemini-1.5-flash-8b-latest",
- "gemini-2.5-flash-preview-04-17",
- "gemini-2.5-flash-preview-05-20",
- "gemini-2.5-flash",
- "gemini-2.5-flash-preview-04-17-thinking",
- "gemini-2.5-flash-lite-preview-06-17",
- "gemini-2.5-pro",
- "gemini-2.0-flash-exp",
+ "gemini-1.5-flash-latest",
  "gemini-2.0-flash",
  "gemini-2.0-flash-001",
+ "gemini-2.0-flash-exp",
  "gemini-2.0-flash-exp-image-generation",
- "gemini-2.0-flash-lite-001",
  "gemini-2.0-flash-lite",
- "gemini-2.0-flash-lite-preview-02-05",
+ "gemini-2.0-flash-lite-001",
  "gemini-2.0-flash-lite-preview",
- "gemini-2.0-flash-thinking-exp-01-21",
+ "gemini-2.0-flash-lite-preview-02-05",
  "gemini-2.0-flash-thinking-exp",
+ "gemini-2.0-flash-thinking-exp-01-21",
  "gemini-2.0-flash-thinking-exp-1219",
- "learnlm-2.0-flash-experimental",
- "gemma-3-1b-it",
- "gemma-3-4b-it",
+ "gemini-2.5-flash",
+ "gemini-2.5-flash-lite",
+ "gemini-2.5-flash-lite-preview-06-17",
+ "gemini-2.5-flash-preview-05-20",
  "gemma-3-12b-it",
+ "gemma-3-1b-it",
  "gemma-3-27b-it",
- "gemma-3n-e4b-it",
- "gemma-3n-e2b-it"
+ "gemma-3-4b-it",
+ "gemma-3n-e2b-it",
+ "gemma-3n-e4b-it"
  ],
  "AzureOpenAI": [
  "gpt-4o"
  ],
  "Snowflake": [
- "openai-gpt-4.1",
- "mistral-large2",
+ "llama3.1-405b",
  "llama3.1-70b",
- "llama3.1-405b"
+ "mistral-large2",
+ "openai-gpt-4.1"
+ ],
+ "Databricks": [
+ "databricks-claude-3-7-sonnet",
+ "databricks-claude-sonnet-4",
+ "databricks-gemma-3-12b",
+ "databricks-gpt-oss-120b",
+ "databricks-gpt-oss-20b",
+ "databricks-llama-4-maverick",
+ "databricks-meta-llama-3-1-405b-instruct",
+ "databricks-meta-llama-3-1-8b-instruct",
+ "databricks-meta-llama-3-3-70b-instruct"
  ],
  "Timbr": []
  }
@@ -165,6 +165,40 @@ def _calculate_token_count(llm: LLM, prompt: str) -> int:
  return token_count


+ def _get_response_text(response: Any) -> str:
+ if hasattr(response, "content"):
+ response_text = response.content
+
+ # Handle Databricks gpt-oss type of responses (having list of dicts with type + summary for reasoning or type + text for result)
+ if isinstance(response_text, list):
+ response_text = next(filter(lambda x: x.get('type') == 'text', response.content), None)
+ if isinstance(response_text, dict):
+ response_text = response_text.get('text', '')
+ elif isinstance(response, str):
+ response_text = response
+ else:
+ raise ValueError("Unexpected response format from LLM.")
+
+ return response_text
+
+ def _extract_usage_metadata(response: Any) -> dict:
+ usage_metadata = response.response_metadata
+
+ if usage_metadata and 'usage' in usage_metadata:
+ usage_metadata = usage_metadata['usage']
+
+ if not usage_metadata and 'usage_metadata' in response:
+ usage_metadata = response.usage_metadata
+ if usage_metadata and 'usage' in usage_metadata:
+ usage_metadata = usage_metadata['usage']
+
+ if not usage_metadata and 'usage' in response:
+ usage_metadata = response.usage
+ if usage_metadata and 'usage' in usage_metadata:
+ usage_metadata = usage_metadata['usage']
+
+ return usage_metadata
+
  def determine_concept(
  question: str,
  llm: LLM,
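
Note: _get_response_text normalizes content that arrives as a list of typed parts (reasoning plus text, as produced by Databricks gpt-oss models). A minimal reproduction of the shape it unwraps, using fabricated part values:

    # Simulated message content: a reasoning part followed by a text part.
    content = [
        {"type": "reasoning", "summary": "thinking..."},
        {"type": "text", "text": "SELECT 1"},
    ]
    part = next(filter(lambda x: x.get("type") == "text", content), None)
    print(part.get("text", "") if isinstance(part, dict) else "")  # SELECT 1
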
@@ -253,20 +287,12 @@ def determine_concept(
  continue
  usage_metadata['determine_concept'] = {
  "approximate": apx_token_count,
- # **(response.usage_metadata or response.usage or {}),
- **(response.usage_metadata or {}),
+ **_extract_usage_metadata(response),
  }
  if debug:
  usage_metadata['determine_concept']["p_hash"] = encrypt_prompt(prompt)

- if hasattr(response, "content"):
- response_text = response.content
- elif isinstance(response, str):
- response_text = response
- else:
- raise ValueError("Unexpected response format from LLM.")
-
-
+ response_text = _get_response_text(response)
  candidate = response_text.strip()
  if should_validate and candidate not in concepts.keys():
  error = f"Concept '{determined_concept_name}' not found in the list of concepts."
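
Note: _extract_usage_metadata tries response_metadata, then usage_metadata, then usage, unwrapping a nested 'usage' key at each step. A sketch of the first branch only, against a fabricated response object:

    from types import SimpleNamespace

    # Fake response exposing only response_metadata, the first source checked.
    resp = SimpleNamespace(response_metadata={"usage": {"prompt_tokens": 12, "completion_tokens": 40}})
    meta = resp.response_metadata
    if meta and "usage" in meta:
        meta = meta["usage"]  # unwrap provider-style nesting
    print(meta)  # {'prompt_tokens': 12, 'completion_tokens': 40}
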
@@ -351,13 +377,7 @@ def _build_rel_columns_str(relationships: list[dict], columns_tags: Optional[dic


  def _parse_sql_from_llm_response(response: Any) -> str:
- if hasattr(response, "content"):
- response_text = response.content
- elif isinstance(response, str):
- response_text = response
- else:
- raise ValueError("Unexpected response format from LLM.")
-
+ response_text = _get_response_text(response)
  return (response_text
  .replace("```sql", "")
  .replace("```", "")
@@ -497,8 +517,7 @@ def generate_sql(

  usage_metadata['generate_sql'] = {
  "approximate": apx_token_count,
- # **(response.usage_metadata or response.usage or {}),
- **(response.usage_metadata or {}),
+ **_extract_usage_metadata(response),
  }
  if debug:
  usage_metadata['generate_sql']["p_hash"] = encrypt_prompt(prompt)
@@ -561,8 +580,7 @@ def answer_question(
  usage_metadata = {
  "answer_question": {
  "approximate": apx_token_count,
- # **(response.usage_metadata or response.usage or {}),
- **(response.usage_metadata or {}),
+ **_extract_usage_metadata(response),
  },
  }
  if debug:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-timbr
- Version: 1.5.2
+ Version: 1.5.3
  Summary: LangChain & LangGraph extensions that parse LLM prompts into Timbr semantic SQL and execute them.
  Project-URL: Homepage, https://github.com/WPSemantix/langchain-timbr
  Project-URL: Documentation, https://docs.timbr.ai/doc/docs/integration/langchain-sdk/
@@ -28,15 +28,18 @@ Requires-Dist: langgraph>=0.3.20
  Requires-Dist: pydantic==2.10.4
  Requires-Dist: pytimbr-api>=2.0.0
  Requires-Dist: tiktoken==0.8.0
- Requires-Dist: transformers>=4.51.3
+ Requires-Dist: transformers>=4.53
  Provides-Extra: all
  Requires-Dist: anthropic==0.42.0; extra == 'all'
+ Requires-Dist: databricks-langchain==0.3.0; (python_version < '3.10') and extra == 'all'
+ Requires-Dist: databricks-langchain==0.7.1; (python_version >= '3.10') and extra == 'all'
+ Requires-Dist: databricks-sdk==0.64.0; extra == 'all'
  Requires-Dist: google-generativeai==0.8.4; extra == 'all'
  Requires-Dist: langchain-anthropic>=0.3.1; extra == 'all'
  Requires-Dist: langchain-google-genai>=2.0.9; extra == 'all'
  Requires-Dist: langchain-openai>=0.3.16; extra == 'all'
  Requires-Dist: langchain-tests>=0.3.20; extra == 'all'
- Requires-Dist: openai==1.77.0; extra == 'all'
+ Requires-Dist: openai>=1.77.0; extra == 'all'
  Requires-Dist: pyarrow<19.0.0; extra == 'all'
  Requires-Dist: pytest==8.3.4; extra == 'all'
  Requires-Dist: snowflake-snowpark-python>=1.6.0; extra == 'all'
@@ -45,6 +48,13 @@ Requires-Dist: uvicorn==0.34.0; extra == 'all'
  Provides-Extra: anthropic
  Requires-Dist: anthropic==0.42.0; extra == 'anthropic'
  Requires-Dist: langchain-anthropic>=0.3.1; extra == 'anthropic'
+ Provides-Extra: azure-openai
+ Requires-Dist: langchain-openai>=0.3.16; extra == 'azure-openai'
+ Requires-Dist: openai>=1.77.0; extra == 'azure-openai'
+ Provides-Extra: databricks
+ Requires-Dist: databricks-langchain==0.3.0; (python_version < '3.10') and extra == 'databricks'
+ Requires-Dist: databricks-langchain==0.7.1; (python_version >= '3.10') and extra == 'databricks'
+ Requires-Dist: databricks-sdk==0.64.0; extra == 'databricks'
  Provides-Extra: dev
  Requires-Dist: langchain-tests>=0.3.20; extra == 'dev'
  Requires-Dist: pyarrow<19.0.0; extra == 'dev'
@@ -55,7 +65,7 @@ Requires-Dist: google-generativeai==0.8.4; extra == 'google'
  Requires-Dist: langchain-google-genai>=2.0.9; extra == 'google'
  Provides-Extra: openai
  Requires-Dist: langchain-openai>=0.3.16; extra == 'openai'
- Requires-Dist: openai==1.77.0; extra == 'openai'
+ Requires-Dist: openai>=1.77.0; extra == 'openai'
  Provides-Extra: snowflake
  Requires-Dist: snowflake-snowpark-python>=1.6.0; extra == 'snowflake'
  Requires-Dist: snowflake>=0.8.0; extra == 'snowflake'
@@ -80,15 +90,23 @@ Timbr LangChain LLM SDK is a Python SDK that extends LangChain and LangGraph wit

  ## Dependencies
  - Access to a timbr-server
- - Python from 3.9.13 or newer
+ - Python 3.9.13 or newer

  ## Installation

  ### Using pip
+
  ```bash
  python -m pip install langchain-timbr
  ```

+ ### Install with selected LLM providers
+ #### One of: openai, anthropic, google, azure-openai, snowflake, databricks (or 'all')
+
+ ```bash
+ python -m pip install 'langchain-timbr[<providers, comma-separated without spaces>]'
+ ```
+
  ### Using pip from github
  ```bash
  pip install git+https://github.com/WPSemantix/langchain-timbr
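
Note: concrete instances of the extras syntax introduced in the README above, using extra names from the Provides-Extra entries (the selections themselves are hypothetical):

    python -m pip install 'langchain-timbr[databricks]'
    python -m pip install 'langchain-timbr[openai,snowflake]'
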
@@ -1,5 +1,5 @@
  langchain_timbr/__init__.py,sha256=gxd6Y6QDmYZtPlYVdXtPIy501hMOZXHjWh2qq4qzt_s,828
- langchain_timbr/_version.py,sha256=fUJ-NqWYnyaK1C9rKEno8BrEixvc59sizL4S-uoRUBE,704
+ langchain_timbr/_version.py,sha256=YmPxci9z5OdhwvwXu2qOrZdFU4K4N2lTtMkI0KAJCh0,704
  langchain_timbr/config.py,sha256=NOMjSpo0TVkWT8BdbiGSADU08iknF2bRltFLwQRhpwk,832
  langchain_timbr/timbr_llm_connector.py,sha256=OXRttlEOJf-dTyilnXR6b6Cgl_cWDYrXGXQfmDV6vc8,13206
  langchain_timbr/langchain/__init__.py,sha256=ejcsZKP9PK0j4WrrCCcvBXpDpP-TeRiVb21OIUJqix8,580
@@ -15,14 +15,14 @@ langchain_timbr/langgraph/generate_response_node.py,sha256=gChNFSPjK9lKwblgWTia6
  langchain_timbr/langgraph/generate_timbr_sql_node.py,sha256=qyL7uqB5k-Bv8rE12f2Ub7wlcAw-pQibEPP1SvFKLu0,4638
  langchain_timbr/langgraph/identify_concept_node.py,sha256=ot9TFdRg8FA9JYVrtHLVi5k0vmUHUfL4ptQDFYYqOoA,3376
  langchain_timbr/langgraph/validate_timbr_query_node.py,sha256=TypUs60OaBhOx9Ceq-15qNVuuAvfrFBjQsPRjWK1StQ,4469
- langchain_timbr/llm_wrapper/llm_wrapper.py,sha256=sNMEqhtZx4S0ZKJCyg8OSE3fAWu1xI6Bp_GoRs7k4dI,6801
+ langchain_timbr/llm_wrapper/llm_wrapper.py,sha256=qd6MHbmJsU02no7bAReec3Ps4oYc_m0WPrDFqRY2Dlc,7735
  langchain_timbr/llm_wrapper/timbr_llm_wrapper.py,sha256=sDqDOz0qu8b4WWlagjNceswMVyvEJ8yBWZq2etBh-T0,1362
- langchain_timbr/utils/general.py,sha256=753GNpYiyxhfYq59Bi8qvCyuHmTrD1fobcm6U2jZAF4,2394
- langchain_timbr/utils/prompt_service.py,sha256=pJcBz3MKR51ajdU9gkif1r9_K7FxYbpWBiTkKA0A2q0,11144
- langchain_timbr/utils/temperature_supported_models.json,sha256=e8j9O-68eCJhEK_NWowh3C6FE7UXFbR9icjDQfJBkdM,1596
- langchain_timbr/utils/timbr_llm_utils.py,sha256=Gpp3nKG1MiwNBpl2Uua3pmKyxd1OEirRLW0kkxI473E,22462
+ langchain_timbr/utils/general.py,sha256=Psb9F9ylI0z-1Ddw0Hi74nKl03_aLanIV9YC-MJUdsw,2522
+ langchain_timbr/utils/prompt_service.py,sha256=f-L2w-wRCF9GEMLC7GZy_gZ0pcnEQj0em-lDJvg2onY,11164
+ langchain_timbr/utils/temperature_supported_models.json,sha256=d3UmBUpG38zDjjB42IoGpHTUaf0pHMBRSPY99ao1a3g,1832
+ langchain_timbr/utils/timbr_llm_utils.py,sha256=Mn9Q9Wj97WuOZO3xKxZLjTQx31H22EACAPppz3B0JlE,23202
  langchain_timbr/utils/timbr_utils.py,sha256=p21DwTGhF4iKTLDQBkeBaJDFcXt-Hpu1ij8xzQt00Ng,16958
- langchain_timbr-1.5.2.dist-info/METADATA,sha256=oieqooi9i5wlYlZ3YXJYzZS0VqSljnao7JlqJTSylwM,5235
- langchain_timbr-1.5.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- langchain_timbr-1.5.2.dist-info/licenses/LICENSE,sha256=0ITGFk2alkC7-e--bRGtuzDrv62USIiVyV2Crf3_L_0,1065
- langchain_timbr-1.5.2.dist-info/RECORD,,
+ langchain_timbr-1.5.3.dist-info/METADATA,sha256=jt8vCwK1a1hwjgTPxG2DzhNF0KO0CkqY-3kPTftejTc,6129
+ langchain_timbr-1.5.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langchain_timbr-1.5.3.dist-info/licenses/LICENSE,sha256=0ITGFk2alkC7-e--bRGtuzDrv62USIiVyV2Crf3_L_0,1065
+ langchain_timbr-1.5.3.dist-info/RECORD,,