langchain-timbr 2.1.12__py3-none-any.whl → 2.1.13__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their respective public registries.
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
 
- __version__ = version = '2.1.12'
- __version_tuple__ = version_tuple = (2, 1, 12)
+ __version__ = version = '2.1.13'
+ __version_tuple__ = version_tuple = (2, 1, 13)
 
  __commit_id__ = commit_id = None

@@ -102,7 +102,7 @@ class ExecuteSemanticQueryNode:
 
  def run(self, state: StateGraph) -> dict:
  try:
- prompt = state.messages[-1].content if state.messages[-1] else None
+ prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
  except Exception:
  prompt = state.get('prompt', None)
 

@@ -94,7 +94,7 @@ class GenerateTimbrSqlNode:
 
  def run(self, state: StateGraph) -> dict:
  try:
- prompt = state.messages[-1].content if (state.messages and state.messages[-1]) else None
+ prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
  except Exception:
  prompt = state.get('prompt', None)
 

@@ -66,7 +66,7 @@ class IdentifyConceptNode:
 
  def run(self, state: StateGraph) -> dict:
  try:
- prompt = state.messages[-1].content if state.messages[-1] else None
+ prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
  except Exception:
  prompt = state.get('prompt', None)
 

@@ -763,42 +763,67 @@ def generate_sql(
  if reasoning_status == "correct":
  break
 
- # Step 2: Regenerate SQL with feedback
+ # Step 2: Regenerate SQL with feedback (with validation retries)
  evaluation_note = note + f"\n\nThe previously generated SQL: `{sql_query}` was assessed as '{evaluation.get('assessment')}' because: {evaluation.get('reasoning', '*could not determine cause*')}. Please provide a corrected SQL query that better answers the question: '{question}'."
 
  # Increase graph depth for 2nd+ reasoning attempts, up to max of 3
  context_graph_depth = min(3, int(graph_depth) + step) if graph_depth < 3 and step > 0 else graph_depth
- regen_result = _generate_sql_with_llm(
- question=question,
- llm=llm,
- conn_params=conn_params,
- generate_sql_prompt=generate_sql_prompt,
- current_context=_build_sql_generation_context(
- conn_params=conn_params,
- schema=schema,
- concept=concept,
- concept_metadata=concept_metadata,
- graph_depth=context_graph_depth,
- include_tags=include_tags,
- exclude_properties=exclude_properties,
- db_is_case_sensitive=db_is_case_sensitive,
- max_limit=max_limit),
- note=evaluation_note,
- should_validate_sql=should_validate_sql,
- timeout=timeout,
- debug=debug,
- )
 
- usage_metadata[f'generate_sql_reasoning_step_{step + 1}'] = {
- "approximate": regen_result['apx_token_count'],
- **regen_result['usage_metadata'],
- }
- if debug and 'p_hash' in regen_result:
- usage_metadata[f'generate_sql_reasoning_step_{step + 1}']['p_hash'] = regen_result['p_hash']
+ # Regenerate SQL with validation retries
+ # Always validate during reasoning to ensure quality, regardless of global should_validate_sql flag
+ validation_iteration = 0
+ regen_is_valid = False
+ regen_error = ''
+ regen_sql = None
+
+ while validation_iteration < retries and (regen_sql is None or not regen_is_valid):
+ validation_iteration += 1
+ validation_err_txt = f"\nThe regenerated SQL (`{regen_sql}`) was invalid with error: {regen_error}. Please generate a corrected query." if regen_error and "snowflake" not in llm._llm_type else ""
+
+ regen_result = _generate_sql_with_llm(
+ question=question,
+ llm=llm,
+ conn_params=conn_params,
+ generate_sql_prompt=generate_sql_prompt,
+ current_context=_build_sql_generation_context(
+ conn_params=conn_params,
+ schema=schema,
+ concept=concept,
+ concept_metadata=concept_metadata,
+ graph_depth=context_graph_depth,
+ include_tags=include_tags,
+ exclude_properties=exclude_properties,
+ db_is_case_sensitive=db_is_case_sensitive,
+ max_limit=max_limit),
+ note=evaluation_note + validation_err_txt,
+ should_validate_sql=True, # Always validate during reasoning
+ timeout=timeout,
+ debug=debug,
+ )
+
+ regen_sql = regen_result['sql']
+ regen_is_valid = regen_result['is_valid']
+ regen_error = regen_result['error']
+
+ # Track token usage for each validation iteration
+ if validation_iteration == 1:
+ usage_metadata[f'generate_sql_reasoning_step_{step + 1}'] = {
+ "approximate": regen_result['apx_token_count'],
+ **regen_result['usage_metadata'],
+ }
+ if debug and 'p_hash' in regen_result:
+ usage_metadata[f'generate_sql_reasoning_step_{step + 1}']['p_hash'] = regen_result['p_hash']
+ else:
+ usage_metadata[f'generate_sql_reasoning_step_{step + 1}_validation_{validation_iteration}'] = {
+ "approximate": regen_result['apx_token_count'],
+ **regen_result['usage_metadata'],
+ }
+ if debug and 'p_hash' in regen_result:
+ usage_metadata[f'generate_sql_reasoning_step_{step + 1}_validation_{validation_iteration}']['p_hash'] = regen_result['p_hash']
 
- sql_query = regen_result['sql']
- is_sql_valid = regen_result['is_valid']
- error = regen_result['error']
+ sql_query = regen_sql
+ is_sql_valid = regen_is_valid
+ error = regen_error
 
  except TimeoutError as e:
  raise Exception(f"LLM call timed out: {str(e)}")

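The rewritten reasoning step replaces the single regeneration call with a retry loop: it keeps regenerating until the SQL validates or the retry budget is exhausted, always validating during reasoning regardless of the global should_validate_sql flag, and (except for Snowflake-backed LLMs) feeds the previously invalid SQL and its error back into the note. Here is a stripped-down sketch of that control flow, with the usage-metadata bookkeeping and the Snowflake exclusion omitted and `generate` standing in for _generate_sql_with_llm:

    def regenerate_until_valid(generate, retries, base_note):
        """Retry SQL regeneration until it validates or `retries` attempts are used."""
        sql, is_valid, error = None, False, ''
        iteration = 0
        while iteration < retries and (sql is None or not is_valid):
            iteration += 1
            err_note = (f"\nThe regenerated SQL (`{sql}`) was invalid with error: {error}. "
                        "Please generate a corrected query.") if error else ""
            result = generate(note=base_note + err_note)  # one LLM call, validated
            sql, is_valid, error = result['sql'], result['is_valid'], result['error']
        return sql, is_valid, error

Token usage is still tracked per attempt: the first attempt keeps the existing generate_sql_reasoning_step_{n} key, and each additional attempt is recorded under generate_sql_reasoning_step_{n}_validation_{i}.
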
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-timbr
- Version: 2.1.12
+ Version: 2.1.13
  Summary: LangChain & LangGraph extensions that parse LLM prompts into Timbr semantic SQL and execute them.
  Project-URL: Homepage, https://github.com/WPSemantix/langchain-timbr
  Project-URL: Documentation, https://docs.timbr.ai/doc/docs/integration/langchain-sdk/

@@ -1,5 +1,5 @@
  langchain_timbr/__init__.py,sha256=qNyk3Rt-8oWr_OGuU_E-6siNZXuCnvVEkj65EIuVbbQ,824
- langchain_timbr/_version.py,sha256=1lj0AemMGT5453uaWZkkL9tBMOyKax93TB9JdLpfKiI,706
+ langchain_timbr/_version.py,sha256=OCsbmwBvjsO5FZ5lUXj9InhdI3ucoauO2iZuo5VK5F8,706
  langchain_timbr/config.py,sha256=b-mLIrswgSez282ItJntb5mCa8poOe7kqXG1W_jGxvw,1971
  langchain_timbr/timbr_llm_connector.py,sha256=Y3nzWoocI5txvPGPAxwFsJde9k9l2J9ioB54MYRLrEQ,13288
  langchain_timbr/langchain/__init__.py,sha256=ejcsZKP9PK0j4WrrCCcvBXpDpP-TeRiVb21OIUJqix8,580
@@ -10,19 +10,19 @@ langchain_timbr/langchain/identify_concept_chain.py,sha256=kuzg0jJQpFGIiaxtNhdQ5
  langchain_timbr/langchain/timbr_sql_agent.py,sha256=AaBNJz3qKwJZVd-mvEmlVp6REE8QEEwlOvtkkjdBxxc,20938
  langchain_timbr/langchain/validate_timbr_sql_chain.py,sha256=ndFWSdb_LNI-BQg254hfTlzcZXOuM1opHHLdC0SALDo,10317
  langchain_timbr/langgraph/__init__.py,sha256=mKBFd0x01jWpRujUWe-suX3FFhenPoDxrvzs8I0mum0,457
- langchain_timbr/langgraph/execute_timbr_query_node.py,sha256=FUsDHQAEMKYETi3lxNuF_PU8yvq9z5PYPyRFsHjLPbs,6057
+ langchain_timbr/langgraph/execute_timbr_query_node.py,sha256=CYFN2VW7eKdgAylX0fktJQFEEHu8RgA8n1q32QwMFtI,6100
  langchain_timbr/langgraph/generate_response_node.py,sha256=opwscNEXabaSyCFLbzGQFkDFEymJurhNU9aAtm1rnOk,2375
- langchain_timbr/langgraph/generate_timbr_sql_node.py,sha256=tLO0tosmMxpNq1EBBjma4QVw0mNzYN_6bY8uG6QG3uo,5412
- langchain_timbr/langgraph/identify_concept_node.py,sha256=aiLDFEcz_vM4zZ_ULe1SvJKmI-e4Fb2SibZQaEPz_eY,3649
+ langchain_timbr/langgraph/generate_timbr_sql_node.py,sha256=hyrnUwgQTKLV5bw-NmPgNYGZXGh-pgVeBg5gbGaJKb0,5434
+ langchain_timbr/langgraph/identify_concept_node.py,sha256=zskaVAeY473j_hBbjULoAi1cV0D4hfKVapklBmmukBM,3692
  langchain_timbr/langgraph/validate_timbr_query_node.py,sha256=hPDa0fXgChxpIXSrZ3M70PFtcIkZ4HMdePMEc6W3nxw,5272
  langchain_timbr/llm_wrapper/llm_wrapper.py,sha256=j94DqIGECXyfAVayLC7VaNxs_8n1qYFiHY2Qvt2B3Bc,17537
  langchain_timbr/llm_wrapper/timbr_llm_wrapper.py,sha256=sDqDOz0qu8b4WWlagjNceswMVyvEJ8yBWZq2etBh-T0,1362
  langchain_timbr/utils/general.py,sha256=KkehHvIj8GoQ_0KVXLcUVeaYaTtkuzgXmYYx2TXJhI4,10253
  langchain_timbr/utils/prompt_service.py,sha256=QVmfA9cHO2IPVsKG8V5cuMm2gPfvRq2VzLcx04sqT88,12197
  langchain_timbr/utils/temperature_supported_models.json,sha256=d3UmBUpG38zDjjB42IoGpHTUaf0pHMBRSPY99ao1a3g,1832
- langchain_timbr/utils/timbr_llm_utils.py,sha256=ggz-9SpxFNxBO3bwSCNdNeyjZJYK-vCIufBvRZAgyMM,34418
+ langchain_timbr/utils/timbr_llm_utils.py,sha256=0Fn8CHcs7hydvCRqXEwU2gvKvlnqufq6hOIzPcVSADw,36163
  langchain_timbr/utils/timbr_utils.py,sha256=rhnGQndTIm4Hb5R_vAuJydnjYd88nbzm1I0yp5L8oEQ,19268
- langchain_timbr-2.1.12.dist-info/METADATA,sha256=pk7feupa72SCwWmHyca4u9J_fnASPxnCHb8jphZBWfw,10768
- langchain_timbr-2.1.12.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- langchain_timbr-2.1.12.dist-info/licenses/LICENSE,sha256=0ITGFk2alkC7-e--bRGtuzDrv62USIiVyV2Crf3_L_0,1065
- langchain_timbr-2.1.12.dist-info/RECORD,,
+ langchain_timbr-2.1.13.dist-info/METADATA,sha256=NXba1vBmwR3pbjYP5DDMHdAoI_f7pegsdkteVCVBC1A,10768
+ langchain_timbr-2.1.13.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ langchain_timbr-2.1.13.dist-info/licenses/LICENSE,sha256=0ITGFk2alkC7-e--bRGtuzDrv62USIiVyV2Crf3_L_0,1065
+ langchain_timbr-2.1.13.dist-info/RECORD,,