langchain-timbr 2.1.12__tar.gz → 2.1.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.github/workflows/install-dependencies-and-run-tests.yml +1 -0
  2. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/PKG-INFO +1 -1
  3. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/_version.py +2 -2
  4. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langgraph/execute_timbr_query_node.py +1 -1
  5. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langgraph/generate_timbr_sql_node.py +1 -1
  6. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langgraph/identify_concept_node.py +1 -1
  7. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/utils/timbr_llm_utils.py +55 -30
  8. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_langgraph_nodes.py +22 -1
  9. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.github/dependabot.yml +0 -0
  10. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.github/pull_request_template.md +0 -0
  11. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.github/workflows/_codespell.yml +0 -0
  12. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.github/workflows/_fossa.yml +0 -0
  13. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.github/workflows/publish.yml +0 -0
  14. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/.gitignore +0 -0
  15. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/LICENSE +0 -0
  16. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/README.md +0 -0
  17. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/SECURITY.md +0 -0
  18. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/pyproject.toml +0 -0
  19. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/pytest.ini +0 -0
  20. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/requirements.txt +0 -0
  21. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/requirements310.txt +0 -0
  22. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/requirements311.txt +0 -0
  23. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/__init__.py +0 -0
  24. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/config.py +0 -0
  25. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/__init__.py +0 -0
  26. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/execute_timbr_query_chain.py +0 -0
  27. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/generate_answer_chain.py +0 -0
  28. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/generate_timbr_sql_chain.py +0 -0
  29. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/identify_concept_chain.py +0 -0
  30. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/timbr_sql_agent.py +0 -0
  31. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langchain/validate_timbr_sql_chain.py +0 -0
  32. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langgraph/__init__.py +0 -0
  33. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langgraph/generate_response_node.py +0 -0
  34. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/langgraph/validate_timbr_query_node.py +0 -0
  35. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/llm_wrapper/llm_wrapper.py +0 -0
  36. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/llm_wrapper/timbr_llm_wrapper.py +0 -0
  37. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/timbr_llm_connector.py +0 -0
  38. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/utils/general.py +0 -0
  39. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/utils/prompt_service.py +0 -0
  40. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/utils/temperature_supported_models.json +0 -0
  41. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/src/langchain_timbr/utils/timbr_utils.py +0 -0
  42. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/README.md +0 -0
  43. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/conftest.py +0 -0
  44. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_agent_integration.py +0 -0
  45. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_azure_databricks_provider.py +0 -0
  46. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_azure_openai_model.py +0 -0
  47. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_chain_pipeline.py +0 -0
  48. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_chain_reasoning.py +0 -0
  49. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_jwt_token.py +0 -0
  50. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_langchain_chains.py +0 -0
  51. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/integration/test_timeout_functionality.py +0 -0
  52. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/conftest.py +0 -0
  53. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/test_chain_documentation.py +0 -0
  54. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/test_connection_validation.py +0 -0
  55. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/test_llm_wrapper_optional_params.py +0 -0
  56. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/test_optional_llm_integration.py +0 -0
  57. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/test_standard_chain_requirements.py +0 -0
  58. {langchain_timbr-2.1.12 → langchain_timbr-2.1.13}/tests/standard/test_unit_tests.py +0 -0
.github/workflows/install-dependencies-and-run-tests.yml
@@ -22,6 +22,7 @@ env:
   JWT_PASSWORD: ${{ secrets.JWT_PASSWORD }}
   JWT_SCOPE: ${{ secrets.JWT_SCOPE }}
   JWT_SECRET: ${{ secrets.JWT_SECRET }}
+  LLM_TIMEOUT: ${{ secrets.LLM_TIMEOUT }}
 
 jobs:
   test-python-310:
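The only workflow change is threading a new LLM_TIMEOUT secret into the test environment. As a rough, hypothetical sketch of how such a value is typically consumed on the Python side (the consuming code is not shown in this diff, and the variable name LLM_TIMEOUT_SECONDS and the default below are assumptions):

import os

# Hypothetical: read the CI-provided timeout, falling back to a local default.
# The real default and the exact setting used by the package are not shown in this diff.
LLM_TIMEOUT_SECONDS = float(os.getenv("LLM_TIMEOUT", "60"))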
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-timbr
-Version: 2.1.12
+Version: 2.1.13
 Summary: LangChain & LangGraph extensions that parse LLM prompts into Timbr semantic SQL and execute them.
 Project-URL: Homepage, https://github.com/WPSemantix/langchain-timbr
 Project-URL: Documentation, https://docs.timbr.ai/doc/docs/integration/langchain-sdk/
src/langchain_timbr/_version.py
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '2.1.12'
-__version_tuple__ = version_tuple = (2, 1, 12)
+__version__ = version = '2.1.13'
+__version_tuple__ = version_tuple = (2, 1, 13)
 
 __commit_id__ = commit_id = None
src/langchain_timbr/langgraph/execute_timbr_query_node.py
@@ -102,7 +102,7 @@ class ExecuteSemanticQueryNode:
 
     def run(self, state: StateGraph) -> dict:
         try:
-            prompt = state.messages[-1].content if state.messages[-1] else None
+            prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
         except Exception:
             prompt = state.get('prompt', None)
 
src/langchain_timbr/langgraph/generate_timbr_sql_node.py
@@ -94,7 +94,7 @@ class GenerateTimbrSqlNode:
 
     def run(self, state: StateGraph) -> dict:
         try:
-            prompt = state.messages[-1].content if (state.messages and state.messages[-1]) else None
+            prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
         except Exception:
             prompt = state.get('prompt', None)
 
src/langchain_timbr/langgraph/identify_concept_node.py
@@ -66,7 +66,7 @@ class IdentifyConceptNode:
 
     def run(self, state: StateGraph) -> dict:
         try:
-            prompt = state.messages[-1].content if state.messages[-1] else None
+            prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
         except Exception:
             prompt = state.get('prompt', None)
 
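The three langgraph nodes above all switch from attribute access (.content) to dict access (.get('content')) when pulling the prompt out of the last message. A minimal, self-contained sketch of that shared pattern, assuming dict-shaped message entries and using a hypothetical _State stand-in and extract_prompt helper (not part of langchain-timbr):

# Illustrative sketch only; `_State` and `extract_prompt` are hypothetical helpers
# that mirror the try/except shape used by the nodes.

class _State(dict):
    """Stand-in for a graph state: behaves like a dict, may also carry a `messages` attribute."""
    pass

def extract_prompt(state):
    try:
        # New behavior: messages are dict-shaped, e.g. {"content": "..."}.
        prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
    except Exception:
        # Plain dict states (no `messages` attribute) fall back to the 'prompt' key.
        prompt = state.get('prompt', None)
    return prompt

with_messages = _State()
with_messages.messages = [{"content": "What are total sales per region?"}]
print(extract_prompt(with_messages))   # -> content of the last message

plain = _State({"prompt": "What are total sales per region?"})
print(extract_prompt(plain))           # -> falls back to state['prompt']

Under the old attribute access, a dict-shaped message raised AttributeError and silently fell through to the 'prompt' fallback; the new form reads dict messages directly.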
src/langchain_timbr/utils/timbr_llm_utils.py
@@ -763,42 +763,67 @@ def generate_sql(
             if reasoning_status == "correct":
                 break
 
-            # Step 2: Regenerate SQL with feedback
+            # Step 2: Regenerate SQL with feedback (with validation retries)
             evaluation_note = note + f"\n\nThe previously generated SQL: `{sql_query}` was assessed as '{evaluation.get('assessment')}' because: {evaluation.get('reasoning', '*could not determine cause*')}. Please provide a corrected SQL query that better answers the question: '{question}'."
 
             # Increase graph depth for 2nd+ reasoning attempts, up to max of 3
             context_graph_depth = min(3, int(graph_depth) + step) if graph_depth < 3 and step > 0 else graph_depth
-            regen_result = _generate_sql_with_llm(
-                question=question,
-                llm=llm,
-                conn_params=conn_params,
-                generate_sql_prompt=generate_sql_prompt,
-                current_context=_build_sql_generation_context(
-                    conn_params=conn_params,
-                    schema=schema,
-                    concept=concept,
-                    concept_metadata=concept_metadata,
-                    graph_depth=context_graph_depth,
-                    include_tags=include_tags,
-                    exclude_properties=exclude_properties,
-                    db_is_case_sensitive=db_is_case_sensitive,
-                    max_limit=max_limit),
-                note=evaluation_note,
-                should_validate_sql=should_validate_sql,
-                timeout=timeout,
-                debug=debug,
-            )
 
-            usage_metadata[f'generate_sql_reasoning_step_{step + 1}'] = {
-                "approximate": regen_result['apx_token_count'],
-                **regen_result['usage_metadata'],
-            }
-            if debug and 'p_hash' in regen_result:
-                usage_metadata[f'generate_sql_reasoning_step_{step + 1}']['p_hash'] = regen_result['p_hash']
+            # Regenerate SQL with validation retries
+            # Always validate during reasoning to ensure quality, regardless of global should_validate_sql flag
+            validation_iteration = 0
+            regen_is_valid = False
+            regen_error = ''
+            regen_sql = None
+
+            while validation_iteration < retries and (regen_sql is None or not regen_is_valid):
+                validation_iteration += 1
+                validation_err_txt = f"\nThe regenerated SQL (`{regen_sql}`) was invalid with error: {regen_error}. Please generate a corrected query." if regen_error and "snowflake" not in llm._llm_type else ""
+
+                regen_result = _generate_sql_with_llm(
+                    question=question,
+                    llm=llm,
+                    conn_params=conn_params,
+                    generate_sql_prompt=generate_sql_prompt,
+                    current_context=_build_sql_generation_context(
+                        conn_params=conn_params,
+                        schema=schema,
+                        concept=concept,
+                        concept_metadata=concept_metadata,
+                        graph_depth=context_graph_depth,
+                        include_tags=include_tags,
+                        exclude_properties=exclude_properties,
+                        db_is_case_sensitive=db_is_case_sensitive,
+                        max_limit=max_limit),
+                    note=evaluation_note + validation_err_txt,
+                    should_validate_sql=True,  # Always validate during reasoning
+                    timeout=timeout,
+                    debug=debug,
+                )
+
+                regen_sql = regen_result['sql']
+                regen_is_valid = regen_result['is_valid']
+                regen_error = regen_result['error']
+
+                # Track token usage for each validation iteration
+                if validation_iteration == 1:
+                    usage_metadata[f'generate_sql_reasoning_step_{step + 1}'] = {
+                        "approximate": regen_result['apx_token_count'],
+                        **regen_result['usage_metadata'],
+                    }
+                    if debug and 'p_hash' in regen_result:
+                        usage_metadata[f'generate_sql_reasoning_step_{step + 1}']['p_hash'] = regen_result['p_hash']
+                else:
+                    usage_metadata[f'generate_sql_reasoning_step_{step + 1}_validation_{validation_iteration}'] = {
+                        "approximate": regen_result['apx_token_count'],
+                        **regen_result['usage_metadata'],
+                    }
+                    if debug and 'p_hash' in regen_result:
+                        usage_metadata[f'generate_sql_reasoning_step_{step + 1}_validation_{validation_iteration}']['p_hash'] = regen_result['p_hash']
 
-            sql_query = regen_result['sql']
-            is_sql_valid = regen_result['is_valid']
-            error = regen_result['error']
+            sql_query = regen_sql
+            is_sql_valid = regen_is_valid
+            error = regen_error
 
         except TimeoutError as e:
             raise Exception(f"LLM call timed out: {str(e)}")
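The regeneration step now wraps the single _generate_sql_with_llm call in a validation-retry loop and forces should_validate_sql=True during reasoning. A condensed sketch of the loop's shape, with hypothetical generate_once and validate callables standing in for the LLM call and the SQL validation (this is not the package's code, only the pattern the hunk introduces):

# Hedged sketch of the retry shape only; `generate_once` and `validate` are placeholders.
from typing import Callable, Optional, Tuple

def regenerate_with_validation(
    note: str,
    retries: int,
    generate_once: Callable[[str], str],
    validate: Callable[[str], Tuple[bool, str]],
) -> Tuple[Optional[str], bool, str]:
    sql: Optional[str] = None
    is_valid, error = False, ''
    iteration = 0
    while iteration < retries and (sql is None or not is_valid):
        iteration += 1
        # Feed the previous validation error back into the next attempt's note.
        err_hint = (
            f"\nThe regenerated SQL (`{sql}`) was invalid with error: {error}. "
            "Please generate a corrected query."
        ) if error else ""
        sql = generate_once(note + err_hint)   # one LLM regeneration attempt
        is_valid, error = validate(sql)        # e.g. a dry-run against the warehouse
    return sql, is_valid, error

Token usage is tracked per attempt: the first attempt keeps the original generate_sql_reasoning_step_{n} key, and later attempts add a _validation_{i} suffix, so retries remain visible in usage_metadata.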
tests/integration/test_langgraph_nodes.py
@@ -29,7 +29,7 @@ class TestLangGraphNodes:
         # Create a test state payload.
         state = {
             "prompt": config["test_prompt"],
-            "messages": [{ "content": config["test_prompt"] }],
+            # "messages": [{ "content": config["test_prompt"] }],
         }
         result = node(state)
         print("IdentifyConceptNode result:", result)
@@ -111,3 +111,24 @@ class TestLangGraphNodes:
         print("GenerateResponseNode result:", result)
         assert "answer" in result, "Result should contain 'answer'"
         assert result["answer"], "Answer should not be empty"
+
+    def test_execute_node_with_state_graph(self, llm, config):
+        """Test basic ExecuteSemanticQueryNode functionality."""
+        from langgraph.graph import StateGraph
+        state = StateGraph(dict)
+        state.messages = [{"content": config["test_prompt"]}]
+
+        execute_query_node = ExecuteSemanticQueryNode(
+            llm=llm,
+            url=config["timbr_url"],
+            token=config["timbr_token"],
+            ontology=config["timbr_ontology"],
+            verify_ssl=config["verify_ssl"],
+        )
+
+        output = execute_query_node(state)
+
+        print("ExecuteSemanticQueryNode result:", output)
+        assert "rows" in output, "Result should contain 'rows'"
+        assert isinstance(output["rows"], list), "'rows' should be a list"
+        assert output["sql"], "SQL should be present in the result"