langchain-timbr 2.1.12.tar.gz → 2.1.14.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.github/workflows/install-dependencies-and-run-tests.yml +1 -0
  2. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/PKG-INFO +1 -1
  3. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/_version.py +2 -2
  4. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/config.py +1 -1
  5. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/execute_timbr_query_chain.py +1 -1
  6. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/validate_timbr_sql_chain.py +1 -1
  7. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langgraph/execute_timbr_query_node.py +1 -1
  8. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langgraph/generate_timbr_sql_node.py +1 -1
  9. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langgraph/identify_concept_node.py +1 -1
  10. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/utils/timbr_llm_utils.py +59 -34
  11. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/utils/timbr_utils.py +23 -10
  12. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_langgraph_nodes.py +22 -1
  13. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_timeout_functionality.py +2 -2
  14. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.github/dependabot.yml +0 -0
  15. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.github/pull_request_template.md +0 -0
  16. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.github/workflows/_codespell.yml +0 -0
  17. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.github/workflows/_fossa.yml +0 -0
  18. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.github/workflows/publish.yml +0 -0
  19. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/.gitignore +0 -0
  20. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/LICENSE +0 -0
  21. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/README.md +0 -0
  22. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/SECURITY.md +0 -0
  23. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/pyproject.toml +0 -0
  24. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/pytest.ini +0 -0
  25. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/requirements.txt +0 -0
  26. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/requirements310.txt +0 -0
  27. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/requirements311.txt +0 -0
  28. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/__init__.py +0 -0
  29. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/__init__.py +0 -0
  30. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/generate_answer_chain.py +0 -0
  31. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/generate_timbr_sql_chain.py +0 -0
  32. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/identify_concept_chain.py +0 -0
  33. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langchain/timbr_sql_agent.py +0 -0
  34. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langgraph/__init__.py +0 -0
  35. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langgraph/generate_response_node.py +0 -0
  36. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/langgraph/validate_timbr_query_node.py +0 -0
  37. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/llm_wrapper/llm_wrapper.py +0 -0
  38. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/llm_wrapper/timbr_llm_wrapper.py +0 -0
  39. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/timbr_llm_connector.py +0 -0
  40. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/utils/general.py +0 -0
  41. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/utils/prompt_service.py +0 -0
  42. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/src/langchain_timbr/utils/temperature_supported_models.json +0 -0
  43. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/README.md +0 -0
  44. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/conftest.py +0 -0
  45. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_agent_integration.py +0 -0
  46. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_azure_databricks_provider.py +0 -0
  47. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_azure_openai_model.py +0 -0
  48. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_chain_pipeline.py +0 -0
  49. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_chain_reasoning.py +0 -0
  50. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_jwt_token.py +0 -0
  51. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/integration/test_langchain_chains.py +0 -0
  52. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/conftest.py +0 -0
  53. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/test_chain_documentation.py +0 -0
  54. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/test_connection_validation.py +0 -0
  55. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/test_llm_wrapper_optional_params.py +0 -0
  56. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/test_optional_llm_integration.py +0 -0
  57. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/test_standard_chain_requirements.py +0 -0
  58. {langchain_timbr-2.1.12 → langchain_timbr-2.1.14}/tests/standard/test_unit_tests.py +0 -0
.github/workflows/install-dependencies-and-run-tests.yml
@@ -22,6 +22,7 @@ env:
  JWT_PASSWORD: ${{ secrets.JWT_PASSWORD }}
  JWT_SCOPE: ${{ secrets.JWT_SCOPE }}
  JWT_SECRET: ${{ secrets.JWT_SECRET }}
+ LLM_TIMEOUT: ${{ secrets.LLM_TIMEOUT }}

  jobs:
  test-python-310:
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-timbr
- Version: 2.1.12
+ Version: 2.1.14
  Summary: LangChain & LangGraph extensions that parse LLM prompts into Timbr semantic SQL and execute them.
  Project-URL: Homepage, https://github.com/WPSemantix/langchain-timbr
  Project-URL: Documentation, https://docs.timbr.ai/doc/docs/integration/langchain-sdk/
src/langchain_timbr/_version.py
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '2.1.12'
- __version_tuple__ = version_tuple = (2, 1, 12)
+ __version__ = version = '2.1.14'
+ __version_tuple__ = version_tuple = (2, 1, 14)

  __commit_id__ = commit_id = None
src/langchain_timbr/config.py
@@ -19,7 +19,7 @@ llm_model = os.environ.get('LLM_MODEL')
  llm_api_key = os.environ.get('LLM_API_KEY')
  llm_temperature = os.environ.get('LLM_TEMPERATURE', 0.0)
  llm_additional_params = os.environ.get('LLM_ADDITIONAL_PARAMS', '')
- llm_timeout = to_integer(os.environ.get('LLM_TIMEOUT', 60))  # Default 60 seconds timeout
+ llm_timeout = to_integer(os.environ.get('LLM_TIMEOUT', 120))  # Default 120 seconds timeout

  # Optional for Azure OpenAI with Service Principal authentication
  llm_tenant_id = os.environ.get('LLM_TENANT_ID', None)
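The default LLM timeout doubles from 60 to 120 seconds, and the CI workflow above now forwards an `LLM_TIMEOUT` secret. A minimal sketch of overriding the value from the environment, assuming `config` is importable as `langchain_timbr.config` and is evaluated at import time as the diff suggests (the value `300` is illustrative):

```python
import os

# Hypothetical override: set LLM_TIMEOUT before the package reads the environment.
os.environ["LLM_TIMEOUT"] = "300"  # e.g. five minutes for slow models

from langchain_timbr import config  # assumed import path for src/langchain_timbr/config.py

print(config.llm_timeout)  # expected: 300; falls back to 120 when LLM_TIMEOUT is unset
```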
src/langchain_timbr/langchain/execute_timbr_query_chain.py
@@ -252,7 +252,7 @@ class ExecuteTimbrQueryChain(Chain):
  usage_metadata = {}

  if sql and self._should_validate_sql:
-     is_sql_valid, error = validate_sql(sql, self._get_conn_params())
+     is_sql_valid, error, sql = validate_sql(sql, self._get_conn_params())

  is_infered = False
  iteration = 0
src/langchain_timbr/langchain/validate_timbr_sql_chain.py
@@ -180,7 +180,7 @@ class ValidateTimbrSqlChain(Chain):
  concept = self._concept
  reasoning_status = None

- is_sql_valid, error = validate_sql(sql, self._get_conn_params())
+ is_sql_valid, error, sql = validate_sql(sql, self._get_conn_params())
  if not is_sql_valid:
      prompt_extension = self._note + '\n' if self._note else ""
      generate_res = generate_sql(
src/langchain_timbr/langgraph/execute_timbr_query_node.py
@@ -102,7 +102,7 @@ class ExecuteSemanticQueryNode:

  def run(self, state: StateGraph) -> dict:
      try:
-         prompt = state.messages[-1].content if state.messages[-1] else None
+         prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
      except Exception:
          prompt = state.get('prompt', None)

src/langchain_timbr/langgraph/generate_timbr_sql_node.py
@@ -94,7 +94,7 @@ class GenerateTimbrSqlNode:

  def run(self, state: StateGraph) -> dict:
      try:
-         prompt = state.messages[-1].content if (state.messages and state.messages[-1]) else None
+         prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
      except Exception:
          prompt = state.get('prompt', None)

src/langchain_timbr/langgraph/identify_concept_node.py
@@ -66,7 +66,7 @@ class IdentifyConceptNode:

  def run(self, state: StateGraph) -> dict:
      try:
-         prompt = state.messages[-1].content if state.messages[-1] else None
+         prompt = state.messages[-1].get('content') if state.messages[-1] and 'content' in state.messages[-1] else None
      except Exception:
          prompt = state.get('prompt', None)

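The three LangGraph nodes above now read the newest message as a dict via `.get('content')` instead of an attribute, falling back to `state.get('prompt')` when `state.messages` is absent. A brief invocation sketch, assuming the node class is importable from the package root and using placeholder connection values (the constructor arguments mirror the integration test added further below):

```python
from langchain_openai import ChatOpenAI  # assumption: any LangChain chat model works here
from langchain_timbr import ExecuteSemanticQueryNode  # assumed import path

node = ExecuteSemanticQueryNode(
    llm=ChatOpenAI(model="gpt-4o-mini"),
    url="https://my-timbr-host",   # placeholder
    token="tk_xxx",                # placeholder
    ontology="my_ontology",        # placeholder
)

# Plain dict state: `state.messages` raises, so the node falls back to state.get('prompt').
result = node({"prompt": "List the top 10 customers by total sales"})

# State object carrying dict-shaped messages: the new `.get('content')` path is used.
class ChatState(dict):
    pass

state = ChatState()
state.messages = [{"content": "List the top 10 customers by total sales"}]
result = node(state)
```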
src/langchain_timbr/utils/timbr_llm_utils.py
@@ -52,14 +52,14 @@ def _clean_snowflake_prompt(prompt: Any) -> None:
  prompt[1].content = clean_func(prompt[1].content)  # User message


- def _call_llm_with_timeout(llm: LLM, prompt: Any, timeout: int = 60) -> Any:
+ def _call_llm_with_timeout(llm: LLM, prompt: Any, timeout: int = 120) -> Any:
      """
      Call LLM with timeout to prevent hanging.

      Args:
          llm: The LLM instance
          prompt: The prompt to send
-         timeout: Timeout in seconds (default: 60)
+         timeout: Timeout in seconds (default: 120)

      Returns:
          LLM response
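Only the signature and docstring of `_call_llm_with_timeout` appear in this hunk, so its body is not shown. For orientation, a wrapper of this shape is often built on a worker thread; the following is a rough sketch under that assumption, not the package's actual implementation:

```python
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeout
from typing import Any


def call_llm_with_timeout(llm: Any, prompt: Any, timeout: int = 120) -> Any:
    """Run the LLM call in a worker thread and raise TimeoutError after `timeout` seconds."""
    pool = ThreadPoolExecutor(max_workers=1)
    future = pool.submit(llm.invoke, prompt)  # assumes a LangChain-style .invoke()
    try:
        return future.result(timeout=timeout)
    except FuturesTimeout as exc:
        raise TimeoutError(f"LLM call exceeded {timeout} seconds") from exc
    finally:
        pool.shutdown(wait=False)  # do not block on the still-running call
```

The `except TimeoutError` handler visible in the `generate_sql` hunk below is consistent with a wrapper that raises `TimeoutError` on expiry.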
@@ -626,7 +626,7 @@ def _generate_sql_with_llm(
  result["p_hash"] = encrypt_prompt(prompt)

  if should_validate_sql:
-     result["is_valid"], result["error"] = validate_sql(result["sql"], conn_params)
+     result["is_valid"], result["error"], result["sql"] = validate_sql(result["sql"], conn_params)

  return result

@@ -763,42 +763,67 @@ def generate_sql(
  if reasoning_status == "correct":
      break

- # Step 2: Regenerate SQL with feedback
- evaluation_note = note + f"\n\nThe previously generated SQL: `{sql_query}` was assessed as '{evaluation.get('assessment')}' because: {evaluation.get('reasoning', '*could not determine cause*')}. Please provide a corrected SQL query that better answers the question: '{question}'."
+ # Step 2: Regenerate SQL with feedback (with validation retries)
+ evaluation_note = note + f"\n\nThe previously generated SQL: `{sql_query}` was assessed as '{evaluation.get('assessment')}' because: {evaluation.get('reasoning', '*could not determine cause*')}. Please provide a corrected SQL query that better answers the question: '{question}'.\n\nCRITICAL: Return ONLY the SQL query without any explanation or comments."

  # Increase graph depth for 2nd+ reasoning attempts, up to max of 3
  context_graph_depth = min(3, int(graph_depth) + step) if graph_depth < 3 and step > 0 else graph_depth
- regen_result = _generate_sql_with_llm(
-     question=question,
-     llm=llm,
-     conn_params=conn_params,
-     generate_sql_prompt=generate_sql_prompt,
-     current_context=_build_sql_generation_context(
-         conn_params=conn_params,
-         schema=schema,
-         concept=concept,
-         concept_metadata=concept_metadata,
-         graph_depth=context_graph_depth,
-         include_tags=include_tags,
-         exclude_properties=exclude_properties,
-         db_is_case_sensitive=db_is_case_sensitive,
-         max_limit=max_limit),
-     note=evaluation_note,
-     should_validate_sql=should_validate_sql,
-     timeout=timeout,
-     debug=debug,
- )

- usage_metadata[f'generate_sql_reasoning_step_{step + 1}'] = {
-     "approximate": regen_result['apx_token_count'],
-     **regen_result['usage_metadata'],
- }
- if debug and 'p_hash' in regen_result:
-     usage_metadata[f'generate_sql_reasoning_step_{step + 1}']['p_hash'] = regen_result['p_hash']
+ # Regenerate SQL with validation retries
+ # Always validate during reasoning to ensure quality, regardless of global should_validate_sql flag
+ validation_iteration = 0
+ regen_is_valid = False
+ regen_error = ''
+ regen_sql = None
+
+ while validation_iteration < retries and (regen_sql is None or not regen_is_valid):
+     validation_iteration += 1
+     validation_err_txt = f"\nThe regenerated SQL (`{regen_sql}`) was invalid with error: {regen_error}. Please generate a corrected query." if regen_error and "snowflake" not in llm._llm_type else ""
+
+     regen_result = _generate_sql_with_llm(
+         question=question,
+         llm=llm,
+         conn_params=conn_params,
+         generate_sql_prompt=generate_sql_prompt,
+         current_context=_build_sql_generation_context(
+             conn_params=conn_params,
+             schema=schema,
+             concept=concept,
+             concept_metadata=concept_metadata,
+             graph_depth=context_graph_depth,
+             include_tags=include_tags,
+             exclude_properties=exclude_properties,
+             db_is_case_sensitive=db_is_case_sensitive,
+             max_limit=max_limit),
+         note=evaluation_note + validation_err_txt,
+         should_validate_sql=True,  # Always validate during reasoning
+         timeout=timeout,
+         debug=debug,
+     )
+
+     regen_sql = regen_result['sql']
+     regen_is_valid = regen_result['is_valid']
+     regen_error = regen_result['error']
+
+     # Track token usage for each validation iteration
+     if validation_iteration == 1:
+         usage_metadata[f'generate_sql_reasoning_step_{step + 1}'] = {
+             "approximate": regen_result['apx_token_count'],
+             **regen_result['usage_metadata'],
+         }
+         if debug and 'p_hash' in regen_result:
+             usage_metadata[f'generate_sql_reasoning_step_{step + 1}']['p_hash'] = regen_result['p_hash']
+     else:
+         usage_metadata[f'generate_sql_reasoning_step_{step + 1}_validation_{validation_iteration}'] = {
+             "approximate": regen_result['apx_token_count'],
+             **regen_result['usage_metadata'],
+         }
+         if debug and 'p_hash' in regen_result:
+             usage_metadata[f'generate_sql_reasoning_step_{step + 1}_validation_{validation_iteration}']['p_hash'] = regen_result['p_hash']

- sql_query = regen_result['sql']
- is_sql_valid = regen_result['is_valid']
- error = regen_result['error']
+ sql_query = regen_sql
+ is_sql_valid = regen_is_valid
+ error = regen_error

  except TimeoutError as e:
      raise Exception(f"LLM call timed out: {str(e)}")
src/langchain_timbr/utils/timbr_utils.py
@@ -154,24 +154,37 @@ def get_datasources(conn_params: dict, filter_active: Optional[bool] = False) ->
  return res


- def validate_sql(sql: str, conn_params: dict) -> tuple[bool, str]:
+ def _validate(sql: str, conn_params: dict) -> bool:
+     explain_sql = f"EXPLAIN {sql}"
+     explain_res = run_query(explain_sql, conn_params)
+
+     query_sql = f"SELECT * FROM ({sql.replace(';', '')}) explainable_query WHERE 1=0"
+     query_res = run_query(query_sql, conn_params)
+
+     return to_boolean(explain_res and explain_res[0].get('PLAN') and query_res is not None)
+
+
+ def validate_sql(sql: str, conn_params: dict) -> tuple[bool, str, str]:
      if not sql:
          raise Exception("Please provide SQL to validate.")

-     explain_res = None
-     query_res = None
+     is_valid = False
      error = None

      try:
-         explain_sql = f"EXPLAIN {sql}"
-         explain_res = run_query(explain_sql, conn_params)
-
-         query_sql = f"SELECT * FROM ({sql.replace(';', '')}) explainable_query WHERE 1=0"
-         query_res = run_query(query_sql, conn_params)
+         is_valid = _validate(sql, conn_params)
      except Exception as e:
          error = str(getattr(e, 'doc', e))
-
-     return to_boolean(explain_res and explain_res[0].get('PLAN') and query_res is not None), error
+         if not sql.upper().startswith("SELECT"):
+             sql = sql[sql.upper().index("SELECT"):]
+             try:
+                 is_valid = _validate(sql, conn_params)
+                 if is_valid:
+                     error = None
+             except Exception:
+                 pass
+
+     return is_valid, error, sql


  def _should_ignore_tag(tag_name: str) -> bool:
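`validate_sql` now returns a three-tuple `(is_valid, error, sql)`: when validation fails and the statement has text before the first `SELECT` (for example, LLM prose around the query), it retries with the trimmed statement and hands back the possibly-rewritten SQL, which callers such as `ExecuteTimbrQueryChain` reassign. A caller-side sketch; the import path, connection parameters, and SQL text are illustrative only:

```python
from langchain_timbr.utils.timbr_utils import validate_sql, run_query  # assumed module path

conn_params = {"url": "https://my-timbr-host", "token": "tk_xxx", "ontology": "my_ontology"}  # placeholders

raw_sql = "Here is your query:\nSELECT customer_name FROM customer LIMIT 10"
is_valid, error, cleaned_sql = validate_sql(raw_sql, conn_params)

if is_valid:
    # cleaned_sql may differ from raw_sql: the leading non-SQL text is stripped on retry.
    rows = run_query(cleaned_sql, conn_params)
else:
    print(f"Validation failed: {error}")
```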
tests/integration/test_langgraph_nodes.py
@@ -29,7 +29,7 @@ class TestLangGraphNodes:
  # Create a test state payload.
  state = {
      "prompt": config["test_prompt"],
-     "messages": [{ "content": config["test_prompt"] }],
+     # "messages": [{ "content": config["test_prompt"] }],
  }
  result = node(state)
  print("IdentifyConceptNode result:", result)
@@ -111,3 +111,24 @@ class TestLangGraphNodes:
  print("GenerateResponseNode result:", result)
  assert "answer" in result, "Result should contain 'answer'"
  assert result["answer"], "Answer should not be empty"
+
+ def test_execute_node_with_state_graph(self, llm, config):
+     """Test basic ExecuteSemanticQueryNode functionality."""
+     from langgraph.graph import StateGraph
+     state = StateGraph(dict)
+     state.messages = [{"content": config["test_prompt"]}]
+
+     execute_query_node = ExecuteSemanticQueryNode(
+         llm=llm,
+         url=config["timbr_url"],
+         token=config["timbr_token"],
+         ontology=config["timbr_ontology"],
+         verify_ssl=config["verify_ssl"],
+     )
+
+     output = execute_query_node(state)
+
+     print("ExecuteSemanticQueryNode result:", output)
+     assert "rows" in output, "Result should contain 'rows'"
+     assert isinstance(output["rows"], list), "'rows' should be a list"
+     assert output["sql"], "SQL should be present in the result"
tests/integration/test_timeout_functionality.py
@@ -13,14 +13,14 @@ from unittest.mock import Mock
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'langchain_timbr'))


- def _call_llm_with_timeout(llm, prompt, timeout: int = 60):
+ def _call_llm_with_timeout(llm, prompt, timeout: int = 120):
      """
      Call LLM with timeout to prevent hanging.

      Args:
          llm: The LLM instance (mock for testing)
          prompt: The prompt to send
-         timeout: Timeout in seconds (default: 60)
+         timeout: Timeout in seconds (default: 120)

      Returns:
          LLM response