pointblank 0.14.0__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pointblank/_utils.py CHANGED
@@ -670,18 +670,23 @@ def _get_api_text() -> str:
         "Validate.col_vals_outside",
         "Validate.col_vals_in_set",
         "Validate.col_vals_not_in_set",
+        "Validate.col_vals_increasing",
+        "Validate.col_vals_decreasing",
         "Validate.col_vals_null",
         "Validate.col_vals_not_null",
         "Validate.col_vals_regex",
+        "Validate.col_vals_within_spec",
         "Validate.col_vals_expr",
-        "Validate.col_exists",
         "Validate.rows_distinct",
         "Validate.rows_complete",
+        "Validate.col_exists",
         "Validate.col_schema_match",
         "Validate.row_count_match",
         "Validate.col_count_match",
+        "Validate.tbl_match",
         "Validate.conjointly",
         "Validate.specially",
+        "Validate.prompt",
     ]
 
     column_selection_exported = [
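
The new export entries above correspond to validation methods added to `Validate` in 0.15.0 (`col_vals_increasing`, `col_vals_decreasing`, `col_vals_within_spec`, `tbl_match`, and `prompt`), plus a re-ordering of `col_exists`. A minimal sketch of how the new column-value steps might be chained, assuming they accept a `columns=` argument like the existing `col_vals_*` methods (the parameters shown are assumptions, not taken from this diff):

    import pointblank as pb
    import polars as pl

    tbl = pl.DataFrame({"a": [1, 2, 3, 4], "b": [9, 7, 5, 2]})

    # Hypothetical usage; `columns=` is assumed to mirror the existing
    # col_vals_* methods rather than confirmed by this diff.
    validation = (
        pb.Validate(data=tbl)
        .col_vals_increasing(columns="a")  # new in 0.15.0
        .col_vals_decreasing(columns="b")  # new in 0.15.0
        .col_vals_not_null(columns=["a", "b"])
        .interrogate()
    )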
@@ -702,6 +707,7 @@ def _get_api_text() -> str:
 
     interrogation_exported = [
         "Validate.interrogate",
+        "Validate.set_tbl",
         "Validate.get_tabular_report",
         "Validate.get_step_report",
         "Validate.get_json_report",
@@ -735,6 +741,7 @@ def _get_api_text() -> str:
     yaml_exported = [
         "yaml_interrogate",
         "validate_yaml",
+        "yaml_to_python",
     ]
 
     utility_exported = [
@@ -742,6 +749,8 @@ def _get_api_text() -> str:
         "get_row_count",
         "get_action_metadata",
         "get_validation_summary",
+        "write_file",
+        "read_file",
         "config",
     ]
 
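
`write_file()` and `read_file()` are new utility exports. The names suggest persisting a validation object to disk and loading it back; a rough sketch under that assumption (only the function names come from this diff, the arguments are illustrative guesses):

    import pointblank as pb

    validation = (
        pb.Validate(data=pb.load_dataset("small_table"))
        .col_vals_not_null(columns="a")
        .interrogate()
    )

    # Hypothetical round trip; argument names are not confirmed by this diff.
    pb.write_file(validation, filename="validation_result")
    restored = pb.read_file("validation_result")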
@@ -786,9 +795,10 @@ datasets included in the package can be accessed via the `load_dataset()` functi
     the `assistant()` function to get help with Pointblank."""
 
     yaml_desc = """The *YAML* group contains functions that allow for the use of YAML to orchestrate
-    validation workflows. The `yaml_interrogate()` function can be used to run a validation workflow from
-    YAML strings or files. The `validate_yaml()` function checks if the YAML configuration
-    passes its own validity checks."""
+    validation workflows. The `yaml_interrogate()` function can be used to run a validation workflow
+    from YAML strings or files. The `validate_yaml()` function checks if the YAML configuration passes
+    its own validity checks. The `yaml_to_python()` function converts YAML configuration to equivalent
+    Python code."""
 
     utility_desc = """The Utility Functions group contains functions that are useful for accessing
     metadata about the target data. Use `get_column_count()` or `get_row_count()` to get the number of
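
The revised `yaml_desc` documents the new `yaml_to_python()` function alongside `yaml_interrogate()` and `validate_yaml()`. A short illustration of the workflow the docstring describes, assuming all three accept a YAML string (the function names are confirmed by this diff; the YAML keys shown are a guess at the schema, which this diff does not show):

    import pointblank as pb

    # Illustrative configuration; key names are assumed, not documented here.
    yaml_config = (
        "tbl: small_table\n"
        "steps:\n"
        "  - col_vals_not_null:\n"
        "      columns: a\n"
    )

    pb.validate_yaml(yaml_config)              # check the configuration itself
    result = pb.yaml_interrogate(yaml_config)  # run the validation workflow
    code = pb.yaml_to_python(yaml_config)      # new in 0.15.0: emit equivalent Python code
    print(code)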
pointblank/_utils_ai.py CHANGED
@@ -28,17 +28,22 @@ class _LLMConfig:
     provider
         LLM provider name (e.g., 'anthropic', 'openai', 'ollama', 'bedrock').
     model
-        Model name (e.g., 'claude-3-sonnet-20240229', 'gpt-4').
+        Model name (e.g., 'claude-sonnet-4-5', 'gpt-4').
     api_key
         API key for the provider. If None, will be read from environment.
+    verify_ssl
+        Whether to verify SSL certificates when making requests. Defaults to True.
     """
 
     provider: str
     model: str
     api_key: Optional[str] = None
+    verify_ssl: bool = True
 
 
-def _create_chat_instance(provider: str, model_name: str, api_key: Optional[str] = None):
+def _create_chat_instance(
+    provider: str, model_name: str, api_key: Optional[str] = None, verify_ssl: bool = True
+):
     """
     Create a chatlas chat instance for the specified provider.
 
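
The `verify_ssl` field flows from `_LLMConfig` into `_create_chat_instance()`, whose new signature appears above. A minimal sketch of that internal wiring, assuming `_LLMConfig` is a dataclass-style container that accepts these fields as keyword arguments (both names are private, internal APIs):

    from pointblank._utils_ai import _LLMConfig, _create_chat_instance

    # Disabling verification is only sensible behind a proxy that re-signs
    # traffic with certificates the default trust store does not know.
    config = _LLMConfig(
        provider="openai",
        model="gpt-4",
        api_key=None,       # falls back to the environment
        verify_ssl=False,   # new in 0.15.0
    )

    chat = _create_chat_instance(
        provider=config.provider,
        model_name=config.model,
        api_key=config.api_key,
        verify_ssl=config.verify_ssl,
    )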
@@ -50,6 +55,8 @@ def _create_chat_instance(provider: str, model_name: str, api_key: Optional[str]
         The model name for the provider.
     api_key
         Optional API key. If None, will be read from environment.
+    verify_ssl
+        Whether to verify SSL certificates when making requests. Defaults to True.
 
     Returns
     -------
@@ -89,6 +96,17 @@ EXAMPLE OUTPUT FORMAT:
   {"index": 2, "result": true}
 ]"""
 
+    # Create httpx client with SSL verification settings
+    try:
+        import httpx  # noqa
+    except ImportError:  # pragma: no cover
+        raise ImportError(  # pragma: no cover
+            "The `httpx` package is required for SSL configuration. "
+            "Please install it using `pip install httpx`."
+        )
+
+    http_client = httpx.AsyncClient(verify=verify_ssl)
+
     # Create provider-specific chat instance
     if provider == "anthropic":  # pragma: no cover
         # Check that the anthropic package is installed
@@ -106,6 +124,7 @@ EXAMPLE OUTPUT FORMAT:
             model=model_name,
             api_key=api_key,
             system_prompt=system_prompt,
+            kwargs={"http_client": http_client},
         )
 
     elif provider == "openai":  # pragma: no cover
@@ -124,6 +143,7 @@ EXAMPLE OUTPUT FORMAT:
             model=model_name,
             api_key=api_key,
             system_prompt=system_prompt,
+            kwargs={"http_client": http_client},
         )
 
     elif provider == "ollama":  # pragma: no cover
@@ -141,6 +161,7 @@ EXAMPLE OUTPUT FORMAT:
         chat = ChatOllama(
             model=model_name,
             system_prompt=system_prompt,
+            kwargs={"http_client": http_client},
         )
 
     elif provider == "bedrock":  # pragma: no cover
@@ -149,6 +170,7 @@ EXAMPLE OUTPUT FORMAT:
         chat = ChatBedrockAnthropic(
             model=model_name,
             system_prompt=system_prompt,
+            kwargs={"http_client": http_client},
         )
 
     else:
@@ -722,7 +744,10 @@ class _AIValidationEngine:
         """
         self.llm_config = llm_config
         self.chat = _create_chat_instance(
-            provider=llm_config.provider, model_name=llm_config.model, api_key=llm_config.api_key
+            provider=llm_config.provider,
+            model_name=llm_config.model,
+            api_key=llm_config.api_key,
+            verify_ssl=llm_config.verify_ssl,
         )
 
     def validate_batches(
pointblank/assistant.py CHANGED
@@ -55,7 +55,7 @@ def assistant(
     ----------
     model
         The model to be used. This should be in the form of `provider:model` (e.g.,
-        `"anthropic:claude-3-5-sonnet-latest"`). Supported providers are `"anthropic"`, `"openai"`,
+        `"anthropic:claude-sonnet-4-5"`). Supported providers are `"anthropic"`, `"openai"`,
         `"ollama"`, and `"bedrock"`.
     data
         An optional data table to focus on during discussion with the PbA, which could be a
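
A brief usage sketch of the `provider:model` form documented above (the dataset helper is the package's own `load_dataset()`; an `ANTHROPIC_API_KEY` in the environment is assumed):

    import pointblank as pb

    # Start the Pointblank assistant against a built-in dataset using the
    # documented `provider:model` string.
    pb.assistant(
        model="anthropic:claude-sonnet-4-5",
        data=pb.load_dataset("small_table"),
    )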