palimpzest 1.3.0__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -108,7 +108,7 @@ class Generator(Generic[ContextType, InputType]):
         self,
         model: Model,
         prompt_strategy: PromptStrategy,
-        reasoning_effort: str | None = None,
+        reasoning_effort: str | None,
         api_base: str | None = None,
         cardinality: Cardinality = Cardinality.ONE_TO_ONE,
         desc: str | None = None,
@@ -325,18 +325,7 @@ class Generator(Generic[ContextType, InputType]):
         if is_audio_op:
             completion_kwargs = {"modalities": ["text"], **completion_kwargs}
         if self.model.is_reasoning_model():
-            if self.model.is_vertex_model():
-                reasoning_effort = self.reasoning_effort
-                if self.reasoning_effort is None and self.model == Model.GEMINI_2_5_PRO:
-                    reasoning_effort = "low"
-                elif self.reasoning_effort is None:
-                    reasoning_effort = "disable"
-                completion_kwargs = {"reasoning_effort": reasoning_effort, **completion_kwargs}
-            elif self.model.is_anthropic_model() and self.reasoning_effort is not None:
-                completion_kwargs = {"reasoning_effort": self.reasoning_effort, **completion_kwargs}
-            elif self.model.is_openai_model():
-                reasoning_effort = "minimal" if self.reasoning_effort is None else self.reasoning_effort
-                completion_kwargs = {"reasoning_effort": reasoning_effort, **completion_kwargs}
+            completion_kwargs = {"reasoning_effort": self.reasoning_effort, **completion_kwargs}
         if self.model.is_vllm_model():
             completion_kwargs = {"api_base": self.api_base, "api_key": os.environ.get("VLLM_API_KEY", "fake-api-key"), **completion_kwargs}
         completion = litellm.completion(model=self.model_name, messages=messages, **completion_kwargs)
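
Note: the provider-specific effort handling removed above is not gone; it moves into the new resolve_reasoning_settings helper (see the palimpzest/utils/model_helpers.py hunk below), so the generator now only merges whatever effort was resolved upstream. A minimal illustrative sketch of that merge, with hypothetical values not taken from the package:

    completion_kwargs = {"temperature": 0.0}  # hypothetical pre-existing kwargs
    reasoning_effort = "low"                  # value resolved upstream and stored on the generator
    completion_kwargs = {"reasoning_effort": reasoning_effort, **completion_kwargs}
    # completion_kwargs == {"reasoning_effort": "low", "temperature": 0.0}
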
@@ -75,7 +75,7 @@ class Optimizer:
         cost_model: BaseCostModel,
         available_models: list[Model],
         join_parallelism: int = 64,
-        reasoning_effort: str | None = None,
+        reasoning_effort: str | None = "default",
         api_base: str | None = None,
         verbose: bool = False,
         allow_bonded_query: bool = True,
@@ -54,6 +54,7 @@ from palimpzest.query.operators.search import (
 from palimpzest.query.operators.split import SplitConvert, SplitFilter
 from palimpzest.query.operators.topk import TopKOp
 from palimpzest.query.optimizer.primitives import Expression, Group, LogicalExpression, PhysicalExpression
+from palimpzest.utils.model_helpers import resolve_reasoning_settings

 logger = logging.getLogger(__name__)

@@ -628,15 +629,17 @@ class LLMConvertBondedRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        no_reasoning = runtime_kwargs["reasoning_effort"] in [None, "minimal", "low"]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "prompt_strategy": PromptStrategy.MAP_NO_REASONING if model.is_reasoning_model() and no_reasoning else PromptStrategy.MAP,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
-            }
-            for model in models
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.MAP if use_reasoning_prompt else PromptStrategy.MAP_NO_REASONING
+            variable_op_kwargs.append(
+                {
+                    "model": model,
+                    "prompt_strategy": prompt_strategy,
+                    "reasoning_effort": reasoning_effort,
+                }
+            )

         return cls._perform_substitution(logical_expression, LLMConvertBonded, runtime_kwargs, variable_op_kwargs)

@@ -665,18 +668,27 @@ class RAGRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "prompt_strategy": PromptStrategy.MAP if phys_op_cls is RAGConvert else PromptStrategy.FILTER,
-                "num_chunks_per_field": num_chunks_per_field,
-                "chunk_size": chunk_size,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
-            }
-            for model in models
-            for num_chunks_per_field in cls.num_chunks_per_fields
-            for chunk_size in cls.chunk_sizes
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = (
+                PromptStrategy.MAP if use_reasoning_prompt else PromptStrategy.MAP_NO_REASONING
+                if phys_op_cls is RAGConvert
+                else PromptStrategy.FILTER if use_reasoning_prompt else PromptStrategy.FILTER_NO_REASONING
+            )
+            variable_op_kwargs.extend(
+                [
+                    {
+                        "model": model,
+                        "prompt_strategy": prompt_strategy,
+                        "num_chunks_per_field": num_chunks_per_field,
+                        "chunk_size": chunk_size,
+                        "reasoning_effort": reasoning_effort,
+                    }
+                    for num_chunks_per_field in cls.num_chunks_per_fields
+                    for chunk_size in cls.chunk_sizes
+                ]
+            )

         return cls._perform_substitution(logical_expression, phys_op_cls, runtime_kwargs, variable_op_kwargs)

@@ -704,6 +716,7 @@ class MixtureOfAgentsRule(ImplementationRule):
         phys_op_cls = MixtureOfAgentsConvert if isinstance(logical_expression.operator, ConvertScan) else MixtureOfAgentsFilter

         # create variable physical operator kwargs for each model which can implement this logical_expression
+        _, reasoning_effort = resolve_reasoning_settings(None, runtime_kwargs["reasoning_effort"])
         proposer_model_set = {model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)}
         aggregator_model_set = {model for model in runtime_kwargs["available_models"] if model.is_text_model()}
         variable_op_kwargs = [
@@ -711,7 +724,7 @@ class MixtureOfAgentsRule(ImplementationRule):
                 "proposer_models": list(proposer_models),
                 "temperatures": [temp] * len(proposer_models),
                 "aggregator_model": aggregator_model,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
+                "reasoning_effort": reasoning_effort,
             }
             for k in cls.num_proposer_models
             for temp in cls.temperatures
@@ -743,18 +756,27 @@ class CritiqueAndRefineRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "critic_model": critic_model,
-                "refine_model": refine_model,
-                "prompt_strategy": PromptStrategy.MAP if phys_op_cls is CritiqueAndRefineConvert else PromptStrategy.FILTER,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
-            }
-            for model in models
-            for critic_model in models
-            for refine_model in models
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = (
+                PromptStrategy.MAP if use_reasoning_prompt else PromptStrategy.MAP_NO_REASONING
+                if phys_op_cls is CritiqueAndRefineConvert
+                else PromptStrategy.FILTER if use_reasoning_prompt else PromptStrategy.FILTER_NO_REASONING
+            )
+            variable_op_kwargs.extend(
+                [
+                    {
+                        "model": model,
+                        "critic_model": critic_model,
+                        "refine_model": refine_model,
+                        "prompt_strategy": prompt_strategy,
+                        "reasoning_effort": reasoning_effort,
+                    }
+                    for critic_model in models
+                    for refine_model in models
+                ]
+            )

         return cls._perform_substitution(logical_expression, phys_op_cls, runtime_kwargs, variable_op_kwargs)

@@ -782,12 +804,13 @@ class SplitRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
+        _, reasoning_effort = resolve_reasoning_settings(None, runtime_kwargs["reasoning_effort"])
         variable_op_kwargs = [
             {
                 "model": model,
                 "min_size_to_chunk": min_size_to_chunk,
                 "num_chunks": num_chunks,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
+                "reasoning_effort": reasoning_effort,
             }
             for model in models
             for min_size_to_chunk in cls.min_size_to_chunk
@@ -855,15 +878,17 @@ class LLMFilterRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        no_reasoning = runtime_kwargs["reasoning_effort"] in [None, "minimal", "low"]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "prompt_strategy": PromptStrategy.FILTER_NO_REASONING if model.is_reasoning_model() and no_reasoning else PromptStrategy.FILTER,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"]
-            }
-            for model in models
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.FILTER if use_reasoning_prompt else PromptStrategy.FILTER_NO_REASONING
+            variable_op_kwargs.append(
+                {
+                    "model": model,
+                    "prompt_strategy": prompt_strategy,
+                    "reasoning_effort": reasoning_effort,
+                }
+            )

         return cls._perform_substitution(logical_expression, LLMFilter, runtime_kwargs, variable_op_kwargs)

@@ -902,17 +927,19 @@ class NestedLoopsJoinRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        no_reasoning = runtime_kwargs["reasoning_effort"] in [None, "minimal", "low"]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "prompt_strategy": PromptStrategy.JOIN_NO_REASONING if model.is_reasoning_model() and no_reasoning else PromptStrategy.JOIN,
-                "join_parallelism": runtime_kwargs["join_parallelism"],
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
-                "retain_inputs": not runtime_kwargs["is_validation"],
-            }
-            for model in models
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.JOIN if use_reasoning_prompt else PromptStrategy.JOIN_NO_REASONING
+            variable_op_kwargs.append(
+                {
+                    "model": model,
+                    "prompt_strategy": prompt_strategy,
+                    "join_parallelism": runtime_kwargs["join_parallelism"],
+                    "reasoning_effort": reasoning_effort,
+                    "retain_inputs": not runtime_kwargs["is_validation"],
+                }
+            )

         return cls._perform_substitution(logical_expression, NestedLoopsJoin, runtime_kwargs, variable_op_kwargs)

@@ -934,18 +961,20 @@ class EmbeddingJoinRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        no_reasoning = runtime_kwargs["reasoning_effort"] in [None, "minimal", "low"]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "prompt_strategy": PromptStrategy.JOIN_NO_REASONING if model.is_reasoning_model() and no_reasoning else PromptStrategy.JOIN,
-                "join_parallelism": runtime_kwargs["join_parallelism"],
-                "reasoning_effort": runtime_kwargs["reasoning_effort"],
-                "retain_inputs": not runtime_kwargs["is_validation"],
-                "num_samples": 10, # TODO: iterate over different choices of num_samples
-            }
-            for model in models
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.JOIN if use_reasoning_prompt else PromptStrategy.JOIN_NO_REASONING
+            variable_op_kwargs.append(
+                {
+                    "model": model,
+                    "prompt_strategy": prompt_strategy,
+                    "join_parallelism": runtime_kwargs["join_parallelism"],
+                    "reasoning_effort": reasoning_effort,
+                    "retain_inputs": not runtime_kwargs["is_validation"],
+                    "num_samples": 10, # TODO: iterate over different choices of num_samples
+                }
+            )

         return cls._perform_substitution(logical_expression, EmbeddingJoin, runtime_kwargs, variable_op_kwargs)

@@ -966,15 +995,17 @@ class SemanticAggregateRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression) and not model.is_llama_model()]
-        no_reasoning = runtime_kwargs["reasoning_effort"] in [None, "minimal", "low"]
-        variable_op_kwargs = [
-            {
-                "model": model,
-                "prompt_strategy": PromptStrategy.AGG_NO_REASONING if model.is_reasoning_model() and no_reasoning else PromptStrategy.AGG,
-                "reasoning_effort": runtime_kwargs["reasoning_effort"]
-            }
-            for model in models
-        ]
+        variable_op_kwargs = []
+        for model in models:
+            use_reasoning_prompt, reasoning_effort = resolve_reasoning_settings(model, runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.AGG if use_reasoning_prompt else PromptStrategy.AGG_NO_REASONING
+            variable_op_kwargs.append(
+                {
+                    "model": model,
+                    "prompt_strategy": prompt_strategy,
+                    "reasoning_effort": reasoning_effort,
+                }
+            )

         return cls._perform_substitution(logical_expression, SemanticAggregate, runtime_kwargs, variable_op_kwargs)

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from pydantic import BaseModel, ConfigDict, Field

 from palimpzest.constants import Model
@@ -18,7 +20,7 @@ class QueryProcessorConfig(BaseModel):
     policy: Policy = Field(default_factory=MaxQuality)
     enforce_types: bool = Field(default=False)
     scan_start_idx: int = Field(default=0)
-    num_samples: int = Field(default=None)
+    num_samples: int | None = Field(default=None)
     verbose: bool = Field(default=False)
     progress: bool = Field(default=True)
     available_models: list[Model] | None = Field(default=None)
@@ -26,7 +28,7 @@ class QueryProcessorConfig(BaseModel):
     max_workers: int | None = Field(default=64)
     join_parallelism: int = Field(default=64)
     batch_size: int | None = Field(default=None)
-    reasoning_effort: str | None = Field(default=None) # Gemini: "disable", "low", "medium", "high"
+    reasoning_effort: str | None = Field(default="default") # Gemini: "disable", "low", "medium", "high"
     use_vertex: bool = Field(default=False) # Whether to use Vertex models for Gemini or Google models
     gemini_credentials_path: str | None = Field(default=None) # Path to Gemini credentials file
     api_base: str | None = Field(default=None) # API base URL for vLLM
@@ -53,3 +55,7 @@
     def to_dict(self) -> dict:
         """Convert the config to a dict representation."""
         return self.model_dump()
+
+    def copy(self) -> QueryProcessorConfig:
+        """Create a copy of the config."""
+        return QueryProcessorConfig(**self.to_dict())
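
With these two hunks, reasoning_effort now defaults to the sentinel string "default" instead of None, and the config can be copied before the factory mutates it. A minimal usage sketch, assuming only the fields and methods shown above (the surrounding pipeline is not shown):

    from palimpzest.query.processor.config import QueryProcessorConfig

    config = QueryProcessorConfig(reasoning_effort="high")  # override the "default" sentinel
    working_config = config.copy()                          # independent copy, as the factory now makes
    assert working_config.reasoning_effort == "high"
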
@@ -170,6 +170,9 @@ class QueryProcessorFactory:
         if config is None:
             config = QueryProcessorConfig()

+        # make a copy of the config to avoid modifying the original
+        config = config.copy()
+
         # apply any additional keyword arguments to the config and validate its contents
         config, validator = cls._config_validation_and_normalization(config, train_dataset, validator)

@@ -58,3 +58,31 @@ def get_models(include_embedding: bool = False, use_vertex: bool = False, gemini
         models.extend(vllm_models)

     return models
+
+
+def resolve_reasoning_settings(model: Model | None, reasoning_effort: str | None) -> tuple[bool, str]:
+    """
+    Resolve the reasoning settings based on the model and provided reasoning effort.
+    Returns a tuple indicating whether reasoning prompt should be used and the reasoning effort level.
+    By default, we use the reasoning prompt everywhere while setting the model reasoning effort to None (or minimal).
+    If a user explicitly provides a reasoning_effort, we pass that through to the model.
+    If the user explicitly disables reasoning_effort, we disable the reasoning prompt as well.
+    """
+    # turn off reasoning prompt if reasoning_effort is in [None, "disable", "minimal", "low"]
+    use_reasoning_prompt = reasoning_effort not in [None, "disable", "minimal", "low"]
+
+    # if reasoning_effort is set to "default", set it to None to use model defaults
+    if reasoning_effort == "default":
+        reasoning_effort = None
+
+    # translate reasoning_effort into model-specific settings
+    if model is not None and model.is_reasoning_model():
+        if model.is_vertex_model() or model.is_google_ai_studio_model():
+            if reasoning_effort is None and model in [Model.GEMINI_2_5_PRO, Model.GOOGLE_GEMINI_2_5_PRO]:
+                reasoning_effort = "low"
+            elif reasoning_effort is None:
+                reasoning_effort = "disable"
+        elif model.is_openai_model():
+            reasoning_effort = "minimal" if reasoning_effort in [None, "disable", "minimal", "low"] else reasoning_effort
+
+    return use_reasoning_prompt, reasoning_effort
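
To illustrate the helper's contract as consumed by the optimizer rules above, here is a hedged sketch of its return values; it assumes Model.GEMINI_2_5_PRO reports itself as a Vertex reasoning model, which this diff implies but does not show:

    from palimpzest.constants import Model
    from palimpzest.utils.model_helpers import resolve_reasoning_settings

    # the new "default" sentinel keeps the reasoning prompt and picks a per-model default effort
    use_prompt, effort = resolve_reasoning_settings(Model.GEMINI_2_5_PRO, "default")
    # -> (True, "low") if GEMINI_2_5_PRO is a Vertex reasoning model

    # rules that pass model=None (MixtureOfAgentsRule, SplitRule) only normalize the effort value
    use_prompt, effort = resolve_reasoning_settings(None, "disable")
    # -> (False, "disable")
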
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: palimpzest
-Version: 1.3.0
+Version: 1.3.1
 Summary: Palimpzest is a system which enables anyone to process AI-powered analytical queries simply by defining them in a declarative language
 Author-email: MIT DSG Semantic Management Lab <michjc@csail.mit.edu>
 Project-URL: homepage, https://palimpzest.org
@@ -42,7 +42,7 @@ palimpzest/query/execution/mab_execution_strategy.py,sha256=BLRTSQXPeWBlJ_-8GAFH
 palimpzest/query/execution/parallel_execution_strategy.py,sha256=Di-8d7waE0bev4kNDXEJJqQ0wwQ87_sPV-t5qFtAlPQ,17589
 palimpzest/query/execution/single_threaded_execution_strategy.py,sha256=1rjMel0-AI6KUi_SMNgPPXxMgG5-t9lenLKoYEClgjk,17464
 palimpzest/query/generators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-palimpzest/query/generators/generators.py,sha256=OV2HBZvCQtvhj6rOwti_8dpQX_bqTZ-uehyUCFG2BBE,21758
+palimpzest/query/generators/generators.py,sha256=PexL2hVTqx3CZmdZSjoZ5Ylw2M6BQARvVq24_J5x_RA,20934
 palimpzest/query/operators/__init__.py,sha256=B9zr_VmUs6YRep4fjbj7e0aTM6T9-GrqbY7tKWxEdkc,4734
 palimpzest/query/operators/aggregate.py,sha256=nQ6Zh1DYeqDoIDwkPQDw8QCwW0y52sGC-No6uPSRc8A,27367
 palimpzest/query/operators/compute.py,sha256=X_pWN45smg8L4dV54nOae7dldQGL1nJVlVyJ3ULWSmI,8432
@@ -63,17 +63,17 @@ palimpzest/query/operators/split.py,sha256=oLzwnYb8TNf3XA9TMKEAIw7EIA12wHneaD42B
 palimpzest/query/operators/topk.py,sha256=MZl83Cu43QmN4skjlfpR8EVFFCgA7sR6PbGgBGWC0tg,13564
 palimpzest/query/optimizer/__init__.py,sha256=v9fSBOL2p3sQew4LrN2DQUPe0WezO328Hr54qBTqrAs,2799
 palimpzest/query/optimizer/cost_model.py,sha256=JaxdLuUZuq52BJ52YdW4ChfWptwXsh7Rk7oaPCn_gWc,12956
-palimpzest/query/optimizer/optimizer.py,sha256=ksLkzQ2sVgJFbkxGF3ncF74EsAHZFos8G19xlHQrtJo,20063
+palimpzest/query/optimizer/optimizer.py,sha256=I1AwsSaMgnhOOImsWAYZEsyXTZFX-kmwDh89gHL4Sg0,20068
 palimpzest/query/optimizer/optimizer_strategy.py,sha256=0foDaBHqQehK_zz6IlDEbNIw-44wxY6LO5H1anJi56Y,10042
 palimpzest/query/optimizer/optimizer_strategy_type.py,sha256=V-MMHvJdnfZKoUX1xxxwh66q1RjN2FL35IsiT1C62c8,1084
 palimpzest/query/optimizer/plan.py,sha256=O33uzcpwhcHVu5MuxcLzrwodcF86ZrcMZSOy4xoOb7A,22792
 palimpzest/query/optimizer/primitives.py,sha256=jMMVq37y1tWiPU1lSSKQP9OP-mzkpSxSmUeDajRYYOQ,5445
-palimpzest/query/optimizer/rules.py,sha256=awhe76trskv5Tq5E2QHpUN_YV6jH8INywa0Ige8IIhY,53341
+palimpzest/query/optimizer/rules.py,sha256=Xu7VjLYExD_sxpD-N7kXsw0-Jo7cwKPjgpuFq7sEhy8,55098
 palimpzest/query/optimizer/tasks.py,sha256=DNJjY2QldfKFWj6INHElMh88dYc36Z5m3wHwbs4jyF4,30455
 palimpzest/query/processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-palimpzest/query/processor/config.py,sha256=rVpXNfzJvYstNZ2PxhQMWBpHCmjLDfiKh4ERKWRojc0,2522
+palimpzest/query/processor/config.py,sha256=Msi9q6GPkRyu6I4C27I4zrIvEs3Lg0neGMz_MZnNtbQ,2712
 palimpzest/query/processor/query_processor.py,sha256=T4ffPbnOX23G8FDITzmM7Iw7DUEDWIHnwl8XLYllgjg,6240
-palimpzest/query/processor/query_processor_factory.py,sha256=qED6pJtJJXVci2a4nScURSpJohrz1LIW6tXY05un2R8,9653
+palimpzest/query/processor/query_processor_factory.py,sha256=-8Q3yCIFY1cbCJL3tZKEajEF01ZxDHdSukjOEtYuqeI,9753
 palimpzest/schemabuilder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 palimpzest/schemabuilder/schema_builder.py,sha256=QraGp66dcD-ej6Y2mER40o86G9JqlBkL7swkJzjUAIY,7968
 palimpzest/tools/README.md,sha256=56_6LPG80uc0CLVhTBP6I1wgIffNv9cyTr0TmVZqmrM,483
@@ -84,13 +84,13 @@ palimpzest/tools/skema_tools.py,sha256=HXUFpjMhbVxZwKKkATeK-FwtlTCawaCbeP-uHntI1
 palimpzest/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 palimpzest/utils/env_helpers.py,sha256=n81KzoJ459pRxo7QmJA7duazwWsfoMGTHc71D2LatFk,334
 palimpzest/utils/hash_helpers.py,sha256=3A8dA7SbXTwnnvZvPVNqqMLlVRhCKyKF_bjNNAu3Exk,334
-palimpzest/utils/model_helpers.py,sha256=SqcY8rWzZ7D3Vgeq8d4OGNLGv4dXXVMWiJDqSCaalRQ,2490
+palimpzest/utils/model_helpers.py,sha256=11dmGztBms9FyHsFdCXIkTFV_1oahKfMzv4-kkH5wCY,4076
 palimpzest/utils/progress.py,sha256=eHXrTPTCRHjMdK0EjYRUzSxcV6N1lK8TS3Ju_ZlQLhY,22002
 palimpzest/utils/udfs.py,sha256=LjHic54B1az-rKgNLur0wOpaz2ko_UodjLEJrazkxvY,1854
 palimpzest/validator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 palimpzest/validator/validator.py,sha256=SvjK09zCpGtK0yM0OasvQlSzyq3loy32DyOOKRmYXC0,15977
-palimpzest-1.3.0.dist-info/licenses/LICENSE,sha256=5GUlHy9lr-Py9kvV38FF1m3yy3NqM18fefuE9wkWumo,1079
-palimpzest-1.3.0.dist-info/METADATA,sha256=1VcCZTtJHg1ujb41-3QquXQLYaWtpCG9AHuvtrvJGvg,5395
-palimpzest-1.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-palimpzest-1.3.0.dist-info/top_level.txt,sha256=raV06dJUgohefUn3ZyJS2uqp_Y76EOLA9Y2e_fxt8Ew,11
-palimpzest-1.3.0.dist-info/RECORD,,
+palimpzest-1.3.1.dist-info/licenses/LICENSE,sha256=5GUlHy9lr-Py9kvV38FF1m3yy3NqM18fefuE9wkWumo,1079
+palimpzest-1.3.1.dist-info/METADATA,sha256=pDfvhYW8xmKZrc-4yJQ4dkvatr0FMdJ3kWxpiclTwvk,5395
+palimpzest-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+palimpzest-1.3.1.dist-info/top_level.txt,sha256=raV06dJUgohefUn3ZyJS2uqp_Y76EOLA9Y2e_fxt8Ew,11
+palimpzest-1.3.1.dist-info/RECORD,,