palimpzest 1.3.2__py3-none-any.whl → 1.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- palimpzest/constants.py +2 -2
- palimpzest/query/generators/generators.py +4 -2
- palimpzest/query/operators/aggregate.py +1 -1
- palimpzest/query/operators/convert.py +1 -1
- palimpzest/query/operators/filter.py +1 -1
- palimpzest/query/operators/join.py +1 -1
- palimpzest/query/optimizer/optimizer.py +1 -1
- palimpzest/query/optimizer/rules.py +42 -38
- palimpzest/query/processor/config.py +1 -1
- palimpzest/utils/model_helpers.py +20 -17
- {palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/METADATA +1 -1
- {palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/RECORD +15 -15
- {palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/WHEEL +0 -0
- {palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/licenses/LICENSE +0 -0
- {palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/top_level.txt +0 -0
palimpzest/constants.py
CHANGED
@@ -25,7 +25,7 @@ class Model(str, Enum):
     GPT_5_MINI = "openai/gpt-5-mini-2025-08-07"
     GPT_5_NANO = "openai/gpt-5-nano-2025-08-07"
     o4_MINI = "openai/o4-mini-2025-04-16"  # noqa: N815
-    CLAUDE_3_5_SONNET = "anthropic/claude-3-5-sonnet-20241022"
+    # CLAUDE_3_5_SONNET = "anthropic/claude-3-5-sonnet-20241022"
     CLAUDE_3_7_SONNET = "anthropic/claude-3-7-sonnet-20250219"
     CLAUDE_3_5_HAIKU = "anthropic/claude-3-5-haiku-20241022"
     GEMINI_2_0_FLASH = "vertex_ai/gemini-2.0-flash"

@@ -621,7 +621,7 @@ MODEL_CARDS = {
     # Model.o1.value: o1_MODEL_CARD,
     Model.TEXT_EMBEDDING_3_SMALL.value: TEXT_EMBEDDING_3_SMALL_MODEL_CARD,
     Model.CLIP_VIT_B_32.value: CLIP_VIT_B_32_MODEL_CARD,
-    Model.CLAUDE_3_5_SONNET.value: CLAUDE_3_5_SONNET_MODEL_CARD,
+    # Model.CLAUDE_3_5_SONNET.value: CLAUDE_3_5_SONNET_MODEL_CARD,
     Model.CLAUDE_3_7_SONNET.value: CLAUDE_3_7_SONNET_MODEL_CARD,
     Model.CLAUDE_3_5_HAIKU.value: CLAUDE_3_5_HAIKU_MODEL_CARD,
     Model.GEMINI_2_0_FLASH.value: GEMINI_2_0_FLASH_MODEL_CARD,
palimpzest/query/generators/generators.py
CHANGED

@@ -26,6 +26,7 @@ from palimpzest.constants import (
 from palimpzest.core.elements.records import DataRecord
 from palimpzest.core.models import GenerationStats
 from palimpzest.prompts import PromptFactory
+from palimpzest.utils.model_helpers import resolve_reasoning_effort

 # DEFINITIONS
 GenerationOutput = tuple[dict, str | None, GenerationStats, list[dict]]

@@ -108,7 +109,7 @@ class Generator(Generic[ContextType, InputType]):
         self,
         model: Model,
         prompt_strategy: PromptStrategy,
-        reasoning_effort: str
+        reasoning_effort: str,
         api_base: str | None = None,
         cardinality: Cardinality = Cardinality.ONE_TO_ONE,
         desc: str | None = None,

@@ -325,7 +326,8 @@ class Generator(Generic[ContextType, InputType]):
         if is_audio_op:
             completion_kwargs = {"modalities": ["text"], **completion_kwargs}
         if self.model.is_reasoning_model():
-
+            reasoning_effort = resolve_reasoning_effort(self.model, self.reasoning_effort)
+            completion_kwargs = {"reasoning_effort": reasoning_effort, **completion_kwargs}
         if self.model.is_vllm_model():
             completion_kwargs = {"api_base": self.api_base, "api_key": os.environ.get("VLLM_API_KEY", "fake-api-key"), **completion_kwargs}
         completion = litellm.completion(model=self.model_name, messages=messages, **completion_kwargs)
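For context, a minimal sketch of how 1.3.4 threads the reasoning-effort setting into the LiteLLM call. Only resolve_reasoning_effort, is_reasoning_model(), and the "reasoning_effort" completion kwarg come from the hunk above; the model choice, messages, and surrounding scaffolding are illustrative assumptions, not the library's actual generator code.

# Illustrative sketch (assumptions: Model.GPT_5_MINI is a reasoning model and
# its enum value is a LiteLLM-compatible model string).
import litellm
from palimpzest.constants import Model
from palimpzest.utils.model_helpers import resolve_reasoning_effort

model = Model.GPT_5_MINI
messages = [{"role": "user", "content": "Summarize this record."}]

completion_kwargs = {}
if model.is_reasoning_model():
    # "default" is mapped to a model-specific setting (e.g. "low" for OpenAI reasoning models)
    effort = resolve_reasoning_effort(model, "default")
    completion_kwargs = {"reasoning_effort": effort, **completion_kwargs}

completion = litellm.completion(model=model.value, messages=messages, **completion_kwargs)
print(completion.choices[0].message.content)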
palimpzest/query/operators/aggregate.py
CHANGED

@@ -531,7 +531,7 @@ class MaxAggregateOp(AggregateOp):

 class SemanticAggregate(AggregateOp):

-    def __init__(self, agg_str: str, model: Model, prompt_strategy: PromptStrategy = PromptStrategy.AGG, reasoning_effort: str
+    def __init__(self, agg_str: str, model: Model, prompt_strategy: PromptStrategy = PromptStrategy.AGG, reasoning_effort: str = "default", *args, **kwargs):
         # call parent constructor
         super().__init__(*args, **kwargs)
         self.agg_str = agg_str
palimpzest/query/optimizer/optimizer.py
CHANGED

@@ -75,7 +75,7 @@ class Optimizer:
         cost_model: BaseCostModel,
         available_models: list[Model],
         join_parallelism: int = 64,
-        reasoning_effort: str
+        reasoning_effort: str = "default",
         api_base: str | None = None,
         verbose: bool = False,
         allow_bonded_query: bool = True,
palimpzest/query/optimizer/rules.py
CHANGED

@@ -54,7 +54,7 @@ from palimpzest.query.operators.search import (
 from palimpzest.query.operators.split import SplitConvert, SplitFilter
 from palimpzest.query.operators.topk import TopKOp
 from palimpzest.query.optimizer.primitives import Expression, Group, LogicalExpression, PhysicalExpression
-from palimpzest.utils.model_helpers import
+from palimpzest.utils.model_helpers import use_reasoning_prompt

 logger = logging.getLogger(__name__)

@@ -631,13 +631,13 @@ class LLMConvertBondedRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
         variable_op_kwargs = []
         for model in models:
-
-            prompt_strategy = PromptStrategy.MAP if
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.MAP if reasoning_prompt_strategy else PromptStrategy.MAP_NO_REASONING
             variable_op_kwargs.append(
                 {
                     "model": model,
                     "prompt_strategy": prompt_strategy,
-                    "reasoning_effort": reasoning_effort,
+                    "reasoning_effort": runtime_kwargs["reasoning_effort"],
                 }
             )

@@ -670,25 +670,28 @@ class RAGRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
         variable_op_kwargs = []
         for model in models:
-
-
-
-
-
-
-
-
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            if phys_op_cls is RAGConvert:
+                reasoning = PromptStrategy.MAP
+                no_reasoning = PromptStrategy.MAP_NO_REASONING
+            elif phys_op_cls is RAGFilter:
+                reasoning = PromptStrategy.FILTER
+                no_reasoning = PromptStrategy.FILTER_NO_REASONING
+
+            prompt_strategy = reasoning if reasoning_prompt_strategy else no_reasoning
+            variable_op_kwargs.extend(
+                [
                     {
                         "model": model,
                         "prompt_strategy": prompt_strategy,
                         "num_chunks_per_field": num_chunks_per_field,
                         "chunk_size": chunk_size,
-                        "reasoning_effort": reasoning_effort,
+                        "reasoning_effort": runtime_kwargs["reasoning_effort"],
                     }
                     for num_chunks_per_field in cls.num_chunks_per_fields
                     for chunk_size in cls.chunk_sizes
-
-
+                ]
+            )

         return cls._perform_substitution(logical_expression, phys_op_cls, runtime_kwargs, variable_op_kwargs)

@@ -716,7 +719,6 @@ class MixtureOfAgentsRule(ImplementationRule):
         phys_op_cls = MixtureOfAgentsConvert if isinstance(logical_expression.operator, ConvertScan) else MixtureOfAgentsFilter

         # create variable physical operator kwargs for each model which can implement this logical_expression
-        _, reasoning_effort = resolve_reasoning_settings(None, runtime_kwargs["reasoning_effort"])
         proposer_model_set = {model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)}
         aggregator_model_set = {model for model in runtime_kwargs["available_models"] if model.is_text_model()}
         variable_op_kwargs = [

@@ -724,7 +726,7 @@ class MixtureOfAgentsRule(ImplementationRule):
                 "proposer_models": list(proposer_models),
                 "temperatures": [temp] * len(proposer_models),
                 "aggregator_model": aggregator_model,
-                "reasoning_effort": reasoning_effort,
+                "reasoning_effort": runtime_kwargs["reasoning_effort"],
             }
             for k in cls.num_proposer_models
             for temp in cls.temperatures

@@ -758,12 +760,15 @@ class CritiqueAndRefineRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
         variable_op_kwargs = []
         for model in models:
-
-
-
-
-
-
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            if phys_op_cls is CritiqueAndRefineConvert:
+                reasoning = PromptStrategy.MAP
+                no_reasoning = PromptStrategy.MAP_NO_REASONING
+            elif phys_op_cls is CritiqueAndRefineFilter:
+                reasoning = PromptStrategy.FILTER
+                no_reasoning = PromptStrategy.FILTER_NO_REASONING
+
+            prompt_strategy = reasoning if reasoning_prompt_strategy else no_reasoning
             variable_op_kwargs.extend(
                 [
                     {

@@ -771,7 +776,7 @@ class CritiqueAndRefineRule(ImplementationRule):
                         "critic_model": critic_model,
                         "refine_model": refine_model,
                         "prompt_strategy": prompt_strategy,
-                        "reasoning_effort": reasoning_effort,
+                        "reasoning_effort": runtime_kwargs["reasoning_effort"],
                     }
                     for critic_model in models
                     for refine_model in models

@@ -804,13 +809,12 @@ class SplitRule(ImplementationRule):

         # create variable physical operator kwargs for each model which can implement this logical_expression
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
-        _, reasoning_effort = resolve_reasoning_settings(None, runtime_kwargs["reasoning_effort"])
         variable_op_kwargs = [
             {
                 "model": model,
                 "min_size_to_chunk": min_size_to_chunk,
                 "num_chunks": num_chunks,
-                "reasoning_effort": reasoning_effort,
+                "reasoning_effort": runtime_kwargs["reasoning_effort"],
             }
             for model in models
             for min_size_to_chunk in cls.min_size_to_chunk

@@ -880,13 +884,13 @@ class LLMFilterRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
         variable_op_kwargs = []
         for model in models:
-
-            prompt_strategy = PromptStrategy.FILTER if
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.FILTER if reasoning_prompt_strategy else PromptStrategy.FILTER_NO_REASONING
             variable_op_kwargs.append(
                 {
                     "model": model,
                     "prompt_strategy": prompt_strategy,
-                    "reasoning_effort": reasoning_effort,
+                    "reasoning_effort": runtime_kwargs["reasoning_effort"],
                 }
             )

@@ -929,14 +933,14 @@ class NestedLoopsJoinRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
         variable_op_kwargs = []
         for model in models:
-
-            prompt_strategy = PromptStrategy.JOIN if
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.JOIN if reasoning_prompt_strategy else PromptStrategy.JOIN_NO_REASONING
             variable_op_kwargs.append(
                 {
                     "model": model,
                     "prompt_strategy": prompt_strategy,
                     "join_parallelism": runtime_kwargs["join_parallelism"],
-                    "reasoning_effort": reasoning_effort,
+                    "reasoning_effort": runtime_kwargs["reasoning_effort"],
                     "retain_inputs": not runtime_kwargs["is_validation"],
                 }
             )

@@ -963,14 +967,14 @@ class EmbeddingJoinRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression)]
         variable_op_kwargs = []
         for model in models:
-
-            prompt_strategy = PromptStrategy.JOIN if
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.JOIN if reasoning_prompt_strategy else PromptStrategy.JOIN_NO_REASONING
             variable_op_kwargs.append(
                 {
                     "model": model,
                     "prompt_strategy": prompt_strategy,
                     "join_parallelism": runtime_kwargs["join_parallelism"],
-                    "reasoning_effort": reasoning_effort,
+                    "reasoning_effort": runtime_kwargs["reasoning_effort"],
                     "retain_inputs": not runtime_kwargs["is_validation"],
                     "num_samples": 10,  # TODO: iterate over different choices of num_samples
                 }

@@ -997,13 +1001,13 @@ class SemanticAggregateRule(ImplementationRule):
         models = [model for model in runtime_kwargs["available_models"] if cls._model_matches_input(model, logical_expression) and not model.is_llama_model()]
         variable_op_kwargs = []
         for model in models:
-
-            prompt_strategy = PromptStrategy.AGG if
+            reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
+            prompt_strategy = PromptStrategy.AGG if reasoning_prompt_strategy else PromptStrategy.AGG_NO_REASONING
             variable_op_kwargs.append(
                 {
                     "model": model,
                     "prompt_strategy": prompt_strategy,
-                    "reasoning_effort": reasoning_effort,
+                    "reasoning_effort": runtime_kwargs["reasoning_effort"],
                 }
             )
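The same pattern repeats across the rewritten rules: derive a boolean from use_reasoning_prompt(runtime_kwargs["reasoning_effort"]), pick the reasoning or _NO_REASONING prompt strategy, and forward the raw reasoning_effort string to the physical operator, which resolves it per model at call time. A condensed sketch of that pattern is below; runtime_kwargs and the model list are hypothetical stand-ins for what the optimizer passes in, and the import location of PromptStrategy is an assumption.

# Condensed sketch of the rule pattern introduced in 1.3.4 (assumptions noted above).
from palimpzest.constants import Model, PromptStrategy
from palimpzest.utils.model_helpers import use_reasoning_prompt

runtime_kwargs = {"reasoning_effort": "default", "available_models": [Model.GPT_5_MINI]}

variable_op_kwargs = []
for model in runtime_kwargs["available_models"]:
    reasoning_prompt_strategy = use_reasoning_prompt(runtime_kwargs["reasoning_effort"])
    prompt_strategy = PromptStrategy.FILTER if reasoning_prompt_strategy else PromptStrategy.FILTER_NO_REASONING
    variable_op_kwargs.append(
        {
            "model": model,
            "prompt_strategy": prompt_strategy,
            # the raw user setting is forwarded; the operator resolves it per model
            "reasoning_effort": runtime_kwargs["reasoning_effort"],
        }
    )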
palimpzest/query/processor/config.py
CHANGED

@@ -28,7 +28,7 @@ class QueryProcessorConfig(BaseModel):
     max_workers: int | None = Field(default=64)
     join_parallelism: int = Field(default=64)
     batch_size: int | None = Field(default=None)
-    reasoning_effort: str
+    reasoning_effort: str = Field(default="default")  # Gemini: "disable", "low", "medium", "high"
     use_vertex: bool = Field(default=False)  # Whether to use Vertex models for Gemini or Google models
     gemini_credentials_path: str | None = Field(default=None)  # Path to Gemini credentials file
     api_base: str | None = Field(default=None)  # API base URL for vLLM
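For illustration, the new field is set like any other QueryProcessorConfig option. The sketch below only uses fields visible in the hunk above; the specific values chosen are assumptions about a typical setup, not recommended defaults.

# Minimal sketch: pinning the new reasoning_effort knob on a processor config.
from palimpzest.query.processor.config import QueryProcessorConfig

config = QueryProcessorConfig(
    max_workers=16,
    join_parallelism=32,
    reasoning_effort="high",  # or "disable" / "low" / "medium"; the field defaults to "default"
)
print(config.reasoning_effort)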
palimpzest/utils/model_helpers.py
CHANGED

@@ -60,29 +60,32 @@ def get_models(include_embedding: bool = False, use_vertex: bool = False, gemini
     return models


-def
+def use_reasoning_prompt(reasoning_effort: str) -> bool:
     """
-
-
-    By default, we use the reasoning prompt everywhere while setting the model reasoning effort to None (or minimal).
-    If a user explicitly provides a reasoning_effort, we pass that through to the model.
-    If the user explicitly disables reasoning_effort, we disable the reasoning prompt as well.
+    Determine whether to use the reasoning prompt based on the provided reasoning effort.
+    By default, we use the reasoning prompt everywhere unless the reasoning_effort is in [None, "disable", "minimal", "low"].
     """
-
-
+    return reasoning_effort not in ["disable", "minimal", "low"]
+
+
+def resolve_reasoning_effort(model: Model, reasoning_effort: str) -> str | None:
+    """
+    Resolve the reasoning effort setting based on the model and provided reasoning effort.
+    """
+    # check that model is a reasoning model, throw an assertion error otherwise
+    assert model.is_reasoning_model(), f"Model {model} is not a reasoning model. Should only use resolve_reasoning_effort with reasoning models."

     # if reasoning_effort is set to "default", set it to None to use model defaults
     if reasoning_effort == "default":
         reasoning_effort = None

     # translate reasoning_effort into model-specific settings
-    if model
-    if model
-
-
-
-
-
-        reasoning_effort = "minimal" if reasoning_effort in [None, "disable", "minimal", "low"] else reasoning_effort
+    if model.is_vertex_model() or model.is_google_ai_studio_model():
+        if reasoning_effort is None and model in [Model.GEMINI_2_5_PRO, Model.GOOGLE_GEMINI_2_5_PRO]:
+            reasoning_effort = "low"
+        elif reasoning_effort is None:
+            reasoning_effort = "disable"
+    elif model.is_openai_model():
+        reasoning_effort = "low" if reasoning_effort in [None, "disable", "minimal", "low"] else reasoning_effort

-    return
+    return reasoning_effort
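Taken together, the two helpers replace the removed resolve_reasoning_settings call sites: use_reasoning_prompt decides whether the reasoning-style prompt is used at all, while resolve_reasoning_effort maps the user setting onto a model-specific value at completion time. The expectations below follow from the code in the hunk above; which Model members actually satisfy is_reasoning_model() / is_vertex_model() / is_openai_model() is an assumption.

# Behavior sketch for the new helpers (model classification is assumed as commented).
from palimpzest.constants import Model
from palimpzest.utils.model_helpers import resolve_reasoning_effort, use_reasoning_prompt

# Reasoning prompt is used unless the effort is explicitly turned down.
assert use_reasoning_prompt("default") is True
assert use_reasoning_prompt("high") is True
assert use_reasoning_prompt("low") is False
assert use_reasoning_prompt("disable") is False

# "default" is translated per provider: Gemini 2.5 Pro floors at "low", other Gemini
# models disable thinking, and OpenAI reasoning models floor at "low".
assert resolve_reasoning_effort(Model.GEMINI_2_5_PRO, "default") == "low"  # assumed Vertex reasoning model
assert resolve_reasoning_effort(Model.GPT_5_MINI, "default") == "low"      # assumed OpenAI reasoning model
assert resolve_reasoning_effort(Model.GPT_5_MINI, "high") == "high"        # explicit settings pass through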
{palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: palimpzest
-Version: 1.3.2
+Version: 1.3.4
 Summary: Palimpzest is a system which enables anyone to process AI-powered analytical queries simply by defining them in a declarative language
 Author-email: MIT DSG Semantic Management Lab <michjc@csail.mit.edu>
 Project-URL: homepage, https://palimpzest.org
{palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 palimpzest/__init__.py,sha256=1PzadDDOVMQJKNEYUH0_tw8tQKUYTT31M0vuzTr2Rqk,1694
-palimpzest/constants.py,sha256=
+palimpzest/constants.py,sha256=67E4FsvWsNs_EQGtdNR0MO-vzq4Oh7qZutGnkg-rITo,23338
 palimpzest/policy.py,sha256=lIvw_C_rmwCH4LZaeNkAuixl8zw9RAW_JcSWSHPjKyc,11628
 palimpzest/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 palimpzest/agents/compute_agents.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -42,15 +42,15 @@ palimpzest/query/execution/mab_execution_strategy.py,sha256=BLRTSQXPeWBlJ_-8GAFH
 palimpzest/query/execution/parallel_execution_strategy.py,sha256=Di-8d7waE0bev4kNDXEJJqQ0wwQ87_sPV-t5qFtAlPQ,17589
 palimpzest/query/execution/single_threaded_execution_strategy.py,sha256=1rjMel0-AI6KUi_SMNgPPXxMgG5-t9lenLKoYEClgjk,17464
 palimpzest/query/generators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-palimpzest/query/generators/generators.py,sha256=
+palimpzest/query/generators/generators.py,sha256=iDiTInKsmeJGrZyf-oWDyGcTG5qMpGf2DE90h9w_hgk,21085
 palimpzest/query/operators/__init__.py,sha256=B9zr_VmUs6YRep4fjbj7e0aTM6T9-GrqbY7tKWxEdkc,4734
-palimpzest/query/operators/aggregate.py,sha256=
+palimpzest/query/operators/aggregate.py,sha256=Lrs0ZccNoNVWkulXQwREvmyvuMWvbjPQPue0RaboL-8,27365
 palimpzest/query/operators/compute.py,sha256=X_pWN45smg8L4dV54nOae7dldQGL1nJVlVyJ3ULWSmI,8432
-palimpzest/query/operators/convert.py,sha256=
+palimpzest/query/operators/convert.py,sha256=gT0aMoj1zponZkmjypyzyaP-p1KtXVStAdUgmRs8Ks4,16216
 palimpzest/query/operators/critique_and_refine.py,sha256=Q-NhasVoD9meX7g36RPrv3q4R48_8XEU4d3TE46hRJI,8979
 palimpzest/query/operators/distinct.py,sha256=ZTXlIS7IaFRTsWv9RemzCo1JLz25vEma-TB42CV5fJQ,2614
-palimpzest/query/operators/filter.py,sha256=
-palimpzest/query/operators/join.py,sha256=
+palimpzest/query/operators/filter.py,sha256=S4U5U39qHbH3Ceyl2jak97uXoKJ3OQbP1XegAqGMorg,10514
+palimpzest/query/operators/join.py,sha256=ZA_54d8uY3nSMEMEMeToNMA-pAx05_8zJwWMsaMWeAI,38843
 palimpzest/query/operators/limit.py,sha256=pdo7WfWY97SW3c-WqZ4SIPw7lHIVbaXPEWqHyK8qkF8,2130
 palimpzest/query/operators/logical.py,sha256=OtB82L1X19ibtLx1GIfeXXyO7YfjkFmh3puIUgqKQRE,21160
 palimpzest/query/operators/mixture_of_agents.py,sha256=KC-ZpjtGY28sfwlk2TpduLC_fADj_UASFCaicaKqSFc,11671

@@ -63,15 +63,15 @@ palimpzest/query/operators/split.py,sha256=oLzwnYb8TNf3XA9TMKEAIw7EIA12wHneaD42B
 palimpzest/query/operators/topk.py,sha256=MZl83Cu43QmN4skjlfpR8EVFFCgA7sR6PbGgBGWC0tg,13564
 palimpzest/query/optimizer/__init__.py,sha256=v9fSBOL2p3sQew4LrN2DQUPe0WezO328Hr54qBTqrAs,2799
 palimpzest/query/optimizer/cost_model.py,sha256=JaxdLuUZuq52BJ52YdW4ChfWptwXsh7Rk7oaPCn_gWc,12956
-palimpzest/query/optimizer/optimizer.py,sha256=
+palimpzest/query/optimizer/optimizer.py,sha256=kOB8NaGYNfnNcB_Ko3uXz8D17QLSRCX9Y98IBCHcftI,20061
 palimpzest/query/optimizer/optimizer_strategy.py,sha256=0foDaBHqQehK_zz6IlDEbNIw-44wxY6LO5H1anJi56Y,10042
 palimpzest/query/optimizer/optimizer_strategy_type.py,sha256=V-MMHvJdnfZKoUX1xxxwh66q1RjN2FL35IsiT1C62c8,1084
 palimpzest/query/optimizer/plan.py,sha256=O33uzcpwhcHVu5MuxcLzrwodcF86ZrcMZSOy4xoOb7A,22792
 palimpzest/query/optimizer/primitives.py,sha256=jMMVq37y1tWiPU1lSSKQP9OP-mzkpSxSmUeDajRYYOQ,5445
-palimpzest/query/optimizer/rules.py,sha256=
+palimpzest/query/optimizer/rules.py,sha256=brsySU2O4GK6HrZJ1zqePPmLD_TF3GHIpEDuM9xcvok,55113
 palimpzest/query/optimizer/tasks.py,sha256=DNJjY2QldfKFWj6INHElMh88dYc36Z5m3wHwbs4jyF4,30455
 palimpzest/query/processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-palimpzest/query/processor/config.py,sha256=
+palimpzest/query/processor/config.py,sha256=V3znG_nL8JDo8dIlQEL2pCaGgFDM9WNacaF8FefR0Ms,2705
 palimpzest/query/processor/query_processor.py,sha256=T4ffPbnOX23G8FDITzmM7Iw7DUEDWIHnwl8XLYllgjg,6240
 palimpzest/query/processor/query_processor_factory.py,sha256=-8Q3yCIFY1cbCJL3tZKEajEF01ZxDHdSukjOEtYuqeI,9753
 palimpzest/schemabuilder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -84,13 +84,13 @@ palimpzest/tools/skema_tools.py,sha256=HXUFpjMhbVxZwKKkATeK-FwtlTCawaCbeP-uHntI1
 palimpzest/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 palimpzest/utils/env_helpers.py,sha256=n81KzoJ459pRxo7QmJA7duazwWsfoMGTHc71D2LatFk,334
 palimpzest/utils/hash_helpers.py,sha256=3A8dA7SbXTwnnvZvPVNqqMLlVRhCKyKF_bjNNAu3Exk,334
-palimpzest/utils/model_helpers.py,sha256=
+palimpzest/utils/model_helpers.py,sha256=PsAPA567nOgSobIFUJZWm22o3lRWCqHf7HVuMvcdfuo,3945
 palimpzest/utils/progress.py,sha256=eHXrTPTCRHjMdK0EjYRUzSxcV6N1lK8TS3Ju_ZlQLhY,22002
 palimpzest/utils/udfs.py,sha256=LjHic54B1az-rKgNLur0wOpaz2ko_UodjLEJrazkxvY,1854
 palimpzest/validator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 palimpzest/validator/validator.py,sha256=SvjK09zCpGtK0yM0OasvQlSzyq3loy32DyOOKRmYXC0,15977
-palimpzest-1.3.2.dist-info/licenses/LICENSE,sha256=
-palimpzest-1.3.2.dist-info/METADATA,sha256=
-palimpzest-1.3.2.dist-info/WHEEL,sha256=
-palimpzest-1.3.2.dist-info/top_level.txt,sha256=
-palimpzest-1.3.2.dist-info/RECORD,,
+palimpzest-1.3.4.dist-info/licenses/LICENSE,sha256=5GUlHy9lr-Py9kvV38FF1m3yy3NqM18fefuE9wkWumo,1079
+palimpzest-1.3.4.dist-info/METADATA,sha256=qc_BI7yU6mIdZWpHBCjd78BXWHZ6AX6E--4KivpysTw,5395
+palimpzest-1.3.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+palimpzest-1.3.4.dist-info/top_level.txt,sha256=raV06dJUgohefUn3ZyJS2uqp_Y76EOLA9Y2e_fxt8Ew,11
+palimpzest-1.3.4.dist-info/RECORD,,
{palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/WHEEL
File without changes

{palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/licenses/LICENSE
File without changes

{palimpzest-1.3.2.dist-info → palimpzest-1.3.4.dist-info}/top_level.txt
File without changes