palimpzest 0.7.21__py3-none-any.whl → 0.8.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
Files changed (89)
  1. palimpzest/__init__.py +37 -6
  2. palimpzest/agents/__init__.py +0 -0
  3. palimpzest/agents/compute_agents.py +0 -0
  4. palimpzest/agents/search_agents.py +637 -0
  5. palimpzest/constants.py +343 -209
  6. palimpzest/core/data/context.py +393 -0
  7. palimpzest/core/data/context_manager.py +163 -0
  8. palimpzest/core/data/dataset.py +639 -0
  9. palimpzest/core/data/{datareaders.py → iter_dataset.py} +202 -126
  10. palimpzest/core/elements/groupbysig.py +16 -13
  11. palimpzest/core/elements/records.py +166 -75
  12. palimpzest/core/lib/schemas.py +152 -390
  13. palimpzest/core/{data/dataclasses.py → models.py} +306 -170
  14. palimpzest/policy.py +2 -27
  15. palimpzest/prompts/__init__.py +35 -5
  16. palimpzest/prompts/agent_prompts.py +357 -0
  17. palimpzest/prompts/context_search.py +9 -0
  18. palimpzest/prompts/convert_prompts.py +62 -6
  19. palimpzest/prompts/filter_prompts.py +51 -6
  20. palimpzest/prompts/join_prompts.py +163 -0
  21. palimpzest/prompts/moa_proposer_convert_prompts.py +6 -6
  22. palimpzest/prompts/prompt_factory.py +375 -47
  23. palimpzest/prompts/split_proposer_prompts.py +1 -1
  24. palimpzest/prompts/util_phrases.py +5 -0
  25. palimpzest/prompts/validator.py +239 -0
  26. palimpzest/query/execution/all_sample_execution_strategy.py +134 -76
  27. palimpzest/query/execution/execution_strategy.py +210 -317
  28. palimpzest/query/execution/execution_strategy_type.py +5 -7
  29. palimpzest/query/execution/mab_execution_strategy.py +249 -136
  30. palimpzest/query/execution/parallel_execution_strategy.py +153 -244
  31. palimpzest/query/execution/single_threaded_execution_strategy.py +107 -64
  32. palimpzest/query/generators/generators.py +160 -331
  33. palimpzest/query/operators/__init__.py +15 -5
  34. palimpzest/query/operators/aggregate.py +50 -33
  35. palimpzest/query/operators/compute.py +201 -0
  36. palimpzest/query/operators/convert.py +33 -19
  37. palimpzest/query/operators/critique_and_refine_convert.py +7 -5
  38. palimpzest/query/operators/distinct.py +62 -0
  39. palimpzest/query/operators/filter.py +26 -16
  40. palimpzest/query/operators/join.py +403 -0
  41. palimpzest/query/operators/limit.py +3 -3
  42. palimpzest/query/operators/logical.py +205 -77
  43. palimpzest/query/operators/mixture_of_agents_convert.py +10 -8
  44. palimpzest/query/operators/physical.py +27 -21
  45. palimpzest/query/operators/project.py +3 -3
  46. palimpzest/query/operators/rag_convert.py +7 -7
  47. palimpzest/query/operators/retrieve.py +9 -9
  48. palimpzest/query/operators/scan.py +81 -42
  49. palimpzest/query/operators/search.py +524 -0
  50. palimpzest/query/operators/split_convert.py +10 -8
  51. palimpzest/query/optimizer/__init__.py +7 -9
  52. palimpzest/query/optimizer/cost_model.py +108 -441
  53. palimpzest/query/optimizer/optimizer.py +123 -181
  54. palimpzest/query/optimizer/optimizer_strategy.py +66 -61
  55. palimpzest/query/optimizer/plan.py +352 -67
  56. palimpzest/query/optimizer/primitives.py +43 -19
  57. palimpzest/query/optimizer/rules.py +484 -646
  58. palimpzest/query/optimizer/tasks.py +127 -58
  59. palimpzest/query/processor/config.py +42 -76
  60. palimpzest/query/processor/query_processor.py +73 -18
  61. palimpzest/query/processor/query_processor_factory.py +46 -38
  62. palimpzest/schemabuilder/schema_builder.py +15 -28
  63. palimpzest/utils/model_helpers.py +32 -77
  64. palimpzest/utils/progress.py +114 -102
  65. palimpzest/validator/__init__.py +0 -0
  66. palimpzest/validator/validator.py +306 -0
  67. {palimpzest-0.7.21.dist-info → palimpzest-0.8.1.dist-info}/METADATA +6 -1
  68. palimpzest-0.8.1.dist-info/RECORD +95 -0
  69. palimpzest/core/lib/fields.py +0 -141
  70. palimpzest/prompts/code_synthesis_prompts.py +0 -28
  71. palimpzest/query/execution/random_sampling_execution_strategy.py +0 -240
  72. palimpzest/query/generators/api_client_factory.py +0 -30
  73. palimpzest/query/operators/code_synthesis_convert.py +0 -488
  74. palimpzest/query/operators/map.py +0 -130
  75. palimpzest/query/processor/nosentinel_processor.py +0 -33
  76. palimpzest/query/processor/processing_strategy_type.py +0 -28
  77. palimpzest/query/processor/sentinel_processor.py +0 -88
  78. palimpzest/query/processor/streaming_processor.py +0 -149
  79. palimpzest/sets.py +0 -405
  80. palimpzest/utils/datareader_helpers.py +0 -61
  81. palimpzest/utils/demo_helpers.py +0 -75
  82. palimpzest/utils/field_helpers.py +0 -69
  83. palimpzest/utils/generation_helpers.py +0 -69
  84. palimpzest/utils/sandbox.py +0 -183
  85. palimpzest-0.7.21.dist-info/RECORD +0 -95
  86. /palimpzest/core/{elements/index.py → data/index_dataset.py} +0 -0
  87. {palimpzest-0.7.21.dist-info → palimpzest-0.8.1.dist-info}/WHEEL +0 -0
  88. {palimpzest-0.7.21.dist-info → palimpzest-0.8.1.dist-info}/licenses/LICENSE +0 -0
  89. {palimpzest-0.7.21.dist-info → palimpzest-0.8.1.dist-info}/top_level.txt +0 -0
--- a/palimpzest/query/execution/random_sampling_execution_strategy.py
+++ /dev/null
@@ -1,240 +0,0 @@
- import logging
-
- import numpy as np
-
- from palimpzest.core.data.dataclasses import SentinelPlanStats
- from palimpzest.core.elements.records import DataRecord, DataRecordSet
- from palimpzest.query.execution.execution_strategy import SentinelExecutionStrategy
- from palimpzest.query.operators.physical import PhysicalOperator
- from palimpzest.query.operators.scan import ScanPhysicalOp
- from palimpzest.query.optimizer.plan import SentinelPlan
- from palimpzest.utils.progress import create_progress_manager
-
- logger = logging.getLogger(__name__)
-
- class OpSet:
-     """
-     This class represents the set of operators which are currently in the frontier for a given logical operator.
-     Each operator in the frontier is an instance of a PhysicalOperator which either:
-
-     1. lies on the Pareto frontier of the set of sampled operators, or
-     2. has been sampled fewer than j times
-     """
-
-     def __init__(self, op_set: list[PhysicalOperator], source_indices: list[int], k: int, j: int, seed: int):
-         # set k and j, which are the initial number of operators in the frontier and the
-         # initial number of records to sample for each frontier operator
-         self.k = min(k, len(op_set))
-         self.j = min(j, len(source_indices))
-
-         # get order in which we will sample physical operators for this logical operator
-         sample_op_indices = self._get_op_index_order(op_set, seed)
-
-         # construct the set of operators
-         self.ops = [op_set[sample_idx] for sample_idx in sample_op_indices[:self.k]]
-
-         # store the order in which we will sample the source records
-         self.source_indices = source_indices
-
-         # set the initial inputs for this logical operator
-         is_scan_op = isinstance(op_set[0], ScanPhysicalOp)
-         self.source_idx_to_input = {source_idx: [source_idx] for source_idx in self.source_indices} if is_scan_op else {}
-
-
-     def _get_op_index_order(self, op_set: list[PhysicalOperator], seed: int) -> list[int]:
-         """
-         Returns a list of indices for the operators in the op_set.
-         """
-         rng = np.random.default_rng(seed=seed)
-         op_indices = np.arange(len(op_set))
-         rng.shuffle(op_indices)
-         return op_indices
-
-     def get_op_input_pairs(self) -> list[tuple[PhysicalOperator, DataRecord | int | None]]:
-         """
-         Returns the list of frontier operators and their next input to process. If there are
-         any indices in `source_indices_to_sample` which this operator does not sample on its own, then
-         we also have this frontier process that source_idx's input with its max quality operator.
-         """
-         # get the list of (op, source_idx) pairs which this operator needs to execute
-         op_source_idx_pairs = []
-         for op in self.ops:
-             # construct list of inputs by looking up the input for the given source_idx
-             for sample_idx in range(self.j):
-                 source_idx = self.source_indices[sample_idx]
-                 op_source_idx_pairs.append((op, source_idx))
-
-         # fetch the corresponding (op, input) pairs
-         op_input_pairs = []
-         for op, source_idx in op_source_idx_pairs:
-             op_input_pairs.extend([(op, input_record) for input_record in self.source_idx_to_input[source_idx]])
-
-         return op_input_pairs
-
-     def pick_highest_quality_output(self, record_sets: list[DataRecordSet]) -> DataRecordSet:
-         # if there's only one operator in the set, we return its record_set
-         if len(record_sets) == 1:
-             return record_sets[0]
-
-         # NOTE: I don't like that this assumes the models are consistent in
-         # how they order their record outputs for one-to-many converts;
-         # eventually we can try out more robust schemes to account for
-         # differences in ordering
-         # aggregate records at each index in the response
-         idx_to_records = {}
-         for record_set in record_sets:
-             for idx in range(len(record_set)):
-                 record, record_op_stats = record_set[idx], record_set.record_op_stats[idx]
-                 if idx not in idx_to_records:
-                     idx_to_records[idx] = [(record, record_op_stats)]
-                 else:
-                     idx_to_records[idx].append((record, record_op_stats))
-
-         # compute highest quality answer at each index
-         out_records = []
-         out_record_op_stats = []
-         for idx in range(len(idx_to_records)):
-             records_lst, record_op_stats_lst = zip(*idx_to_records[idx])
-             max_quality_record, max_quality = records_lst[0], record_op_stats_lst[0].quality
-             max_quality_stats = record_op_stats_lst[0]
-             for record, record_op_stats in zip(records_lst[1:], record_op_stats_lst[1:]):
-                 record_quality = record_op_stats.quality
-                 if record_quality > max_quality:
-                     max_quality_record = record
-                     max_quality = record_quality
-                     max_quality_stats = record_op_stats
-             out_records.append(max_quality_record)
-             out_record_op_stats.append(max_quality_stats)
-
-         # create and return final DataRecordSet
-         return DataRecordSet(out_records, out_record_op_stats)
-
-     def update_inputs(self, source_idx_to_record_sets: dict[int, DataRecordSet]):
-         """
-         Update the inputs for this logical operator based on the outputs of the previous logical operator.
-         """
-         for source_idx, record_sets in source_idx_to_record_sets.items():
-             input = []
-             max_quality_record_set = self.pick_highest_quality_output(record_sets)
-             for record in max_quality_record_set:
-                 input.append(record if record.passed_operator else None)
-
-             self.source_idx_to_input[source_idx] = input
-
-
- class RandomSamplingExecutionStrategy(SentinelExecutionStrategy):
-
-     def _get_source_indices(self):
-         """Get the list of source indices which the sentinel plan should execute over."""
-         # create list of all source indices and shuffle it
-         total_num_samples = len(self.val_datasource)
-         source_indices = list(np.arange(total_num_samples))
-         self.rng.shuffle(source_indices)
-
-         # slice the list of source indices to get the first j indices
-         j = min(self.j, len(source_indices))
-         source_indices = source_indices[:j]
-
-         return source_indices
-
-     def _execute_sentinel_plan(self,
-         plan: SentinelPlan,
-         op_sets: dict[str, OpSet],
-         expected_outputs: dict[int, dict] | None,
-         plan_stats: SentinelPlanStats,
-     ) -> SentinelPlanStats:
-         # execute operator sets in sequence
-         for op_idx, (logical_op_id, op_set) in enumerate(plan):
-             # get frontier ops and their next input
-             op_input_pairs = op_sets[logical_op_id].get_op_input_pairs()
-
-             # break out of the loop if op_input_pairs is empty, as this means all records have been filtered out
-             if len(op_input_pairs) == 0:
-                 break
-
-             # run sampled operators on sampled inputs
-             source_idx_to_record_sets_and_ops, _ = self._execute_op_set(op_input_pairs)
-
-             # FUTURE TODO: have this return the highest quality record set simply based on our posterior (or prior) belief on operator quality
-             # get the target record set for each source_idx
-             source_idx_to_target_record_set = self._get_target_record_sets(logical_op_id, source_idx_to_record_sets_and_ops, expected_outputs)
-
-             # TODO: make consistent across here and RandomSampling
-             # FUTURE TODO: move this outside of the loop (i.e. assume we only get quality label(s) after executing full program)
-             # score the quality of each generated output
-             physical_op_cls = op_set[0].__class__
-             source_idx_to_record_sets = {
-                 source_idx: list(map(lambda tup: tup[0], record_sets_and_ops))
-                 for source_idx, record_sets_and_ops in source_idx_to_record_sets_and_ops.items()
-             }
-             source_idx_to_record_sets = self._score_quality(physical_op_cls, source_idx_to_record_sets, source_idx_to_target_record_set)
-
-             # flatten the lists of records and record_op_stats
-             all_records, all_record_op_stats = self._flatten_record_sets(source_idx_to_record_sets)
-
-             # update plan stats
-             plan_stats.add_record_op_stats(all_record_op_stats)
-
-             # add records (which are not filtered) to the cache, if allowed
-             self._add_records_to_cache(logical_op_id, all_records)
-
-             # FUTURE TODO: simply set input based on source_idx_to_target_record_set (b/c we won't have scores computed)
-             # provide the champion record sets as inputs to the next logical operator
-             if op_idx + 1 < len(plan):
-                 next_logical_op_id = plan.logical_op_ids[op_idx + 1]
-                 op_sets[next_logical_op_id].update_inputs(source_idx_to_record_sets)
-
-         # close the cache
-         self._close_cache(plan.logical_op_ids)
-
-         # finalize plan stats
-         plan_stats.finish()
-
-         return plan_stats
-
-     def execute_sentinel_plan(self, plan: SentinelPlan, expected_outputs: dict[int, dict] | None):
-         """
-         NOTE: this function currently requires us to set k and j properly in order to make
-         comparison in our research against the corresponding sample budget in MAB.
-
-         NOTE: the number of samples will slightly exceed the sample_budget if the number of operator
-         calls does not perfectly match the sample_budget. This may cause some minor discrepancies with
-         the progress manager as a result.
-         """
-         # for now, assert that the first operator in the plan is a ScanPhysicalOp
-         assert all(isinstance(op, ScanPhysicalOp) for op in plan.operator_sets[0]), "First operator in physical plan must be a ScanPhysicalOp"
-         logger.info(f"Executing plan {plan.plan_id} with {self.max_workers} workers")
-         logger.info(f"Plan Details: {plan}")
-
-         # initialize plan stats
-         plan_stats = SentinelPlanStats.from_plan(plan)
-         plan_stats.start()
-
-         # get list of source indices which can be sampled from
-         source_indices = self._get_source_indices()
-
-         # initialize set of physical operators for each logical operator
-         op_sets = {
-             logical_op_id: OpSet(op_set, source_indices, self.k, self.j, self.seed)
-             for logical_op_id, op_set in plan
-         }
-
-         # initialize and start the progress manager
-         self.progress_manager = create_progress_manager(plan, sample_budget=self.sample_budget, progress=self.progress)
-         self.progress_manager.start()
-
-         # NOTE: we must handle the progress manager outside of _execute_sentinel_plan to ensure that it is shut down correctly;
-         # if we don't have the `finally:` branch, then program crashes can cause future program runs to fail because
-         # the progress manager cannot get a handle to the console
-         try:
-             # execute sentinel plan by sampling records and operators
-             plan_stats = self._execute_sentinel_plan(plan, op_sets, expected_outputs, plan_stats)
-
-         finally:
-             # finish progress tracking
-             self.progress_manager.finish()
-
-         logger.info(f"Done executing sentinel plan: {plan.plan_id}")
-         logger.debug(f"Plan stats: (plan_cost={plan_stats.total_plan_cost}, plan_time={plan_stats.total_plan_time})")
-
-         return plan_stats
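
The removed `RandomSamplingExecutionStrategy` resolves disagreements between sampled operators by keeping, at each output index, the record with the highest quality score (see `OpSet.pick_highest_quality_output` above). Below is a minimal, self-contained sketch of that per-index selection; `ScoredRecord` and `pick_highest_quality` are illustrative stand-ins for palimpzest's `DataRecord`/`RecordOpStats` types, not part of its API:

```python
from dataclasses import dataclass

@dataclass
class ScoredRecord:
    value: str      # stand-in for a DataRecord payload
    quality: float  # stand-in for RecordOpStats.quality

def pick_highest_quality(candidates: list[list[ScoredRecord]]) -> list[ScoredRecord]:
    """At each output index, keep the highest-quality record across all candidate
    record sets (assuming, as the original code notes, that operators order their
    outputs consistently for one-to-many converts)."""
    if len(candidates) == 1:
        return candidates[0]
    num_outputs = max(len(record_set) for record_set in candidates)
    best = []
    for idx in range(num_outputs):
        at_idx = [rs[idx] for rs in candidates if idx < len(rs)]
        best.append(max(at_idx, key=lambda r: r.quality))
    return best

# Two operators each produced two records; the higher-quality record wins per index.
a = [ScoredRecord("x", 0.9), ScoredRecord("y", 0.4)]
b = [ScoredRecord("x'", 0.2), ScoredRecord("y'", 0.8)]
assert [r.value for r in pick_highest_quality([a, b])] == ["x", "y'"]
```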
--- a/palimpzest/query/generators/api_client_factory.py
+++ /dev/null
@@ -1,30 +0,0 @@
- from threading import Lock
-
- from openai import OpenAI
- from together import Together
-
- from palimpzest.constants import APIClient
-
-
- class APIClientFactory:
-     _instances = {}
-     _lock = Lock()
-
-     @classmethod
-     def get_client(cls, api_client: APIClient, api_key: str):
-         """Get a singleton instance of the requested API client."""
-         if api_client not in cls._instances:
-             with cls._lock:  # Ensure thread safety
-                 if api_client not in cls._instances:  # Double-check inside the lock
-                     cls._instances[api_client] = cls._create_client(api_client, api_key)
-         return cls._instances[api_client]
-
-     @staticmethod
-     def _create_client(api_client: APIClient, api_key: str):
-         """Create a new client instance based on the api_client name."""
-         if api_client == APIClient.OPENAI:
-             return OpenAI(api_key=api_key)
-         elif api_client == APIClient.TOGETHER:
-             return Together(api_key=api_key)
-         else:
-             raise ValueError(f"Unknown api_client: {api_client}")
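
The removed `APIClientFactory` is a double-checked locking singleton: the unlocked membership test is the fast path once a client exists, and the second test inside the lock prevents two threads from racing to construct the same client. A generic sketch of the pattern, assuming nothing beyond the standard library (`ClientCache` is illustrative, not palimpzest code):

```python
from threading import Lock
from typing import Callable

class ClientCache:
    _instances: dict[str, object] = {}
    _lock = Lock()

    @classmethod
    def get(cls, name: str, factory: Callable[[], object]) -> object:
        if name not in cls._instances:            # fast path: no lock once cached
            with cls._lock:
                if name not in cls._instances:    # re-check under the lock
                    cls._instances[name] = factory()
        return cls._instances[name]

# e.g. ClientCache.get("openai", lambda: OpenAI(api_key=key))
```

Note that, like the factory above, this keys the cache only on the client name, so the `api_key` passed on the first call wins and later calls with a different key silently receive the original client.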