edsl 0.1.40__py3-none-any.whl → 0.1.40.dev2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in their public registries.
edsl/__init__.py CHANGED
@@ -21,7 +21,6 @@ from edsl.questions import QuestionFunctional
  from edsl.questions import QuestionLikertFive
  from edsl.questions import QuestionList
  from edsl.questions import QuestionMatrix
- from edsl.questions import QuestionDict
  from edsl.questions import QuestionLinearScale
  from edsl.questions import QuestionNumerical
  from edsl.questions import QuestionYesNo
edsl/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.40"
+ __version__ = "0.1.40.dev2"
edsl/agents/Agent.py CHANGED
@@ -906,7 +906,7 @@ class Agent(Base):
  {'traits': {'age': 10, 'hair': 'brown', 'height': 5.5}, 'instruction': 'Have fun.', 'edsl_version': '...', 'edsl_class_name': 'Agent'}
  """
  d = {}
- d["traits"] = copy.deepcopy(dict(self._traits))
+ d["traits"] = copy.deepcopy(self.traits)
  if self.name:
  d["name"] = self.name
  if self.set_instructions:
@@ -48,14 +48,13 @@ class InvigilatorAI(InvigilatorBase):
  """Store the response in the invigilator, in case it is needed later because of validation failure."""
  self.raw_model_response = agent_response_dict.model_outputs.response
  self.generated_tokens = agent_response_dict.edsl_dict.generated_tokens
- self.cache_key = agent_response_dict.model_outputs.cache_key

- async def async_answer_question(self) -> EDSLResultObjectInput:
+ async def async_answer_question(self) -> AgentResponseDict:
  """Answer a question using the AI model.

  >>> i = InvigilatorAI.example()
  """
- agent_response_dict: AgentResponseDict = await self.async_get_agent_response()
+ agent_response_dict = await self.async_get_agent_response()
  self.store_response(agent_response_dict)
  return self._extract_edsl_result_entry_and_validate(agent_response_dict)

edsl/coop/coop.py CHANGED
@@ -111,13 +111,13 @@ class Coop(CoopFunctionsMixin):
  url = f"{self.api_url}/{uri}"
  method = method.upper()
  if payload is None:
- timeout = 40
+ timeout = 20
  elif (
  method.upper() == "POST"
  and "json_string" in payload
  and payload.get("json_string") is not None
  ):
- timeout = max(40, (len(payload.get("json_string", "")) // (1024 * 1024)))
+ timeout = max(20, (len(payload.get("json_string", "")) // (1024 * 1024)))
  try:
  if method in ["GET", "DELETE"]:
  response = requests.request(
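This hunk halves the request-timeout floor from 40 to 20 seconds while keeping the payload-scaled bound for large POST bodies. A minimal sketch of the resulting rule (the helper name is ours, not the library's):

```python
# Sketch of the timeout rule above: a 20-second floor, plus roughly
# one extra second per MiB of JSON payload for large POST bodies.
def request_timeout(json_string: str | None) -> int:
    if json_string is None:
        return 20  # no payload: flat 20s (was 40s)
    return max(20, len(json_string) // (1024 * 1024))

assert request_timeout(None) == 20
assert request_timeout("x" * 50 * 1024 * 1024) == 50  # 50 MiB -> 50s
```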
@@ -533,7 +533,6 @@ class Coop(CoopFunctionsMixin):
  uri="api/v0/remote-cache/many",
  method="POST",
  payload=payload,
- timeout=40,
  )
  self._resolve_server_response(response)
  response_json = response.json()
@@ -564,7 +563,6 @@ class Coop(CoopFunctionsMixin):
  uri="api/v0/remote-cache/get-many",
  method="POST",
  payload={"keys": exclude_keys},
- timeout=40,
  )
  self._resolve_server_response(response)
  return [
@@ -583,7 +581,6 @@ class Coop(CoopFunctionsMixin):
  uri="api/v0/remote-cache/get-diff",
  method="POST",
  payload={"keys": client_cacheentry_keys},
- timeout=40,
  )
  self._resolve_server_response(response)
  response_json = response.json()
edsl/data/Cache.py CHANGED
@@ -535,13 +535,6 @@ class Cache(Base):
  """
  return html

- def subset(self, keys: list[str]) -> Cache:
- """
- Return a subset of the Cache with the specified keys.
- """
- new_data = {k: v for k, v in self.data.items() if k in keys}
- return Cache(data=new_data)
-
  def view(self) -> None:
  """View the Cache in a new browser tab."""
  import tempfile
@@ -112,18 +112,18 @@ class RemoteCacheSync(AbstractContextManager):
  missing_count = len(diff.client_missing_entries)

  if missing_count == 0:
- # self._output("No new entries to add to local cache.")
+ self._output("No new entries to add to local cache.")
  return

- # self._output(
- # f"Updating local cache with {missing_count:,} new "
- # f"{'entry' if missing_count == 1 else 'entries'} from remote..."
- # )
+ self._output(
+ f"Updating local cache with {missing_count:,} new "
+ f"{'entry' if missing_count == 1 else 'entries'} from remote..."
+ )

  self.cache.add_from_dict(
  {entry.key: entry for entry in diff.client_missing_entries}
  )
- # self._output("Local cache updated!")
+ self._output("Local cache updated!")

  def _get_entries_to_upload(self, diff: CacheDifference) -> CacheEntriesList:
  """Determines which entries need to be uploaded to remote cache."""
@@ -154,23 +154,23 @@ class RemoteCacheSync(AbstractContextManager):
  upload_count = len(entries_to_upload)

  if upload_count > 0:
- # self._output(
- # f"Updating remote cache with {upload_count:,} new "
- # f"{'entry' if upload_count == 1 else 'entries'}..."
- # )
+ self._output(
+ f"Updating remote cache with {upload_count:,} new "
+ f"{'entry' if upload_count == 1 else 'entries'}..."
+ )

  self.coop.remote_cache_create_many(
  entries_to_upload,
  visibility="private",
  description=self.remote_cache_description,
  )
- # self._output("Remote cache updated!")
- # else:
- # self._output("No new entries to add to remote cache.")
+ self._output("Remote cache updated!")
+ else:
+ self._output("No new entries to add to remote cache.")

- # self._output(
- # f"There are {len(self.cache.keys()):,} entries in the local cache."
- # )
+ self._output(
+ f"There are {len(self.cache.keys()):,} entries in the local cache."
+ )


  if __name__ == "__main__":
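These two hunks re-enable the previously commented-out sync messages. For orientation, RemoteCacheSync subclasses AbstractContextManager: entries missing locally are pulled on enter, and new local entries are pushed on exit. A minimal sketch of that shape (the method bodies below are illustrative assumptions, not the library's code):

```python
from contextlib import AbstractContextManager

class CacheSyncSketch(AbstractContextManager):
    """Illustrative only: pull missing entries on enter, push new ones on exit."""

    def __init__(self, cache, output_func=print):
        self.cache = cache
        self._output = output_func

    def __enter__(self):
        self._output("Updating local cache from remote...")  # download phase
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._output("Updating remote cache with new entries...")  # upload phase
        return False  # never swallow exceptions from the wrapped block
```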
@@ -139,7 +139,7 @@ class AnswerQuestionFunctionConstructor:
  *,
  question: "QuestionBase",
  task=None,
- ) -> "EDSLResultObjectInput":
+ ) -> "AgentResponseDict":

  from tenacity import (
  retry,
edsl/jobs/Jobs.py CHANGED
@@ -499,6 +499,7 @@ class Jobs(Base):
  jc.check_api_keys()

  async def _execute_with_remote_cache(self, run_job_async: bool) -> Results:
+
  use_remote_cache = self.use_remote_cache()

  from edsl.coop.coop import Coop
@@ -507,21 +508,22 @@

  assert isinstance(self.run_config.environment.cache, Cache)

- # with RemoteCacheSync(
- # coop=Coop(),
- # cache=self.run_config.environment.cache,
- # output_func=self._output,
- # remote_cache=use_remote_cache,
- # remote_cache_description=self.run_config.parameters.remote_cache_description,
- # ):
- runner = JobsRunnerAsyncio(self, environment=self.run_config.environment)
- if run_job_async:
- results = await runner.run_async(self.run_config.parameters)
- else:
- results = runner.run(self.run_config.parameters)
+ with RemoteCacheSync(
+ coop=Coop(),
+ cache=self.run_config.environment.cache,
+ output_func=self._output,
+ remote_cache=use_remote_cache,
+ remote_cache_description=self.run_config.parameters.remote_cache_description,
+ ):
+ runner = JobsRunnerAsyncio(self, environment=self.run_config.environment)
+ if run_job_async:
+ results = await runner.run_async(self.run_config.parameters)
+ else:
+ results = runner.run(self.run_config.parameters)
  return results

  def _setup_and_check(self) -> Tuple[RunConfig, Optional[Results]]:
+
  self._prepare_to_run()
  self._check_if_remote_keys_ok()

@@ -537,16 +539,12 @@
  if self.run_config.parameters.n is None:
  return len(self)
  else:
- return len(self) * self.run_config.parameters.n
+ len(self) * self.run_config.parameters.n

  def _run(self, config: RunConfig):
  "Shared code for run and run_async"
  if config.environment.cache is not None:
  self.run_config.environment.cache = config.environment.cache
- if config.environment.jobs_runner_status is not None:
- self.run_config.environment.jobs_runner_status = (
- config.environment.jobs_runner_status
- )

  if config.environment.bucket_collection is not None:
  self.run_config.environment.bucket_collection = (
@@ -648,19 +646,20 @@
  }

  def __len__(self) -> int:
- """Return the number of interviews that will be conducted for one iteration of this job.
- An interview is the result of one survey, taken by one agent, with one model, with one scenario.
+ """Return the maximum number of questions that will be asked while running this job.
+ Note that this is the maximum number of questions, not the actual number of questions that will be asked, as some questions may be skipped.

  >>> from edsl.jobs import Jobs
  >>> len(Jobs.example())
- 4
+ 8
  """
- number_of_interviews = (
+ number_of_questions = (
  len(self.agents or [1])
  * len(self.scenarios or [1])
  * len(self.models or [1])
+ * len(self.survey)
  )
- return number_of_interviews
+ return number_of_questions

  def to_dict(self, add_edsl_version=True):
  d = {
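The redefinition above multiplies in the survey length, so `len(job)` now counts question executions rather than interviews. A hedged arithmetic check (the factor values assume `Jobs.example()` has 2 agents, 2 scenarios, 1 model, and a 2-question survey, consistent with the doctest changing from 4 to 8):

```python
# Factors assumed for Jobs.example(); only the 4 -> 8 doctest change is certain.
n_agents, n_scenarios, n_models, n_questions = 2, 2, 1, 2

number_of_interviews = n_agents * n_scenarios * n_models   # old __len__: 4
number_of_questions = number_of_interviews * n_questions   # new __len__: 8
assert (number_of_interviews, number_of_questions) == (4, 8)
```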
@@ -811,9 +810,9 @@ def main():
  from edsl.data.Cache import Cache

  job = Jobs.example()
- len(job) == 4
+ len(job) == 8
  results = job.run(cache=Cache())
- len(results) == 4
+ len(results) == 8
  results


@@ -213,6 +213,10 @@ class Interview:
  async def async_conduct_interview(
  self,
  run_config: Optional["RunConfig"] = None,
+ # model_buckets: Optional[ModelBuckets] = None,
+ # stop_on_exception: bool = False,
+ # raise_validation_errors: bool = True,
+ # key_lookup: Optional[KeyLookup] = None,
  ) -> tuple["Answers", List[dict[str, Any]]]:
  """
  Conduct an Interview asynchronously.
@@ -309,7 +313,7 @@

  def handle_task(task, invigilator):
  try:
- result: Answers = task.result()
+ result = task.result()
  except asyncio.CancelledError as e: # task was cancelled
  result = invigilator.get_failed_task_result(
  failure_reason="Task was cancelled."
@@ -44,16 +44,7 @@ class JobsRunnerAsyncio:
  data.append(result)
  task_history.add_interview(interview)

- results = Results(survey=self.jobs.survey, task_history=task_history, data=data)
-
- relevant_cache = results.relevant_cache(self.environment.cache)
-
- return Results(
- survey=self.jobs.survey,
- task_history=task_history,
- data=data,
- cache=relevant_cache,
- )
+ return Results(survey=self.jobs.survey, task_history=task_history, data=data)

  def simple_run(self):
  data = asyncio.run(self.run_async())
@@ -102,16 +93,16 @@ class JobsRunnerAsyncio:

  self.completed = True

- def run_progress_bar(stop_event, jobs_runner_status) -> None:
+ def run_progress_bar(stop_event) -> None:
  """Runs the progress bar in a separate thread."""
- jobs_runner_status.update_progress(stop_event)
+ self.jobs_runner_status.update_progress(stop_event)

  def set_up_progress_bar(progress_bar: bool, jobs_runner_status):
  progress_thread = None
  if progress_bar and jobs_runner_status.has_ep_api_key():
  jobs_runner_status.setup()
  progress_thread = threading.Thread(
- target=run_progress_bar, args=(stop_event, jobs_runner_status)
+ target=run_progress_bar, args=(stop_event,)
  )
  progress_thread.start()
  elif progress_bar:
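With this change `run_progress_bar` closes over `self.jobs_runner_status` instead of receiving it as a thread argument; only the stop event is passed to the thread. A self-contained sketch of the underlying stop-event pattern (the worker body is a placeholder):

```python
import threading
import time

def update_progress(stop_event: threading.Event) -> None:
    # Placeholder worker: poll until the main thread signals completion.
    while not stop_event.is_set():
        time.sleep(0.1)  # a real implementation would refresh the display here

stop_event = threading.Event()
progress_thread = threading.Thread(target=update_progress, args=(stop_event,))
progress_thread.start()
# ... run the job ...
stop_event.set()        # tell the progress thread to finish
progress_thread.join()  # wait for it to exit cleanly
```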
@@ -124,9 +115,8 @@ class JobsRunnerAsyncio:
  survey=self.jobs.survey,
  data=[],
  task_history=TaskHistory(),
- # cache=self.environment.cache.new_entries_cache(),
+ cache=self.environment.cache.new_entries_cache(),
  )
-
  stop_event = threading.Event()
  progress_thread = set_up_progress_bar(
  parameters.progress_bar, run_config.environment.jobs_runner_status
@@ -150,9 +140,7 @@ class JobsRunnerAsyncio:
  if exception_to_raise:
  raise exception_to_raise

- relevant_cache = results.relevant_cache(self.environment.cache)
- results.cache = relevant_cache
- # breakpoint()
+ results.cache = self.environment.cache.new_entries_cache()
  results.bucket_collection = self.environment.bucket_collection

  from edsl.jobs.results_exceptions_handler import ResultsExceptionsHandler
@@ -148,8 +148,7 @@ class JobsRunnerStatusBase(ABC):
  }

  model_queues = {}
- # for model, bucket in self.jobs_runner.bucket_collection.items():
- for model, bucket in self.jobs_runner.environment.bucket_collection.items():
+ for model, bucket in self.jobs_runner.bucket_collection.items():
  model_name = model.model
  model_queues[model_name] = {
  "language_model_name": model_name,
@@ -8,7 +8,7 @@ from edsl.questions.descriptors import IntegerDescriptor, QuestionOptionsDescriptor
  from edsl.questions.response_validator_abc import ResponseValidatorABC


- class BudgetResponseValidator(ResponseValidatorABC):
+ class BudgewResponseValidator(ResponseValidatorABC):
  valid_examples = []

  invalid_examples = []
@@ -64,7 +64,7 @@ class QuestionBudget(QuestionBase):
  budget_sum: int = IntegerDescriptor(none_allowed=False)
  question_options: list[str] = QuestionOptionsDescriptor(q_budget=True)
  _response_model = None
- response_validator_class = BudgetResponseValidator
+ response_validator_class = BudgewResponseValidator

  def __init__(
  self,
@@ -50,7 +50,7 @@ def extract_json(text, expected_keys, verbose=False):

  def dict_to_pydantic_model(input_dict: Dict[str, Any]) -> Any:
  field_definitions = {
- key: (type(value), Field(default=value)) for key, value in input_dict.items()
+ key: (str, Field(default=str(value))) for key, value in input_dict.items()
  }

  DynamicModel = create_model("DynamicModel", **field_definitions)
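The old comprehension inferred each field's type from its default value; the new one declares every field as `str` and stringifies the default, so mixed-type extraction targets all validate as strings. A runnable sketch of the difference (the sample `input_dict` is invented):

```python
from pydantic import Field, create_model

input_dict = {"count": 2, "name": "alice"}  # invented sample data

# New behavior: every field is declared str and its default stringified.
fields = {k: (str, Field(default=str(v))) for k, v in input_dict.items()}
DynamicModel = create_model("DynamicModel", **fields)

m = DynamicModel()
print(type(m.count), m.count)  # <class 'str'> 2 -- was int 2 with type(value)
```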
@@ -12,7 +12,6 @@ from edsl.questions.QuestionFreeText import QuestionFreeText
  from edsl.questions.QuestionFunctional import QuestionFunctional
  from edsl.questions.QuestionList import QuestionList
  from edsl.questions.QuestionMatrix import QuestionMatrix
- from edsl.questions.QuestionDict import QuestionDict
  from edsl.questions.QuestionMultipleChoice import QuestionMultipleChoice
  from edsl.questions.QuestionNumerical import QuestionNumerical
  from edsl.questions.QuestionBudget import QuestionBudget
@@ -324,35 +324,6 @@ class AnswerValidatorMixin:
  f"Must be one of: {valid_options}"
  )

- def _validate_answer_dict(self, answer: dict[str, Any]) -> None:
- """Validate QuestionDict-specific answer.
-
- Check that answer["answer"]:
- - is a dictionary
- - has all required answer_keys as keys
- """
- value = answer.get("answer")
-
- # Check that answer is a dictionary
- if not isinstance(value, dict):
- raise QuestionAnswerValidationError(
- f"Dict answer must be a dictionary mapping values to specified keys (got {value})"
- )
-
- # Check that all required answer keys are present
- required_keys = set(self.answer_keys)
- provided_keys = set(value.keys())
-
- if missing_keys := (required_keys - provided_keys):
- raise QuestionAnswerValidationError(
- f"Missing required keys: {missing_keys}"
- )
-
- if extra_keys := (provided_keys - required_keys):
- raise QuestionAnswerValidationError(
- f"Unexpected keys: {extra_keys}"
- )
-

  if __name__ == "__main__":
  pass
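The deleted `_validate_answer_dict` boils down to two set differences guarded by the walrus operator. A standalone restatement of that check, with stand-in names and a plain `ValueError` in place of `QuestionAnswerValidationError`:

```python
def check_answer_keys(value: dict, answer_keys: list) -> None:
    # Stand-in for the removed validator's key checks.
    required_keys = set(answer_keys)
    provided_keys = set(value.keys())
    if missing_keys := (required_keys - provided_keys):
        raise ValueError(f"Missing required keys: {missing_keys}")
    if extra_keys := (provided_keys - required_keys):
        raise ValueError(f"Unexpected keys: {extra_keys}")

check_answer_keys({"price": 9.99, "quantity": 3}, ["price", "quantity"])  # ok
```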
@@ -36,7 +36,7 @@ class QuestionLinearScale(QuestionMultipleChoice):
  question_name=question_name,
  question_text=question_text,
  question_options=question_options,
- use_code=False, # question linear scale will have its own code
+ use_code=False, # question linear scale will have it's own code
  include_comment=include_comment,
  )
  self.question_options = question_options
@@ -421,50 +421,6 @@ class QuestionTextDescriptor(BaseDescriptor):
  return None


- class ValueTypesDescriptor(BaseDescriptor):
- def validate(self, value, instance):
- """Validate the value is a list of strings or None."""
- if value is None: # Allow None as a valid value
- return None
- if not isinstance(value, list):
- raise QuestionCreationValidationError(
- f"`value_types` must be a list or None (got {value})."
- )
- # Convert all items in the list to strings
- return [str(item) for item in value]
-
-
- class ValueDescriptionsDescriptor(BaseDescriptor):
- def validate(self, value, instance):
- """Validate the value is a list of strings or None."""
- if value is None: # Allow None as a valid value
- return None
- if not isinstance(value, list):
- raise QuestionCreationValidationError(
- f"`value_descriptions` must be a list or None (got {value})."
- )
- if not all(isinstance(x, str) for x in value):
- raise QuestionCreationValidationError(
- f"`value_descriptions` must be a list of strings (got {value})."
- )
- return value
-
-
- class AnswerKeysDescriptor(BaseDescriptor):
- """Validate that the `answer_keys` attribute is a list of strings or integers."""
-
- def validate(self, value, instance):
- """Validate the value is a list of strings or integers."""
- if not isinstance(value, list):
- raise QuestionCreationValidationError(
- f"`answer_keys` must be a list (got {value})."
- )
- if not all(isinstance(x, (str, int)) for x in value):
- raise QuestionCreationValidationError(
- f"`answer_keys` must be a list of strings or integers (got {value})."
- )
-
-

  if __name__ == "__main__":
  import doctest

@@ -96,7 +96,7 @@ class Question(metaclass=Meta):

  >>> from edsl import Question
  >>> Question.list_question_types()
- ['checkbox', 'dict', 'extract', 'free_text', 'functional', 'likert_five', 'linear_scale', 'list', 'matrix', 'multiple_choice', 'numerical', 'rank', 'top_k', 'yes_no']
+ ['checkbox', 'extract', 'free_text', 'functional', 'likert_five', 'linear_scale', 'list', 'matrix', 'multiple_choice', 'numerical', 'rank', 'top_k', 'yes_no']
  """
  return [
  q
edsl/results/Result.py CHANGED
@@ -56,7 +56,6 @@ class Result(Base, UserDict):
  comments_dict: Optional[dict] = None,
  cache_used_dict: Optional[dict[QuestionName, bool]] = None,
  indices: Optional[dict] = None,
- cache_keys: Optional[dict[QuestionName, str]] = None,
  ):
  """Initialize a Result object.

@@ -91,7 +90,6 @@
  "generated_tokens": generated_tokens or {},
  "comments_dict": comments_dict or {},
  "cache_used_dict": cache_used_dict or {},
- "cache_keys": cache_keys or {},
  }
  super().__init__(**data)
  self.indices = indices
@@ -165,7 +163,6 @@

  def _construct_sub_dicts(self) -> dict[str, dict]:
  """Construct a dictionary of sub-dictionaries for the Result object."""
-
  sub_dicts_needing_new_keys = {
  "question_text": {},
  "question_options": {},
@@ -184,8 +181,6 @@
  f"{k}_cache_used": v for k, v in self.data["cache_used_dict"].items()
  }

- cache_keys = {f"{k}_cache_key": v for k, v in self.data["cache_keys"].items()}
-
  d = {
  **self._create_agent_sub_dict(self.data["agent"]),
  **self._create_model_sub_dict(self.data["model"]),
@@ -200,13 +195,11 @@
  "question_options": sub_dicts_needing_new_keys["question_options"],
  "question_type": sub_dicts_needing_new_keys["question_type"],
  "cache_used": new_cache_dict,
- "cache_keys": cache_keys,
  }
  if hasattr(self, "indices") and self.indices is not None:
  d["agent"].update({"agent_index": self.indices["agent"]})
  d["scenario"].update({"scenario_index": self.indices["scenario"]})
  d["model"].update({"model_index": self.indices["model"]})
-
  return d

  @property
@@ -413,7 +406,6 @@
  generated_tokens=json_dict.get("generated_tokens", {}),
  comments_dict=json_dict.get("comments_dict", {}),
  cache_used_dict=json_dict.get("cache_used_dict", {}),
- cache_keys=json_dict.get("cache_keys", {}),
  )
  return result

@@ -467,12 +459,6 @@
  question_results[result.question_name] = result
  return question_results

- def get_cache_keys(model_response_objects) -> dict[str, bool]:
- cache_keys = {}
- for result in model_response_objects:
- cache_keys[result.question_name] = result.cache_key
- return cache_keys
-
  def get_generated_tokens_dict(answer_key_names) -> dict[str, str]:
  generated_tokens_dict = {
  k + "_generated_tokens": question_results[k].generated_tokens
@@ -537,7 +523,6 @@
  generated_tokens_dict = get_generated_tokens_dict(answer_key_names)
  comments_dict = get_comments_dict(answer_key_names)
  answer_dict = {k: extracted_answers[k] for k in answer_key_names}
- cache_keys = get_cache_keys(model_response_objects)

  question_name_to_prompts = get_question_name_to_prompts(model_response_objects)
  prompt_dictionary = get_prompt_dictionary(
@@ -561,7 +546,6 @@
  comments_dict=comments_dict,
  cache_used_dict=cache_used_dictionary,
  indices=interview.indices,
- cache_keys=cache_keys,
  )
  result.interview_hash = interview.initial_hash
  return result
edsl/results/Results.py CHANGED
@@ -90,7 +90,6 @@ class Results(UserList, Mixins, Base):
  "comment",
  "generated_tokens",
  "cache_used",
- "cache_keys",
  ]

  def __init__(
@@ -110,7 +109,6 @@
  :param created_columns: A list of strings that are created columns.
  :param job_uuid: A string representing the job UUID.
  :param total_results: An integer representing the total number of results.
- :cache: A Cache object.
  """
  super().__init__(data)
  from edsl.data.Cache import Cache
@@ -140,16 +138,6 @@
  }
  return d

- def _cache_keys(self):
- cache_keys = []
- for result in self:
- cache_keys.extend(list(result["cache_keys"].values()))
- return cache_keys
-
- def relevant_cache(self, cache: Cache) -> Cache:
- cache_keys = self._cache_keys()
- return cache.subset(cache_keys)
-
  def insert(self, item):
  item_order = getattr(item, "order", None)
  if item_order is not None:
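Together with the `Cache.subset` and `Result` `cache_keys` removals above, this deletes the whole cache-subsetting pipeline: each result recorded one cache key per question, `_cache_keys` flattened them across results, and `relevant_cache` called `cache.subset(...)` to keep only the entries the run actually touched. A condensed sketch of that now-removed flow, reassembled from the deleted hunks with plain dicts standing in for Cache and Result:

```python
# Plain-dict restatement of the deleted helpers.
def cache_keys_of(results: list[dict]) -> list[str]:
    keys: list[str] = []
    for result in results:
        keys.extend(result["cache_keys"].values())  # one key per question
    return keys

def relevant_cache(cache_data: dict, results: list[dict]) -> dict:
    wanted = set(cache_keys_of(results))
    return {k: v for k, v in cache_data.items() if k in wanted}  # Cache.subset
```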
@@ -182,12 +170,12 @@
  """
  total_cost = 0
  for result in self:
- for key in result["raw_model_response"]:
+ for key in result.raw_model_response:
  if key.endswith("_cost"):
- result_cost = result["raw_model_response"][key]
+ result_cost = result.raw_model_response[key]

  question_name = key.removesuffix("_cost")
- cache_used = result["cache_used_dict"][question_name]
+ cache_used = result.cache_used_dict[question_name]

  if isinstance(result_cost, (int, float)):
  if include_cached_responses_in_cost:
@@ -361,7 +349,7 @@
  self,
  sort: bool = False,
  add_edsl_version: bool = False,
- include_cache: bool = True,
+ include_cache: bool = False,
  include_task_history: bool = False,
  include_cache_info: bool = True,
  ) -> dict[str, Any]: