edsl 0.1.39__py3-none-any.whl → 0.1.39.dev2__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (85)
  1. edsl/Base.py +0 -28
  2. edsl/__init__.py +1 -1
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +17 -9
  5. edsl/agents/Invigilator.py +14 -13
  6. edsl/agents/InvigilatorBase.py +1 -4
  7. edsl/agents/PromptConstructor.py +22 -42
  8. edsl/agents/QuestionInstructionPromptBuilder.py +1 -1
  9. edsl/auto/AutoStudy.py +5 -18
  10. edsl/auto/StageBase.py +40 -53
  11. edsl/auto/StageQuestions.py +1 -2
  12. edsl/auto/utilities.py +6 -0
  13. edsl/coop/coop.py +5 -21
  14. edsl/data/Cache.py +18 -29
  15. edsl/data/CacheHandler.py +2 -0
  16. edsl/data/RemoteCacheSync.py +46 -154
  17. edsl/enums.py +0 -7
  18. edsl/inference_services/AnthropicService.py +16 -38
  19. edsl/inference_services/AvailableModelFetcher.py +1 -7
  20. edsl/inference_services/GoogleService.py +1 -5
  21. edsl/inference_services/InferenceServicesCollection.py +2 -18
  22. edsl/inference_services/OpenAIService.py +31 -46
  23. edsl/inference_services/TestService.py +3 -1
  24. edsl/inference_services/TogetherAIService.py +3 -5
  25. edsl/inference_services/data_structures.py +2 -74
  26. edsl/jobs/AnswerQuestionFunctionConstructor.py +113 -148
  27. edsl/jobs/FetchInvigilator.py +3 -10
  28. edsl/jobs/InterviewsConstructor.py +4 -6
  29. edsl/jobs/Jobs.py +233 -299
  30. edsl/jobs/JobsChecks.py +2 -2
  31. edsl/jobs/JobsPrompts.py +1 -1
  32. edsl/jobs/JobsRemoteInferenceHandler.py +136 -160
  33. edsl/jobs/interviews/Interview.py +42 -80
  34. edsl/jobs/runners/JobsRunnerAsyncio.py +358 -88
  35. edsl/jobs/runners/JobsRunnerStatus.py +165 -133
  36. edsl/jobs/tasks/TaskHistory.py +3 -24
  37. edsl/language_models/LanguageModel.py +4 -59
  38. edsl/language_models/ModelList.py +8 -19
  39. edsl/language_models/__init__.py +1 -1
  40. edsl/language_models/registry.py +180 -0
  41. edsl/language_models/repair.py +1 -1
  42. edsl/questions/QuestionBase.py +26 -35
  43. edsl/questions/{question_base_gen_mixin.py → QuestionBaseGenMixin.py} +49 -52
  44. edsl/questions/QuestionBasePromptsMixin.py +1 -1
  45. edsl/questions/QuestionBudget.py +1 -1
  46. edsl/questions/QuestionCheckBox.py +2 -2
  47. edsl/questions/QuestionExtract.py +7 -5
  48. edsl/questions/QuestionFreeText.py +1 -1
  49. edsl/questions/QuestionList.py +15 -9
  50. edsl/questions/QuestionMatrix.py +1 -1
  51. edsl/questions/QuestionMultipleChoice.py +1 -1
  52. edsl/questions/QuestionNumerical.py +1 -1
  53. edsl/questions/QuestionRank.py +1 -1
  54. edsl/questions/{response_validator_abc.py → ResponseValidatorABC.py} +18 -6
  55. edsl/questions/{response_validator_factory.py → ResponseValidatorFactory.py} +1 -7
  56. edsl/questions/SimpleAskMixin.py +1 -1
  57. edsl/questions/__init__.py +1 -1
  58. edsl/results/DatasetExportMixin.py +119 -60
  59. edsl/results/Result.py +3 -109
  60. edsl/results/Results.py +39 -50
  61. edsl/scenarios/FileStore.py +0 -32
  62. edsl/scenarios/ScenarioList.py +7 -35
  63. edsl/scenarios/handlers/csv.py +0 -11
  64. edsl/surveys/Survey.py +20 -71
  65. {edsl-0.1.39.dist-info → edsl-0.1.39.dev2.dist-info}/METADATA +1 -1
  66. {edsl-0.1.39.dist-info → edsl-0.1.39.dev2.dist-info}/RECORD +78 -84
  67. {edsl-0.1.39.dist-info → edsl-0.1.39.dev2.dist-info}/WHEEL +1 -1
  68. edsl/jobs/async_interview_runner.py +0 -138
  69. edsl/jobs/check_survey_scenario_compatibility.py +0 -85
  70. edsl/jobs/data_structures.py +0 -120
  71. edsl/jobs/results_exceptions_handler.py +0 -98
  72. edsl/language_models/model.py +0 -256
  73. edsl/questions/data_structures.py +0 -20
  74. edsl/results/file_exports.py +0 -252
  75. /edsl/agents/{question_option_processor.py → QuestionOptionProcessor.py} +0 -0
  76. /edsl/questions/{answer_validator_mixin.py → AnswerValidatorMixin.py} +0 -0
  77. /edsl/questions/{loop_processor.py → LoopProcessor.py} +0 -0
  78. /edsl/questions/{register_questions_meta.py → RegisterQuestionsMeta.py} +0 -0
  79. /edsl/results/{results_fetch_mixin.py → ResultsFetchMixin.py} +0 -0
  80. /edsl/results/{results_tools_mixin.py → ResultsToolsMixin.py} +0 -0
  81. /edsl/results/{results_selector.py → Selector.py} +0 -0
  82. /edsl/scenarios/{directory_scanner.py → DirectoryScanner.py} +0 -0
  83. /edsl/scenarios/{scenario_join.py → ScenarioJoin.py} +0 -0
  84. /edsl/scenarios/{scenario_selector.py → ScenarioSelector.py} +0 -0
  85. {edsl-0.1.39.dist-info → edsl-0.1.39.dev2.dist-info}/LICENSE +0 -0
@@ -3,17 +3,31 @@ import time
  import asyncio
  import threading
  import warnings
- from typing import TYPE_CHECKING
+ from typing import Coroutine, List, AsyncGenerator, Optional, Union, Generator, Type
+ from uuid import UUID
+ from collections import UserList

  from edsl.results.Results import Results
- from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
+ from edsl.jobs.interviews.Interview import Interview
+ from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus, JobsRunnerStatusBase
+
  from edsl.jobs.tasks.TaskHistory import TaskHistory
+ from edsl.jobs.buckets.BucketCollection import BucketCollection
  from edsl.utilities.decorators import jupyter_nb_handler
- from edsl.jobs.async_interview_runner import AsyncInterviewRunner
- from edsl.jobs.data_structures import RunEnvironment, RunParameters, RunConfig
+ from edsl.data.Cache import Cache
+ from edsl.results.Result import Result
+ from edsl.results.Results import Results
+ from edsl.language_models.LanguageModel import LanguageModel
+ from edsl.data.Cache import Cache
+

- if TYPE_CHECKING:
-     from edsl.jobs.Jobs import Jobs
+ class StatusTracker(UserList):
+     def __init__(self, total_tasks: int):
+         self.total_tasks = total_tasks
+         super().__init__()
+
+     def current_status(self):
+         return print(f"Completed: {len(self.data)} of {self.total_tasks}", end="\r")


  class JobsRunnerAsyncio:
@@ -23,129 +37,385 @@ class JobsRunnerAsyncio:
      The Jobs object is a collection of interviews that are to be run.
      """

-     def __init__(self, jobs: "Jobs", environment: RunEnvironment):
+     def __init__(self, jobs: "Jobs", bucket_collection: "BucketCollection"):
          self.jobs = jobs
-         self.environment = environment
+         self.interviews: List["Interview"] = jobs.interviews()
+         self.bucket_collection: "BucketCollection" = bucket_collection

-     def __len__(self):
-         return len(self.jobs)
+         self.total_interviews: List["Interview"] = []
+         self._initialized = threading.Event()

-     async def run_async(self, parameters: RunParameters) -> Results:
-         """Used for some other modules that have a non-standard way of running interviews."""
+         from edsl.config import CONFIG

-         self.environment.jobs_runner_status = JobsRunnerStatus(self, n=parameters.n)
-         data = []
-         task_history = TaskHistory(include_traceback=False)
+         self.MAX_CONCURRENT = int(CONFIG.get("EDSL_MAX_CONCURRENT_TASKS"))

-         run_config = RunConfig(parameters=parameters, environment=self.environment)
-         result_generator = AsyncInterviewRunner(self.jobs, run_config)
+     async def run_async_generator(
+         self,
+         cache: Cache,
+         n: int = 1,
+         stop_on_exception: bool = False,
+         sidecar_model: Optional[LanguageModel] = None,
+         total_interviews: Optional[List["Interview"]] = None,
+         raise_validation_errors: bool = False,
+     ) -> AsyncGenerator["Result", None]:
+         """Creates and processes tasks asynchronously, yielding results as they complete.

-         async for result, interview in result_generator.run():
-             data.append(result)
-             task_history.add_interview(interview)
+         Tasks are created and processed in a streaming fashion rather than building the full list upfront.
+         Results are yielded as soon as they are available.
+
+         :param n: how many times to run each interview
+         :param stop_on_exception: Whether to stop the interview if an exception is raised
+         :param sidecar_model: a language model to use in addition to the interview's model
+         :param total_interviews: A list of interviews to run can be provided instead.
+         :param raise_validation_errors: Whether to raise validation errors
+         """
+         # Initialize interviews iterator
+         if total_interviews:
+             interviews_iter = iter(total_interviews)
+             self.total_interviews = total_interviews
+         else:
+             interviews_iter = self._populate_total_interviews(n=n)
+             self.total_interviews = list(interviews_iter)
+             interviews_iter = iter(self.total_interviews)  # Create fresh iterator

-         return Results(survey=self.jobs.survey, task_history=task_history, data=data)
+         self._initialized.set()  # Signal that we're ready
+
+         # Keep track of active tasks
+         active_tasks = set()
+
+         try:
+             while True:
+                 # Add new tasks if we're below max_concurrent and there are more interviews
+                 while len(active_tasks) < self.MAX_CONCURRENT:
+                     try:
+                         interview = next(interviews_iter)
+                         task = asyncio.create_task(
+                             self._build_interview_task(
+                                 interview=interview,
+                                 stop_on_exception=stop_on_exception,
+                                 sidecar_model=sidecar_model,
+                                 raise_validation_errors=raise_validation_errors,
+                             )
+                         )
+                         active_tasks.add(task)
+                         # Add callback to remove task from set when done
+                         task.add_done_callback(active_tasks.discard)
+                     except StopIteration:
+                         break
+
+                 if not active_tasks:
+                     break
+
+                 # Wait for next completed task
+                 done, _ = await asyncio.wait(
+                     active_tasks, return_when=asyncio.FIRST_COMPLETED
+                 )
+
+                 # Process completed tasks
+                 for task in done:
+                     try:
+                         result = await task
+                         self.jobs_runner_status.add_completed_interview(result)
+                         yield result
+                     except Exception as e:
+                         if stop_on_exception:
+                             # Cancel remaining tasks
+                             for t in active_tasks:
+                                 if not t.done():
+                                     t.cancel()
+                             raise
+                         else:
+                             # Log error and continue
+                             # logger.error(f"Task failed with error: {e}")
+                             continue
+         finally:
+             # Ensure we cancel any remaining tasks if we exit early
+             for task in active_tasks:
+                 if not task.done():
+                     task.cancel()
+
+     def _populate_total_interviews(
+         self, n: int = 1
+     ) -> Generator["Interview", None, None]:
+         """Populates self.total_interviews with n copies of each interview.
+
+         :param n: how many times to run each interview.
+         """
+         for interview in self.interviews:
+             for iteration in range(n):
+                 if iteration > 0:
+                     yield interview.duplicate(iteration=iteration, cache=self.cache)
+                 else:
+                     interview.cache = self.cache
+                     yield interview
+
+     async def run_async(self, cache: Optional[Cache] = None, n: int = 1) -> Results:
+         """Used for some other modules that have a non-standard way of running interviews."""
+         self.jobs_runner_status = JobsRunnerStatus(self, n=n)
+         self.cache = Cache() if cache is None else cache
+         data = []
+         async for result in self.run_async_generator(cache=self.cache, n=n):
+             data.append(result)
+         return Results(survey=self.jobs.survey, data=data)

      def simple_run(self):
          data = asyncio.run(self.run_async())
          return Results(survey=self.jobs.survey, data=data)

+     async def _build_interview_task(
+         self,
+         *,
+         interview: Interview,
+         stop_on_exception: bool = False,
+         sidecar_model: Optional["LanguageModel"] = None,
+         raise_validation_errors: bool = False,
+     ) -> "Result":
+         """Conducts an interview and returns the result.
+
+         :param interview: the interview to conduct
+         :param stop_on_exception: stops the interview if an exception is raised
+         :param sidecar_model: a language model to use in addition to the interview's model
+         """
+         # the model buckets are used to track usage rates
+         model_buckets = self.bucket_collection[interview.model]
+
+         # get the results of the interview
+         answer, valid_results = await interview.async_conduct_interview(
+             model_buckets=model_buckets,
+             stop_on_exception=stop_on_exception,
+             sidecar_model=sidecar_model,
+             raise_validation_errors=raise_validation_errors,
+         )
+
+         question_results = {}
+         for result in valid_results:
+             question_results[result.question_name] = result
+
+         answer_key_names = list(question_results.keys())
+
+         generated_tokens_dict = {
+             k + "_generated_tokens": question_results[k].generated_tokens
+             for k in answer_key_names
+         }
+         comments_dict = {
+             k + "_comment": question_results[k].comment for k in answer_key_names
+         }
+
+         # we should have a valid result for each question
+         answer_dict = {k: answer[k] for k in answer_key_names}
+         assert len(valid_results) == len(answer_key_names)
+
+         # TODO: move this down into Interview
+         question_name_to_prompts = dict({})
+         for result in valid_results:
+             question_name = result.question_name
+             question_name_to_prompts[question_name] = {
+                 "user_prompt": result.prompts["user_prompt"],
+                 "system_prompt": result.prompts["system_prompt"],
+             }
+
+         prompt_dictionary = {}
+         for answer_key_name in answer_key_names:
+             prompt_dictionary[
+                 answer_key_name + "_user_prompt"
+             ] = question_name_to_prompts[answer_key_name]["user_prompt"]
+             prompt_dictionary[
+                 answer_key_name + "_system_prompt"
+             ] = question_name_to_prompts[answer_key_name]["system_prompt"]
+
+         raw_model_results_dictionary = {}
+         cache_used_dictionary = {}
+         for result in valid_results:
+             question_name = result.question_name
+             raw_model_results_dictionary[
+                 question_name + "_raw_model_response"
+             ] = result.raw_model_response
+             raw_model_results_dictionary[question_name + "_cost"] = result.cost
+             one_use_buys = (
+                 "NA"
+                 if isinstance(result.cost, str)
+                 or result.cost == 0
+                 or result.cost is None
+                 else 1.0 / result.cost
+             )
+             raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
+             cache_used_dictionary[question_name] = result.cache_used
+
+         result = Result(
+             agent=interview.agent,
+             scenario=interview.scenario,
+             model=interview.model,
+             iteration=interview.iteration,
+             answer=answer_dict,
+             prompt=prompt_dictionary,
+             raw_model_response=raw_model_results_dictionary,
+             survey=interview.survey,
+             generated_tokens=generated_tokens_dict,
+             comments_dict=comments_dict,
+             cache_used_dict=cache_used_dictionary,
+             indices=interview.indices,
+         )
+         result.interview_hash = hash(interview)
+
+         return result
+
+     @property
+     def elapsed_time(self):
+         return time.monotonic() - self.start_time
+
+     def process_results(
+         self, raw_results: Results, cache: Cache, print_exceptions: bool
+     ):
+         interview_lookup = {
+             hash(interview): index
+             for index, interview in enumerate(self.total_interviews)
+         }
+         interview_hashes = list(interview_lookup.keys())
+
+         task_history = TaskHistory(self.total_interviews, include_traceback=False)
+
+         results = Results(
+             survey=self.jobs.survey,
+             data=sorted(
+                 raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
+             ),
+             task_history=task_history,
+             cache=cache,
+         )
+         results.bucket_collection = self.bucket_collection
+
+         if results.has_unfixed_exceptions and print_exceptions:
+             from edsl.scenarios.FileStore import HTMLFileStore
+             from edsl.config import CONFIG
+             from edsl.coop.coop import Coop
+
+             msg = f"Exceptions were raised in {len(results.task_history.indices)} out of {len(self.total_interviews)} interviews.\n"
+
+             if len(results.task_history.indices) > 5:
+                 msg += f"Exceptions were raised in the following interviews: {results.task_history.indices}.\n"
+
+             import sys
+
+             print(msg, file=sys.stderr)
+             from edsl.config import CONFIG
+
+             if CONFIG.get("EDSL_OPEN_EXCEPTION_REPORT_URL") == "True":
+                 open_in_browser = True
+             elif CONFIG.get("EDSL_OPEN_EXCEPTION_REPORT_URL") == "False":
+                 open_in_browser = False
+             else:
+                 raise Exception(
+                     "EDSL_OPEN_EXCEPTION_REPORT_URL", "must be either True or False"
+                 )
+
+             filepath = results.task_history.html(
+                 cta="Open report to see details.",
+                 open_in_browser=open_in_browser,
+                 return_link=True,
+             )
+
+             try:
+                 coop = Coop()
+                 user_edsl_settings = coop.edsl_settings
+                 remote_logging = user_edsl_settings["remote_logging"]
+             except Exception as e:
+                 print(e)
+                 remote_logging = False
+
+             if remote_logging:
+                 filestore = HTMLFileStore(filepath)
+                 coop_details = filestore.push(description="Error report")
+                 print(coop_details)
+
+             print("Also see: https://docs.expectedparrot.com/en/latest/exceptions.html")
+
+         return results
+
      @jupyter_nb_handler
-     async def run(self, parameters: RunParameters) -> Results:
+     async def run(
+         self,
+         cache: Union[Cache, False, None],
+         n: int = 1,
+         stop_on_exception: bool = False,
+         progress_bar: bool = False,
+         sidecar_model: Optional[LanguageModel] = None,
+         jobs_runner_status: Optional[Type[JobsRunnerStatusBase]] = None,
+         job_uuid: Optional[UUID] = None,
+         print_exceptions: bool = True,
+         raise_validation_errors: bool = False,
+     ) -> "Coroutine":
          """Runs a collection of interviews, handling both async and sync contexts."""

-         run_config = RunConfig(parameters=parameters, environment=self.environment)
-
+         self.results = []
          self.start_time = time.monotonic()
          self.completed = False
+         self.cache = cache
+         self.sidecar_model = sidecar_model

          from edsl.coop import Coop

          coop = Coop()
          endpoint_url = coop.get_progress_bar_url()

-         def set_up_jobs_runner_status(jobs_runner_status):
-             if jobs_runner_status is not None:
-                 return jobs_runner_status(
-                     self,
-                     n=parameters.n,
-                     endpoint_url=endpoint_url,
-                     job_uuid=parameters.job_uuid,
-                 )
-             else:
-                 return JobsRunnerStatus(
-                     self,
-                     n=parameters.n,
-                     endpoint_url=endpoint_url,
-                     job_uuid=parameters.job_uuid,
-                 )
+         if jobs_runner_status is not None:
+             self.jobs_runner_status = jobs_runner_status(
+                 self, n=n, endpoint_url=endpoint_url, job_uuid=job_uuid
+             )
+         else:
+             self.jobs_runner_status = JobsRunnerStatus(
+                 self, n=n, endpoint_url=endpoint_url, job_uuid=job_uuid
+             )

-         run_config.environment.jobs_runner_status = set_up_jobs_runner_status(
-             self.environment.jobs_runner_status
-         )
-
-         async def get_results(results) -> None:
-             """Conducted the interviews and append to the results list."""
-             result_generator = AsyncInterviewRunner(self.jobs, run_config)
-             async for result, interview in result_generator.run():
-                 results.append(result)
-                 results.task_history.add_interview(interview)
+         stop_event = threading.Event()

+         async def process_results(cache):
+             """Processes results from interviews."""
+             async for result in self.run_async_generator(
+                 n=n,
+                 stop_on_exception=stop_on_exception,
+                 cache=cache,
+                 sidecar_model=sidecar_model,
+                 raise_validation_errors=raise_validation_errors,
+             ):
+                 self.results.append(result)
              self.completed = True

-         def run_progress_bar(stop_event, jobs_runner_status) -> None:
+         def run_progress_bar(stop_event):
              """Runs the progress bar in a separate thread."""
-             jobs_runner_status.update_progress(stop_event)
-
-         def set_up_progress_bar(progress_bar: bool, jobs_runner_status):
-             progress_thread = None
-             if progress_bar and jobs_runner_status.has_ep_api_key():
-                 jobs_runner_status.setup()
-                 progress_thread = threading.Thread(
-                     target=run_progress_bar, args=(stop_event, jobs_runner_status)
-                 )
-                 progress_thread.start()
-             elif progress_bar:
-                 warnings.warn(
-                     "You need an Expected Parrot API key to view job progress bars."
-                 )
-             return progress_thread
+             self.jobs_runner_status.update_progress(stop_event)

-         results = Results(
-             survey=self.jobs.survey,
-             data=[],
-             task_history=TaskHistory(),
-             cache=self.environment.cache.new_entries_cache(),
-         )
-         stop_event = threading.Event()
-         progress_thread = set_up_progress_bar(
-             parameters.progress_bar, run_config.environment.jobs_runner_status
-         )
+         if progress_bar and self.jobs_runner_status.has_ep_api_key():
+             self.jobs_runner_status.setup()
+             progress_thread = threading.Thread(
+                 target=run_progress_bar, args=(stop_event,)
+             )
+             progress_thread.start()
+         elif progress_bar:
+             warnings.warn(
+                 "You need an Expected Parrot API key to view job progress bars."
+             )

          exception_to_raise = None
          try:
-             await get_results(results)
+             with cache as c:
+                 await process_results(cache=c)
          except KeyboardInterrupt:
              print("Keyboard interrupt received. Stopping gracefully...")
              stop_event.set()
          except Exception as e:
-             if parameters.stop_on_exception:
+             if stop_on_exception:
                  exception_to_raise = e
                  stop_event.set()
          finally:
              stop_event.set()
-             if progress_thread is not None:
-                 progress_thread.join()
+             if progress_bar and self.jobs_runner_status.has_ep_api_key():
+                 # self.jobs_runner_status.stop_event.set()
+                 if progress_thread:
+                     progress_thread.join()

          if exception_to_raise:
              raise exception_to_raise

-         results.cache = self.environment.cache.new_entries_cache()
-         results.bucket_collection = self.environment.bucket_collection
-
-         from edsl.jobs.results_exceptions_handler import ResultsExceptionsHandler
-
-         results_exceptions_handler = ResultsExceptionsHandler(results, parameters)
-
-         results_exceptions_handler.handle_exceptions()
-         return results
+         return self.process_results(
+             raw_results=self.results, cache=cache, print_exceptions=print_exceptions
+         )
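
Note on the hunk above: the added run_async_generator keeps at most EDSL_MAX_CONCURRENT_TASKS interview tasks in flight by pairing asyncio.create_task with asyncio.wait(..., return_when=asyncio.FIRST_COMPLETED) and yielding each result as its task finishes. The standalone sketch below (hypothetical names, not edsl code; it drops finished tasks via the pending set returned by asyncio.wait rather than a done-callback) illustrates the same bounded-concurrency streaming idea.

# Bounded-concurrency streaming sketch (illustrative only; names are hypothetical).
import asyncio
from typing import AsyncGenerator, Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")
R = TypeVar("R")


async def stream_results(
    make_task: Callable[[T], Awaitable[R]],
    items: Iterable[T],
    max_concurrent: int = 5,
) -> AsyncGenerator[R, None]:
    items_iter = iter(items)
    active: set = set()
    try:
        while True:
            # Top up the pool until the cap is reached or the input is exhausted.
            while len(active) < max_concurrent:
                try:
                    item = next(items_iter)
                except StopIteration:
                    break
                active.add(asyncio.create_task(make_task(item)))
            if not active:
                break
            # Wait for at least one task to finish, then yield its results.
            done, active = await asyncio.wait(
                active, return_when=asyncio.FIRST_COMPLETED
            )
            for task in done:
                yield await task  # re-raises the task's exception, if any
    finally:
        # Cancel anything still running if the consumer stops early.
        for task in active:
            task.cancel()


async def main() -> None:
    async def square(i: int) -> int:
        await asyncio.sleep(0.01)
        return i * i

    async for value in stream_results(square, range(10), max_concurrent=3):
        print(value)


if __name__ == "__main__":
    asyncio.run(main())

Replacing square with a coroutine that conducts one interview gives roughly the shape of the generator added in this diff.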