edsl 0.1.39.dev2__py3-none-any.whl → 0.1.39.dev4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
- edsl/Base.py +28 -0
- edsl/__init__.py +1 -1
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +8 -16
- edsl/agents/Invigilator.py +13 -14
- edsl/agents/InvigilatorBase.py +4 -1
- edsl/agents/PromptConstructor.py +42 -22
- edsl/agents/QuestionInstructionPromptBuilder.py +1 -1
- edsl/auto/AutoStudy.py +18 -5
- edsl/auto/StageBase.py +53 -40
- edsl/auto/StageQuestions.py +2 -1
- edsl/auto/utilities.py +0 -6
- edsl/coop/coop.py +21 -5
- edsl/data/Cache.py +29 -18
- edsl/data/CacheHandler.py +0 -2
- edsl/data/RemoteCacheSync.py +154 -46
- edsl/data/hack.py +10 -0
- edsl/enums.py +7 -0
- edsl/inference_services/AnthropicService.py +38 -16
- edsl/inference_services/AvailableModelFetcher.py +7 -1
- edsl/inference_services/GoogleService.py +5 -1
- edsl/inference_services/InferenceServicesCollection.py +18 -2
- edsl/inference_services/OpenAIService.py +46 -31
- edsl/inference_services/TestService.py +1 -3
- edsl/inference_services/TogetherAIService.py +5 -3
- edsl/inference_services/data_structures.py +74 -2
- edsl/jobs/AnswerQuestionFunctionConstructor.py +148 -113
- edsl/jobs/FetchInvigilator.py +10 -3
- edsl/jobs/InterviewsConstructor.py +6 -4
- edsl/jobs/Jobs.py +299 -233
- edsl/jobs/JobsChecks.py +2 -2
- edsl/jobs/JobsPrompts.py +1 -1
- edsl/jobs/JobsRemoteInferenceHandler.py +160 -136
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/interviews/Interview.py +80 -42
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +87 -357
- edsl/jobs/runners/JobsRunnerStatus.py +131 -164
- edsl/jobs/tasks/TaskHistory.py +24 -3
- edsl/language_models/LanguageModel.py +59 -4
- edsl/language_models/ModelList.py +19 -8
- edsl/language_models/__init__.py +1 -1
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +1 -1
- edsl/questions/QuestionBase.py +35 -26
- edsl/questions/QuestionBasePromptsMixin.py +1 -1
- edsl/questions/QuestionBudget.py +1 -1
- edsl/questions/QuestionCheckBox.py +2 -2
- edsl/questions/QuestionExtract.py +5 -7
- edsl/questions/QuestionFreeText.py +1 -1
- edsl/questions/QuestionList.py +9 -15
- edsl/questions/QuestionMatrix.py +1 -1
- edsl/questions/QuestionMultipleChoice.py +1 -1
- edsl/questions/QuestionNumerical.py +1 -1
- edsl/questions/QuestionRank.py +1 -1
- edsl/questions/SimpleAskMixin.py +1 -1
- edsl/questions/__init__.py +1 -1
- edsl/questions/data_structures.py +20 -0
- edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +52 -49
- edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +6 -18
- edsl/questions/{ResponseValidatorFactory.py → response_validator_factory.py} +7 -1
- edsl/results/DatasetExportMixin.py +60 -119
- edsl/results/Result.py +109 -3
- edsl/results/Results.py +50 -39
- edsl/results/file_exports.py +252 -0
- edsl/scenarios/ScenarioList.py +35 -7
- edsl/surveys/Survey.py +71 -20
- edsl/test_h +1 -0
- edsl/utilities/gcp_bucket/example.py +50 -0
- {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/METADATA +2 -2
- {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/RECORD +85 -76
- edsl/language_models/registry.py +0 -180
- /edsl/agents/{QuestionOptionProcessor.py → question_option_processor.py} +0 -0
- /edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +0 -0
- /edsl/questions/{LoopProcessor.py → loop_processor.py} +0 -0
- /edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +0 -0
- /edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +0 -0
- /edsl/results/{Selector.py → results_selector.py} +0 -0
- /edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +0 -0
- /edsl/scenarios/{DirectoryScanner.py → directory_scanner.py} +0 -0
- /edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +0 -0
- /edsl/scenarios/{ScenarioSelector.py → scenario_selector.py} +0 -0
- {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/LICENSE +0 -0
- {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/WHEEL +0 -0
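Several modules in this release are renamed from CamelCase to snake_case file names (the `{Old.py → new_name.py}` entries above). Code that imports by module path would need updating; a hedged example, assuming the class names inside the renamed modules are unchanged:

```python
# Illustrative only: the module file ResponseValidatorABC.py became
# response_validator_abc.py in 0.1.39.dev4 (see the rename list above).
# The class name itself is assumed to be unchanged.

# 0.1.39.dev2
# from edsl.questions.ResponseValidatorABC import ResponseValidatorABC

# 0.1.39.dev4
from edsl.questions.response_validator_abc import ResponseValidatorABC
```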
@@ -3,31 +3,17 @@ import time
 import asyncio
 import threading
 import warnings
-from typing import
-from uuid import UUID
-from collections import UserList
+from typing import TYPE_CHECKING

 from edsl.results.Results import Results
-from edsl.jobs.
-from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus, JobsRunnerStatusBase
-
+from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
 from edsl.jobs.tasks.TaskHistory import TaskHistory
-from edsl.jobs.buckets.BucketCollection import BucketCollection
 from edsl.utilities.decorators import jupyter_nb_handler
-from edsl.
-from edsl.
-from edsl.results.Results import Results
-from edsl.language_models.LanguageModel import LanguageModel
-from edsl.data.Cache import Cache
-
+from edsl.jobs.async_interview_runner import AsyncInterviewRunner
+from edsl.jobs.data_structures import RunEnvironment, RunParameters, RunConfig

-
-
-        self.total_tasks = total_tasks
-        super().__init__()
-
-    def current_status(self):
-        return print(f"Completed: {len(self.data)} of {self.total_tasks}", end="\r")
+if TYPE_CHECKING:
+    from edsl.jobs.Jobs import Jobs


 class JobsRunnerAsyncio:
@@ -37,385 +23,129 @@ class JobsRunnerAsyncio:

     The Jobs object is a collection of interviews that are to be run.
     """

-    def __init__(self, jobs: "Jobs",
+    def __init__(self, jobs: "Jobs", environment: RunEnvironment):
         self.jobs = jobs
-        self.
-        self.bucket_collection: "BucketCollection" = bucket_collection
-
-        self.total_interviews: List["Interview"] = []
-        self._initialized = threading.Event()
-
-        from edsl.config import CONFIG
-
-        self.MAX_CONCURRENT = int(CONFIG.get("EDSL_MAX_CONCURRENT_TASKS"))
-
-    async def run_async_generator(
-        self,
-        cache: Cache,
-        n: int = 1,
-        stop_on_exception: bool = False,
-        sidecar_model: Optional[LanguageModel] = None,
-        total_interviews: Optional[List["Interview"]] = None,
-        raise_validation_errors: bool = False,
-    ) -> AsyncGenerator["Result", None]:
-        """Creates and processes tasks asynchronously, yielding results as they complete.
-
-        Tasks are created and processed in a streaming fashion rather than building the full list upfront.
-        Results are yielded as soon as they are available.
-
-        :param n: how many times to run each interview
-        :param stop_on_exception: Whether to stop the interview if an exception is raised
-        :param sidecar_model: a language model to use in addition to the interview's model
-        :param total_interviews: A list of interviews to run can be provided instead.
-        :param raise_validation_errors: Whether to raise validation errors
-        """
-        # Initialize interviews iterator
-        if total_interviews:
-            interviews_iter = iter(total_interviews)
-            self.total_interviews = total_interviews
-        else:
-            interviews_iter = self._populate_total_interviews(n=n)
-            self.total_interviews = list(interviews_iter)
-            interviews_iter = iter(self.total_interviews)  # Create fresh iterator
+        self.environment = environment

-
+    def __len__(self):
+        return len(self.jobs)

-
-
-
-        try:
-            while True:
-                # Add new tasks if we're below max_concurrent and there are more interviews
-                while len(active_tasks) < self.MAX_CONCURRENT:
-                    try:
-                        interview = next(interviews_iter)
-                        task = asyncio.create_task(
-                            self._build_interview_task(
-                                interview=interview,
-                                stop_on_exception=stop_on_exception,
-                                sidecar_model=sidecar_model,
-                                raise_validation_errors=raise_validation_errors,
-                            )
-                        )
-                        active_tasks.add(task)
-                        # Add callback to remove task from set when done
-                        task.add_done_callback(active_tasks.discard)
-                    except StopIteration:
-                        break
-
-                if not active_tasks:
-                    break
-
-                # Wait for next completed task
-                done, _ = await asyncio.wait(
-                    active_tasks, return_when=asyncio.FIRST_COMPLETED
-                )
-
-                # Process completed tasks
-                for task in done:
-                    try:
-                        result = await task
-                        self.jobs_runner_status.add_completed_interview(result)
-                        yield result
-                    except Exception as e:
-                        if stop_on_exception:
-                            # Cancel remaining tasks
-                            for t in active_tasks:
-                                if not t.done():
-                                    t.cancel()
-                            raise
-                        else:
-                            # Log error and continue
-                            # logger.error(f"Task failed with error: {e}")
-                            continue
-        finally:
-            # Ensure we cancel any remaining tasks if we exit early
-            for task in active_tasks:
-                if not task.done():
-                    task.cancel()
+    async def run_async(self, parameters: RunParameters) -> Results:
+        """Used for some other modules that have a non-standard way of running interviews."""

-
-
-
-        """Populates self.total_interviews with n copies of each interview.
+        self.environment.jobs_runner_status = JobsRunnerStatus(self, n=parameters.n)
+        data = []
+        task_history = TaskHistory(include_traceback=False)

-
-
-        for interview in self.interviews:
-            for iteration in range(n):
-                if iteration > 0:
-                    yield interview.duplicate(iteration=iteration, cache=self.cache)
-                else:
-                    interview.cache = self.cache
-                    yield interview
+        run_config = RunConfig(parameters=parameters, environment=self.environment)
+        result_generator = AsyncInterviewRunner(self.jobs, run_config)

-
-        """Used for some other modules that have a non-standard way of running interviews."""
-        self.jobs_runner_status = JobsRunnerStatus(self, n=n)
-        self.cache = Cache() if cache is None else cache
-        data = []
-        async for result in self.run_async_generator(cache=self.cache, n=n):
+        async for result, interview in result_generator.run():
             data.append(result)
-
+            task_history.add_interview(interview)
+
+        return Results(survey=self.jobs.survey, task_history=task_history, data=data)

     def simple_run(self):
         data = asyncio.run(self.run_async())
         return Results(survey=self.jobs.survey, data=data)

-    async def _build_interview_task(
-        self,
-        *,
-        interview: Interview,
-        stop_on_exception: bool = False,
-        sidecar_model: Optional["LanguageModel"] = None,
-        raise_validation_errors: bool = False,
-    ) -> "Result":
-        """Conducts an interview and returns the result.
-
-        :param interview: the interview to conduct
-        :param stop_on_exception: stops the interview if an exception is raised
-        :param sidecar_model: a language model to use in addition to the interview's model
-        """
-        # the model buckets are used to track usage rates
-        model_buckets = self.bucket_collection[interview.model]
-
-        # get the results of the interview
-        answer, valid_results = await interview.async_conduct_interview(
-            model_buckets=model_buckets,
-            stop_on_exception=stop_on_exception,
-            sidecar_model=sidecar_model,
-            raise_validation_errors=raise_validation_errors,
-        )
-
-        question_results = {}
-        for result in valid_results:
-            question_results[result.question_name] = result
-
-        answer_key_names = list(question_results.keys())
-
-        generated_tokens_dict = {
-            k + "_generated_tokens": question_results[k].generated_tokens
-            for k in answer_key_names
-        }
-        comments_dict = {
-            k + "_comment": question_results[k].comment for k in answer_key_names
-        }
-
-        # we should have a valid result for each question
-        answer_dict = {k: answer[k] for k in answer_key_names}
-        assert len(valid_results) == len(answer_key_names)
-
-        # TODO: move this down into Interview
-        question_name_to_prompts = dict({})
-        for result in valid_results:
-            question_name = result.question_name
-            question_name_to_prompts[question_name] = {
-                "user_prompt": result.prompts["user_prompt"],
-                "system_prompt": result.prompts["system_prompt"],
-            }
-
-        prompt_dictionary = {}
-        for answer_key_name in answer_key_names:
-            prompt_dictionary[
-                answer_key_name + "_user_prompt"
-            ] = question_name_to_prompts[answer_key_name]["user_prompt"]
-            prompt_dictionary[
-                answer_key_name + "_system_prompt"
-            ] = question_name_to_prompts[answer_key_name]["system_prompt"]
-
-        raw_model_results_dictionary = {}
-        cache_used_dictionary = {}
-        for result in valid_results:
-            question_name = result.question_name
-            raw_model_results_dictionary[
-                question_name + "_raw_model_response"
-            ] = result.raw_model_response
-            raw_model_results_dictionary[question_name + "_cost"] = result.cost
-            one_use_buys = (
-                "NA"
-                if isinstance(result.cost, str)
-                or result.cost == 0
-                or result.cost is None
-                else 1.0 / result.cost
-            )
-            raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
-            cache_used_dictionary[question_name] = result.cache_used
-
-        result = Result(
-            agent=interview.agent,
-            scenario=interview.scenario,
-            model=interview.model,
-            iteration=interview.iteration,
-            answer=answer_dict,
-            prompt=prompt_dictionary,
-            raw_model_response=raw_model_results_dictionary,
-            survey=interview.survey,
-            generated_tokens=generated_tokens_dict,
-            comments_dict=comments_dict,
-            cache_used_dict=cache_used_dictionary,
-            indices=interview.indices,
-        )
-        result.interview_hash = hash(interview)
-
-        return result
-
-    @property
-    def elapsed_time(self):
-        return time.monotonic() - self.start_time
-
-    def process_results(
-        self, raw_results: Results, cache: Cache, print_exceptions: bool
-    ):
-        interview_lookup = {
-            hash(interview): index
-            for index, interview in enumerate(self.total_interviews)
-        }
-        interview_hashes = list(interview_lookup.keys())
-
-        task_history = TaskHistory(self.total_interviews, include_traceback=False)
-
-        results = Results(
-            survey=self.jobs.survey,
-            data=sorted(
-                raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
-            ),
-            task_history=task_history,
-            cache=cache,
-        )
-        results.bucket_collection = self.bucket_collection
-
-        if results.has_unfixed_exceptions and print_exceptions:
-            from edsl.scenarios.FileStore import HTMLFileStore
-            from edsl.config import CONFIG
-            from edsl.coop.coop import Coop
-
-            msg = f"Exceptions were raised in {len(results.task_history.indices)} out of {len(self.total_interviews)} interviews.\n"
-
-            if len(results.task_history.indices) > 5:
-                msg += f"Exceptions were raised in the following interviews: {results.task_history.indices}.\n"
-
-            import sys
-
-            print(msg, file=sys.stderr)
-            from edsl.config import CONFIG
-
-            if CONFIG.get("EDSL_OPEN_EXCEPTION_REPORT_URL") == "True":
-                open_in_browser = True
-            elif CONFIG.get("EDSL_OPEN_EXCEPTION_REPORT_URL") == "False":
-                open_in_browser = False
-            else:
-                raise Exception(
-                    "EDSL_OPEN_EXCEPTION_REPORT_URL", "must be either True or False"
-                )
-
-            filepath = results.task_history.html(
-                cta="Open report to see details.",
-                open_in_browser=open_in_browser,
-                return_link=True,
-            )
-
-            try:
-                coop = Coop()
-                user_edsl_settings = coop.edsl_settings
-                remote_logging = user_edsl_settings["remote_logging"]
-            except Exception as e:
-                print(e)
-                remote_logging = False
-
-            if remote_logging:
-                filestore = HTMLFileStore(filepath)
-                coop_details = filestore.push(description="Error report")
-                print(coop_details)
-
-            print("Also see: https://docs.expectedparrot.com/en/latest/exceptions.html")
-
-        return results
-
     @jupyter_nb_handler
-    async def run(
-        self,
-        cache: Union[Cache, False, None],
-        n: int = 1,
-        stop_on_exception: bool = False,
-        progress_bar: bool = False,
-        sidecar_model: Optional[LanguageModel] = None,
-        jobs_runner_status: Optional[Type[JobsRunnerStatusBase]] = None,
-        job_uuid: Optional[UUID] = None,
-        print_exceptions: bool = True,
-        raise_validation_errors: bool = False,
-    ) -> "Coroutine":
+    async def run(self, parameters: RunParameters) -> Results:
         """Runs a collection of interviews, handling both async and sync contexts."""

-
+        run_config = RunConfig(parameters=parameters, environment=self.environment)
+
         self.start_time = time.monotonic()
         self.completed = False
-        self.cache = cache
-        self.sidecar_model = sidecar_model

         from edsl.coop import Coop

         coop = Coop()
         endpoint_url = coop.get_progress_bar_url()

-
-
-
-
-
-
-
-
+        def set_up_jobs_runner_status(jobs_runner_status):
+            if jobs_runner_status is not None:
+                return jobs_runner_status(
+                    self,
+                    n=parameters.n,
+                    endpoint_url=endpoint_url,
+                    job_uuid=parameters.job_uuid,
+                )
+            else:
+                return JobsRunnerStatus(
+                    self,
+                    n=parameters.n,
+                    endpoint_url=endpoint_url,
+                    job_uuid=parameters.job_uuid,
+                )

-
+        run_config.environment.jobs_runner_status = set_up_jobs_runner_status(
+            self.environment.jobs_runner_status
+        )
+
+        async def get_results(results) -> None:
+            """Conducted the interviews and append to the results list."""
+            result_generator = AsyncInterviewRunner(self.jobs, run_config)
+            async for result, interview in result_generator.run():
+                results.append(result)
+                results.task_history.add_interview(interview)

-        async def process_results(cache):
-            """Processes results from interviews."""
-            async for result in self.run_async_generator(
-                n=n,
-                stop_on_exception=stop_on_exception,
-                cache=cache,
-                sidecar_model=sidecar_model,
-                raise_validation_errors=raise_validation_errors,
-            ):
-                self.results.append(result)
             self.completed = True

-        def run_progress_bar(stop_event):
+        def run_progress_bar(stop_event) -> None:
            """Runs the progress bar in a separate thread."""
            self.jobs_runner_status.update_progress(stop_event)

-
-
-
-
-
-
-
-
-
-
+        def set_up_progress_bar(progress_bar: bool, jobs_runner_status):
+            progress_thread = None
+            if progress_bar and jobs_runner_status.has_ep_api_key():
+                jobs_runner_status.setup()
+                progress_thread = threading.Thread(
+                    target=run_progress_bar, args=(stop_event,)
+                )
+                progress_thread.start()
+            elif progress_bar:
+                warnings.warn(
+                    "You need an Expected Parrot API key to view job progress bars."
+                )
+            return progress_thread
+
+        results = Results(
+            survey=self.jobs.survey,
+            data=[],
+            task_history=TaskHistory(),
+            cache=self.environment.cache.new_entries_cache(),
+        )
+        stop_event = threading.Event()
+        progress_thread = set_up_progress_bar(
+            parameters.progress_bar, run_config.environment.jobs_runner_status
+        )

         exception_to_raise = None
         try:
-
-            await process_results(cache=c)
+            await get_results(results)
         except KeyboardInterrupt:
             print("Keyboard interrupt received. Stopping gracefully...")
             stop_event.set()
         except Exception as e:
-            if stop_on_exception:
+            if parameters.stop_on_exception:
                 exception_to_raise = e
             stop_event.set()
         finally:
             stop_event.set()
-            if
-
-            if progress_thread:
-                progress_thread.join()
+            if progress_thread is not None:
+                progress_thread.join()

             if exception_to_raise:
                 raise exception_to_raise

-
-
-
+            results.cache = self.environment.cache.new_entries_cache()
+            results.bucket_collection = self.environment.bucket_collection
+
+            from edsl.jobs.results_exceptions_handler import ResultsExceptionsHandler
+
+            results_exceptions_handler = ResultsExceptionsHandler(results, parameters)
+
+            results_exceptions_handler.handle_exceptions()
+            return results