edsl 0.1.36.dev2__py3-none-any.whl → 0.1.36.dev4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/__version__.py +1 -1
- edsl/coop/PriceFetcher.py +14 -18
- edsl/coop/coop.py +7 -3
- edsl/data/RemoteCacheSync.py +84 -0
- edsl/exceptions/coop.py +8 -0
- edsl/jobs/Jobs.py +182 -165
- edsl/jobs/interviews/Interview.py +21 -3
- edsl/jobs/interviews/InterviewExceptionCollection.py +9 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +24 -6
- edsl/jobs/runners/JobsRunnerAsyncio.py +17 -23
- edsl/jobs/tasks/TaskHistory.py +22 -7
- edsl/results/Result.py +11 -9
- edsl/results/Results.py +13 -1
- {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/METADATA +1 -1
- {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/RECORD +17 -16
- {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/LICENSE +0 -0
- {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/WHEEL +0 -0
edsl/__version__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.1.36.dev2"
+__version__ = "0.1.36.dev4"

edsl/coop/PriceFetcher.py
CHANGED
@@ -16,30 +16,26 @@ class PriceFetcher:
         if self._cached_prices is not None:
             return self._cached_prices
 
+        import os
         import requests
-        import csv
-        from io import StringIO
-
-        sheet_id = "1SAO3Bhntefl0XQHJv27rMxpvu6uzKDWNXFHRa7jrUDs"
-
-        # Construct the URL to fetch the CSV
-        url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv"
+        from edsl import CONFIG
 
         try:
-            # Fetch the
-
+            # Fetch the pricing data
+            url = f"{CONFIG.EXPECTED_PARROT_URL}/api/v0/prices"
+            api_key = os.getenv("EXPECTED_PARROT_API_KEY")
+            headers = {}
+            if api_key:
+                headers["Authorization"] = f"Bearer {api_key}"
+            else:
+                headers["Authorization"] = f"Bearer None"
+
+            response = requests.get(url, headers=headers, timeout=20)
             response.raise_for_status()  # Raise an exception for bad responses
 
-            # Parse the
-
-            reader = csv.reader(csv_data)
-
-            # Convert to list of dictionaries
-            headers = next(reader)
-            data = [dict(zip(headers, row)) for row in reader]
+            # Parse the data
+            data = response.json()
 
-            # self._cached_prices = data
-            # return data
             price_lookup = {}
             for entry in data:
                 service = entry.get("service", None)

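For orientation, the price data fetched above is consumed later by the cost-estimation code in `Jobs.py` (see that section below), keyed by `(inference_service, model)` with per-side `one_usd_buys` token counts. A minimal sketch of that expected shape; the service, model, and numbers here are made up for illustration:

```python
# Illustrative only: concrete services, models, and prices are invented.
price_lookup = {
    ("openai", "gpt-4o"): {
        "input": {"one_usd_buys": 4_000_000},   # tokens one USD buys
        "output": {"one_usd_buys": 1_000_000},
    },
}

key = ("openai", "gpt-4o")
# The Jobs cost-estimation code inverts these to get a per-token price.
input_price_per_token = 1 / float(price_lookup[key]["input"]["one_usd_buys"])
output_price_per_token = 1 / float(price_lookup[key]["output"]["one_usd_buys"])
print(input_price_per_token, output_price_per_token)  # 2.5e-07 1e-06
```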
edsl/coop/coop.py
CHANGED
@@ -6,6 +6,7 @@ from typing import Any, Optional, Union, Literal
 from uuid import UUID
 import edsl
 from edsl import CONFIG, CacheEntry, Jobs, Survey
+from edsl.exceptions.coop import CoopNoUUIDError, CoopServerResponseError
 from edsl.coop.utils import (
     EDSLObject,
     ObjectRegistry,
@@ -99,7 +100,7 @@ class Coop:
         if "Authorization" in message:
             print(message)
             message = "Please provide an Expected Parrot API key."
-            raise
+            raise CoopServerResponseError(message)
 
     def _json_handle_none(self, value: Any) -> Any:
         """
@@ -116,7 +117,7 @@ class Coop:
         Resolve the uuid from a uuid or a url.
         """
         if not url and not uuid:
-            raise
+            raise CoopNoUUIDError("No uuid or url provided for the object.")
         if not uuid and url:
             uuid = url.split("/")[-1]
         return uuid
@@ -584,7 +585,10 @@ class Coop:
         )
         self._resolve_server_response(response)
         response_json = response.json()
-        return
+        return {
+            "credits": response_json.get("cost_in_credits"),
+            "usd": response_json.get("cost_in_usd"),
+        }
 
     ################
     # Remote Errors

edsl/data/RemoteCacheSync.py
ADDED
@@ -0,0 +1,84 @@
+class RemoteCacheSync:
+    def __init__(self, coop, cache, output_func, remote_cache=True, remote_cache_description=""):
+        self.coop = coop
+        self.cache = cache
+        self._output = output_func
+        self.remote_cache = remote_cache
+        self.old_entry_keys = []
+        self.new_cache_entries = []
+        self.remote_cache_description = remote_cache_description
+
+    def __enter__(self):
+        if self.remote_cache:
+            self._sync_from_remote()
+        self.old_entry_keys = list(self.cache.keys())
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.remote_cache:
+            self._sync_to_remote()
+        return False  # Propagate exceptions
+
+    def _sync_from_remote(self):
+        cache_difference = self.coop.remote_cache_get_diff(self.cache.keys())
+        client_missing_cacheentries = cache_difference.get("client_missing_cacheentries", [])
+        missing_entry_count = len(client_missing_cacheentries)
+
+        if missing_entry_count > 0:
+            self._output(
+                f"Updating local cache with {missing_entry_count:,} new "
+                f"{'entry' if missing_entry_count == 1 else 'entries'} from remote..."
+            )
+            self.cache.add_from_dict({entry.key: entry for entry in client_missing_cacheentries})
+            self._output("Local cache updated!")
+        else:
+            self._output("No new entries to add to local cache.")
+
+    def _sync_to_remote(self):
+        cache_difference = self.coop.remote_cache_get_diff(self.cache.keys())
+        server_missing_cacheentry_keys = cache_difference.get("server_missing_cacheentry_keys", [])
+        server_missing_cacheentries = [
+            entry
+            for key in server_missing_cacheentry_keys
+            if (entry := self.cache.data.get(key)) is not None
+        ]
+
+        new_cache_entries = [
+            entry for entry in self.cache.values() if entry.key not in self.old_entry_keys
+        ]
+        server_missing_cacheentries.extend(new_cache_entries)
+        new_entry_count = len(server_missing_cacheentries)
+
+        if new_entry_count > 0:
+            self._output(
+                f"Updating remote cache with {new_entry_count:,} new "
+                f"{'entry' if new_entry_count == 1 else 'entries'}..."
+            )
+            self.coop.remote_cache_create_many(
+                server_missing_cacheentries,
+                visibility="private",
+                description=self.remote_cache_description,
+            )
+            self._output("Remote cache updated!")
+        else:
+            self._output("No new entries to add to remote cache.")
+
+        self._output(f"There are {len(self.cache.keys()):,} entries in the local cache.")
+
+# # Usage example
+# def run_job(self, n, progress_bar, cache, stop_on_exception, sidecar_model, print_exceptions, raise_validation_errors, use_remote_cache=True):
+#     with RemoteCacheSync(self.coop, cache, self._output, remote_cache=use_remote_cache):
+#         self._output("Running job...")
+#         results = self._run_local(
+#             n=n,
+#             progress_bar=progress_bar,
+#             cache=cache,
+#             stop_on_exception=stop_on_exception,
+#             sidecar_model=sidecar_model,
+#             print_exceptions=print_exceptions,
+#             raise_validation_errors=raise_validation_errors,
+#         )
+#         self._output("Job completed!")
+
+#     results.cache = cache.new_entries_cache()
+#     return results

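For context, a minimal sketch of how this context manager is wired up from user-level code (mirroring the commented usage example above and the refactored `Jobs.run` shown below). It assumes a valid Expected Parrot API key; the interview-running step is elided:

```python
from edsl import Cache
from edsl.coop.coop import Coop
from edsl.data.RemoteCacheSync import RemoteCacheSync

cache = Cache()
# output_func can be any callable that accepts a message string.
with RemoteCacheSync(coop=Coop(), cache=cache, output_func=print, remote_cache=True):
    ...  # run interviews that read and write `cache`

# On __enter__, entries missing locally are pulled from the remote cache;
# on __exit__, new local entries are pushed back via remote_cache_create_many.
```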
edsl/exceptions/coop.py
CHANGED
edsl/jobs/Jobs.py
CHANGED
@@ -1,8 +1,10 @@
 # """The Jobs class is a collection of agents, scenarios and models and one survey."""
 from __future__ import annotations
 import warnings
+import requests
 from itertools import product
 from typing import Optional, Union, Sequence, Generator
+
 from edsl.Base import Base
 from edsl.exceptions import MissingAPIKeyError
 from edsl.jobs.buckets.BucketCollection import BucketCollection
@@ -10,6 +12,9 @@ from edsl.jobs.interviews.Interview import Interview
 from edsl.jobs.runners.JobsRunnerAsyncio import JobsRunnerAsyncio
 from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
 
+from edsl.data.RemoteCacheSync import RemoteCacheSync
+from edsl.exceptions.coop import CoopServerResponseError
+
 
 class Jobs(Base):
     """
@@ -203,10 +208,6 @@ class Jobs(Base):
             ]
         )
         return d
-        # if table:
-        #     d.to_scenario_list().print(format="rich")
-        # else:
-        #     return d
 
     def show_prompts(self) -> None:
         """Print the prompts."""
@@ -219,11 +220,11 @@
         price_lookup: dict,
         inference_service: str,
         model: str,
-    ):
+    ) -> dict:
         """Estimates the cost of a prompt. Takes piping into account."""
 
         def get_piping_multiplier(prompt: str):
-            """Returns 2 if a prompt includes Jinja
+            """Returns 2 if a prompt includes Jinja braces, and 1 otherwise."""
 
             if "{{" in prompt and "}}" in prompt:
                 return 2
@@ -231,9 +232,25 @@
 
         # Look up prices per token
         key = (inference_service, model)
-
-
-
+
+        try:
+            relevant_prices = price_lookup[key]
+            output_price_per_token = 1 / float(
+                relevant_prices["output"]["one_usd_buys"]
+            )
+            input_price_per_token = 1 / float(relevant_prices["input"]["one_usd_buys"])
+        except KeyError:
+            # A KeyError is likely to occur if we cannot retrieve prices (the price_lookup dict is empty)
+            # Use a sensible default
+
+            import warnings
+
+            warnings.warn(
+                "Price data could not be retrieved. Using default estimates for input and output token prices. Input: $0.15 / 1M tokens; Output: $0.60 / 1M tokens"
+            )
+
+            output_price_per_token = 0.00000015  # $0.15 / 1M tokens
+            input_price_per_token = 0.00000060  # $0.60 / 1M tokens
 
         # Compute the number of characters (double if the question involves piping)
         user_prompt_chars = len(str(user_prompt)) * get_piping_multiplier(
@@ -258,7 +275,7 @@
             "cost": cost,
         }
 
-    def estimate_job_cost_from_external_prices(self, price_lookup: dict):
+    def estimate_job_cost_from_external_prices(self, price_lookup: dict) -> dict:
         """
         Estimates the cost of a job according to the following assumptions:
 
@@ -341,7 +358,7 @@
 
         return output
 
-    def estimate_job_cost(self):
+    def estimate_job_cost(self) -> dict:
         """
         Estimates the cost of a job according to the following assumptions:
 
@@ -357,6 +374,25 @@
 
         return self.estimate_job_cost_from_external_prices(price_lookup=price_lookup)
 
+    @staticmethod
+    def compute_job_cost(job_results: "Results") -> float:
+        """
+        Computes the cost of a completed job in USD.
+        """
+        total_cost = 0
+        for result in job_results:
+            for key in result.raw_model_response:
+                if key.endswith("_cost"):
+                    result_cost = result.raw_model_response[key]
+
+                    question_name = key.removesuffix("_cost")
+                    cache_used = result.cache_used_dict[question_name]
+
+                    if isinstance(result_cost, (int, float)) and not cache_used:
+                        total_cost += result_cost
+
+        return total_cost
+
     @staticmethod
     def _get_container_class(object):
         from edsl.agents.AgentList import AgentList
@@ -580,7 +616,7 @@
 
     def _output(self, message) -> None:
         """Check if a Job is verbose. If so, print the message."""
-        if self.verbose:
+        if hasattr(self, "verbose") and self.verbose:
             print(message)
 
     def _check_parameters(self, strict=False, warn=False) -> None:
@@ -657,6 +693,122 @@
             return False
         return self._raise_validation_errors
 
+    def create_remote_inference_job(
+        self, iterations: int = 1, remote_inference_description: Optional[str] = None
+    ):
+        """ """
+        from edsl.coop.coop import Coop
+
+        coop = Coop()
+        self._output("Remote inference activated. Sending job to server...")
+        remote_job_creation_data = coop.remote_inference_create(
+            self,
+            description=remote_inference_description,
+            status="queued",
+            iterations=iterations,
+        )
+        return remote_job_creation_data
+
+    @staticmethod
+    def check_status(job_uuid):
+        from edsl.coop.coop import Coop
+
+        coop = Coop()
+        return coop.remote_inference_get(job_uuid)
+
+    def poll_remote_inference_job(
+        self, remote_job_creation_data: dict
+    ) -> Union[Results, None]:
+        from edsl.coop.coop import Coop
+        import time
+        from datetime import datetime
+        from edsl.config import CONFIG
+
+        expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")
+
+        job_uuid = remote_job_creation_data.get("uuid")
+
+        coop = Coop()
+        job_in_queue = True
+        while job_in_queue:
+            remote_job_data = coop.remote_inference_get(job_uuid)
+            status = remote_job_data.get("status")
+            if status == "cancelled":
+                print("\r" + " " * 80 + "\r", end="")
+                print("Job cancelled by the user.")
+                print(
+                    f"See {expected_parrot_url}/home/remote-inference for more details."
+                )
+                return None
+            elif status == "failed":
+                print("\r" + " " * 80 + "\r", end="")
+                print("Job failed.")
+                print(
+                    f"See {expected_parrot_url}/home/remote-inference for more details."
+                )
+                return None
+            elif status == "completed":
+                results_uuid = remote_job_data.get("results_uuid")
+                results = coop.get(results_uuid, expected_object_type="results")
+                print("\r" + " " * 80 + "\r", end="")
+                print(
+                    f"Job completed and Results stored on Coop (Results uuid={results_uuid})."
+                )
+                return results
+            else:
+                duration = 5
+                time_checked = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
+                frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+                start_time = time.time()
+                i = 0
+                while time.time() - start_time < duration:
+                    print(
+                        f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
+                        end="",
+                        flush=True,
+                    )
+                    time.sleep(0.1)
+                    i += 1
+
+    def use_remote_inference(self, disable_remote_inference: bool):
+        if disable_remote_inference:
+            return False
+        if not disable_remote_inference:
+            try:
+                from edsl import Coop
+
+                user_edsl_settings = Coop().edsl_settings
+                return user_edsl_settings.get("remote_inference", False)
+            except requests.ConnectionError:
+                pass
+            except CoopServerResponseError as e:
+                pass
+
+        return False
+
+    def use_remote_cache(self):
+        try:
+            from edsl import Coop
+
+            user_edsl_settings = Coop().edsl_settings
+            return user_edsl_settings.get("remote_caching", False)
+        except requests.ConnectionError:
+            pass
+        except CoopServerResponseError as e:
+            pass
+
+        return False
+
+    def check_api_keys(self):
+        from edsl import Model
+
+        for model in self.models + [Model()]:
+            if not model.has_valid_api_key():
+                raise MissingAPIKeyError(
+                    model_name=str(model.model),
+                    inference_service=model._inference_service_,
+                )
+
     def run(
         self,
         n: int = 1,
@@ -694,91 +846,17 @@
 
         self.verbose = verbose
 
-
-
-
-        if not disable_remote_inference:
-            try:
-                coop = Coop()
-                user_edsl_settings = Coop().edsl_settings
-                remote_cache = user_edsl_settings.get("remote_caching", False)
-                remote_inference = user_edsl_settings.get("remote_inference", False)
-            except Exception:
-                pass
-
-        if remote_inference:
-            import time
-            from datetime import datetime
-            from edsl.config import CONFIG
-
-            expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")
-
-            self._output("Remote inference activated. Sending job to server...")
-            if remote_cache:
-                self._output(
-                    "Remote caching activated. The remote cache will be used for this job."
-                )
-
-            remote_job_creation_data = coop.remote_inference_create(
-                self,
-                description=remote_inference_description,
-                status="queued",
-                iterations=n,
+        if remote_inference := self.use_remote_inference(disable_remote_inference):
+            remote_job_creation_data = self.create_remote_inference_job(
+                iterations=n, remote_inference_description=remote_inference_description
             )
-
-
-
-
-            job_in_queue = True
-            while job_in_queue:
-                remote_job_data = coop.remote_inference_get(job_uuid)
-                status = remote_job_data.get("status")
-                if status == "cancelled":
-                    print("\r" + " " * 80 + "\r", end="")
-                    print("Job cancelled by the user.")
-                    print(
-                        f"See {expected_parrot_url}/home/remote-inference for more details."
-                    )
-                    return None
-                elif status == "failed":
-                    print("\r" + " " * 80 + "\r", end="")
-                    print("Job failed.")
-                    print(
-                        f"See {expected_parrot_url}/home/remote-inference for more details."
-                    )
-                    return None
-                elif status == "completed":
-                    results_uuid = remote_job_data.get("results_uuid")
-                    results = coop.get(results_uuid, expected_object_type="results")
-                    print("\r" + " " * 80 + "\r", end="")
-                    print(
-                        f"Job completed and Results stored on Coop (Results uuid={results_uuid})."
-                    )
-                    return results
-                else:
-                    duration = 5
-                    time_checked = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
-                    frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
-                    start_time = time.time()
-                    i = 0
-                    while time.time() - start_time < duration:
-                        print(
-                            f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
-                            end="",
-                            flush=True,
-                        )
-                        time.sleep(0.1)
-                        i += 1
-        else:
-            if check_api_keys:
-                from edsl import Model
+            results = self.poll_remote_inference_job(remote_job_creation_data)
+            if results is None:
+                self._output("Job failed.")
+            return results
 
-
-
-                raise MissingAPIKeyError(
-                    model_name=str(model.model),
-                    inference_service=model._inference_service_,
-                )
+        if check_api_keys:
+            self.check_api_keys()
 
         # handle cache
         if cache is None or cache is True:
@@ -790,51 +868,14 @@
 
             cache = Cache()
 
-
-
-
-
-
-
-
-
-                raise_validation_errors=raise_validation_errors,
-            )
-
-            results.cache = cache.new_entries_cache()
-
-            self._output(f"There are {len(cache.keys()):,} entries in the local cache.")
-        else:
-            cache_difference = coop.remote_cache_get_diff(cache.keys())
-
-            client_missing_cacheentries = cache_difference.get(
-                "client_missing_cacheentries", []
-            )
-
-            missing_entry_count = len(client_missing_cacheentries)
-            if missing_entry_count > 0:
-                self._output(
-                    f"Updating local cache with {missing_entry_count:,} new "
-                    f"{'entry' if missing_entry_count == 1 else 'entries'} from remote..."
-                )
-                cache.add_from_dict(
-                    {entry.key: entry for entry in client_missing_cacheentries}
-                )
-                self._output("Local cache updated!")
-            else:
-                self._output("No new entries to add to local cache.")
-
-            server_missing_cacheentry_keys = cache_difference.get(
-                "server_missing_cacheentry_keys", []
-            )
-            server_missing_cacheentries = [
-                entry
-                for key in server_missing_cacheentry_keys
-                if (entry := cache.data.get(key)) is not None
-            ]
-            old_entry_keys = [key for key in cache.keys()]
-
-            self._output("Running job...")
+        remote_cache = self.use_remote_cache()
+        with RemoteCacheSync(
+            coop=Coop(),
+            cache=cache,
+            output_func=self._output,
+            remote_cache=remote_cache,
+            remote_cache_description=remote_cache_description,
+        ) as r:
             results = self._run_local(
                 n=n,
                 progress_bar=progress_bar,
@@ -844,32 +885,8 @@
                 print_exceptions=print_exceptions,
                 raise_validation_errors=raise_validation_errors,
             )
-            self._output("Job completed!")
-
-            new_cache_entries = list(
-                [entry for entry in cache.values() if entry.key not in old_entry_keys]
-            )
-            server_missing_cacheentries.extend(new_cache_entries)
-
-            new_entry_count = len(server_missing_cacheentries)
-            if new_entry_count > 0:
-                self._output(
-                    f"Updating remote cache with {new_entry_count:,} new "
-                    f"{'entry' if new_entry_count == 1 else 'entries'}..."
-                )
-                coop.remote_cache_create_many(
-                    server_missing_cacheentries,
-                    visibility="private",
-                    description=remote_cache_description,
-                )
-                self._output("Remote cache updated!")
-            else:
-                self._output("No new entries to add to remote cache.")
-
-            results.cache = cache.new_entries_cache()
-
-            self._output(f"There are {len(cache.keys()):,} entries in the local cache.")
 
+        results.cache = cache.new_entries_cache()
         return results
 
     def _run_local(self, *args, **kwargs):

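A short sketch of how the cost helpers added above are meant to be called from user code. It assumes `Jobs.example()` exists (as it does for other EDSL Base classes) and that model API keys are configured; running the job will otherwise raise:

```python
from edsl import Jobs

job = Jobs.example()

# Estimate before running: returns a dict, falling back to default per-token
# prices (with a warning) when price data cannot be retrieved.
estimate = job.estimate_job_cost()

# Compute the realized cost afterwards: sums the *_cost entries in each
# result's raw_model_response, skipping responses served from the cache.
results = job.run(disable_remote_inference=True)
print(Jobs.compute_job_cost(results))
```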
edsl/jobs/interviews/Interview.py
CHANGED
@@ -159,13 +159,13 @@ class Interview:
         return self.task_creators.interview_status
 
     # region: Serialization
-    def _to_dict(self, include_exceptions=
+    def _to_dict(self, include_exceptions=True) -> dict[str, Any]:
         """Return a dictionary representation of the Interview instance.
         This is just for hashing purposes.
 
         >>> i = Interview.example()
         >>> hash(i)
-
+        1217840301076717434
         """
         d = {
             "agent": self.agent._to_dict(),
@@ -177,11 +177,29 @@ class Interview:
         }
         if include_exceptions:
             d["exceptions"] = self.exceptions.to_dict()
+        return d
+
+    @classmethod
+    def from_dict(cls, d: dict[str, Any]) -> "Interview":
+        """Return an Interview instance from a dictionary."""
+        agent = Agent.from_dict(d["agent"])
+        survey = Survey.from_dict(d["survey"])
+        scenario = Scenario.from_dict(d["scenario"])
+        model = LanguageModel.from_dict(d["model"])
+        iteration = d["iteration"]
+        return cls(agent=agent, survey=survey, scenario=scenario, model=model, iteration=iteration)
 
     def __hash__(self) -> int:
         from edsl.utilities.utilities import dict_hash
 
-        return dict_hash(self._to_dict())
+        return dict_hash(self._to_dict(include_exceptions=False))
+
+    def __eq__(self, other: "Interview") -> bool:
+        """
+        >>> from edsl.jobs.interviews.Interview import Interview; i = Interview.example(); d = i._to_dict(); i2 = Interview.from_dict(d); i == i2
+        True
+        """
+        return hash(self) == hash(other)
 
     # endregion
 

edsl/jobs/interviews/InterviewExceptionCollection.py
CHANGED
@@ -33,6 +33,15 @@ class InterviewExceptionCollection(UserDict):
         """Return the collection of exceptions as a dictionary."""
         newdata = {k: [e.to_dict() for e in v] for k, v in self.data.items()}
         return newdata
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "InterviewExceptionCollection":
+        """Create an InterviewExceptionCollection from a dictionary."""
+        collection = cls()
+        for question_name, entries in data.items():
+            for entry in entries:
+                collection.add(question_name, InterviewExceptionEntry.from_dict(entry))
+        return collection
 
     def _repr_html_(self) -> str:
         from edsl.utilities.utilities import data_to_html

edsl/jobs/interviews/InterviewExceptionEntry.py
CHANGED
@@ -9,7 +9,6 @@ class InterviewExceptionEntry:
         self,
         *,
         exception: Exception,
-        # failed_question: FailedQuestion,
         invigilator: "Invigilator",
         traceback_format="text",
         answers=None,
@@ -133,22 +132,41 @@
         )
         console.print(tb)
         return html_output.getvalue()
+
+    @staticmethod
+    def serialize_exception(exception: Exception) -> dict:
+        return {
+            "type": type(exception).__name__,
+            "message": str(exception),
+            "traceback": "".join(traceback.format_exception(type(exception), exception, exception.__traceback__)),
+        }
+
+    @staticmethod
+    def deserialize_exception(data: dict) -> Exception:
+        exception_class = globals()[data["type"]]
+        return exception_class(data["message"])
 
     def to_dict(self) -> dict:
         """Return the exception as a dictionary.
 
         >>> entry = InterviewExceptionEntry.example()
-        >>> entry.to_dict()
-        ValueError()
-
+        >>> _ = entry.to_dict()
         """
         return {
-            "exception": self.exception,
+            "exception": self.serialize_exception(self.exception),
            "time": self.time,
            "traceback": self.traceback,
-            # "failed_question": self.failed_question.to_dict(),
            "invigilator": self.invigilator.to_dict(),
         }
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "InterviewExceptionEntry":
+        """Create an InterviewExceptionEntry from a dictionary."""
+        from edsl.agents.Invigilator import InvigilatorAI
+
+        exception = cls.deserialize_exception(data["exception"])
+        invigilator = InvigilatorAI.from_dict(data["invigilator"])
+        return cls(exception=exception, invigilator=invigilator)
 
     def push(self):
         from edsl import Coop

edsl/jobs/runners/JobsRunnerAsyncio.py
CHANGED
@@ -1,18 +1,12 @@
 from __future__ import annotations
 import time
-import math
 import asyncio
-import functools
 import threading
 from typing import Coroutine, List, AsyncGenerator, Optional, Union, Generator
 from contextlib import contextmanager
 from collections import UserList
 
-from rich.live import Live
-from rich.console import Console
-
 from edsl.results.Results import Results
-from edsl import shared_globals
 from edsl.jobs.interviews.Interview import Interview
 from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
 
@@ -25,7 +19,6 @@ from edsl.results.Results import Results
 from edsl.language_models.LanguageModel import LanguageModel
 from edsl.data.Cache import Cache
 
-
 class StatusTracker(UserList):
     def __init__(self, total_tasks: int):
         self.total_tasks = total_tasks
@@ -48,8 +41,6 @@ class JobsRunnerAsyncio:
         self.bucket_collection: "BucketCollection" = jobs.bucket_collection
         self.total_interviews: List["Interview"] = []
 
-        # self.jobs_runner_status = JobsRunnerStatus(self, n=1)
-
     async def run_async_generator(
         self,
         cache: Cache,
@@ -173,19 +164,20 @@
 
         prompt_dictionary = {}
         for answer_key_name in answer_key_names:
-            prompt_dictionary[
-                answer_key_name
-
-            prompt_dictionary[
-                answer_key_name
-
+            prompt_dictionary[answer_key_name + "_user_prompt"] = (
+                question_name_to_prompts[answer_key_name]["user_prompt"]
+            )
+            prompt_dictionary[answer_key_name + "_system_prompt"] = (
+                question_name_to_prompts[answer_key_name]["system_prompt"]
+            )
 
         raw_model_results_dictionary = {}
+        cache_used_dictionary = {}
         for result in valid_results:
             question_name = result.question_name
-            raw_model_results_dictionary[
-
-
+            raw_model_results_dictionary[question_name + "_raw_model_response"] = (
+                result.raw_model_response
+            )
             raw_model_results_dictionary[question_name + "_cost"] = result.cost
             one_use_buys = (
                 "NA"
@@ -195,6 +187,7 @@
                 else 1.0 / result.cost
             )
             raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
+            cache_used_dictionary[question_name] = result.cache_used
 
         result = Result(
             agent=interview.agent,
@@ -207,6 +200,7 @@
             survey=interview.survey,
             generated_tokens=generated_tokens_dict,
             comments_dict=comments_dict,
+            cache_used_dict=cache_used_dictionary,
         )
         result.interview_hash = hash(interview)
 
@@ -225,17 +219,16 @@
         }
         interview_hashes = list(interview_lookup.keys())
 
+        task_history = TaskHistory(self.total_interviews, include_traceback=False)
+
         results = Results(
             survey=self.jobs.survey,
             data=sorted(
                 raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
             ),
+            task_history=task_history,
+            cache=cache,
         )
-        results.cache = cache
-        results.task_history = TaskHistory(
-            self.total_interviews, include_traceback=False
-        )
-        results.has_unfixed_exceptions = results.task_history.has_unfixed_exceptions
         results.bucket_collection = self.bucket_collection
 
         if results.has_unfixed_exceptions and print_exceptions:
@@ -263,6 +256,7 @@
             except Exception as e:
                 print(e)
                 remote_logging = False
+
             if remote_logging:
                 filestore = HTMLFileStore(filepath)
                 coop_details = filestore.push(description="Error report")

edsl/jobs/tasks/TaskHistory.py
CHANGED
@@ -8,7 +8,7 @@ from edsl.jobs.tasks.task_status_enum import TaskStatus
 
 
 class TaskHistory:
-    def __init__(self, interviews: List["Interview"], include_traceback=False):
+    def __init__(self, interviews: List["Interview"], include_traceback:bool=False):
         """
         The structure of a TaskHistory exception
 
@@ -25,6 +25,8 @@ class TaskHistory:
 
     @classmethod
     def example(cls):
+        """
+        """
         from edsl.jobs.interviews.Interview import Interview
 
         from edsl.jobs.Jobs import Jobs
@@ -72,13 +74,27 @@
 
     def to_dict(self):
        """Return the TaskHistory as a dictionary."""
+        # return {
+        #     "exceptions": [
+        #         e.to_dict(include_traceback=self.include_traceback)
+        #         for e in self.exceptions
+        #     ],
+        #     "indices": self.indices,
+        # }
         return {
-            "exceptions": [
-                e.to_dict(include_traceback=self.include_traceback)
-                for e in self.exceptions
-            ],
-            "indices": self.indices,
+            'interviews': [i._to_dict() for i in self.total_interviews],
+            'include_traceback': self.include_traceback
         }
+
+    @classmethod
+    def from_dict(cls, data: dict):
+        """Create a TaskHistory from a dictionary."""
+        if data is None:
+            return cls([], include_traceback=False)
+
+        from edsl.jobs.interviews.Interview import Interview
+        interviews = [Interview.from_dict(i) for i in data['interviews']]
+        return cls(interviews, include_traceback=data['include_traceback'])
 
     @property
     def has_exceptions(self) -> bool:
@@ -259,7 +275,6 @@
                 question_type = interview.survey.get_question(
                     question_name
                 ).question_type
-                # breakpoint()
                 if (question_name, question_type) not in exceptions_by_question_name:
                     exceptions_by_question_name[(question_name, question_type)] = 0
                 exceptions_by_question_name[(question_name, question_type)] += len(

edsl/results/Result.py
CHANGED
@@ -75,6 +75,7 @@ class Result(Base, UserDict):
         question_to_attributes: Optional[dict] = None,
         generated_tokens: Optional[dict] = None,
         comments_dict: Optional[dict] = None,
+        cache_used_dict: Optional[dict] = None,
     ):
         """Initialize a Result object.
 
@@ -130,6 +131,7 @@
         self.question_to_attributes = question_to_attributes
         self.generated_tokens = generated_tokens
         self.comments_dict = comments_dict or {}
+        self.cache_used_dict = cache_used_dict or {}
 
         self._combined_dict = None
         self._problem_keys = None
@@ -153,15 +155,15 @@
             if key in self.question_to_attributes:
                 # You might be tempted to just use the naked key
                 # but this is a bad idea because it pollutes the namespace
-                question_text_dict[
-                    key
-
-                question_options_dict[
-                    key
-
-                question_type_dict[
-                    key
-
+                question_text_dict[key + "_question_text"] = (
+                    self.question_to_attributes[key]["question_text"]
+                )
+                question_options_dict[key + "_question_options"] = (
+                    self.question_to_attributes[key]["question_options"]
+                )
+                question_type_dict[key + "_question_type"] = (
+                    self.question_to_attributes[key]["question_type"]
+                )
 
         return {
             "agent": self.agent.traits

edsl/results/Results.py
CHANGED
@@ -29,6 +29,7 @@ from edsl.results.ResultsFetchMixin import ResultsFetchMixin
 from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
 from edsl.utilities.utilities import dict_hash
 
+
 from edsl.Base import Base
 
 
@@ -89,6 +90,7 @@ class Results(UserList, Mixins, Base):
         cache: Optional["Cache"] = None,
         job_uuid: Optional[str] = None,
         total_results: Optional[int] = None,
+        task_history: Optional["TaskHistory"] = None,
     ):
         """Instantiate a `Results` object with a survey and a list of `Result` objects.
 
@@ -100,6 +102,7 @@
         """
         super().__init__(data)
         from edsl.data.Cache import Cache
+        from edsl.jobs.tasks.TaskHistory import TaskHistory
 
         self.survey = survey
         self.created_columns = created_columns or []
@@ -107,6 +110,8 @@
         self._total_results = total_results
         self.cache = cache or Cache()
 
+        self.task_history = task_history or TaskHistory(interviews = [])
+
         if hasattr(self, "_add_output_functions"):
             self._add_output_functions()
 
@@ -276,6 +281,7 @@
             "survey": self.survey.to_dict(),
             "created_columns": self.created_columns,
             "cache": Cache() if not hasattr(self, "cache") else self.cache.to_dict(),
+            "task_history": self.task_history.to_dict(),
         }
 
     def compare(self, other_results):
@@ -295,6 +301,10 @@
             "b_not_a": [other_results[i] for i in indices_other],
         }
 
+    @property
+    def has_unfixed_exceptions(self):
+        return self.task_history.has_unfixed_exceptions
+
     @add_edsl_version
     def to_dict(self) -> dict[str, Any]:
         """Convert the Results object to a dictionary.
@@ -305,7 +315,7 @@
 
        >>> r = Results.example()
        >>> r.to_dict().keys()
-        dict_keys(['data', 'survey', 'created_columns', 'cache', 'edsl_version', 'edsl_class_name'])
+        dict_keys(['data', 'survey', 'created_columns', 'cache', 'task_history', 'edsl_version', 'edsl_class_name'])
        """
        return self._to_dict()
 
@@ -358,6 +368,7 @@
         """
         from edsl import Survey, Cache
         from edsl.results.Result import Result
+        from edsl.jobs.tasks.TaskHistory import TaskHistory
 
         try:
             results = cls(
@@ -367,6 +378,7 @@
                 cache=(
                     Cache.from_dict(data.get("cache")) if "cache" in data else Cache()
                 ),
+                task_history=TaskHistory.from_dict(data.get("task_history")),
             )
         except Exception as e:
             raise ResultsDeserializationError(f"Error in Results.from_dict: {e}")

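A quick sketch of the round trip these changes enable (the doctest above shows the new `task_history` key in the serialized form):

```python
from edsl.results.Results import Results

r = Results.example()
d = r.to_dict()                    # now includes a "task_history" entry
r2 = Results.from_dict(d)
print(r2.has_unfixed_exceptions)   # property delegating to the attached TaskHistory
```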
{edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/RECORD
CHANGED
@@ -2,7 +2,7 @@ edsl/Base.py,sha256=wdFpHWlQlGNL4XfOmYA0AK9YupMDxK3G7mDHCQp60o4,9295
 edsl/BaseDiff.py,sha256=RoVEh52UJs22yMa7k7jv8se01G62jJNWnBzaZngo-Ug,8260
 edsl/TemplateLoader.py,sha256=sDBlSMt7EfOduM7w3h6v03gvh_Rzn9hVrlS-iLSQdZA,849
 edsl/__init__.py,sha256=UZcx9RHSi3Dslh2lWvCOeppdMW9Xzw_YLs-kFaNW1MU,1770
-edsl/__version__.py,sha256=
+edsl/__version__.py,sha256=HBMCjiG9XIFS4hsly_bM8VF16Mlu-TRk3FhZ60DtWQE,28
 edsl/agents/Agent.py,sha256=dG3SbCm4IpHpObcWm-OejfYHtVXa5NlxGKYKOc-dUxQ,29311
 edsl/agents/AgentList.py,sha256=qo8VK3Ov0YOSbsBcHmlwLZBH81CcDfy5IEcx9AVH78M,10963
 edsl/agents/Invigilator.py,sha256=m4T-z4aNCGd4LKjLXVNI2VszYW-pQeScfcFAxkb0pWc,9080
@@ -44,13 +44,14 @@ edsl/conversation/Conversation.py,sha256=NdWH62XpcF6hoaG0ScMho_c3TO7PfBnbdlppUN-
 edsl/conversation/car_buying.py,sha256=Quh2Q8O9YoCyTKJUy3li376QFIOcL1gX0y89w3wlSl4,1950
 edsl/conversation/mug_negotiation.py,sha256=mjvAqErD4AjN3G2za2c-X-3axOShW-zAJUeiJqTxVPA,2616
 edsl/conversation/next_speaker_utilities.py,sha256=bqr5JglCd6bdLc9IZ5zGOAsmN2F4ERiubSMYvZIG7qk,3629
-edsl/coop/PriceFetcher.py,sha256=
+edsl/coop/PriceFetcher.py,sha256=pCCWBqFnSv8iYpgQKhAzVCdan1xTCNesZgmIB34N4HY,1770
 edsl/coop/__init__.py,sha256=4iZCwJSzJVyjBYk8ggGxY2kZjq9dXVT1jhyPDNyew4I,115
-edsl/coop/coop.py,sha256=
+edsl/coop/coop.py,sha256=Q0bcCiHkVC2lrwCoWigHr1SYeW92PydQDXRBY1V1tks,28741
 edsl/coop/utils.py,sha256=UZwljKYW_Yjw7RYcjOg3SW7fn1pyHQfJ1fM48TBNoss,3601
 edsl/data/Cache.py,sha256=jDt0LoZjLpGnM8-CraQEcsQaVg--U3BiBR1zHj0nDn8,16536
 edsl/data/CacheEntry.py,sha256=_5UiFaJQu_U-Z1_lEPt-h6Gaidp2Eunk02wOd3Ni3MQ,7252
 edsl/data/CacheHandler.py,sha256=DxbfeT2nZGRu8yQkbWr2tyEnhNiClevMsd5KZMCq2f0,4793
+edsl/data/RemoteCacheSync.py,sha256=V3Eznr1bCtSs0gnjdc_emmHND7l3fiK9samyPAVb6bo,3528
 edsl/data/SQLiteDict.py,sha256=V5Nfnxctgh4Iblqcw1KmbnkjtfmWrrombROSQ3mvg6A,8979
 edsl/data/__init__.py,sha256=KBNGGEuGHq--D-TlpAQmvv_If906dJc1Gsy028zOx78,170
 edsl/data/orm.py,sha256=Jz6rvw5SrlxwysTL0QI9r68EflKxeEBmf6j6himHDS8,238
@@ -59,7 +60,7 @@ edsl/enums.py,sha256=Z6nhaP8p3z0UJSfsCGb6VQUtGUKw3AK6yC0UDwOi05c,5247
 edsl/exceptions/__init__.py,sha256=HVg-U-rJ0fRoG9Rws6gnK5S9B68SkPWDPsoD6KpMZ-A,1370
 edsl/exceptions/agents.py,sha256=3SORFwFbMGrF6-vAL2GrKEVdPcXo7md_k2oYufnVXHA,673
 edsl/exceptions/configuration.py,sha256=qH2sInNTndKlCLAaNgaXHyRFdKQHL7-dElB_j8wz9g4,351
-edsl/exceptions/coop.py,sha256=
+edsl/exceptions/coop.py,sha256=xunPPrnbcNHn60wnH-Qw0rC_Ey99X_N7HnOBF8BQg7E,138
 edsl/exceptions/data.py,sha256=K24CjgwFiMWxrF1Z2dF6F7Vfrge_y9kMK_wsYYSaroU,209
 edsl/exceptions/general.py,sha256=zAyJnppPjjxQAn6X3A5fetmv5FUR7kQDU58vwBKvAks,1114
 edsl/exceptions/jobs.py,sha256=sSUATmzBIN1oINWuwPExxPqIWmfCo0XYj_yR4dJzVjo,803
@@ -87,26 +88,26 @@ edsl/inference_services/rate_limits_cache.py,sha256=HYslviz7mxF9U4CUTPAkoyBsiXjS
 edsl/inference_services/registry.py,sha256=Fn6va65MqD9lnFvT603ZnU7Ok8IW64M2MzOH57kf9-A,1240
 edsl/inference_services/write_available.py,sha256=NNwhATlaMp8IYY635MSx-oYxt5X15acjAfaqYCo_I1Y,285
 edsl/jobs/Answers.py,sha256=c4LpigQjdnMr7iJu8571C4FggGPVudfT7hbJgmgKW40,1821
-edsl/jobs/Jobs.py,sha256=
+edsl/jobs/Jobs.py,sha256=mZKyTxlcOgnB23JN6xVS4DiAJx1eDMJL7vMEF8D2PqU,39869
 edsl/jobs/__init__.py,sha256=aKuAyd_GoalGj-k7djOoVwEbFUE2XLPlikXaA1_8yAg,32
 edsl/jobs/buckets/BucketCollection.py,sha256=11CRisE1WAPcAlI3YJK3DVvu0AqSvv8KskXo4Q1waSk,2286
 edsl/jobs/buckets/ModelBuckets.py,sha256=hxw_tzc0V42CiB7mh5jIxlgwDVJ-zFZhlLtKrHEg8ho,2419
 edsl/jobs/buckets/TokenBucket.py,sha256=7fG4omzTcj5xC2iJLO9bfBkdAGs6Y3weXzlA3BgPr0E,9090
-edsl/jobs/interviews/Interview.py,sha256=
-edsl/jobs/interviews/InterviewExceptionCollection.py,sha256=
-edsl/jobs/interviews/InterviewExceptionEntry.py,sha256=
+edsl/jobs/interviews/Interview.py,sha256=nsDxbMF0iOEYpgXcmzKVwAtkvarvsWeSsr1rhUTaIak,25755
+edsl/jobs/interviews/InterviewExceptionCollection.py,sha256=HRhxuwR_CQXs22yKm1PCpbv3pgh5t0UTBRbdFhODRM0,3670
+edsl/jobs/interviews/InterviewExceptionEntry.py,sha256=vqtVnT35wUVMwc8YfVhoOgyCKCjpiBdyPHPd-PWpZJY,5589
 edsl/jobs/interviews/InterviewStatistic.py,sha256=hY5d2EkIJ96NilPpZAvZZzZoxLXM7ss3xx5MIcKtTPs,1856
 edsl/jobs/interviews/InterviewStatisticsCollection.py,sha256=_ZZ0fnZBQiIywP9Q_wWjpWhlfcPe2cn32GKut10t5RI,788
 edsl/jobs/interviews/InterviewStatusDictionary.py,sha256=MSyys4hOWe1d8gfsUvAPbcKrs8YiPnz8jpufBSJL7SU,2485
 edsl/jobs/interviews/InterviewStatusLog.py,sha256=6u0F8gf5tha39VQL-IK_QPkCsQAYVOx_IesX7TDDX_A,3252
 edsl/jobs/interviews/ReportErrors.py,sha256=RSzDU2rWwtjfztj7sqaMab0quCiY-X2bG3AEOxhTim8,1745
 edsl/jobs/interviews/interview_status_enum.py,sha256=KJ-1yLAHdX-p8TiFnM0M3v1tnBwkq4aMCuBX6-ytrI8,229
-edsl/jobs/runners/JobsRunnerAsyncio.py,sha256=
+edsl/jobs/runners/JobsRunnerAsyncio.py,sha256=6i9X8zDfl0cXWtVAZDzph0Ei-RIUHOsqsq3mtQNQ6D8,12744
 edsl/jobs/runners/JobsRunnerStatus.py,sha256=4eCh9sRpswGdKeSMW9pCGCAjJZa-OrWUPI7tsxIy_g4,12112
 edsl/jobs/runners/JobsRunnerStatusData.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 edsl/jobs/tasks/QuestionTaskCreator.py,sha256=K-xATHIXMWPTMOnms5UDW30eTIlIfebf7oOEfwrh1ME,10072
 edsl/jobs/tasks/TaskCreators.py,sha256=XqAbNU33378Z4PQncokbfJwnKt3KHR9aqa5fKYRDpfg,2694
-edsl/jobs/tasks/TaskHistory.py,sha256=
+edsl/jobs/tasks/TaskHistory.py,sha256=9HgXsggiuo77TX8HLXMG3NVeupzS8qLSf-lZPGlps0s,14796
 edsl/jobs/tasks/TaskStatusLog.py,sha256=bqH36a32F12fjX-M-4lNOhHaK2-WLFzKE-r0PxZPRjI,546
 edsl/jobs/tasks/task_status_enum.py,sha256=DOyrz61YlIS8R1W7izJNphcLrJ7I_ReUlfdRmk23h0Q,5333
 edsl/jobs/tokens/InterviewTokenUsage.py,sha256=u_6-IHpGFwZ6qMEXr24-jyLVUSSp4dSs_4iAZsBv7O4,1100
@@ -202,8 +203,8 @@ edsl/questions/templates/yes_no/question_presentation.jinja,sha256=hoEVj4GQD3EYn
 edsl/results/Dataset.py,sha256=XeCWNcni1rde9iVzmC1WTIne2cip4-f2gQL5iaJfXNw,9202
 edsl/results/DatasetExportMixin.py,sha256=-YR-UeuIW_8u0a8HnQ9R6V41DxCq22_AlsD48fXv0sw,25890
 edsl/results/DatasetTree.py,sha256=nwEgnWBqRXUxagSCEgqwikmIo8ztUxaF-QH-m-8myyQ,4985
-edsl/results/Result.py,sha256=
-edsl/results/Results.py,sha256=
+edsl/results/Result.py,sha256=85TlWtcNwCc98N-w3JF0APIkq5LmHfB8cXyW1T5s3f8,15576
+edsl/results/Results.py,sha256=XdPN_RCpYaQ00SWUdzuxFvReVv8__q-oq87-3Du_szY,41317
 edsl/results/ResultsDBMixin.py,sha256=Hc08aOiArBf9jbxI5uV4VL4wT6BLOkaaEgTMb3zyTUI,7922
 edsl/results/ResultsExportMixin.py,sha256=XizBsPNxziyffirMA4kS7UHpYM1WIE4s1K-B7TqTfDw,1266
 edsl/results/ResultsFetchMixin.py,sha256=VEa0TKDcXbnTinSKs9YaE4WjOSLmlp9Po1_9kklFvSo,848
@@ -272,7 +273,7 @@ edsl/utilities/interface.py,sha256=AaKpWiwWBwP2swNXmnFlIf3ZFsjfsR5bjXQAW47tD-8,1
 edsl/utilities/repair_functions.py,sha256=tftmklAqam6LOQQu_-9U44N-llycffhW8LfO63vBmNw,929
 edsl/utilities/restricted_python.py,sha256=5-_zUhrNbos7pLhDl9nr8d24auRlquR6w-vKkmNjPiA,2060
 edsl/utilities/utilities.py,sha256=gqMtWWNEZkWLiRR9vHW-VRNy2bStEPlJ-I2aK9CwFiQ,11367
-edsl-0.1.36.dev2.dist-info/LICENSE,sha256=
-edsl-0.1.36.dev2.dist-info/METADATA,sha256=
-edsl-0.1.36.dev2.dist-info/WHEEL,sha256=
-edsl-0.1.36.dev2.dist-info/RECORD,,
+edsl-0.1.36.dev4.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
+edsl-0.1.36.dev4.dist-info/METADATA,sha256=htWeZ0pTPW3Lzb4Ygw147ShH5d0Oqlo5WrlYePlLobc,4476
+edsl-0.1.36.dev4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+edsl-0.1.36.dev4.dist-info/RECORD,,

{edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/LICENSE
File without changes

{edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev4.dist-info}/WHEEL
File without changes