edsl 0.1.44__py3-none-any.whl → 0.1.46__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +7 -3
- edsl/__version__.py +1 -1
- edsl/agents/InvigilatorBase.py +3 -1
- edsl/agents/PromptConstructor.py +66 -91
- edsl/agents/QuestionInstructionPromptBuilder.py +160 -79
- edsl/agents/QuestionTemplateReplacementsBuilder.py +80 -17
- edsl/agents/question_option_processor.py +15 -6
- edsl/coop/CoopFunctionsMixin.py +3 -4
- edsl/coop/coop.py +171 -96
- edsl/data/RemoteCacheSync.py +10 -9
- edsl/enums.py +3 -3
- edsl/inference_services/AnthropicService.py +11 -9
- edsl/inference_services/AvailableModelFetcher.py +2 -0
- edsl/inference_services/AwsBedrock.py +1 -2
- edsl/inference_services/AzureAI.py +12 -9
- edsl/inference_services/GoogleService.py +9 -4
- edsl/inference_services/InferenceServicesCollection.py +2 -2
- edsl/inference_services/MistralAIService.py +1 -2
- edsl/inference_services/OpenAIService.py +9 -4
- edsl/inference_services/PerplexityService.py +2 -1
- edsl/inference_services/{GrokService.py → XAIService.py} +2 -2
- edsl/inference_services/registry.py +2 -2
- edsl/jobs/AnswerQuestionFunctionConstructor.py +12 -1
- edsl/jobs/Jobs.py +24 -17
- edsl/jobs/JobsChecks.py +10 -13
- edsl/jobs/JobsPrompts.py +49 -26
- edsl/jobs/JobsRemoteInferenceHandler.py +4 -5
- edsl/jobs/async_interview_runner.py +3 -1
- edsl/jobs/check_survey_scenario_compatibility.py +5 -5
- edsl/jobs/data_structures.py +3 -0
- edsl/jobs/interviews/Interview.py +6 -3
- edsl/jobs/interviews/InterviewExceptionEntry.py +12 -0
- edsl/jobs/tasks/TaskHistory.py +1 -1
- edsl/language_models/LanguageModel.py +6 -3
- edsl/language_models/PriceManager.py +45 -5
- edsl/language_models/model.py +47 -26
- edsl/questions/QuestionBase.py +21 -0
- edsl/questions/QuestionBasePromptsMixin.py +103 -0
- edsl/questions/QuestionFreeText.py +22 -5
- edsl/questions/descriptors.py +4 -0
- edsl/questions/question_base_gen_mixin.py +96 -29
- edsl/results/Dataset.py +65 -0
- edsl/results/DatasetExportMixin.py +320 -32
- edsl/results/Result.py +27 -0
- edsl/results/Results.py +22 -2
- edsl/results/ResultsGGMixin.py +7 -3
- edsl/scenarios/DocumentChunker.py +2 -0
- edsl/scenarios/FileStore.py +10 -0
- edsl/scenarios/PdfExtractor.py +21 -1
- edsl/scenarios/Scenario.py +25 -9
- edsl/scenarios/ScenarioList.py +226 -24
- edsl/scenarios/handlers/__init__.py +1 -0
- edsl/scenarios/handlers/docx.py +5 -1
- edsl/scenarios/handlers/jpeg.py +39 -0
- edsl/surveys/Survey.py +5 -4
- edsl/surveys/SurveyFlowVisualization.py +91 -43
- edsl/templates/error_reporting/exceptions_table.html +7 -8
- edsl/templates/error_reporting/interview_details.html +1 -1
- edsl/templates/error_reporting/interviews.html +0 -1
- edsl/templates/error_reporting/overview.html +2 -7
- edsl/templates/error_reporting/performance_plot.html +1 -1
- edsl/templates/error_reporting/report.css +1 -1
- edsl/utilities/PrettyList.py +14 -0
- edsl-0.1.46.dist-info/METADATA +246 -0
- {edsl-0.1.44.dist-info → edsl-0.1.46.dist-info}/RECORD +67 -66
- edsl-0.1.44.dist-info/METADATA +0 -110
- {edsl-0.1.44.dist-info → edsl-0.1.46.dist-info}/LICENSE +0 -0
- {edsl-0.1.44.dist-info → edsl-0.1.46.dist-info}/WHEEL +0 -0
edsl/agents/QuestionTemplateReplacementsBuilder.py CHANGED
@@ -4,19 +4,53 @@ from typing import Any, Set, TYPE_CHECKING
 if TYPE_CHECKING:
     from edsl.agents.PromptConstructor import PromptConstructor
     from edsl.scenarios.Scenario import Scenario
+    from edsl.questions.QuestionBase import QuestionBase
+    from edsl.agents.Agent import Agent
 
 
 class QuestionTemplateReplacementsBuilder:
-    …
-    …
+
+    @classmethod
+    def from_prompt_constructor(cls, prompt_constructor: "PromptConstructor"):
+        scenario = prompt_constructor.scenario
+        question = prompt_constructor.question
+        prior_answers_dict = prompt_constructor.prior_answers_dict()
+        agent = prompt_constructor.agent
+
+        return cls(scenario, question, prior_answers_dict, agent)
+
+    def __init__(
+        self,
+        scenario: "Scenario",
+        question: "QuestionBase",
+        prior_answers_dict: dict,
+        agent: "Agent",
+    ):
+        self.scenario = scenario
+        self.question = question
+        self.prior_answers_dict = prior_answers_dict
+        self.agent = agent
 
     def question_file_keys(self):
-        …
-        …
+        """
+        >>> from edsl import QuestionMultipleChoice, Scenario
+        >>> q = QuestionMultipleChoice(question_text="Do you like school?", question_name = "q0", question_options = ["yes", "no"])
+        >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = {"file1": "file1"}, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+        >>> qtrb.question_file_keys()
+        []
+        >>> from edsl import FileStore
+        >>> fs = FileStore.example()
+        >>> q = QuestionMultipleChoice(question_text="What do you think of this file: {{ file1 }}", question_name = "q0", question_options = ["good", "bad"])
+        >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = Scenario({"file1": fs}), question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+        >>> qtrb.question_file_keys()
+        ['file1']
+        """
+        question_text = self.question.question_text
+        file_keys = self._find_file_keys(self.scenario)
         return self._extract_file_keys_from_question_text(question_text, file_keys)
 
     def scenario_file_keys(self):
-        return self._find_file_keys(self.…
+        return self._find_file_keys(self.scenario)
 
     def get_jinja2_variables(template_str: str) -> Set[str]:
         """
@@ -88,17 +122,29 @@ class QuestionTemplateReplacementsBuilder:
                 question_file_keys.append(var)
         return question_file_keys
 
-    def _scenario_replacements(…
+    def _scenario_replacements(
+        self, replacement_string: str = "<see file {key}>"
+    ) -> dict[str, Any]:
+        """
+        >>> from edsl import Scenario
+        >>> from edsl import QuestionFreeText;
+        >>> q = QuestionFreeText(question_text = "How are you {{ scenario.friend }}?", question_name = "test")
+        >>> s = Scenario({'friend':'john'})
+        >>> q.by(s).prompts().select('user_prompt')
+        Dataset([{'user_prompt': [Prompt(text=\"""How are you john?\""")]}])
+        """
         # File references dictionary
-        file_refs = {…
+        file_refs = {
+            key: replacement_string.format(key=key) for key in self.scenario_file_keys()
+        }
 
         # Scenario items excluding file keys
         scenario_items = {
-            k: v
-            for k, v in self.prompt_constructor.scenario.items()
-            if k not in self.scenario_file_keys()
+            k: v for k, v in self.scenario.items() if k not in self.scenario_file_keys()
         }
-        …
+        scenario_items_with_prefix = {'scenario': scenario_items}
+
+        return {**file_refs, **scenario_items, **scenario_items_with_prefix}
 
     @staticmethod
     def _question_data_replacements(
@@ -119,14 +165,31 @@ class QuestionTemplateReplacementsBuilder:
         return {**question_settings, **question_data}
 
     def build_replacement_dict(self, question_data: dict) -> dict[str, Any]:
-        """Builds a dictionary of replacement values for rendering a prompt by combining multiple data sources.…
+        """Builds a dictionary of replacement values for rendering a prompt by combining multiple data sources.
+
+
+        >>> from edsl import QuestionMultipleChoice, Scenario
+        >>> q = QuestionMultipleChoice(question_text="Do you like school?", question_name = "q0", question_options = ["yes", "no"])
+        >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = {"file1": "file1"}, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+        >>> qtrb.question_file_keys()
+        []
+        >>> from edsl import FileStore
+        >>> fs = FileStore.example()
+        >>> s = Scenario({"file1": fs, "first_name": "John"})
+        >>> q = QuestionMultipleChoice(question_text="What do you think of this file: {{ file1 }}, {{ first_name}}", question_name = "q0", question_options = ["good", "bad"])
+        >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = s, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+        >>> qtrb.build_replacement_dict(q.data)
+        {'file1': '<see file file1>', 'first_name': 'John', 'scenario': {'first_name': 'John'}, 'use_code': False, 'include_comment': True, 'question_name': 'q0', 'question_text': 'What do you think of this file: {{ file1 }}, {{ first_name}}', 'question_options': ['good', 'bad'], 'q0': 'q0', 'agent': 'agent'}
+
+
+        """
         rpl = {}
         rpl["scenario"] = self._scenario_replacements()
-        rpl["question"] = self._question_data_replacements(…
-        …
-        …
-        rpl["…
-        rpl["agent"] = {"agent": self.…
+        rpl["question"] = self._question_data_replacements(self.question, question_data)
+        # rpl["prior_answers"] = self.prompt_constructor.prior_answers_dict()
+        rpl["prior_answers"] = self.prior_answers_dict
+        # rpl["agent"] = {"agent": self.prompt_constructor.agent}
+        rpl["agent"] = {"agent": self.agent}
 
         # Combine all dictionaries using dict.update() for clarity
         replacement_dict = {}
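The three hunks above decouple QuestionTemplateReplacementsBuilder from PromptConstructor: the class now takes scenario, question, prior_answers_dict, and agent directly, with from_prompt_constructor() kept as a convenience constructor. A minimal usage sketch based on the doctests added in this diff; the values are illustrative and not part of the package:

```python
# Sketch of the new constructor signature introduced above (edsl 0.1.46).
from edsl import FileStore, QuestionMultipleChoice, Scenario
from edsl.agents.QuestionTemplateReplacementsBuilder import (
    QuestionTemplateReplacementsBuilder,
)

fs = FileStore.example()
q = QuestionMultipleChoice(
    question_text="What do you think of this file: {{ file1 }}",
    question_name="q0",
    question_options=["good", "bad"],
)

# Dependencies are now passed in directly instead of a PromptConstructor.
qtrb = QuestionTemplateReplacementsBuilder(
    scenario=Scenario({"file1": fs}),
    question=q,
    prior_answers_dict={"q0": "q0"},
    agent="agent",
)

print(qtrb.question_file_keys())                     # ['file1']
print(qtrb.build_replacement_dict(q.data)["file1"])  # '<see file file1>'
```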
edsl/agents/question_option_processor.py CHANGED
@@ -8,8 +8,16 @@ class QuestionOptionProcessor:
     These can be provided directly, as a template string, or fetched from prior answers or the scenario.
     """
 
-    …
-    …
+    @classmethod
+    def from_prompt_constructor(cls, prompt_constructor):
+        scenario = prompt_constructor.scenario
+        prior_answers_dict = prompt_constructor.prior_answers_dict()
+
+        return cls(scenario, prior_answers_dict)
+
+    def __init__(self, scenario: 'Scenario', prior_answers_dict: dict):
+        self.scenario = scenario
+        self.prior_answers_dict = prior_answers_dict
 
     @staticmethod
     def _get_default_options() -> list:
@@ -109,7 +117,8 @@ class QuestionOptionProcessor:
        >>> mpc = MockPromptConstructor()
        >>> from edsl import Scenario
        >>> mpc.scenario = Scenario({"options": ["Option 1", "Option 2"]})
-        >>>…
+        >>> mpc.prior_answers_dict = lambda: {'q0': 'q0'}
+        >>> processor = QuestionOptionProcessor.from_prompt_constructor(mpc)
 
        The basic case where options are directly provided:
 
@@ -130,7 +139,7 @@ class QuestionOptionProcessor:
        >>> q0 = MockQuestion()
        >>> q0.answer = ["Option 1", "Option 2"]
        >>> mpc.prior_answers_dict = lambda: {'q0': q0}
-        >>> processor = QuestionOptionProcessor(mpc)
+        >>> processor = QuestionOptionProcessor.from_prompt_constructor(mpc)
        >>> question_data = {"question_options": "{{ q0 }}"}
        >>> processor.get_question_options(question_data)
        ['Option 1', 'Option 2']
@@ -151,14 +160,14 @@ class QuestionOptionProcessor:
 
        # Try getting options from scenario
        scenario_options = self._get_options_from_scenario(
-            self.…
+            self.scenario, option_key
        )
        if scenario_options:
            return scenario_options
 
        # Try getting options from prior answers
        prior_answer_options = self._get_options_from_prior_answers(
-            self.…
+            self.prior_answers_dict, option_key
        )
        if prior_answer_options:
            return prior_answer_options
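QuestionOptionProcessor gets the same treatment: it is now built from a scenario and a prior-answers dict, and from_prompt_constructor() adapts anything exposing .scenario and .prior_answers_dict(). A sketch mirroring the updated doctests; the Mock* classes are illustrative stand-ins:

```python
# Sketch mirroring the doctests shown above; the Mock* classes are stand-ins.
from edsl import Scenario
from edsl.agents.question_option_processor import QuestionOptionProcessor


class MockQuestion:
    answer = ["Option 1", "Option 2"]


class MockPromptConstructor:
    scenario = Scenario({"options": ["Option 1", "Option 2"]})

    @staticmethod
    def prior_answers_dict():
        return {"q0": MockQuestion()}


# Adapter path, as used in the doctests:
processor = QuestionOptionProcessor.from_prompt_constructor(MockPromptConstructor())
print(processor.get_question_options({"question_options": "{{ q0 }}"}))
# ['Option 1', 'Option 2']

# Equivalent direct construction with the new __init__:
processor = QuestionOptionProcessor(
    scenario=Scenario({"options": ["Option 1", "Option 2"]}),
    prior_answers_dict={"q0": MockQuestion()},
)
```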
edsl/coop/CoopFunctionsMixin.py CHANGED
@@ -4,10 +4,9 @@ class CoopFunctionsMixin:
 
        s = Scenario({"existing_names": existing_names})
        q = QuestionList(
-            question_text="""The following…
-            Please provide new names…
-            They should be short…
-            No spaces - use underscores instead.
+            question_text="""The following column names are already in use: {{ existing_names }}
+            Please provide new column names.
+            They should be short (one or two words) and unique valid Python idenifiers (i.e., use underscores instead of spaces).
            """,
            question_name="better_names",
        )
edsl/coop/coop.py CHANGED
@@ -190,7 +190,7 @@ class Coop(CoopFunctionsMixin):
            server_version_str=server_edsl_version,
        ):
            print(
-                "Please upgrade your EDSL version to access our latest features.…
+                "Please upgrade your EDSL version to access our latest features. Open your terminal and run `pip install --upgrade edsl`"
            )
 
        if response.status_code >= 400:
@@ -212,7 +212,7 @@ class Coop(CoopFunctionsMixin):
            print("Your Expected Parrot API key is invalid.")
            self._display_login_url(
                edsl_auth_token=edsl_auth_token,
-                link_description="\n🔗 Use the link below to log in to…
+                link_description="\n🔗 Use the link below to log in to your account and automatically update your API key.",
            )
            api_key = self._poll_for_api_key(edsl_auth_token)
 
@@ -504,90 +504,146 @@ class Coop(CoopFunctionsMixin):
     ################
     # Remote Cache
     ################
-    def remote_cache_create(
+    # def remote_cache_create(
+    #     self,
+    #     cache_entry: CacheEntry,
+    #     visibility: VisibilityType = "private",
+    #     description: Optional[str] = None,
+    # ) -> dict:
+    #     """
+    #     Create a single remote cache entry.
+    #     If an entry with the same key already exists in the database, update it instead.
+
+    #     :param cache_entry: The cache entry to send to the server.
+    #     :param visibility: The visibility of the cache entry.
+    #     :param optional description: A description for this entry in the remote cache.
+
+    #     >>> entry = CacheEntry.example()
+    #     >>> coop.remote_cache_create(cache_entry=entry)
+    #     {'status': 'success', 'created_entry_count': 1, 'updated_entry_count': 0}
+    #     """
+    #     response = self._send_server_request(
+    #         uri="api/v0/remote-cache",
+    #         method="POST",
+    #         payload={
+    #             "json_string": json.dumps(cache_entry.to_dict()),
+    #             "version": self._edsl_version,
+    #             "visibility": visibility,
+    #             "description": description,
+    #         },
+    #     )
+    #     self._resolve_server_response(response)
+    #     response_json = response.json()
+    #     created_entry_count = response_json.get("created_entry_count", 0)
+    #     if created_entry_count > 0:
+    #         self.remote_cache_create_log(
+    #             response,
+    #             description="Upload new cache entries to server",
+    #             cache_entry_count=created_entry_count,
+    #         )
+    #     return response.json()
+
+    # def remote_cache_create_many(
+    #     self,
+    #     cache_entries: list[CacheEntry],
+    #     visibility: VisibilityType = "private",
+    #     description: Optional[str] = None,
+    # ) -> dict:
+    #     """
+    #     Create many remote cache entries.
+    #     If an entry with the same key already exists in the database, update it instead.
+
+    #     :param cache_entries: The list of cache entries to send to the server.
+    #     :param visibility: The visibility of the cache entries.
+    #     :param optional description: A description for these entries in the remote cache.
+
+    #     >>> entries = [CacheEntry.example(randomize=True) for _ in range(10)]
+    #     >>> coop.remote_cache_create_many(cache_entries=entries)
+    #     {'status': 'success', 'created_entry_count': 10, 'updated_entry_count': 0}
+    #     """
+    #     payload = [
+    #         {
+    #             "json_string": json.dumps(c.to_dict()),
+    #             "version": self._edsl_version,
+    #             "visibility": visibility,
+    #             "description": description,
+    #         }
+    #         for c in cache_entries
+    #     ]
+    #     response = self._send_server_request(
+    #         uri="api/v0/remote-cache/many",
+    #         method="POST",
+    #         payload=payload,
+    #         timeout=40,
+    #     )
+    #     self._resolve_server_response(response)
+    #     response_json = response.json()
+    #     created_entry_count = response_json.get("created_entry_count", 0)
+    #     if created_entry_count > 0:
+    #         self.remote_cache_create_log(
+    #             response,
+    #             description="Upload new cache entries to server",
+    #             cache_entry_count=created_entry_count,
+    #         )
+    #     return response.json()
+
+    def remote_cache_get(
         self,
-        …
-        …
-        description: Optional[str] = None,
-    ) -> dict:
+        job_uuid: Optional[Union[str, UUID]] = None,
+    ) -> list[CacheEntry]:
         """
-        …
-        If an entry with the same key already exists in the database, update it instead.
+        Get all remote cache entries.
 
-        :param…
-        :param visibility: The visibility of the cache entry.
-        :param optional description: A description for this entry in the remote cache.
+        :param optional select_keys: Only return CacheEntry objects with these keys.
 
-        >>>…
-        …
-        {'status': 'success', 'created_entry_count': 1, 'updated_entry_count': 0}
+        >>> coop.remote_cache_get(job_uuid="...")
+        [CacheEntry(...), CacheEntry(...), ...]
         """
+        if job_uuid is None:
+            raise ValueError("Must provide a job_uuid.")
         response = self._send_server_request(
-            uri="api/v0/remote-cache",
+            uri="api/v0/remote-cache/get-many-by-job",
             method="POST",
             payload={
-                "…
-                "version": self._edsl_version,
-                "visibility": visibility,
-                "description": description,
+                "job_uuid": str(job_uuid),
             },
+            timeout=40,
         )
         self._resolve_server_response(response)
-        …
-        …
-        …
-        …
-                response,
-                description="Upload new cache entries to server",
-                cache_entry_count=created_entry_count,
-            )
-        return response.json()
+        return [
+            CacheEntry.from_dict(json.loads(v.get("json_string")))
+            for v in response.json()
+        ]
 
-    def…
+    def remote_cache_get_by_key(
         self,
-        …
-        …
-        description: Optional[str] = None,
-    ) -> dict:
+        select_keys: Optional[list[str]] = None,
+    ) -> list[CacheEntry]:
         """
-        …
-        If an entry with the same key already exists in the database, update it instead.
+        Get all remote cache entries.
 
-        :param…
-        :param visibility: The visibility of the cache entries.
-        :param optional description: A description for these entries in the remote cache.
+        :param optional select_keys: Only return CacheEntry objects with these keys.
 
-        >>>…
-        …
-        {'status': 'success', 'created_entry_count': 10, 'updated_entry_count': 0}
+        >>> coop.remote_cache_get_by_key(selected_keys=["..."])
+        [CacheEntry(...), CacheEntry(...), ...]
         """
-        …
-        …
-                "json_string": json.dumps(c.to_dict()),
-                "version": self._edsl_version,
-                "visibility": visibility,
-                "description": description,
-            }
-            for c in cache_entries
-        ]
+        if select_keys is None or len(select_keys) == 0:
+            raise ValueError("Must provide a non-empty list of select_keys.")
         response = self._send_server_request(
-            uri="api/v0/remote-cache/many",
+            uri="api/v0/remote-cache/get-many-by-key",
             method="POST",
-            payload=…
+            payload={
+                "selected_keys": select_keys,
+            },
             timeout=40,
         )
         self._resolve_server_response(response)
-        …
-        …
-        …
-        …
-                response,
-                description="Upload new cache entries to server",
-                cache_entry_count=created_entry_count,
-            )
-        return response.json()
+        return [
+            CacheEntry.from_dict(json.loads(v.get("json_string")))
+            for v in response.json()
+        ]
 
-    def…
+    def legacy_remote_cache_get(
         self,
         exclude_keys: Optional[list[str]] = None,
         select_keys: Optional[list[str]] = None,
@@ -595,9 +651,10 @@ class Coop(CoopFunctionsMixin):
        """
        Get all remote cache entries.
 
+        :param optional select_keys: Only return CacheEntry objects with these keys.
        :param optional exclude_keys: Exclude CacheEntry objects with these keys.
 
-        >>> coop.…
+        >>> coop.legacy_remote_cache_get()
        [CacheEntry(...), CacheEntry(...), ...]
        """
        if exclude_keys is None:
@@ -605,9 +662,9 @@ class Coop(CoopFunctionsMixin):
        if select_keys is None:
            select_keys = []
        response = self._send_server_request(
-            uri="api/v0/remote-cache/get-many",
+            uri="api/v0/remote-cache/legacy/get-many",
            method="POST",
-            payload={"…
+            payload={"exclude_keys": exclude_keys, "selected_keys": select_keys},
            timeout=40,
        )
        self._resolve_server_response(response)
@@ -616,7 +673,7 @@ class Coop(CoopFunctionsMixin):
            for v in response.json()
        ]
 
-    def…
+    def legacy_remote_cache_get_diff(
        self,
        client_cacheentry_keys: list[str],
    ) -> dict:
@@ -624,7 +681,7 @@ class Coop(CoopFunctionsMixin):
        Get the difference between local and remote cache entries for a user.
        """
        response = self._send_server_request(
-            uri="api/v0/remote-cache/get-diff",
+            uri="api/v0/remote-cache/legacy/get-diff",
            method="POST",
            payload={"keys": client_cacheentry_keys},
            timeout=40,
@@ -642,38 +699,38 @@ class Coop(CoopFunctionsMixin):
        }
        downloaded_entry_count = len(response_dict["client_missing_cacheentries"])
        if downloaded_entry_count > 0:
-            self.…
+            self.legacy_remote_cache_create_log(
                response,
                description="Download missing cache entries to client",
                cache_entry_count=downloaded_entry_count,
            )
        return response_dict
 
-    def…
+    def legacy_remote_cache_clear(self) -> dict:
        """
        Clear all remote cache entries.
 
        >>> entries = [CacheEntry.example(randomize=True) for _ in range(10)]
-        >>> coop.…
-        >>> coop.…
+        >>> coop.legacy_remote_cache_create_many(cache_entries=entries)
+        >>> coop.legacy_remote_cache_clear()
        {'status': 'success', 'deleted_entry_count': 10}
        """
        response = self._send_server_request(
-            uri="api/v0/remote-cache/delete-all",
+            uri="api/v0/remote-cache/legacy/delete-all",
            method="DELETE",
        )
        self._resolve_server_response(response)
        response_json = response.json()
        deleted_entry_count = response_json.get("deleted_entry_count", 0)
        if deleted_entry_count > 0:
-            self.…
+            self.legacy_remote_cache_create_log(
                response,
                description="Clear cache entries",
                cache_entry_count=deleted_entry_count,
            )
        return response.json()
 
-    def…
+    def legacy_remote_cache_create_log(
        self, response: requests.Response, description: str, cache_entry_count: int
    ) -> Union[dict, None]:
        """
@@ -682,7 +739,7 @@ class Coop(CoopFunctionsMixin):
        """
        if 200 <= response.status_code < 300:
            log_response = self._send_server_request(
-                uri="api/v0/remote-cache-log",
+                uri="api/v0/remote-cache-log/legacy",
                method="POST",
                payload={
                    "description": description,
@@ -692,15 +749,15 @@ class Coop(CoopFunctionsMixin):
            self._resolve_server_response(log_response)
        return response.json()
 
-    def…
+    def legacy_remote_cache_clear_log(self) -> dict:
        """
        Clear all remote cache log entries.
 
-        >>> coop.…
+        >>> coop.legacy_remote_cache_clear_log()
        {'status': 'success'}
        """
        response = self._send_server_request(
-            uri="api/v0/remote-cache-log/delete-all",
+            uri="api/v0/remote-cache-log/legacy/delete-all",
            method="DELETE",
        )
        self._resolve_server_response(response)
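Taken together, the coop.py hunks above rework the remote-cache API: the create endpoints are commented out, remote_cache_get() now takes a job UUID, remote_cache_get_by_key() is new, and the pre-0.1.46 endpoints survive under a legacy_ prefix. A hedged sketch of the new call patterns; the UUID and key below are placeholders:

```python
# Sketch of the reworked remote-cache calls shown above; identifiers are placeholders.
from edsl.coop.coop import Coop

coop = Coop()  # assumes an Expected Parrot API key is already configured

# New: fetch the cache entries produced by a remote inference job.
job_entries = coop.remote_cache_get(job_uuid="00000000-0000-0000-0000-000000000000")

# New: fetch specific entries by cache key (note the parameter is select_keys,
# even though the docstring example above writes selected_keys).
keyed_entries = coop.remote_cache_get_by_key(select_keys=["some-cache-entry-key"])

# The old behaviour remains reachable under the legacy_ prefix.
all_entries = coop.legacy_remote_cache_get()
diff = coop.legacy_remote_cache_get_diff(
    client_cacheentry_keys=[entry.key for entry in all_entries]
)
coop.legacy_remote_cache_clear()
```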
@@ -714,6 +771,7 @@ class Coop(CoopFunctionsMixin):
        visibility: Optional[VisibilityType] = "unlisted",
        initial_results_visibility: Optional[VisibilityType] = "unlisted",
        iterations: Optional[int] = 1,
+        fresh: Optional[bool] = False,
    ) -> RemoteInferenceCreationInfo:
        """
        Send a remote inference job to the server.
@@ -742,6 +800,7 @@ class Coop(CoopFunctionsMixin):
                "visibility": visibility,
                "version": self._edsl_version,
                "initial_results_visibility": initial_results_visibility,
+                "fresh": fresh,
            },
        )
        self._resolve_server_response(response)
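These two hunks add a fresh flag to the remote-inference creation call and forward it in the request payload. The enclosing method is not shown in this excerpt; the sketch below assumes it is Coop.remote_inference_create and uses Jobs.example() as a stand-in for a real job:

```python
# Hypothetical sketch of the new `fresh` flag; the method name and example job
# are assumptions, since the enclosing method is not shown in this excerpt.
from edsl import Jobs
from edsl.coop.coop import Coop

coop = Coop()
job = Jobs.example()

info = coop.remote_inference_create(
    job,
    visibility="unlisted",
    iterations=1,
    fresh=True,  # new optional flag, forwarded to the server in the payload
)
print(info)
```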
@@ -870,7 +929,7 @@ class Coop(CoopFunctionsMixin):
    def create_project(
        self,
        survey: Survey,
-        project_name: str,
+        project_name: str = "Project",
        survey_description: Optional[str] = None,
        survey_alias: Optional[str] = None,
        survey_visibility: Optional[VisibilityType] = "unlisted",
@@ -895,7 +954,8 @@ class Coop(CoopFunctionsMixin):
        return {
            "name": response_json.get("project_name"),
            "uuid": response_json.get("uuid"),
-            "…
+            "admin_url": f"{self.url}/home/projects/{response_json.get('uuid')}",
+            "respondent_url": f"{self.url}/respond/{response_json.get('uuid')}",
        }
 
    ################
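create_project() now defaults project_name to "Project" and returns separate admin and respondent URLs. A sketch of the updated call and return shape, based on the two hunks above:

```python
# Sketch of the updated create_project() call and return keys shown above.
from edsl import Survey
from edsl.coop.coop import Coop

coop = Coop()
project = coop.create_project(Survey.example())  # project_name defaults to "Project"

print(project["name"])
print(project["uuid"])
print(project["admin_url"])       # <coop-url>/home/projects/<uuid>
print(project["respondent_url"])  # <coop-url>/respond/<uuid>
```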
@@ -1027,15 +1087,30 @@ class Coop(CoopFunctionsMixin):
        - We need this function because URL detection with print() does not work alongside animations in VSCode.
        """
        from rich import print as rich_print
+        from rich.console import Console
+
+        console = Console()
 
        url = f"{CONFIG.EXPECTED_PARROT_URL}/login?edsl_auth_token={edsl_auth_token}"
 
-        if…
-            …
-            …
-            …
+        if console.is_terminal:
+            # Running in a standard terminal, show the full URL
+            if link_description:
+                rich_print(
+                    "{link_description}\n[#38bdf8][link={url}]{url}[/link][/#38bdf8]"
+                )
+            else:
+                rich_print(f"[#38bdf8][link={url}]{url}[/link][/#38bdf8]")
        else:
-            …
+            # Running in an interactive environment (e.g., Jupyter Notebook), hide the URL
+            if link_description:
+                rich_print(
+                    f"{link_description}\n[#38bdf8][link={url}][underline]Log in and automatically store key[/underline][/link][/#38bdf8]"
+                )
+            else:
+                rich_print(
+                    f"[#38bdf8][link={url}][underline]Log in and automatically store key[/underline][/link][/#38bdf8]"
+                )
 
    def _get_api_key(self, edsl_auth_token: str):
        """
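The _display_login_url hunk above switches on rich's Console.is_terminal to decide between printing the raw login URL (plain terminals) and a clickable label (notebook-style frontends). A standalone illustration of the same pattern using only the rich API; the URL is a placeholder:

```python
# Standalone illustration of the terminal-detection pattern used above.
from rich import print as rich_print
from rich.console import Console

url = "https://www.expectedparrot.com/login?edsl_auth_token=..."  # placeholder

if Console().is_terminal:
    # Plain terminal: print the full URL so it can be copied.
    rich_print(f"[#38bdf8][link={url}]{url}[/link][/#38bdf8]")
else:
    # Notebook-style frontend: show a clickable label instead of the raw URL.
    rich_print(
        f"[#38bdf8][link={url}][underline]Log in and automatically store key[/underline][/link][/#38bdf8]"
    )
```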
@@ -1190,24 +1265,24 @@ def main():
    # C. Remote Cache
    ##############
    # clear
-    coop.…
-    assert coop.…
+    coop.legacy_remote_cache_clear()
+    assert coop.legacy_remote_cache_get() == []
    # create one remote cache entry
    cache_entry = CacheEntry.example()
    cache_entry.to_dict()
-    coop.remote_cache_create(cache_entry)
+    # coop.remote_cache_create(cache_entry)
    # create many remote cache entries
    cache_entries = [CacheEntry.example(randomize=True) for _ in range(10)]
-    coop.remote_cache_create_many(cache_entries)
+    # coop.remote_cache_create_many(cache_entries)
    # get all remote cache entries
-    coop.…
-    coop.…
-    coop.…
+    coop.legacy_remote_cache_get()
+    coop.legacy_remote_cache_get(exclude_keys=[])
+    coop.legacy_remote_cache_get(exclude_keys=["a"])
    exclude_keys = [cache_entry.key for cache_entry in cache_entries]
-    coop.…
+    coop.legacy_remote_cache_get(exclude_keys)
    # clear
-    coop.…
-    coop.…
+    coop.legacy_remote_cache_clear()
+    coop.legacy_remote_cache_get()
 
    ##############
    # D. Remote Inference
|