edsl 0.1.41__py3-none-any.whl → 0.1.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. edsl/__version__.py +1 -1
  2. edsl/agents/Invigilator.py +4 -3
  3. edsl/agents/InvigilatorBase.py +2 -1
  4. edsl/agents/PromptConstructor.py +92 -21
  5. edsl/agents/QuestionInstructionPromptBuilder.py +68 -9
  6. edsl/agents/QuestionTemplateReplacementsBuilder.py +7 -2
  7. edsl/agents/prompt_helpers.py +2 -2
  8. edsl/coop/coop.py +97 -19
  9. edsl/enums.py +3 -1
  10. edsl/exceptions/coop.py +4 -0
  11. edsl/exceptions/jobs.py +1 -9
  12. edsl/exceptions/language_models.py +8 -4
  13. edsl/exceptions/questions.py +8 -11
  14. edsl/inference_services/AvailableModelFetcher.py +4 -1
  15. edsl/inference_services/DeepSeekService.py +18 -0
  16. edsl/inference_services/registry.py +2 -0
  17. edsl/jobs/Jobs.py +60 -34
  18. edsl/jobs/JobsPrompts.py +64 -3
  19. edsl/jobs/JobsRemoteInferenceHandler.py +42 -25
  20. edsl/jobs/JobsRemoteInferenceLogger.py +1 -1
  21. edsl/jobs/buckets/BucketCollection.py +30 -0
  22. edsl/jobs/data_structures.py +1 -0
  23. edsl/jobs/interviews/Interview.py +1 -1
  24. edsl/jobs/loggers/HTMLTableJobLogger.py +6 -1
  25. edsl/jobs/results_exceptions_handler.py +2 -7
  26. edsl/jobs/tasks/TaskHistory.py +49 -17
  27. edsl/language_models/LanguageModel.py +7 -4
  28. edsl/language_models/ModelList.py +1 -1
  29. edsl/language_models/key_management/KeyLookupBuilder.py +47 -20
  30. edsl/language_models/key_management/models.py +10 -4
  31. edsl/language_models/model.py +49 -0
  32. edsl/prompts/Prompt.py +124 -61
  33. edsl/questions/descriptors.py +37 -23
  34. edsl/questions/question_base_gen_mixin.py +1 -0
  35. edsl/results/DatasetExportMixin.py +35 -6
  36. edsl/results/Result.py +9 -3
  37. edsl/results/Results.py +180 -2
  38. edsl/results/ResultsGGMixin.py +117 -60
  39. edsl/scenarios/PdfExtractor.py +3 -6
  40. edsl/scenarios/Scenario.py +35 -1
  41. edsl/scenarios/ScenarioList.py +22 -3
  42. edsl/scenarios/ScenarioListPdfMixin.py +9 -3
  43. edsl/surveys/Survey.py +1 -1
  44. edsl/templates/error_reporting/base.html +2 -4
  45. edsl/templates/error_reporting/exceptions_table.html +35 -0
  46. edsl/templates/error_reporting/interview_details.html +67 -53
  47. edsl/templates/error_reporting/interviews.html +4 -17
  48. edsl/templates/error_reporting/overview.html +31 -5
  49. edsl/templates/error_reporting/performance_plot.html +1 -1
  50. {edsl-0.1.41.dist-info → edsl-0.1.43.dist-info}/METADATA +2 -3
  51. {edsl-0.1.41.dist-info → edsl-0.1.43.dist-info}/RECORD +53 -51
  52. {edsl-0.1.41.dist-info → edsl-0.1.43.dist-info}/LICENSE +0 -0
  53. {edsl-0.1.41.dist-info → edsl-0.1.43.dist-info}/WHEEL +0 -0
edsl/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.41"
+ __version__ = "0.1.43"
edsl/agents/Invigilator.py CHANGED
@@ -1,6 +1,6 @@
  """Module for creating Invigilators, which are objects to administer a question to an Agent."""

- from typing import Dict, Any, Optional, TYPE_CHECKING
+ from typing import Dict, Any, Optional, TYPE_CHECKING, Literal

  from edsl.utilities.decorators import sync_wrapper
  from edsl.exceptions.questions import QuestionAnswerValidationError
@@ -12,6 +12,7 @@ if TYPE_CHECKING:
  from edsl.scenarios.Scenario import Scenario
  from edsl.surveys.Survey import Survey

+ PromptType = Literal["user_prompt", "system_prompt", "encoded_image", "files_list"]

  NA = "Not Applicable"

@@ -19,7 +20,7 @@ NA = "Not Applicable"
  class InvigilatorAI(InvigilatorBase):
  """An invigilator that uses an AI model to answer questions."""

- def get_prompts(self) -> Dict[str, "Prompt"]:
+ def get_prompts(self) -> Dict[PromptType, "Prompt"]:
  """Return the prompts used."""
  return self.prompt_constructor.get_prompts()

@@ -155,7 +156,7 @@ class InvigilatorAI(InvigilatorBase):
  self.question.question_options = new_question_options

  question_with_validators = self.question.render(
- self.scenario | prior_answers_dict
+ self.scenario | prior_answers_dict | {'agent':self.agent.traits}
  )
  question_with_validators.use_code = self.question.use_code
  else:
edsl/agents/InvigilatorBase.py CHANGED
@@ -135,6 +135,7 @@ class InvigilatorBase(ABC):
  d["additional_prompt_data"] = data["additional_prompt_data"]

  d = cls(**d)
+ return d

  def __repr__(self) -> str:
  """Return a string representation of the Invigilator.
@@ -143,7 +144,7 @@ class InvigilatorBase(ABC):
  'InvigilatorExample(...)'

  """
- return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scneario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration{repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)})"
+ return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scenario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration={repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)})"

  def get_failed_task_result(self, failure_reason: str) -> EDSLResultObjectInput:
  """Return an AgentResponseDict used in case the question-asking fails.
edsl/agents/PromptConstructor.py CHANGED
@@ -1,6 +1,10 @@
  from __future__ import annotations
- from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING
+ from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING, Literal
  from functools import cached_property
+ from multiprocessing import Pool, freeze_support, get_context
+ from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
+ import time
+ import logging

  from edsl.prompts.Prompt import Prompt

@@ -22,6 +26,7 @@ if TYPE_CHECKING:
  from edsl.questions.QuestionBase import QuestionBase
  from edsl.scenarios.Scenario import Scenario

+ logger = logging.getLogger(__name__)

  class BasePlaceholder:
  """Base class for placeholder values when a question is not yet answered."""
@@ -242,31 +247,97 @@ class PromptConstructor:
  question_name, self.current_answers
  )

- def get_prompts(self) -> Dict[str, Prompt]:
- """Get both prompts for the LLM call.
-
- >>> from edsl import QuestionFreeText
- >>> from edsl.agents.InvigilatorBase import InvigilatorBase
- >>> q = QuestionFreeText(question_text="How are you today?", question_name="q_new")
- >>> i = InvigilatorBase.example(question = q)
- >>> i.get_prompts()
- {'user_prompt': ..., 'system_prompt': ...}
- """
- prompts = self.prompt_plan.get_prompts(
- agent_instructions=self.agent_instructions_prompt,
- agent_persona=self.agent_persona_prompt,
- question_instructions=Prompt(self.question_instructions_prompt),
- prior_question_memory=self.prior_question_memory_prompt,
- )
- if self.question_file_keys:
+ def get_prompts(self, parallel: Literal["thread", "process", None] = None) -> Dict[str, Any]:
+ """Get the prompts for the question."""
+ start = time.time()
+
+ # Build all the components
+ instr_start = time.time()
+ agent_instructions = self.agent_instructions_prompt
+ instr_end = time.time()
+ logger.debug(f"Time taken for agent instructions: {instr_end - instr_start:.4f}s")
+
+ persona_start = time.time()
+ agent_persona = self.agent_persona_prompt
+ persona_end = time.time()
+ logger.debug(f"Time taken for agent persona: {persona_end - persona_start:.4f}s")
+
+ q_instr_start = time.time()
+ question_instructions = self.question_instructions_prompt
+ q_instr_end = time.time()
+ logger.debug(f"Time taken for question instructions: {q_instr_end - q_instr_start:.4f}s")
+
+ memory_start = time.time()
+ prior_question_memory = self.prior_question_memory_prompt
+ memory_end = time.time()
+ logger.debug(f"Time taken for prior question memory: {memory_end - memory_start:.4f}s")
+
+ # Get components dict
+ components = {
+ "agent_instructions": agent_instructions.text,
+ "agent_persona": agent_persona.text,
+ "question_instructions": question_instructions.text,
+ "prior_question_memory": prior_question_memory.text,
+ }
+
+ # Use PromptPlan's get_prompts method
+ plan_start = time.time()
+
+ # Get arranged components first
+ arranged = self.prompt_plan.arrange_components(**components)
+
+ if parallel == "process":
+ ctx = get_context('fork')
+ with ctx.Pool() as pool:
+ results = pool.map(_process_prompt, [
+ (arranged["user_prompt"], {}),
+ (arranged["system_prompt"], {})
+ ])
+ prompts = {
+ "user_prompt": results[0],
+ "system_prompt": results[1]
+ }
+
+ elif parallel == "thread":
+ with ThreadPoolExecutor() as executor:
+ user_prompt_list = arranged["user_prompt"]
+ system_prompt_list = arranged["system_prompt"]
+
+ # Process both prompt lists in parallel
+ rendered_user = executor.submit(_process_prompt, (user_prompt_list, {}))
+ rendered_system = executor.submit(_process_prompt, (system_prompt_list, {}))
+
+ prompts = {
+ "user_prompt": rendered_user.result(),
+ "system_prompt": rendered_system.result()
+ }
+
+ else:  # sequential processing
+ prompts = self.prompt_plan.get_prompts(**components)
+
+ plan_end = time.time()
+ logger.debug(f"Time taken for prompt processing: {plan_end - plan_start:.4f}s")
+
+ # Handle file keys if present
+ if hasattr(self, 'question_file_keys') and self.question_file_keys:
+ files_start = time.time()
  files_list = []
  for key in self.question_file_keys:
  files_list.append(self.scenario[key])
  prompts["files_list"] = files_list
+ files_end = time.time()
+ logger.debug(f"Time taken for file key processing: {files_end - files_start:.4f}s")
+
+ end = time.time()
+ logger.debug(f"Total time in get_prompts: {end - start:.4f}s")
  return prompts


- if __name__ == "__main__":
- import doctest
+ def _process_prompt(args):
+ """Helper function to process a single prompt list with its replacements."""
+ prompt_list, replacements = args
+ return prompt_list.reduce()
+

- doctest.testmod(optionflags=doctest.ELLIPSIS)
+ if __name__ == '__main__':
+ freeze_support()
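
A minimal usage sketch of the new parallel option (not part of the diff; adapted from the doctest removed above, and assuming the InvigilatorBase.example fixture and the prompt_constructor attribute exposed by invigilators; DEBUG logging must be enabled to see the new timing output):

    import logging
    from edsl import QuestionFreeText
    from edsl.agents.InvigilatorBase import InvigilatorBase

    logging.basicConfig(level=logging.DEBUG)  # surfaces the logger.debug timing lines added above

    q = QuestionFreeText(question_text="How are you today?", question_name="q_new")
    i = InvigilatorBase.example(question=q)

    prompts = i.get_prompts()                                        # sequential path, as before
    threaded = i.prompt_constructor.get_prompts(parallel="thread")   # new thread-based rendering
    print(prompts["user_prompt"], prompts["system_prompt"])

Note that parallel="process" uses the fork start method, so it is only expected to work on platforms where fork is available.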
edsl/agents/QuestionInstructionPromptBuilder.py CHANGED
@@ -1,5 +1,6 @@
  from typing import Dict, List, Set
  from warnings import warn
+ import logging
  from edsl.prompts.Prompt import Prompt

  from edsl.agents.QuestionTemplateReplacementsBuilder import (
@@ -23,12 +24,44 @@ class QuestionInstructionPromptBuilder:
  Returns:
  Prompt: The fully rendered question instructions
  """
+ import time
+
+ start = time.time()
+
+ # Create base prompt
+ base_start = time.time()
  base_prompt = self._create_base_prompt()
+ base_end = time.time()
+ logging.debug(f"Time for base prompt: {base_end - base_start}")
+
+ # Enrich with options
+ enrich_start = time.time()
  enriched_prompt = self._enrich_with_question_options(base_prompt)
+ enrich_end = time.time()
+ logging.debug(f"Time for enriching with options: {enrich_end - enrich_start}")
+
+ # Render prompt
+ render_start = time.time()
  rendered_prompt = self._render_prompt(enriched_prompt)
+ render_end = time.time()
+ logging.debug(f"Time for rendering prompt: {render_end - render_start}")
+
+ # Validate template variables
+ validate_start = time.time()
  self._validate_template_variables(rendered_prompt)
-
- return self._append_survey_instructions(rendered_prompt)
+ validate_end = time.time()
+ logging.debug(f"Time for template validation: {validate_end - validate_start}")
+
+ # Append survey instructions
+ append_start = time.time()
+ final_prompt = self._append_survey_instructions(rendered_prompt)
+ append_end = time.time()
+ logging.debug(f"Time for appending survey instructions: {append_end - append_start}")
+
+ end = time.time()
+ logging.debug(f"Total time in build_question_instructions: {end - start}")
+
+ return final_prompt

  def _create_base_prompt(self) -> Dict:
  """Creates the initial prompt with basic question data.
@@ -50,14 +83,25 @@ class QuestionInstructionPromptBuilder:
  Returns:
  Dict: Enriched prompt data
  """
+ import time
+
+ start = time.time()
+
  if "question_options" in prompt_data["data"]:
  from edsl.agents.question_option_processor import QuestionOptionProcessor
-
+
+ processor_start = time.time()
  question_options = QuestionOptionProcessor(
  self.prompt_constructor
  ).get_question_options(question_data=prompt_data["data"])
-
+ processor_end = time.time()
+ logging.debug(f"Time to process question options: {processor_end - processor_start}")
+
  prompt_data["data"]["question_options"] = question_options
+
+ end = time.time()
+ logging.debug(f"Total time in _enrich_with_question_options: {end - start}")
+
  return prompt_data

  def _render_prompt(self, prompt_data: Dict) -> Prompt:
@@ -69,11 +113,28 @@ class QuestionInstructionPromptBuilder:
  Returns:
  Prompt: Rendered instructions
  """
-
+ import time
+
+ start = time.time()
+
+ # Build replacement dict
+ dict_start = time.time()
  replacement_dict = QTRB(self.prompt_constructor).build_replacement_dict(
  prompt_data["data"]
  )
- return prompt_data["prompt"].render(replacement_dict)
+ dict_end = time.time()
+ logging.debug(f"Time to build replacement dict: {dict_end - dict_start}")
+
+ # Render with dict
+ render_start = time.time()
+ result = prompt_data["prompt"].render(replacement_dict)
+ render_end = time.time()
+ logging.debug(f"Time to render with dict: {render_end - render_start}")
+
+ end = time.time()
+ logging.debug(f"Total time in _render_prompt: {end - start}")
+
+ return result

  def _validate_template_variables(self, rendered_prompt: Prompt) -> None:
  """Validates that all template variables have been properly replaced.
@@ -101,9 +162,7 @@ class QuestionInstructionPromptBuilder:
  """
  for question_name in self.survey.question_names:
  if question_name in undefined_vars:
- print(
- f"Question name found in undefined_template_variables: {question_name}"
- )
+ logging.warning(f"Question name found in undefined_template_variables: {question_name}")

  def _append_survey_instructions(self, rendered_prompt: Prompt) -> Prompt:
  """Appends any relevant survey instructions to the rendered prompt.
edsl/agents/QuestionTemplateReplacementsBuilder.py CHANGED
@@ -1,4 +1,4 @@
- from jinja2 import Environment, meta
+ from jinja2 import Environment, meta, TemplateSyntaxError
  from typing import Any, Set, TYPE_CHECKING

  if TYPE_CHECKING:
@@ -29,7 +29,12 @@ class QuestionTemplateReplacementsBuilder:
  Set[str]: A set of variable names found in the template
  """
  env = Environment()
- ast = env.parse(template_str)
+ try:
+ ast = env.parse(template_str)
+ except TemplateSyntaxError:
+ print(f"Error parsing template: {template_str}")
+ raise
+
  return meta.find_undeclared_variables(ast)

  @staticmethod
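
For reference, a small sketch of the Jinja2 behavior the new try/except guards (not part of the diff; standard jinja2 API, illustrative template strings): Environment.parse raises TemplateSyntaxError on a malformed template, which the change above now reports before re-raising.

    from jinja2 import Environment, meta, TemplateSyntaxError

    env = Environment()
    ast = env.parse("Hello {{ name }}!")
    print(meta.find_undeclared_variables(ast))  # {'name'}

    try:
        env.parse("Hello {{ name ")  # unclosed expression -> TemplateSyntaxError
    except TemplateSyntaxError as e:
        print(f"Error parsing template: {e}")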
edsl/agents/prompt_helpers.py CHANGED
@@ -124,6 +124,6 @@ class PromptPlan:
  """Get both prompts for the LLM call."""
  prompts = self.arrange_components(**kwargs)
  return {
- "user_prompt": prompts["user_prompt"].reduce(),
- "system_prompt": prompts["system_prompt"].reduce(),
+ "user_prompt": Prompt("".join(str(p) for p in prompts["user_prompt"])),
+ "system_prompt": Prompt("".join(str(p) for p in prompts["system_prompt"])),
  }
edsl/coop/coop.py CHANGED
@@ -14,7 +14,11 @@ from edsl.data.CacheEntry import CacheEntry
  from edsl.jobs.Jobs import Jobs
  from edsl.surveys.Survey import Survey

- from edsl.exceptions.coop import CoopNoUUIDError, CoopServerResponseError
+ from edsl.exceptions.coop import (
+ CoopInvalidURLError,
+ CoopNoUUIDError,
+ CoopServerResponseError,
+ )
  from edsl.coop.utils import (
  EDSLObject,
  ObjectRegistry,
@@ -285,17 +289,46 @@ class Coop(CoopFunctionsMixin):
  if value is None:
  return "null"

- def _resolve_uuid(
+ def _resolve_uuid_or_alias(
  self, uuid: Union[str, UUID] = None, url: str = None
- ) -> Union[str, UUID]:
+ ) -> tuple[Optional[str], Optional[str], Optional[str]]:
  """
- Resolve the uuid from a uuid or a url.
+ Resolve the uuid or alias information from a uuid or a url.
+ Returns a tuple of (uuid, owner_username, alias)
+ - For content/<uuid> URLs: returns (uuid, None, None)
+ - For content/<username>/<alias> URLs: returns (None, username, alias)
  """
  if not url and not uuid:
  raise CoopNoUUIDError("No uuid or url provided for the object.")
+
  if not uuid and url:
- uuid = url.split("/")[-1]
- return uuid
+ parts = (
+ url.replace("http://", "")
+ .replace("https://", "")
+ .rstrip("/")
+ .split("/")
+ )
+
+ # Remove domain
+ parts = parts[1:]
+
+ if len(parts) < 2 or parts[0] != "content":
+ raise CoopInvalidURLError(
+ f"Invalid URL format. The URL must end with /content/<uuid> or /content/<username>/<alias>: {url}"
+ )
+
+ if len(parts) == 2:
+ obj_uuid = parts[1]
+ return obj_uuid, None, None
+ elif len(parts) == 3:
+ username, alias = parts[1], parts[2]
+ return None, username, alias
+ else:
+ raise CoopInvalidURLError(
+ f"Invalid URL format. The URL must end with /content/<uuid> or /content/<username>/<alias>: {url}"
+ )
+
+ return str(uuid), None, None

  @property
  def edsl_settings(self) -> dict:
@@ -361,22 +394,31 @@ class Coop(CoopFunctionsMixin):
  expected_object_type: Optional[ObjectType] = None,
  ) -> EDSLObject:
  """
- Retrieve an EDSL object by its uuid or its url.
+ Retrieve an EDSL object by its uuid/url or by owner username and alias.
  - If the object's visibility is private, the user must be the owner.
  - Optionally, check if the retrieved object is of a certain type.

  :param uuid: the uuid of the object either in str or UUID format.
- :param url: the url of the object.
+ :param url: the url of the object (can be content/uuid or content/username/alias format).
  :param expected_object_type: the expected type of the object.

  :return: the object instance.
  """
- uuid = self._resolve_uuid(uuid, url)
- response = self._send_server_request(
- uri=f"api/v0/object",
- method="GET",
- params={"uuid": uuid},
- )
+ obj_uuid, owner_username, alias = self._resolve_uuid_or_alias(uuid, url)
+
+ if obj_uuid:
+ response = self._send_server_request(
+ uri=f"api/v0/object",
+ method="GET",
+ params={"uuid": obj_uuid},
+ )
+ else:
+ response = self._send_server_request(
+ uri=f"api/v0/object/alias",
+ method="GET",
+ params={"owner_username": owner_username, "alias": alias},
+ )
+
  self._resolve_server_response(response)
  json_string = response.json().get("json_string")
  object_type = response.json().get("object_type")
@@ -414,12 +456,13 @@
  """
  Delete an object from the server.
  """
- uuid = self._resolve_uuid(uuid, url)
+ obj_uuid, _, _ = self._resolve_uuid_or_alias(uuid, url)
  response = self._send_server_request(
  uri=f"api/v0/object",
  method="DELETE",
- params={"uuid": uuid},
+ params={"uuid": obj_uuid},
  )
+
  self._resolve_server_response(response)
  return response.json()

@@ -438,11 +481,11 @@
  """
  if description is None and visibility is None and value is None:
  raise Exception("Nothing to patch.")
- uuid = self._resolve_uuid(uuid, url)
+ obj_uuid, _, _ = self._resolve_uuid_or_alias(uuid, url)
  response = self._send_server_request(
  uri=f"api/v0/object",
  method="PATCH",
- params={"uuid": uuid},
+ params={"uuid": obj_uuid},
  payload={
  "description": description,
  "alias": alias,
@@ -549,6 +592,7 @@
  def remote_cache_get(
  self,
  exclude_keys: Optional[list[str]] = None,
+ select_keys: Optional[list[str]] = None,
  ) -> list[CacheEntry]:
  """
  Get all remote cache entries.
@@ -560,10 +604,12 @@
  """
  if exclude_keys is None:
  exclude_keys = []
+ if select_keys is None:
+ select_keys = []
  response = self._send_server_request(
  uri="api/v0/remote-cache/get-many",
  method="POST",
- payload={"keys": exclude_keys},
+ payload={"keys": exclude_keys, "selected_keys": select_keys},
  timeout=40,
  )
  self._resolve_server_response(response)
@@ -894,6 +940,38 @@
  data = response.json()
  return ServiceToModelsMapping(data)

+ def fetch_working_models(self) -> list[dict]:
+ """
+ Fetch a list of working models from Coop.
+
+ Example output:
+
+ [
+ {
+ "service": "openai",
+ "model": "gpt-4o",
+ "works_with_text": True,
+ "works_with_images": True,
+ "usd_per_1M_input_tokens": 2.5,
+ "usd_per_1M_output_tokens": 10.0,
+ }
+ ]
+ """
+ response = self._send_server_request(uri="api/v0/working-models", method="GET")
+ self._resolve_server_response(response)
+ data = response.json()
+ return [
+ {
+ "service": record.get("service"),
+ "model": record.get("model"),
+ "works_with_text": record.get("works_with_text"),
+ "works_with_images": record.get("works_with_images"),
+ "usd_per_1M_input_tokens": record.get("input_price_per_1M_tokens"),
+ "usd_per_1M_output_tokens": record.get("output_price_per_1M_tokens"),
+ }
+ for record in data
+ ]
+
  def fetch_rate_limit_config_vars(self) -> dict:
  """
  Fetch a dict of rate limit config vars from Coop.
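
A usage sketch of the Coop additions above (not part of the diff; the import path, URL, username, alias, and cache keys are placeholders rather than values taken from the diff):

    from edsl.coop import Coop

    c = Coop()

    # get() now accepts /content/<username>/<alias> URLs as well as /content/<uuid>
    obj = c.get(url="https://www.expectedparrot.com/content/example_user/example-alias")

    # new endpoint listing models known to work, with per-million-token prices
    working_models = c.fetch_working_models()

    # remote_cache_get() can now restrict the fetch to selected keys
    entries = c.remote_cache_get(select_keys=["cache_key_1", "cache_key_2"])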
edsl/enums.py CHANGED
@@ -66,6 +66,7 @@ class InferenceServiceType(EnumWithChecks):
  MISTRAL = "mistral"
  TOGETHER = "together"
  PERPLEXITY = "perplexity"
+ DEEPSEEK = "deepseek"


  # unavoidable violation of the DRY principle but it is necessary
@@ -84,6 +85,7 @@ InferenceServiceLiteral = Literal[
  "mistral",
  "together",
  "perplexity",
+ "deepseek",
  ]

  available_models_urls = {
@@ -95,7 +97,6 @@ available_models_urls = {


  service_to_api_keyname = {
- InferenceServiceType.BEDROCK.value: "TBD",
  InferenceServiceType.DEEP_INFRA.value: "DEEP_INFRA_API_KEY",
  InferenceServiceType.REPLICATE.value: "TBD",
  InferenceServiceType.OPENAI.value: "OPENAI_API_KEY",
@@ -107,6 +108,7 @@ service_to_api_keyname = {
  InferenceServiceType.MISTRAL.value: "MISTRAL_API_KEY",
  InferenceServiceType.TOGETHER.value: "TOGETHER_API_KEY",
  InferenceServiceType.PERPLEXITY.value: "PERPLEXITY_API_KEY",
+ InferenceServiceType.DEEPSEEK.value: "DEEPSEEK_API_KEY"
  }
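
A sketch of using the newly registered DeepSeek service (not part of the diff; the model name is illustrative, and the environment variable name comes from the new service_to_api_keyname entry):

    import os
    from edsl import Model

    os.environ["DEEPSEEK_API_KEY"] = "sk-..."  # assumed placeholder key

    Model.available()           # model listing now includes the deepseek service
    m = Model("deepseek-chat")  # illustrative model name; confirm via Model.available()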
edsl/exceptions/coop.py CHANGED
@@ -2,6 +2,10 @@ class CoopErrors(Exception):
  pass


+ class CoopInvalidURLError(CoopErrors):
+ pass
+
+
  class CoopNoUUIDError(CoopErrors):
  pass

edsl/exceptions/jobs.py CHANGED
@@ -10,15 +10,7 @@ class JobsRunError(JobsErrors):


  class MissingRemoteInferenceError(JobsErrors):
- def __init__(self):
- message = dedent(
- """\
- You are trying to run the job remotely, but you have not set the EXPECTED_PARROT_INFERENCE_URL environment variable.
- This remote running service is not quite ready yet!
- But please see https://docs.expectedparrot.com/en/latest/coop.html for what we are working on.
- """
- )
- super().__init__(message)
+ pass


  class InterviewError(Exception):
edsl/exceptions/language_models.py CHANGED
@@ -34,11 +34,15 @@ class LanguageModelNotFound(LanguageModelExceptions):
  msg = dedent(
  f"""\
  Model {model_name} not found.
- To create an instance, you can do:
- >>> m = Model('gpt-4-1106-preview', temperature=0.5, ...)
+ To create an instance of this model, pass the model name to a `Model` object.
+ You can optionally pass additional parameters to the model, e.g.:
+ >>> m = Model('gpt-4-1106-preview', temperature=0.5)

- To get the default model, you can leave out the model name.
- To see the available models, you can do:
+ To use the default model, simply run your job without specifying a model.
+ To check the default model, run the following code:
+ >>> Model()
+
+ To see information about all available models, run the following code:
  >>> Model.available()

  See https://docs.expectedparrot.com/en/latest/language_models.html#available-models for more details.
edsl/exceptions/questions.py CHANGED
@@ -16,7 +16,8 @@ class QuestionErrors(Exception):
  class QuestionAnswerValidationError(QuestionErrors):
  documentation = "https://docs.expectedparrot.com/en/latest/exceptions.html"

- explanation = """This when the answer coming from the Language Model does not conform to the expectation for that question type.
+ explanation = """
+ This can occur when the answer coming from the Language Model does not conform to the expectations for the question type.
  For example, if the question is a multiple choice question, the answer should be drawn from the list of options provided.
  """

@@ -52,28 +53,24 @@

  def to_html_dict(self):
  return {
- "error_type": ("Name of the exception", "p", "/p", self.__class__.__name__),
- "explaination": ("Explanation", "p", "/p", self.explanation),
- "edsl answer": (
- "What model returned",
+ "Exception type": ("p", "/p", self.__class__.__name__),
+ "Explanation": ("p", "/p", self.explanation),
+ "EDSL response": (
  "pre",
  "/pre",
  json.dumps(self.data, indent=2),
  ),
- "validating_model": (
- "Pydantic model for answers",
+ "Validating model": (
  "pre",
  "/pre",
  json.dumps(self.model.model_json_schema(), indent=2),
  ),
- "error_message": (
- "Error message Pydantic returned",
+ "Error message": (
  "p",
  "/p",
  self.message,
  ),
- "documentation_url": (
- "URL to EDSL docs",
+ "Documentation": (
  f"a href='{self.documentation}'",
  "/a",
  self.documentation,