edsl 0.1.39.dev2__py3-none-any.whl → 0.1.39.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. edsl/Base.py +28 -0
  2. edsl/__init__.py +1 -1
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +8 -16
  5. edsl/agents/Invigilator.py +13 -14
  6. edsl/agents/InvigilatorBase.py +4 -1
  7. edsl/agents/PromptConstructor.py +42 -22
  8. edsl/agents/QuestionInstructionPromptBuilder.py +1 -1
  9. edsl/auto/AutoStudy.py +18 -5
  10. edsl/auto/StageBase.py +53 -40
  11. edsl/auto/StageQuestions.py +2 -1
  12. edsl/auto/utilities.py +0 -6
  13. edsl/coop/coop.py +21 -5
  14. edsl/data/Cache.py +29 -18
  15. edsl/data/CacheHandler.py +0 -2
  16. edsl/data/RemoteCacheSync.py +154 -46
  17. edsl/data/hack.py +10 -0
  18. edsl/enums.py +7 -0
  19. edsl/inference_services/AnthropicService.py +38 -16
  20. edsl/inference_services/AvailableModelFetcher.py +7 -1
  21. edsl/inference_services/GoogleService.py +5 -1
  22. edsl/inference_services/InferenceServicesCollection.py +18 -2
  23. edsl/inference_services/OpenAIService.py +46 -31
  24. edsl/inference_services/TestService.py +1 -3
  25. edsl/inference_services/TogetherAIService.py +5 -3
  26. edsl/inference_services/data_structures.py +74 -2
  27. edsl/jobs/AnswerQuestionFunctionConstructor.py +148 -113
  28. edsl/jobs/FetchInvigilator.py +10 -3
  29. edsl/jobs/InterviewsConstructor.py +6 -4
  30. edsl/jobs/Jobs.py +299 -233
  31. edsl/jobs/JobsChecks.py +2 -2
  32. edsl/jobs/JobsPrompts.py +1 -1
  33. edsl/jobs/JobsRemoteInferenceHandler.py +160 -136
  34. edsl/jobs/async_interview_runner.py +138 -0
  35. edsl/jobs/check_survey_scenario_compatibility.py +85 -0
  36. edsl/jobs/data_structures.py +120 -0
  37. edsl/jobs/interviews/Interview.py +80 -42
  38. edsl/jobs/results_exceptions_handler.py +98 -0
  39. edsl/jobs/runners/JobsRunnerAsyncio.py +87 -357
  40. edsl/jobs/runners/JobsRunnerStatus.py +131 -164
  41. edsl/jobs/tasks/TaskHistory.py +24 -3
  42. edsl/language_models/LanguageModel.py +59 -4
  43. edsl/language_models/ModelList.py +19 -8
  44. edsl/language_models/__init__.py +1 -1
  45. edsl/language_models/model.py +256 -0
  46. edsl/language_models/repair.py +1 -1
  47. edsl/questions/QuestionBase.py +35 -26
  48. edsl/questions/QuestionBasePromptsMixin.py +1 -1
  49. edsl/questions/QuestionBudget.py +1 -1
  50. edsl/questions/QuestionCheckBox.py +2 -2
  51. edsl/questions/QuestionExtract.py +5 -7
  52. edsl/questions/QuestionFreeText.py +1 -1
  53. edsl/questions/QuestionList.py +9 -15
  54. edsl/questions/QuestionMatrix.py +1 -1
  55. edsl/questions/QuestionMultipleChoice.py +1 -1
  56. edsl/questions/QuestionNumerical.py +1 -1
  57. edsl/questions/QuestionRank.py +1 -1
  58. edsl/questions/SimpleAskMixin.py +1 -1
  59. edsl/questions/__init__.py +1 -1
  60. edsl/questions/data_structures.py +20 -0
  61. edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +52 -49
  62. edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +6 -18
  63. edsl/questions/{ResponseValidatorFactory.py → response_validator_factory.py} +7 -1
  64. edsl/results/DatasetExportMixin.py +60 -119
  65. edsl/results/Result.py +109 -3
  66. edsl/results/Results.py +50 -39
  67. edsl/results/file_exports.py +252 -0
  68. edsl/scenarios/ScenarioList.py +35 -7
  69. edsl/surveys/Survey.py +71 -20
  70. edsl/test_h +1 -0
  71. edsl/utilities/gcp_bucket/example.py +50 -0
  72. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/METADATA +2 -2
  73. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/RECORD +85 -76
  74. edsl/language_models/registry.py +0 -180
  75. /edsl/agents/{QuestionOptionProcessor.py → question_option_processor.py} +0 -0
  76. /edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +0 -0
  77. /edsl/questions/{LoopProcessor.py → loop_processor.py} +0 -0
  78. /edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +0 -0
  79. /edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +0 -0
  80. /edsl/results/{Selector.py → results_selector.py} +0 -0
  81. /edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +0 -0
  82. /edsl/scenarios/{DirectoryScanner.py → directory_scanner.py} +0 -0
  83. /edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +0 -0
  84. /edsl/scenarios/{ScenarioSelector.py → scenario_selector.py} +0 -0
  85. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/LICENSE +0 -0
  86. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev4.dist-info}/WHEEL +0 -0
@@ -0,0 +1,256 @@
+ import textwrap
+ from random import random
+ from typing import Optional, TYPE_CHECKING, List
+
+ from edsl.utilities.PrettyList import PrettyList
+ from edsl.config import CONFIG
+
+ from edsl.inference_services.InferenceServicesCollection import (
+     InferenceServicesCollection,
+ )
+ from edsl.inference_services.data_structures import AvailableModels
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.enums import InferenceServiceLiteral
+
+ if TYPE_CHECKING:
+     from edsl.results.Dataset import Dataset
+
+
+ def get_model_class(model_name, registry: Optional[InferenceServicesCollection] = None):
+     from edsl.inference_services.registry import default
+
+     registry = registry or default
+     factory = registry.create_model_factory(model_name)
+     return factory
+
+
+ class Meta(type):
+     def __repr__(cls):
+         return textwrap.dedent(
+             f"""\
+             Available models: {cls.available()}
+
+             To create an instance, you can do:
+             >>> m = Model('gpt-4-1106-preview', temperature=0.5, ...)
+
+             To get the default model, you can leave out the model name.
+             To see the available models, you can do:
+             >>> Model.available()
+             """
+         )
+
+
+ class Model(metaclass=Meta):
+     default_model = CONFIG.get("EDSL_DEFAULT_MODEL")
+     _registry: InferenceServicesCollection = None  # Class-level registry storage
+
+     @classmethod
+     def get_registry(cls) -> InferenceServicesCollection:
+         """Get the current registry or initialize with default if None"""
+         if cls._registry is None:
+             from edsl.inference_services.registry import default
+
+             cls._registry = default
+         return cls._registry
+
+     @classmethod
+     def set_registry(cls, registry: InferenceServicesCollection) -> None:
+         """Set a new registry"""
+         cls._registry = registry
+
+     def __new__(
+         cls,
+         model_name: Optional[str] = None,
+         service_name: Optional[InferenceServiceLiteral] = None,
+         registry: Optional[InferenceServicesCollection] = None,
+         *args,
+         **kwargs,
+     ):
+         "Instantiate a new language model."
+         # Map index to the respective subclass
+         if model_name is None:
+             model_name = (
+                 cls.default_model
+             )  # when model_name is None, use the default model, set in the config file
+
+         if registry is not None:
+             cls.set_registry(registry)
+
+         if isinstance(model_name, int):  # can refer to a model by index
+             model_name = cls.available(name_only=True)[model_name]
+
+         factory = cls.get_registry().create_model_factory(
+             model_name, service_name=service_name
+         )
+         return factory(*args, **kwargs)
+
+     @classmethod
+     def add_model(cls, service_name, model_name) -> None:
+         cls.get_registry().add_model(service_name, model_name)
+
+     @classmethod
+     def service_classes(cls) -> List["InferenceServiceABC"]:
+         """Returns a list of service classes.
+
+         >>> Model.service_classes()
+         [...]
+         """
+         return [r for r in cls.services(name_only=True)]
+
+     @classmethod
+     def services(cls, name_only: bool = False) -> List[str]:
+         """Returns a list of services, annotated with whether the user has local keys for them."""
+         services_with_local_keys = set(cls.key_info().select("service").to_list())
+         f = lambda service_name: (
+             "yes" if service_name in services_with_local_keys else " "
+         )
+         if name_only:
+             return PrettyList(
+                 [r._inference_service_ for r in cls.get_registry().services],
+                 columns=["Service Name"],
+             )
+         else:
+             return PrettyList(
+                 [
+                     (r._inference_service_, f(r._inference_service_))
+                     for r in cls.get_registry().services
+                 ],
+                 columns=["Service Name", "Local key?"],
+             )
+
+     @classmethod
+     def services_with_local_keys(cls) -> set:
+         """Returns a list of services for which the user has local keys."""
+         return set(cls.key_info().select("service").to_list())
+
+     @classmethod
+     def key_info(cls, obscure_api_key: bool = True) -> "Dataset":
+         """Returns a dataset of local key information."""
+         from edsl.language_models.key_management.KeyLookupCollection import (
+             KeyLookupCollection,
+         )
+         from edsl.scenarios import Scenario, ScenarioList
+
+         klc = KeyLookupCollection()
+         klc.add_key_lookup(fetch_order=None)
+         sl = ScenarioList()
+         for service, entry in list(klc.data.values())[0].items():
+             sl.append(Scenario({"service": service} | entry.to_dict()))
+         if obscure_api_key:
+             for service in sl:
+                 service["api_token"] = (
+                     service["api_token"][:4] + "..." + service["api_token"][-4:]
+                 )
+         return sl.to_dataset()
+
+     @classmethod
+     def search_models(cls, search_term: str):
+         return cls.available(search_term=search_term)
+
+     @classmethod
+     def all_known_models(cls) -> "AvailableModels":
+         return cls.get_registry().available()
+
+     @classmethod
+     def available_with_local_keys(cls):
+         services_with_local_keys = set(cls.key_info().select("service").to_list())
+         return [
+             m
+             for m in cls.get_registry().available()
+             if m.service_name in services_with_local_keys
+         ]
+
+     @classmethod
+     def available(
+         cls,
+         search_term: str = None,
+         name_only: bool = False,
+         service: Optional[str] = None,
+     ):
+         # if search_term is None and service is None:
+         #     print("Getting available models...")
+         #     print("You have local keys for the following services:")
+         #     print(cls.services_with_local_keys())
+         #     print("\n")
+         #     print("To see models by service, use the 'service' parameter.")
+         #     print("E.g., Model.available(service='openai')")
+         #     return None
+
+         if service is not None:
+             if service not in cls.services(name_only=True):
+                 raise ValueError(
+                     f"Service {service} not found in available services.",
+                     f"Available services are: {cls.services()}",
+                 )
+
+         full_list = cls.get_registry().available(service=service)
+
+         if search_term is None:
+             if name_only:
+                 return PrettyList(
+                     [m.model_name for m in full_list],
+                     columns=["Model Name"],
+                 )
+             else:
+                 return PrettyList(
+                     [[m.model_name, m.service_name] for m in full_list],
+                     columns=["Model Name", "Service Name"],
+                 )
+         else:
+             filtered_results = [
+                 m
+                 for m in full_list
+                 if search_term in m.model_name or search_term in m.service_name
+             ]
+             if name_only:
+                 return PrettyList(
+                     [m.model_name for m in filtered_results],
+                     columns=["Model Name"],
+                 )
+             else:
+                 return PrettyList(
+                     [[m.model_name, m.service_name] for m in full_list],
+                     columns=["Model Name", "Service Name"],
+                 )
+
+     @classmethod
+     def check_models(cls, verbose=False):
+         print("Checking all available models...\n")
+         for model in cls.available(name_only=True):
+             print(f"Now checking: {model}")
+             try:
+                 m = cls(model)
+             except Exception as e:
+                 print(f"Error creating instance of {model}: {e}")
+                 continue
+             try:
+                 results = m.hello(verbose)
+                 if verbose:
+                     print(f"Results from model call: {results}")
+             except Exception as e:
+                 print(f"Error calling 'hello' on {model}: {e}")
+                 continue
+             print("OK!")
+             print("\n")
+
+     @classmethod
+     def example(cls, randomize: bool = False) -> "Model":
+         """
+         Returns an example Model instance.
+
+         :param randomize: If True, the temperature is set to a random decimal between 0 and 1.
+         """
+         temperature = 0.5 if not randomize else round(random(), 2)
+         model_name = cls.default_model
+         return cls(model_name, temperature=temperature)
+
+
+ if __name__ == "__main__":
+     import doctest
+
+     doctest.testmod(optionflags=doctest.ELLIPSIS)
+
+     available = Model.available()
+     m = Model("gpt-4-1106-preview")
+     results = m.execute_model_call("Hello world")
+     print(results)
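
Note: the hunk above adds edsl/language_models/model.py, the new entry point that takes over from the removed edsl/language_models/registry.py (see file 74 in the list). A minimal usage sketch based only on the code shown above; the model name and temperature are illustrative, taken from the module's own docstring:

    from edsl.language_models.model import Model

    # List known models as a PrettyList of [model_name, service_name] rows.
    available = Model.available()

    # Model.__new__ resolves the name through the registry and returns a
    # LanguageModel built by the matching service factory.
    m = Model("gpt-4-1106-preview", temperature=0.5)

    # A custom InferenceServicesCollection can be swapped in before instantiation;
    # `my_registry` here is a hypothetical stand-in.
    # Model.set_registry(my_registry)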
@@ -32,7 +32,7 @@ async def async_repair(
      else:
          return valid_dict, success

-     from edsl.language_models.registry import Model
+     from edsl.language_models.model import Model

      m = Model()

@@ -2,23 +2,31 @@

  from __future__ import annotations
  from abc import ABC, abstractmethod
- from typing import Any, Type, Optional, List, Callable, Union, TypedDict
+ from typing import Any, Type, Optional, List, Callable, Union, TypedDict, TYPE_CHECKING

  from edsl.exceptions.questions import (
      QuestionSerializationError,
  )
  from edsl.questions.descriptors import QuestionNameDescriptor, QuestionTextDescriptor

- from edsl.questions.AnswerValidatorMixin import AnswerValidatorMixin
- from edsl.questions.RegisterQuestionsMeta import RegisterQuestionsMeta
+ from edsl.questions.answer_validator_mixin import AnswerValidatorMixin
+ from edsl.questions.register_questions_meta import RegisterQuestionsMeta
  from edsl.Base import PersistenceMixin, RepresentationMixin
  from edsl.BaseDiff import BaseDiff, BaseDiffCollection

  from edsl.questions.SimpleAskMixin import SimpleAskMixin
  from edsl.questions.QuestionBasePromptsMixin import QuestionBasePromptsMixin
- from edsl.questions.QuestionBaseGenMixin import QuestionBaseGenMixin
+ from edsl.questions.question_base_gen_mixin import QuestionBaseGenMixin
  from edsl.utilities.remove_edsl_version import remove_edsl_version

+ if TYPE_CHECKING:
+     from edsl.questions.response_validator_abc import ResponseValidatorABC
+     from edsl.language_models.LanguageModel import LanguageModel
+     from edsl.results.Results import Results
+     from edsl.agents.Agent import Agent
+     from edsl.surveys.Survey import Survey
+     from edsl.jobs.Jobs import Jobs
+

  class QuestionBase(
      PersistenceMixin,
@@ -49,13 +57,17 @@ class QuestionBase(
      _question_presentation = None

      @property
-     def response_validator(self) -> "ResponseValidatorBase":
+     def response_validator(self) -> "ResponseValidatorABC":
          """Return the response validator."""
-         from edsl.questions.ResponseValidatorFactory import ResponseValidatorFactory
+         from edsl.questions.response_validator_factory import ResponseValidatorFactory

          rvf = ResponseValidatorFactory(self)
          return rvf.response_validator

+     def duplicate(self):
+         """Return a duplicate of the question."""
+         return self.from_dict(self.to_dict())
+
      @property
      def fake_data_factory(self):
          """Return the fake data factory."""
@@ -159,7 +171,7 @@ class QuestionBase(

          return candidate_data

-     def to_dict(self, add_edsl_version=True):
+     def to_dict(self, add_edsl_version: bool = True):
          """Convert the question to a dictionary that includes the question type (used in deserialization).

          >>> from edsl.questions import QuestionFreeText as Q; Q.example().to_dict(add_edsl_version = False)
@@ -213,9 +225,6 @@ class QuestionBase(

          return question_class(**local_data)

-     # endregion
-
-     # region: Running methods
      @classmethod
      def _get_test_model(self, canned_response: Optional[str] = None) -> "LanguageModel":
          """Get a test model for the question."""
@@ -243,7 +252,7 @@ class QuestionBase(
          Dataset([{'answer.how_are_you': ["Yo, what's up?"]}])
          """
          if model is None:
-             from edsl.language_models.registry import Model
+             from edsl.language_models.model import Model

              model = Model()
          results = (
@@ -262,21 +271,22 @@ class QuestionBase(

      def __call__(
          self,
-         just_answer=True,
-         model=None,
-         agent=None,
+         just_answer: bool = True,
+         model: Optional["LanguageModel"] = None,
+         agent: Optional["Agent"] = None,
          disable_remote_cache: bool = False,
          disable_remote_inference: bool = False,
          verbose: bool = False,
          **kwargs,
-     ):
+     ) -> Union[Any, "Results"]:
          """Call the question.


          >>> from edsl import QuestionFreeText as Q
-         >>> m = Q._get_test_model(canned_response = "Yo, what's up?")
+         >>> from edsl import Model
+         >>> m = Model("test", canned_response = "Yo, what's up?")
          >>> q = Q(question_name = "color", question_text = "What is your favorite color?")
-         >>> q(model = m, disable_remote_cache = True, disable_remote_inference = True)
+         >>> q(model = m, disable_remote_cache = True, disable_remote_inference = True, cache = False)
          "Yo, what's up?"

          """
@@ -285,7 +295,6 @@ class QuestionBase(
              model=model,
              agent=agent,
              **kwargs,
-             cache=False,
              verbose=verbose,
              disable_remote_cache=disable_remote_cache,
              disable_remote_inference=disable_remote_inference,
@@ -297,15 +306,16 @@ class QuestionBase(

      def run(self, *args, **kwargs) -> "Results":
          """Turn a single question into a survey and runs it."""
-         from edsl.surveys.Survey import Survey
+         return self.to_survey().run(*args, **kwargs)

-         s = self.to_survey()
-         return s.run(*args, **kwargs)
+     def using(self, *args, **kwargs) -> "Jobs":
+         """Turn a single question into a survey and then a Job."""
+         return self.to_survey().to_jobs().using(*args, **kwargs)

      async def run_async(
          self,
          just_answer: bool = True,
-         model: Optional["Model"] = None,
+         model: Optional["LanguageModel"] = None,
          agent: Optional["Agent"] = None,
          disable_remote_inference: bool = False,
          **kwargs,
@@ -316,7 +326,7 @@ class QuestionBase(
          >>> from edsl.questions import QuestionFreeText as Q
          >>> m = Q._get_test_model(canned_response = "Blue")
          >>> q = Q(question_name = "color", question_text = "What is your favorite color?")
-         >>> async def test_run_async(): result = await q.run_async(model=m, disable_remote_inference = True); print(result)
+         >>> async def test_run_async(): result = await q.run_async(model=m, disable_remote_inference = True, disable_remote_cache = True); print(result)
          >>> asyncio.run(test_run_async())
          Blue
          """
@@ -420,8 +430,7 @@ class QuestionBase(
          """
          from edsl.surveys.Survey import Survey

-         s = Survey([self])
-         return s
+         return Survey([self])

      def by(self, *args) -> "Jobs":
          """Turn a single question into a survey and then a Job."""
@@ -464,7 +473,7 @@ class QuestionBase(

      @classmethod
      def example_model(cls):
-         from edsl.language_models.registry import Model
+         from edsl.language_models.model import Model

          q = cls.example()
          m = Model("test", canned_response=cls._simulate_answer(q)["answer"])
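
Taken together, the QuestionBase hunks above move imports to the renamed snake_case modules, tighten type hints, and add duplicate() and using() helpers. A short sketch following the revised __call__ doctest; the canned test model and question text come directly from that doctest:

    from edsl import QuestionFreeText, Model

    m = Model("test", canned_response="Yo, what's up?")
    q = QuestionFreeText(question_name="color", question_text="What is your favorite color?")
    answer = q(model=m, disable_remote_cache=True, disable_remote_inference=True, cache=False)

    q_copy = q.duplicate()  # new helper: round-trips through to_dict()/from_dict()
    # using() is also new; it forwards to self.to_survey().to_jobs().using(*args, **kwargs).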
@@ -69,7 +69,7 @@ class QuestionBasePromptsMixin:
          >>> q.get_instructions(model = "gpt3")
          Prompt(text=\"""{{question_text}}. Answer in valid JSON like so {'answer': 'comment: <>}\""")
          """
-         from edsl.language_models.registry import Model
+         from edsl.language_models.model import Model

          if not hasattr(self, "_model_instructions"):
              self._model_instructions = {}
@@ -5,7 +5,7 @@ from pydantic import Field, BaseModel, validator

  from edsl.questions.QuestionBase import QuestionBase
  from edsl.questions.descriptors import IntegerDescriptor, QuestionOptionsDescriptor
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
+ from edsl.questions.response_validator_abc import ResponseValidatorABC


  class BudgewResponseValidator(ResponseValidatorABC):
@@ -13,8 +13,8 @@ from edsl.questions.descriptors import (
  from edsl.questions.decorators import inject_exception

  from pydantic import field_validator
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
- from edsl.questions.ResponseValidatorABC import BaseResponse
+ from edsl.questions.response_validator_abc import ResponseValidatorABC
+ from edsl.questions.data_structures import BaseResponse

  from edsl.exceptions.questions import QuestionAnswerValidationError

@@ -6,9 +6,8 @@ from typing import Any, Optional, Dict
  from edsl.questions.QuestionBase import QuestionBase
  from edsl.questions.descriptors import AnswerTemplateDescriptor

- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
- from edsl.questions.ResponseValidatorABC import BaseResponse
- from edsl.exceptions.questions import QuestionAnswerValidationError
+ from edsl.questions.response_validator_abc import ResponseValidatorABC
+ from edsl.questions.data_structures import BaseResponse
  from edsl.questions.decorators import inject_exception

  from typing import Dict, Any
@@ -57,7 +56,7 @@ def dict_to_pydantic_model(input_dict: Dict[str, Any]) -> Any:
      DynamicModel = create_model("DynamicModel", **field_definitions)

      class AnswerModel(BaseResponse):
-         answer: DynamicModel
+         answer: "DynamicModel"
          generated_tokens: Optional[str] = None
          comment: Optional[str] = None

@@ -113,6 +112,8 @@ class QuestionExtract(QuestionBase):
          :param question_name: The name of the question.
          :param question_text: The text of the question.
          :param answer_template: The template for the answer.
+         :param answering_instructions: Instructions for answering the question.
+         :param question_presentation: The presentation of the question.
          """
          self.question_name = question_name
          self.question_text = question_text
@@ -142,9 +143,6 @@ class QuestionExtract(QuestionBase):
          )
          return question_html_content

-     ################
-     # Helpful methods
-     ################
      @classmethod
      @inject_exception
      def example(cls) -> QuestionExtract:
@@ -5,7 +5,7 @@ from uuid import uuid4
  from pydantic import field_validator

  from edsl.questions.QuestionBase import QuestionBase
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
+ from edsl.questions.response_validator_abc import ResponseValidatorABC

  from edsl.exceptions.questions import QuestionAnswerValidationError
  from edsl.questions.decorators import inject_exception
@@ -1,21 +1,18 @@
  from __future__ import annotations
  import json
-
  from typing import Any, Optional, Union
- from edsl.questions.QuestionBase import QuestionBase
- from edsl.questions.descriptors import IntegerOrNoneDescriptor
- from edsl.questions.decorators import inject_exception

- from pydantic import field_validator, Field
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
- from edsl.questions.ResponseValidatorABC import BaseResponse
+ from pydantic import Field
+ from json_repair import repair_json

  from edsl.exceptions.questions import QuestionAnswerValidationError
-
- from json_repair import repair_json
+ from edsl.questions.QuestionBase import QuestionBase
+ from edsl.questions.descriptors import IntegerOrNoneDescriptor
+ from edsl.questions.decorators import inject_exception
+ from edsl.questions.response_validator_abc import ResponseValidatorABC


- def convert_string(s):
+ def convert_string(s: str) -> Union[float, int, str, dict]:
      """Convert a string to a more appropriate type if possible.

      >>> convert_string("3.14")
@@ -58,7 +55,7 @@ def convert_string(s):
      return s


- def create_model(max_list_items: int, permissive):
+ def create_model(max_list_items: int, permissive: bool) -> "ListResponse":
      from pydantic import BaseModel

      if permissive or max_list_items is None:
@@ -133,8 +130,8 @@ class QuestionList(QuestionBase):
          self,
          question_name: str,
          question_text: str,
-         max_list_items: Optional[int] = None,
          include_comment: bool = True,
+         max_list_items: Optional[int] = None,
          answering_instructions: Optional[str] = None,
          question_presentation: Optional[str] = None,
          permissive: bool = False,
@@ -184,9 +181,6 @@ class QuestionList(QuestionBase):
          ).render(question_name=self.question_name)
          return question_html_content

-     ################
-     # Helpful methods
-     ################
      @classmethod
      @inject_exception
      def example(
@@ -10,7 +10,7 @@ from edsl.questions.descriptors import (
      OptionLabelDescriptor,
      QuestionTextDescriptor,
  )
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
+ from edsl.questions.response_validator_abc import ResponseValidatorABC
  from edsl.questions.decorators import inject_exception
  from edsl.exceptions.questions import (
      QuestionAnswerValidationError,
@@ -8,7 +8,7 @@ from edsl.scenarios.Scenario import Scenario
  from edsl.questions.QuestionBase import QuestionBase
  from edsl.questions.descriptors import QuestionOptionsDescriptor
  from edsl.questions.decorators import inject_exception
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
+ from edsl.questions.response_validator_abc import ResponseValidatorABC


  def create_response_model(choices: List[str], permissive: bool = False):
@@ -9,7 +9,7 @@ from edsl.exceptions.questions import QuestionAnswerValidationError
  from edsl.questions.QuestionBase import QuestionBase
  from edsl.questions.descriptors import NumericalOrNoneDescriptor
  from edsl.questions.decorators import inject_exception
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
+ from edsl.questions.response_validator_abc import ResponseValidatorABC


  def create_numeric_response(
@@ -8,7 +8,7 @@ from edsl.questions.descriptors import (
      QuestionOptionsDescriptor,
      NumSelectionsDescriptor,
  )
- from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
+ from edsl.questions.response_validator_abc import ResponseValidatorABC


  def create_response_model(
@@ -66,7 +66,7 @@ class SimpleAskMixin:
      system_prompt="You are a helpful agent pretending to be a human. Do not break character",
      top_logprobs=4,
  ):
-     from edsl.language_models.registry import Model
+     from edsl.language_models.model import Model

      if model is None:
          model = Model()
@@ -1,6 +1,6 @@
  # Schemas
  from edsl.questions.settings import Settings
- from edsl.questions.RegisterQuestionsMeta import RegisterQuestionsMeta
+ from edsl.questions.register_questions_meta import RegisterQuestionsMeta

  # Base Class
  from edsl.questions.QuestionBase import QuestionBase
@@ -0,0 +1,20 @@
+ from typing import Any, Optional, TypedDict
+ from pydantic import BaseModel
+
+
+ class RawEdslAnswerDict(TypedDict):
+     answer: Any
+     comment: Optional[str]
+     generated_tokens: Optional[str]
+
+
+ class BaseResponse(BaseModel):
+     answer: Any
+     comment: Optional[str] = None
+     generated_tokens: Optional[str] = None
+
+
+ class EdslAnswerDict(TypedDict):
+     answer: Any
+     comment: Optional[str]
+     generated_tokens: Optional[str]
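
The new edsl/questions/data_structures.py centralizes the answer-shaped containers that the question modules above now import; in particular, BaseResponse replaces the copy that previously lived in ResponseValidatorABC.py. A small sketch of how these pieces fit together; the FreeTextResponse subclass is hypothetical and only for illustration:

    from typing import Optional

    from edsl.questions.data_structures import BaseResponse, EdslAnswerDict


    class FreeTextResponse(BaseResponse):  # hypothetical example subclass
        answer: str
        comment: Optional[str] = None


    # Validate a raw answer, then repackage it as the typed answer dictionary.
    validated = FreeTextResponse(answer="Blue")
    answer_dict: EdslAnswerDict = {
        "answer": validated.answer,
        "comment": validated.comment,
        "generated_tokens": validated.generated_tokens,
    }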