edsl 0.1.30.dev4__py3-none-any.whl → 0.1.30.dev5__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
edsl/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.1.30.dev4"
1
+ __version__ = "0.1.30.dev5"
@@ -30,6 +30,7 @@ class TokenBucket:
30
30
  if self.turbo_mode:
31
31
  pass
32
32
  else:
33
+ #pass
33
34
  self.turbo_mode = True
34
35
  self.capacity = float("inf")
35
36
  self.refill_rate = float("inf")
@@ -72,7 +73,17 @@ class TokenBucket:
72
73
  self.log.append((time.monotonic(), self.tokens))
73
74
 
74
75
  def refill(self) -> None:
75
- """Refill the bucket with new tokens based on elapsed time."""
76
+ """Refill the bucket with new tokens based on elapsed time.
77
+
78
+
79
+
80
+ >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
81
+ >>> bucket.tokens = 0
82
+ >>> bucket.refill()
83
+ >>> bucket.tokens > 0
84
+ True
85
+
86
+ """
76
87
  now = time.monotonic()
77
88
  elapsed = now - self.last_refill
78
89
  refill_amount = elapsed * self.refill_rate
@@ -20,6 +20,10 @@ from edsl.jobs.interviews.retry_management import retry_strategy
20
20
  from edsl.jobs.interviews.InterviewTaskBuildingMixin import InterviewTaskBuildingMixin
21
21
  from edsl.jobs.interviews.InterviewStatusMixin import InterviewStatusMixin
22
22
 
23
+ import asyncio
24
+
25
+ def run_async(coro):
26
+ return asyncio.run(coro)
23
27
 
24
28
  class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
25
29
  """
@@ -36,8 +40,8 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
36
40
  model: Type["LanguageModel"],
37
41
  debug: Optional[bool] = False,
38
42
  iteration: int = 0,
39
- cache: "Cache" = None,
40
- sidecar_model: "LanguageModel" = None,
43
+ cache: Optional["Cache"] = None,
44
+ sidecar_model: Optional['LanguageModel'] = None,
41
45
  ):
42
46
  """Initialize the Interview instance.
43
47
 
@@ -45,6 +49,24 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
45
49
  :param survey: the survey being administered to the agent.
46
50
  :param scenario: the scenario that populates the survey questions.
47
51
  :param model: the language model used to answer the questions.
52
+ :param debug: if True, run without calls to the language model.
53
+ :param iteration: the iteration number of the interview.
54
+ :param cache: the cache used to store the answers.
55
+ :param sidecar_model: a sidecar model used to answer questions.
56
+
57
+ >>> i = Interview.example()
58
+ >>> i.task_creators
59
+ {}
60
+
61
+ >>> i.exceptions
62
+ {}
63
+
64
+ >>> _ = asyncio.run(i.async_conduct_interview())
65
+ >>> i.task_status_logs['q0']
66
+ [{'log_time': ..., 'value': <TaskStatus.NOT_STARTED: 1>}, {'log_time': ..., 'value': <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>}, {'log_time': ..., 'value': <TaskStatus.API_CALL_IN_PROGRESS: 7>}, {'log_time': ..., 'value': <TaskStatus.SUCCESS: 8>}]
67
+
68
+ >>> i.to_index
69
+ {'q0': 0, 'q1': 1, 'q2': 2}
48
70
 
49
71
  """
50
72
  self.agent = agent
@@ -64,7 +86,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
64
86
  self.exceptions = InterviewExceptionCollection()
65
87
  self._task_status_log_dict = InterviewStatusLog()
66
88
 
67
- # dictionary mapping question names to their index in the survey."""
89
+ # dictionary mapping question names to their index in the survey.
68
90
  self.to_index = {
69
91
  question_name: index
70
92
  for index, question_name in enumerate(self.survey.question_names)
@@ -76,14 +98,16 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
76
98
  model_buckets: ModelBuckets = None,
77
99
  debug: bool = False,
78
100
  stop_on_exception: bool = False,
79
- sidecar_model: Optional[LanguageModel] = None,
101
+ sidecar_model: Optional['LanguageModel'] = None,
80
102
  ) -> tuple["Answers", List[dict[str, Any]]]:
81
103
  """
82
104
  Conduct an Interview asynchronously.
105
+ It returns a tuple with the answers and a list of valid results.
83
106
 
84
107
  :param model_buckets: a dictionary of token buckets for the model.
85
108
  :param debug: run without calls to LLM.
86
109
  :param stop_on_exception: if True, stops the interview if an exception is raised.
110
+ :param sidecar_model: a sidecar model used to answer questions.
87
111
 
88
112
  Example usage:
89
113
 
@@ -91,22 +115,44 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
91
115
  >>> result, _ = asyncio.run(i.async_conduct_interview())
92
116
  >>> result['q0']
93
117
  'yes'
118
+
119
+ >>> i = Interview.example(throw_exception = True)
120
+ >>> result, _ = asyncio.run(i.async_conduct_interview())
121
+ Attempt 1 failed with exception:This is a test error now waiting 1.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
122
+ <BLANKLINE>
123
+ <BLANKLINE>
124
+ Attempt 2 failed with exception:This is a test error now waiting 2.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
125
+ <BLANKLINE>
126
+ <BLANKLINE>
127
+ Attempt 3 failed with exception:This is a test error now waiting 4.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
128
+ <BLANKLINE>
129
+ <BLANKLINE>
130
+ Attempt 4 failed with exception:This is a test error now waiting 8.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
131
+ <BLANKLINE>
132
+ <BLANKLINE>
133
+
134
+ >>> i.exceptions
135
+ {'q0': [{'exception': "Exception('This is a test error')", 'time': ..., 'traceback': ...
136
+
137
+ >>> i = Interview.example()
138
+ >>> result, _ = asyncio.run(i.async_conduct_interview(stop_on_exception = True))
139
+ Traceback (most recent call last):
140
+ ...
141
+ asyncio.exceptions.CancelledError
94
142
  """
95
143
  self.sidecar_model = sidecar_model
96
144
 
97
145
  # if no model bucket is passed, create an 'infinity' bucket with no rate limits
98
- # print("model_buckets", model_buckets)
99
146
  if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
100
147
  model_buckets = ModelBuckets.infinity_bucket()
101
148
 
102
- # FOR TESTING
103
- # model_buckets = ModelBuckets.infinity_bucket()
104
-
149
+
105
150
  ## build the tasks using the InterviewTaskBuildingMixin
106
151
  ## This is the key part---it creates a task for each question,
107
152
  ## with dependencies on the questions that must be answered before this one can be answered.
108
153
  self.tasks = self._build_question_tasks(
109
- debug=debug, model_buckets=model_buckets
154
+ debug=debug,
155
+ model_buckets=model_buckets
110
156
  )
111
157
 
112
158
  ## 'Invigilators' are used to administer the survey
@@ -123,6 +169,14 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
123
169
  It iterates through the tasks and invigilators, and yields the results of the tasks that are done.
124
170
  If a task is not done, it raises a ValueError.
125
171
  If an exception is raised in the task, it records the exception in the Interview instance except if the task was cancelled, which is expected behavior.
172
+
173
+ >>> i = Interview.example()
174
+ >>> result, _ = asyncio.run(i.async_conduct_interview())
175
+ >>> results = list(i._extract_valid_results())
176
+ >>> len(results) == len(i.survey)
177
+ True
178
+ >>> type(results[0])
179
+ <class 'edsl.data_transfer_models.AgentResponseDict'>
126
180
  """
127
181
  assert len(self.tasks) == len(self.invigilators)
128
182
 
@@ -140,7 +194,18 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
140
194
  yield result
141
195
 
142
196
  def _record_exception(self, task, exception: Exception) -> None:
143
- """Record an exception in the Interview instance."""
197
+ """Record an exception in the Interview instance.
198
+
199
+ It records the exception in the Interview instance, with the task name and the exception entry.
200
+
201
+ >>> i = Interview.example()
202
+ >>> result, _ = asyncio.run(i.async_conduct_interview())
203
+ >>> i.exceptions
204
+ {}
205
+ >>> i._record_exception(i.tasks[0], Exception("An exception occurred."))
206
+ >>> i.exceptions
207
+ {'q0': [{'exception': "Exception('An exception occurred.')", 'time': ..., 'traceback': 'NoneType: None\\n'}]}
208
+ """
144
209
  exception_entry = InterviewExceptionEntry(
145
210
  exception=repr(exception),
146
211
  time=time.time(),
@@ -156,6 +221,10 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
156
221
  It is used to determine the order in which questions should be answered.
157
222
  This reflects both agent 'memory' considerations and 'skip' logic.
158
223
  The 'textify' parameter is set to True, so that the question names are returned as strings rather than integer indices.
224
+
225
+ >>> i = Interview.example()
226
+ >>> i.dag == {'q2': {'q0'}, 'q1': {'q0'}}
227
+ True
159
228
  """
160
229
  return self.survey.dag(textify=True)
161
230
 
@@ -166,8 +235,15 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
166
235
  """Return a string representation of the Interview instance."""
167
236
  return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"
168
237
 
169
- def duplicate(self, iteration: int, cache: Cache) -> Interview:
170
- """Duplicate the interview, but with a new iteration number and cache."""
238
+ def duplicate(self, iteration: int, cache: 'Cache') -> Interview:
239
+ """Duplicate the interview, but with a new iteration number and cache.
240
+
241
+ >>> i = Interview.example()
242
+ >>> i2 = i.duplicate(1, None)
243
+ >>> i.iteration + 1 == i2.iteration
244
+ True
245
+
246
+ """
171
247
  return Interview(
172
248
  agent=self.agent,
173
249
  survey=self.survey,
@@ -178,7 +254,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
178
254
  )
179
255
 
180
256
  @classmethod
181
- def example(self):
257
+ def example(self, throw_exception: bool = False) -> Interview:
182
258
  """Return an example Interview instance."""
183
259
  from edsl.agents import Agent
184
260
  from edsl.surveys import Survey
@@ -193,66 +269,15 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
193
269
  survey = Survey.example()
194
270
  scenario = Scenario.example()
195
271
  model = LanguageModel.example()
272
+ if throw_exception:
273
+ model = LanguageModel.example(test_model = True, throw_exception=True)
274
+ agent = Agent.example()
275
+ return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
196
276
  return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
197
277
 
198
278
 
199
279
  if __name__ == "__main__":
200
280
  import doctest
201
281
 
202
- doctest.testmod()
203
- # from edsl import Model
204
- # from edsl.agents import Agent
205
- # from edsl.surveys import Survey
206
- # from edsl.scenarios import Scenario
207
- # from edsl.questions import QuestionMultipleChoice
208
-
209
- # # from edsl.jobs.Interview import Interview
210
-
211
- # # a survey with skip logic
212
- # q0 = QuestionMultipleChoice(
213
- # question_text="Do you like school?",
214
- # question_options=["yes", "no"],
215
- # question_name="q0",
216
- # )
217
- # q1 = QuestionMultipleChoice(
218
- # question_text="Why not?",
219
- # question_options=["killer bees in cafeteria", "other"],
220
- # question_name="q1",
221
- # )
222
- # q2 = QuestionMultipleChoice(
223
- # question_text="Why?",
224
- # question_options=["**lack*** of killer bees in cafeteria", "other"],
225
- # question_name="q2",
226
- # )
227
- # s = Survey(questions=[q0, q1, q2])
228
- # s = s.add_rule(q0, "q0 == 'yes'", q2)
229
-
230
- # # create an interview
231
- # a = Agent(traits=None)
232
-
233
- # def direct_question_answering_method(self, question, scenario):
234
- # """Answer a question directly."""
235
- # raise Exception("Error!")
236
- # # return "yes"
237
-
238
- # a.add_direct_question_answering_method(direct_question_answering_method)
239
- # scenario = Scenario()
240
- # m = Model()
241
- # I = Interview(agent=a, survey=s, scenario=scenario, model=m)
242
-
243
- # result = asyncio.run(I.async_conduct_interview())
244
- # # # conduct five interviews
245
- # # for _ in range(5):
246
- # # I.conduct_interview(debug=True)
247
-
248
- # # # replace missing answers
249
- # # I
250
- # # repr(I)
251
- # # eval(repr(I))
252
- # # print(I.task_status_logs.status_matrix(20))
253
- # status_matrix = I.task_status_logs.status_matrix(20)
254
- # numerical_matrix = I.task_status_logs.numerical_matrix(20)
255
- # I.task_status_logs.visualize()
256
-
257
- # I.exceptions.print()
258
- # I.exceptions.ascii_table()
282
+ # add ellipsis
283
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
@@ -25,7 +25,7 @@ TIMEOUT = float(CONFIG.get("EDSL_API_TIMEOUT"))
25
25
  class InterviewTaskBuildingMixin:
26
26
  def _build_invigilators(
27
27
  self, debug: bool
28
- ) -> Generator[InvigilatorBase, None, None]:
28
+ ) -> Generator['InvigilatorBase', None, None]:
29
29
  """Create an invigilator for each question.
30
30
 
31
31
  :param debug: whether to use debug mode, in which case `InvigilatorDebug` is used.
@@ -35,7 +35,7 @@ class InterviewTaskBuildingMixin:
35
35
  for question in self.survey.questions:
36
36
  yield self._get_invigilator(question=question, debug=debug)
37
37
 
38
- def _get_invigilator(self, question: QuestionBase, debug: bool) -> "Invigilator":
38
+ def _get_invigilator(self, question: 'QuestionBase', debug: bool) -> "Invigilator":
39
39
  """Return an invigilator for the given question.
40
40
 
41
41
  :param question: the question to be answered
@@ -84,7 +84,7 @@ class InterviewTaskBuildingMixin:
84
84
  return tuple(tasks) # , invigilators
85
85
 
86
86
  def _get_tasks_that_must_be_completed_before(
87
- self, *, tasks: list[asyncio.Task], question: QuestionBase
87
+ self, *, tasks: list[asyncio.Task], question: 'QuestionBase'
88
88
  ) -> Generator[asyncio.Task, None, None]:
89
89
  """Return the tasks that must be completed before the given question can be answered.
90
90
 
@@ -100,7 +100,7 @@ class InterviewTaskBuildingMixin:
100
100
  def _create_question_task(
101
101
  self,
102
102
  *,
103
- question: QuestionBase,
103
+ question: 'QuestionBase',
104
104
  tasks_that_must_be_completed_before: list[asyncio.Task],
105
105
  model_buckets: ModelBuckets,
106
106
  debug: bool,
@@ -175,24 +175,12 @@ class InterviewTaskBuildingMixin:
175
175
 
176
176
  self._add_answer(response=response, question=question)
177
177
 
178
- # With the answer to the question, we can now cancel any skipped questions
179
178
  self._cancel_skipped_questions(question)
180
179
  return AgentResponseDict(**response)
181
180
  except Exception as e:
182
181
  raise e
183
- # import traceback
184
- # print("Exception caught:")
185
- # traceback.print_exc()
186
-
187
- # # Extract and print the traceback info
188
- # tb = e.__traceback__
189
- # while tb is not None:
190
- # print(f"File {tb.tb_frame.f_code.co_filename}, line {tb.tb_lineno}, in {tb.tb_frame.f_code.co_name}")
191
- # tb = tb.tb_next
192
- # breakpoint()
193
- # raise e
194
-
195
- def _add_answer(self, response: AgentResponseDict, question: QuestionBase) -> None:
182
+
183
+ def _add_answer(self, response: 'AgentResponseDict', question: 'QuestionBase') -> None:
196
184
  """Add the answer to the answers dictionary.
197
185
 
198
186
  :param response: the response to the question.
@@ -200,7 +188,7 @@ class InterviewTaskBuildingMixin:
200
188
  """
201
189
  self.answers.add_answer(response=response, question=question)
202
190
 
203
- def _skip_this_question(self, current_question: QuestionBase) -> bool:
191
+ def _skip_this_question(self, current_question: 'QuestionBase') -> bool:
204
192
  """Determine if the current question should be skipped.
205
193
 
206
194
  :param current_question: the question to be answered.
@@ -88,7 +88,8 @@ class QuestionTaskCreator(UserList):
88
88
  self.append(task)
89
89
 
90
90
  def generate_task(self, debug: bool) -> asyncio.Task:
91
- """Create a task that depends on the passed-in dependencies."""
91
+ """Create a task that depends on the passed-in dependencies.
92
+ """
92
93
  task = asyncio.create_task(
93
94
  self._run_task_async(debug), name=self.question.question_name
94
95
  )
@@ -144,18 +145,14 @@ class QuestionTaskCreator(UserList):
144
145
  self.task_status = TaskStatus.FAILED
145
146
  raise e
146
147
 
147
- ## This isn't working
148
- # breakpoint()
149
- if results.get("cache_used", False):
148
+ if results.get('cache_used', False):
150
149
  self.tokens_bucket.add_tokens(requested_tokens)
151
150
  self.requests_bucket.add_tokens(1)
152
151
  self.from_cache = True
153
- # print("Turning on turbo!")
152
+ # Turbo mode means that we don't wait for tokens or requests.
154
153
  self.tokens_bucket.turbo_mode_on()
155
154
  self.requests_bucket.turbo_mode_on()
156
155
  else:
157
- # breakpoint()
158
- # print("Turning off turbo!")
159
156
  self.tokens_bucket.turbo_mode_off()
160
157
  self.requests_bucket.turbo_mode_off()
161
158
 
@@ -494,7 +494,7 @@ class LanguageModel(
494
494
  return table
495
495
 
496
496
  @classmethod
497
- def example(cls, test_model: bool = False, canned_response: str = "Hello world"):
497
+ def example(cls, test_model: bool = False, canned_response: str = "Hello world", throw_exception: bool = False):
498
498
  """Return a default instance of the class.
499
499
 
500
500
  >>> from edsl.language_models import LanguageModel
@@ -519,6 +519,8 @@ class LanguageModel(
519
519
  ) -> dict[str, Any]:
520
520
  await asyncio.sleep(0.1)
521
521
  # return {"message": """{"answer": "Hello, world"}"""}
522
+ if throw_exception:
523
+ raise Exception("This is a test error")
522
524
  return {"message": f'{{"answer": "{canned_response}"}}'}
523
525
 
524
526
  def parse_response(self, raw_response: dict[str, Any]) -> str:
@@ -4,10 +4,33 @@ import inspect
4
4
  from edsl.questions.QuestionBase import QuestionBase
5
5
 
6
6
  from edsl.utilities.restricted_python import create_restricted_function
7
-
7
+ from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
8
8
 
9
9
  class QuestionFunctional(QuestionBase):
10
- """A special type of question that is *not* answered by an LLM."""
10
+ """A special type of question that is *not* answered by an LLM.
11
+
12
+ >>> from edsl import Scenario, Agent
13
+
14
+ # Create an instance of QuestionFunctional with the new function
15
+ >>> question = QuestionFunctional.example()
16
+
17
+ # Activate and test the function
18
+ >>> question.activate()
19
+ >>> scenario = Scenario({"numbers": [1, 2, 3, 4, 5]})
20
+ >>> agent = Agent(traits={"multiplier": 10})
21
+ >>> results = question.by(scenario).by(agent).run()
22
+ >>> results.select("answer.*").to_list()[0] == 150
23
+ True
24
+
25
+ # Serialize the question to a dictionary
26
+
27
+ >>> from edsl.questions.QuestionBase import QuestionBase
28
+ >>> new_question = QuestionBase.from_dict(question.to_dict())
29
+ >>> results = new_question.by(scenario).by(agent).run()
30
+ >>> results.select("answer.*").to_list()[0] == 150
31
+ True
32
+
33
+ """
11
34
 
12
35
  question_type = "functional"
13
36
  default_instructions = ""
@@ -73,6 +96,7 @@ class QuestionFunctional(QuestionBase):
73
96
  """Required by Question, but not used by QuestionFunctional."""
74
97
  raise NotImplementedError
75
98
 
99
+ @add_edsl_version
76
100
  def to_dict(self):
77
101
  return {
78
102
  "question_name": self.question_name,
@@ -81,6 +105,8 @@ class QuestionFunctional(QuestionBase):
81
105
  "requires_loop": self.requires_loop,
82
106
  "function_name": self.function_name,
83
107
  }
108
+
109
+
84
110
 
85
111
  @classmethod
86
112
  def example(cls):
@@ -113,4 +139,9 @@ def main():
113
139
  scenario = Scenario({"numbers": [1, 2, 3, 4, 5]})
114
140
  agent = Agent(traits={"multiplier": 10})
115
141
  results = question.by(scenario).by(agent).run()
116
- print(results)
142
+ assert results.select("answer.*").to_list()[0] == 150
143
+
144
+ if __name__ == "__main__":
145
+ #main()
146
+ import doctest
147
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
@@ -2,7 +2,7 @@ from __future__ import annotations
2
2
  import time
3
3
  from typing import Union
4
4
  import random
5
-
5
+ from typing import Optional
6
6
  from jinja2 import Template
7
7
 
8
8
  from edsl.questions.QuestionBase import QuestionBase
@@ -10,7 +10,11 @@ from edsl.questions.descriptors import QuestionOptionsDescriptor
10
10
 
11
11
 
12
12
  class QuestionMultipleChoice(QuestionBase):
13
- """This question prompts the agent to select one option from a list of options."""
13
+ """This question prompts the agent to select one option from a list of options.
14
+
15
+ https://docs.expectedparrot.com/en/latest/questions.html#questionmultiplechoice-class
16
+
17
+ """
14
18
 
15
19
  question_type = "multiple_choice"
16
20
  purpose = "When options are known and limited"
@@ -35,27 +39,69 @@ class QuestionMultipleChoice(QuestionBase):
35
39
  self.question_text = question_text
36
40
  self.question_options = question_options
37
41
 
42
+ # @property
43
+ # def question_options(self) -> Union[list[str], list[list], list[float], list[int]]:
44
+ # """Return the question options."""
45
+ # return self._question_options
46
+
38
47
  ################
39
48
  # Answer methods
40
49
  ################
41
50
  def _validate_answer(
42
51
  self, answer: dict[str, Union[str, int]]
43
52
  ) -> dict[str, Union[str, int]]:
44
- """Validate the answer."""
53
+ """Validate the answer.
54
+
55
+ >>> q = QuestionMultipleChoice.example()
56
+ >>> q._validate_answer({"answer": 0, "comment": "I like custard"})
57
+ {'answer': 0, 'comment': 'I like custard'}
58
+
59
+ >>> q = QuestionMultipleChoice(question_name="how_feeling", question_text="How are you?", question_options=["Good", "Great", "OK", "Bad"])
60
+ >>> q._validate_answer({"answer": -1, "comment": "I like custard"})
61
+ Traceback (most recent call last):
62
+ ...
63
+ edsl.exceptions.questions.QuestionAnswerValidationError: Answer code must be a non-negative integer (got -1).
64
+ """
45
65
  self._validate_answer_template_basic(answer)
46
66
  self._validate_answer_multiple_choice(answer)
47
67
  return answer
48
68
 
49
69
  def _translate_answer_code_to_answer(
50
- self, answer_code, scenario: "Scenario" = None
70
+ self,
71
+ answer_code: int,
72
+ scenario: Optional["Scenario"] = None
51
73
  ):
52
- """Translate the answer code to the actual answer."""
74
+ """Translate the answer code to the actual answer.
75
+
76
+ The answer code is the index of the selected option.
77
+ The question options might be templates, so they need to be rendered with the scenario.
78
+
79
+ >>> q = QuestionMultipleChoice.example()
80
+ >>> q._translate_answer_code_to_answer(0, {})
81
+ 'Good'
82
+
83
+ >>> q = QuestionMultipleChoice(question_name="how_feeling", question_text="How are you?", question_options=["{{emotion[0]}}", "emotion[1]"])
84
+ >>> q._translate_answer_code_to_answer(0, {"emotion": ["Happy", "Sad"]})
85
+ 'Happy'
86
+
87
+ """
53
88
  from edsl.scenarios.Scenario import Scenario
54
89
 
55
90
  scenario = scenario or Scenario()
56
- translated_options = [
57
- Template(str(option)).render(scenario) for option in self.question_options
58
- ]
91
+
92
+ if isinstance(self.question_options, str):
93
+ # If dynamic options are provided like {{ options }}, render them with the scenario
94
+ from jinja2 import Environment, meta
95
+ env = Environment()
96
+ parsed_content = env.parse(self.question_options)
97
+ question_option_key = list(meta.find_undeclared_variables(parsed_content))[0]
98
+ translated_options = scenario.get(question_option_key)
99
+ else:
100
+ translated_options = [
101
+ Template(str(option)).render(scenario) for option in self.question_options
102
+ ]
103
+ #print("Translated options:", translated_options)
104
+ #breakpoint()
59
105
  return translated_options[int(answer_code)]
60
106
 
61
107
  def _simulate_answer(
@@ -75,6 +121,7 @@ class QuestionMultipleChoice(QuestionBase):
75
121
 
76
122
  @property
77
123
  def question_html_content(self) -> str:
124
+ """Return the HTML version of the question."""
78
125
  if hasattr(self, "option_labels"):
79
126
  option_labels = self.option_labels
80
127
  else:
@@ -2,7 +2,7 @@
2
2
 
3
3
  from abc import ABC, abstractmethod
4
4
  import re
5
- from typing import Any, Callable
5
+ from typing import Any, Callable, List, Optional
6
6
  from edsl.exceptions import (
7
7
  QuestionCreationValidationError,
8
8
  QuestionAnswerValidationError,
@@ -242,6 +242,15 @@ class QuestionNameDescriptor(BaseDescriptor):
242
242
  class QuestionOptionsDescriptor(BaseDescriptor):
243
243
  """Validate that `question_options` is a list, does not exceed the min/max lengths, and has unique items."""
244
244
 
245
+ @classmethod
246
+ def example(cls):
247
+ class TestQuestion:
248
+ question_options = QuestionOptionsDescriptor()
249
+
250
+ def __init__(self, question_options: List[str]):
251
+ self.question_options = question_options
252
+ return TestQuestion
253
+
245
254
  def __init__(
246
255
  self,
247
256
  num_choices: int = None,
@@ -254,7 +263,31 @@ class QuestionOptionsDescriptor(BaseDescriptor):
254
263
  self.q_budget = q_budget
255
264
 
256
265
  def validate(self, value: Any, instance) -> None:
257
- """Validate the question options."""
266
+ """Validate the question options.
267
+
268
+ >>> q_class = QuestionOptionsDescriptor.example()
269
+ >>> _ = q_class(["a", "b", "c"])
270
+ >>> _ = q_class(["a", "b", "c", "d", "d"])
271
+ Traceback (most recent call last):
272
+ ...
273
+ edsl.exceptions.questions.QuestionCreationValidationError: Question options must be unique (got ['a', 'b', 'c', 'd', 'd']).
274
+
275
+ We allow dynamic question options, which are strings of the form '{{ question_options }}'.
276
+
277
+ >>> _ = q_class("{{dynamic_options}}")
278
+ >>> _ = q_class("dynamic_options")
279
+ Traceback (most recent call last):
280
+ ...
281
+ edsl.exceptions.questions.QuestionCreationValidationError: Dynamic question options must be of the form: '{{ question_options }}'.
282
+ """
283
+ if isinstance(value, str):
284
+ # Check if the string is a dynamic question option
285
+ if "{{" in value and "}}" in value:
286
+ return None
287
+ else:
288
+ raise QuestionCreationValidationError(
289
+ "Dynamic question options must be of the form: '{{ question_options }}'."
290
+ )
258
291
  if not isinstance(value, list):
259
292
  raise QuestionCreationValidationError(
260
293
  f"Question options must be a list (got {value})."
@@ -339,3 +372,8 @@ class QuestionTextDescriptor(BaseDescriptor):
339
372
  f"WARNING: Question text contains a single-braced substring: If you intended to parameterize the question with a Scenario this should be changed to a double-braced substring, e.g. {{variable}}.\nSee details on constructing Scenarios in the docs: https://docs.expectedparrot.com/en/latest/scenarios.html",
340
373
  UserWarning,
341
374
  )
375
+
376
+ if __name__ == "__main__":
377
+ import doctest
378
+
379
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
@@ -15,6 +15,9 @@ class DatasetExportMixin:
15
15
  ) -> list:
16
16
  """Return the set of keys that are present in the dataset.
17
17
 
18
+ :param data_type: The data type to filter by.
19
+ :param remove_prefix: Whether to remove the prefix from the column names.
20
+
18
21
  >>> from edsl.results.Dataset import Dataset
19
22
  >>> d = Dataset([{'a.b':[1,2,3,4]}])
20
23
  >>> d.relevant_columns()
@@ -27,7 +30,6 @@ class DatasetExportMixin:
27
30
  ['answer.how_feeling', 'answer.how_feeling_yesterday']
28
31
  """
29
32
  columns = [list(x.keys())[0] for x in self]
30
- # columns = set([list(result.keys())[0] for result in self.data])
31
33
  if remove_prefix:
32
34
  columns = [column.split(".")[-1] for column in columns]
33
35
 
@@ -71,7 +73,15 @@ class DatasetExportMixin:
71
73
  return header, rows
72
74
 
73
75
  def print_long(self):
74
- """Print the results in a long format."""
76
+ """Print the results in a long format.
77
+ >>> from edsl.results import Results
78
+ >>> r = Results.example()
79
+ >>> r.select('how_feeling').print_long()
80
+ answer.how_feeling: OK
81
+ answer.how_feeling: Great
82
+ answer.how_feeling: Terrible
83
+ answer.how_feeling: OK
84
+ """
75
85
  for entry in self:
76
86
  key, list_of_values = list(entry.items())[0]
77
87
  for value in list_of_values:
@@ -117,6 +127,42 @@ class DatasetExportMixin:
117
127
  │ OK │
118
128
  └──────────────┘
119
129
 
130
+ >>> r = Results.example()
131
+ >>> r2 = r.select("how_feeling").print(format = "rich", tee = True, max_rows = 2)
132
+ ┏━━━━━━━━━━━━━━┓
133
+ ┃ answer ┃
134
+ ┃ .how_feeling ┃
135
+ ┡━━━━━━━━━━━━━━┩
136
+ │ OK │
137
+ ├──────────────┤
138
+ │ Great │
139
+ └──────────────┘
140
+ >>> r2
141
+ Dataset([{'answer.how_feeling': ['OK', 'Great', 'Terrible', 'OK']}])
142
+
143
+ >>> r.select('how_feeling').print(format = "rich", max_rows = 2)
144
+ ┏━━━━━━━━━━━━━━┓
145
+ ┃ answer ┃
146
+ ┃ .how_feeling ┃
147
+ ┡━━━━━━━━━━━━━━┩
148
+ │ OK │
149
+ ├──────────────┤
150
+ │ Great │
151
+ └──────────────┘
152
+
153
+ >>> r.select('how_feeling').print(format = "rich", split_at_dot = False)
154
+ ┏━━━━━━━━━━━━━━━━━━━━┓
155
+ ┃ answer.how_feeling ┃
156
+ ┡━━━━━━━━━━━━━━━━━━━━┩
157
+ │ OK │
158
+ ├────────────────────┤
159
+ │ Great │
160
+ ├────────────────────┤
161
+ │ Terrible │
162
+ ├────────────────────┤
163
+ │ OK │
164
+ └────────────────────┘
165
+
120
166
  Example: using the pretty_labels parameter
121
167
 
122
168
  >>> r.select('how_feeling').print(format="rich", pretty_labels = {'answer.how_feeling': "How are you feeling"})
@@ -154,6 +200,9 @@ class DatasetExportMixin:
154
200
 
155
201
  if pretty_labels is None:
156
202
  pretty_labels = {}
203
+ else:
204
+ # if the user passes in pretty_labels, we don't want to split at the dot
205
+ split_at_dot = False
157
206
 
158
207
  if format not in ["rich", "html", "markdown", "latex"]:
159
208
  raise ValueError("format must be one of 'rich', 'html', or 'markdown'.")
@@ -168,8 +217,7 @@ class DatasetExportMixin:
168
217
  for key in entry:
169
218
  actual_rows = len(entry[key])
170
219
  entry[key] = entry[key][:max_rows]
171
- # print(f"Showing only the first {max_rows} rows of {actual_rows} rows.")
172
-
220
+
173
221
  if format == "rich":
174
222
  from edsl.utilities.interface import print_dataset_with_rich
175
223
 
@@ -245,6 +293,10 @@ class DatasetExportMixin:
245
293
  >>> r = Results.example()
246
294
  >>> r.select('how_feeling').to_csv()
247
295
  'answer.how_feeling\\r\\nOK\\r\\nGreat\\r\\nTerrible\\r\\nOK\\r\\n'
296
+
297
+ >>> r.select('how_feeling').to_csv(pretty_labels = {'answer.how_feeling': "How are you feeling"})
298
+ 'How are you feeling\\r\\nOK\\r\\nGreat\\r\\nTerrible\\r\\nOK\\r\\n'
299
+
248
300
  """
249
301
  if pretty_labels is None:
250
302
  pretty_labels = {}
@@ -309,6 +361,15 @@ class DatasetExportMixin:
309
361
  return ScenarioList([Scenario(d) for d in list_of_dicts])
310
362
 
311
363
  def to_agent_list(self, remove_prefix: bool = True):
364
+ """Convert the results to a list of dictionaries, one per agent.
365
+
366
+ :param remove_prefix: Whether to remove the prefix from the column names.
367
+
368
+ >>> from edsl.results import Results
369
+ >>> r = Results.example()
370
+ >>> r.select('how_feeling').to_agent_list()
371
+ AgentList([Agent(traits = {'how_feeling': 'OK'}), Agent(traits = {'how_feeling': 'Great'}), Agent(traits = {'how_feeling': 'Terrible'}), Agent(traits = {'how_feeling': 'OK'})])
372
+ """
312
373
  from edsl import AgentList, Agent
313
374
 
314
375
  list_of_dicts = self.to_dicts(remove_prefix=remove_prefix)
@@ -344,6 +405,9 @@ class DatasetExportMixin:
344
405
  def to_list(self, flatten=False, remove_none=False) -> list[list]:
345
406
  """Convert the results to a list of lists.
346
407
 
408
+ :param flatten: Whether to flatten the list of lists.
409
+ :param remove_none: Whether to remove None values from the list.
410
+
347
411
  >>> from edsl.results import Results
348
412
  >>> Results.example().select('how_feeling', 'how_feeling_yesterday')
349
413
  Dataset([{'answer.how_feeling': ['OK', 'Great', 'Terrible', 'OK']}, {'answer.how_feeling_yesterday': ['Great', 'Good', 'OK', 'Terrible']}])
@@ -354,6 +418,18 @@ class DatasetExportMixin:
354
418
  >>> r = Results.example()
355
419
  >>> r.select('how_feeling').to_list()
356
420
  ['OK', 'Great', 'Terrible', 'OK']
421
+
422
+ >>> from edsl.results.Dataset import Dataset
423
+ >>> Dataset([{'a.b': [[1, 9], 2, 3, 4]}]).select('a.b').to_list(flatten = True)
424
+ [1, 9, 2, 3, 4]
425
+
426
+ >>> from edsl.results.Dataset import Dataset
427
+ >>> Dataset([{'a.b': [[1, 9], 2, 3, 4]}, {'c': [6, 2, 3, 4]}]).select('a.b', 'c').to_list(flatten = True)
428
+ Traceback (most recent call last):
429
+ ...
430
+ ValueError: Cannot flatten a list of lists when there are multiple columns selected.
431
+
432
+
357
433
  """
358
434
  if len(self.relevant_columns()) > 1 and flatten:
359
435
  raise ValueError(
@@ -385,7 +461,7 @@ class DatasetExportMixin:
385
461
  return list_to_return
386
462
 
387
463
  def html(
388
- self, filename: str = None, cta: str = "Open in browser", return_link=False
464
+ self, filename: Optional[str] = None, cta: str = "Open in browser", return_link:bool=False
389
465
  ):
390
466
  import os
391
467
  import tempfile
@@ -419,7 +495,7 @@ class DatasetExportMixin:
419
495
  return filename
420
496
 
421
497
  def tally(
422
- self, *fields: Optional[str], top_n=None, output="dict"
498
+ self, *fields: Optional[str], top_n:Optional[int]=None, output="dict"
423
499
  ) -> Union[dict, "Dataset"]:
424
500
  """Tally the values of a field or perform a cross-tab of multiple fields.
425
501
 
edsl/results/Result.py CHANGED
@@ -126,6 +126,9 @@ class Result(Base, UserDict):
126
126
  self.survey = survey
127
127
  self.question_to_attributes = question_to_attributes
128
128
 
129
+ self._combined_dict = None
130
+ self._problem_keys = None
131
+
129
132
  ###############
130
133
  # Used in Results
131
134
  ###############
@@ -164,25 +167,62 @@ class Result(Base, UserDict):
164
167
  "answer": self.answer,
165
168
  "prompt": self.prompt,
166
169
  "raw_model_response": self.raw_model_response,
167
- "iteration": {"iteration": self.iteration},
170
+ # "iteration": {"iteration": self.iteration},
168
171
  "question_text": question_text_dict,
169
172
  "question_options": question_options_dict,
170
173
  "question_type": question_type_dict,
171
174
  "comment": comments_dict,
172
175
  }
176
+
177
+ def check_expression(self, expression) -> None:
178
+ for key in self.problem_keys:
179
+ if key in expression and not key + "." in expression:
180
+ raise ValueError(f"Key by iself {key} is problematic. Use the full key {key + '.' + key} name instead.")
181
+ return None
173
182
 
174
183
  def code(self):
175
184
  """Return a string of code that can be used to recreate the Result object."""
176
185
  raise NotImplementedError
177
-
186
+
178
187
  @property
179
- def combined_dict(self) -> dict[str, Any]:
180
- """Return a dictionary that includes all sub_dicts, but also puts the key-value pairs in each sub_dict as a key_value pair in the combined dictionary."""
188
+ def problem_keys(self):
189
+ """Return a list of keys that are problematic."""
190
+ return self._problem_keys
191
+
192
+ def _compute_combined_dict_and_problem_keys(self) -> None:
181
193
  combined = {}
194
+ problem_keys = []
182
195
  for key, sub_dict in self.sub_dicts.items():
183
196
  combined.update(sub_dict)
197
+ # in some cases, the sub_dict might have keys that conflict with the main dict
198
+ if key in combined:
199
+ # The key is already in the combined dict
200
+ problem_keys = problem_keys + [key]
201
+
184
202
  combined.update({key: sub_dict})
185
- return combined
203
+ # I *think* this allows us to do things like "answer.how_feeling", i.e., the evaluator can use
204
+ # dot notation to access the subdicts.
205
+ self._combined_dict = combined
206
+ self._problem_keys = problem_keys
207
+
208
+ @property
209
+ def combined_dict(self) -> dict[str, Any]:
210
+ """Return a dictionary that includes all sub_dicts, but also puts the key-value pairs in each sub_dict as a key_value pair in the combined dictionary.
211
+
212
+ >>> r = Result.example()
213
+ >>> r.combined_dict['how_feeling']
214
+ 'OK'
215
+ """
216
+ if self._combined_dict is None or self._problem_keys is None:
217
+ self._compute_combined_dict_and_problem_keys()
218
+ return self._combined_dict
219
+
220
+ @property
221
+ def problem_keys(self):
222
+ """Return a list of keys that are problematic."""
223
+ if self._combined_dict is None or self._problem_keys is None:
224
+ self._compute_combined_dict_and_problem_keys()
225
+ return self._problem_keys
186
226
 
187
227
  def get_value(self, data_type: str, key: str) -> Any:
188
228
  """Return the value for a given data type and key.
@@ -226,7 +266,13 @@ class Result(Base, UserDict):
226
266
  return Result.from_dict(self.to_dict())
227
267
 
228
268
  def __eq__(self, other) -> bool:
229
- """Return True if the Result object is equal to another Result object."""
269
+ """Return True if the Result object is equal to another Result object.
270
+
271
+ >>> r = Result.example()
272
+ >>> r == r
273
+ True
274
+
275
+ """
230
276
  return self.to_dict() == other.to_dict()
231
277
 
232
278
  ###############
edsl/results/Results.py CHANGED
@@ -603,6 +603,36 @@ class Results(UserList, Mixins, Base):
603
603
  values = [d[key] for d in columns]
604
604
  self = self.add_column(key, values)
605
605
  return self
606
+
607
+ @staticmethod
608
+ def _create_evaluator(result: Result, functions_dict: Optional[dict] = None) -> EvalWithCompoundTypes:
609
+ """Create an evaluator for the expression.
610
+
611
+ >>> from unittest.mock import Mock
612
+ >>> result = Mock()
613
+ >>> result.combined_dict = {'how_feeling': 'OK'}
614
+
615
+ >>> evaluator = Results._create_evaluator(result = result, functions_dict = {})
616
+ >>> evaluator.eval("how_feeling == 'OK'")
617
+ True
618
+
619
+ >>> result.combined_dict = {'answer': {'how_feeling': 'OK'}}
620
+ >>> evaluator = Results._create_evaluator(result = result, functions_dict = {})
621
+ >>> evaluator.eval("answer.how_feeling== 'OK'")
622
+ True
623
+
624
+ Note that you need to refer to the answer dictionary in the expression.
625
+
626
+ >>> evaluator.eval("how_feeling== 'OK'")
627
+ Traceback (most recent call last):
628
+ ...
629
+ simpleeval.NameNotDefined: 'how_feeling' is not defined for expression 'how_feeling== 'OK''
630
+ """
631
+ if functions_dict is None:
632
+ functions_dict = {}
633
+ return EvalWithCompoundTypes(
634
+ names=result.combined_dict, functions=functions_dict
635
+ )
606
636
 
607
637
  def mutate(
608
638
  self, new_var_string: str, functions_dict: Optional[dict] = None
@@ -636,13 +666,8 @@ class Results(UserList, Mixins, Base):
636
666
  # create the evaluator
637
667
  functions_dict = functions_dict or {}
638
668
 
639
- def create_evaluator(result: Result) -> EvalWithCompoundTypes:
640
- return EvalWithCompoundTypes(
641
- names=result.combined_dict, functions=functions_dict
642
- )
643
-
644
669
  def new_result(old_result: "Result", var_name: str) -> "Result":
645
- evaluator = create_evaluator(old_result)
670
+ evaluator = self._create_evaluator(old_result, functions_dict)
646
671
  value = evaluator.eval(expression)
647
672
  new_result = old_result.copy()
648
673
  new_result["answer"][var_name] = value
@@ -742,6 +767,9 @@ class Results(UserList, Mixins, Base):
742
767
  >>> results = Results.example()
743
768
  >>> results.select('how_feeling')
744
769
  Dataset([{'answer.how_feeling': ['OK', 'Great', 'Terrible', 'OK']}])
770
+
771
+ >>> results.select('how_feeling', 'model', 'how_feeling')
772
+ Dataset([{'answer.how_feeling': ['OK', 'Great', 'Terrible', 'OK']}, {'model.model': ['gpt-4-1106-preview', 'gpt-4-1106-preview', 'gpt-4-1106-preview', 'gpt-4-1106-preview']}, {'answer.how_feeling': ['OK', 'Great', 'Terrible', 'OK']}])
745
773
  """
746
774
 
747
775
  if len(self) == 0:
@@ -799,10 +827,18 @@ class Results(UserList, Mixins, Base):
799
827
  # Return the index of this key in the list_of_keys
800
828
  return items_in_order.index(single_key)
801
829
 
802
- sorted(new_data, key=sort_by_key_order)
830
+ #sorted(new_data, key=sort_by_key_order)
803
831
  from edsl.results.Dataset import Dataset
832
+ sorted_new_data = []
804
833
 
805
- return Dataset(new_data)
834
+ # WORKS but slow
835
+ for key in items_in_order:
836
+ for d in new_data:
837
+ if key in d:
838
+ sorted_new_data.append(d)
839
+ break
840
+
841
+ return Dataset(sorted_new_data)
806
842
 
807
843
  def sort_by(self, *columns: str, reverse: bool = False) -> Results:
808
844
  import warnings
@@ -917,31 +953,31 @@ class Results(UserList, Mixins, Base):
917
953
  "You must use '==' instead of '=' in the filter expression."
918
954
  )
919
955
 
920
- def create_evaluator(result):
921
- """Create an evaluator for the given result.
922
- The 'combined_dict' is a mapping of all values for that Result object.
923
- """
924
- return EvalWithCompoundTypes(names=result.combined_dict)
925
-
926
956
  try:
927
957
  # iterates through all the results and evaluates the expression
928
- new_data = [
929
- result
930
- for result in self.data
931
- if create_evaluator(result).eval(expression)
932
- ]
958
+ new_data = []
959
+ for result in self.data:
960
+ evaluator = self._create_evaluator(result)
961
+ result.check_expression(expression) # check expression
962
+ if evaluator.eval(expression):
963
+ new_data.append(result)
964
+
965
+ except ValueError as e:
966
+ raise ResultsFilterError(
967
+ f"Error in filter. Exception:{e}",
968
+ f"The expression you provided was: {expression}",
969
+ "See https://docs.expectedparrot.com/en/latest/results.html#filtering-results for more details.",
970
+ )
933
971
  except Exception as e:
934
972
  raise ResultsFilterError(
935
- f"""Error in filter. Exception:{e}.
936
- The expression you provided was: {expression}.
937
- Please make sure that the expression is a valid Python expression that evaluates to a boolean.
938
- For example, 'how_feeling == "Great"' is a valid expression, as is 'how_feeling in ["Great", "Terrible"]'.
939
- However, 'how_feeling = "Great"' is not a valid expression.
940
-
941
- See https://docs.expectedparrot.com/en/latest/results.html#filtering-results for more details.
942
- """
973
+ f"""Error in filter. Exception:{e}.""",
974
+ f"""The expression you provided was: {expression}.""",
975
+ """Please make sure that the expression is a valid Python expression that evaluates to a boolean.""",
976
+ """For example, 'how_feeling == "Great"' is a valid expression, as is 'how_feeling in ["Great", "Terrible"]'., """,
977
+ """However, 'how_feeling = "Great"' is not a valid expression.""",
978
+ """See https://docs.expectedparrot.com/en/latest/results.html#filtering-results for more details."""
943
979
  )
944
-
980
+
945
981
  if len(new_data) == 0:
946
982
  import warnings
947
983
 
edsl/study/Study.py CHANGED
@@ -461,13 +461,13 @@ class Study:
461
461
  else:
462
462
  self.objects[oe.hash] = oe
463
463
 
464
- def push(self, refresh=False) -> None:
464
+ def push(self) -> dict:
465
465
  """Push the objects to coop."""
466
466
 
467
467
  from edsl import Coop
468
468
 
469
469
  coop = Coop()
470
- coop.create(self, description=self.description)
470
+ return coop.create(self, description=self.description)
471
471
 
472
472
  @classmethod
473
473
  def pull(cls, uuid: Optional[Union[str, UUID]] = None, url: Optional[str] = None):
edsl/surveys/Survey.py CHANGED
@@ -105,6 +105,36 @@ class Survey(SurveyExportMixin, SurveyFlowVisualizationMixin, Base):
105
105
  from edsl.utilities.utilities import dict_hash
106
106
 
107
107
  return dict_hash(self._to_dict())
108
+
109
+ def __add__(self, other: Survey) -> Survey:
110
+ """Combine two surveys.
111
+
112
+ :param other: The other survey to combine with this one.
113
+ >>> s1 = Survey.example()
114
+ >>> from edsl import QuestionFreeText
115
+ >>> s2 = Survey([QuestionFreeText(question_text="What is your name?", question_name="yo")])
116
+ >>> s3 = s1 + s2
117
+ Traceback (most recent call last):
118
+ ...
119
+ ValueError: ('Cannot combine two surveys with non-default rules.', "Please use the 'clear_non_default_rules' method to remove non-default rules from the survey.")
120
+ >>> s3 = s1.clear_non_default_rules() + s2
121
+ >>> len(s3.questions)
122
+ 4
123
+
124
+ """
125
+ if len(self.rule_collection.non_default_rules) > 0 or len(other.rule_collection.non_default_rules) > 0:
126
+ raise ValueError(
127
+ "Cannot combine two surveys with non-default rules.",
128
+ "Please use the 'clear_non_default_rules' method to remove non-default rules from the survey."
129
+ )
130
+
131
+ return Survey(questions=self.questions + other.questions)
132
+
133
+ def clear_non_default_rules(self) -> Survey:
134
+ s = Survey()
135
+ for question in self.questions:
136
+ s.add_question(question)
137
+ return s
108
138
 
109
139
  @property
110
140
  def parameters(self):
@@ -1151,4 +1181,5 @@ def main():
1151
1181
  if __name__ == "__main__":
1152
1182
  import doctest
1153
1183
 
1154
- doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.SKIP)
1184
+ #doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.SKIP)
1185
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: edsl
3
- Version: 0.1.30.dev4
3
+ Version: 0.1.30.dev5
4
4
  Summary: Create and analyze LLM-based surveys
5
5
  Home-page: https://www.expectedparrot.com/
6
6
  License: MIT
@@ -1,7 +1,7 @@
1
1
  edsl/Base.py,sha256=ttNxUotSd9LSEJl2w6LdMtT78d0nMQvYDJ0q4JkqBfg,8945
2
2
  edsl/BaseDiff.py,sha256=RoVEh52UJs22yMa7k7jv8se01G62jJNWnBzaZngo-Ug,8260
3
3
  edsl/__init__.py,sha256=E6PkWI_owu8AUc4uJs2XWDVozqSbcRWzsIqf8_Kskho,1631
4
- edsl/__version__.py,sha256=XqB4ByNbNvd6vFzVKO_2YpxRUrkpXNUQUKNU_uIIOng,28
4
+ edsl/__version__.py,sha256=36CHcnfZnyhdfT-d4JZkCR1tvVI-WobrzSNjUWpJx80,28
5
5
  edsl/agents/Agent.py,sha256=qNJsQkN6HuTKqJrQbuUEgRX3Wo7Dwukle0oNWPi0UIE,27191
6
6
  edsl/agents/AgentList.py,sha256=_MsdeOEgaANAceLIXwuLC22mwlBn0ruGX4GEqz8_SSY,9467
7
7
  edsl/agents/Invigilator.py,sha256=WNgGT9VRKpHbk__h-vd4LASgjnlJnzepf-2FxQ3K98I,10798
@@ -70,14 +70,14 @@ edsl/jobs/Jobs.py,sha256=JrJcpTjR3wejMVvuFXZr7PHqCyj6zaRwqNu9eatvy9Y,29339
70
70
  edsl/jobs/__init__.py,sha256=aKuAyd_GoalGj-k7djOoVwEbFUE2XLPlikXaA1_8yAg,32
71
71
  edsl/jobs/buckets/BucketCollection.py,sha256=LA8DBVwMdeTFCbSDI0S2cDzfi_Qo6kRizwrG64tE8S4,1844
72
72
  edsl/jobs/buckets/ModelBuckets.py,sha256=hxw_tzc0V42CiB7mh5jIxlgwDVJ-zFZhlLtKrHEg8ho,2419
73
- edsl/jobs/buckets/TokenBucket.py,sha256=jeWOzJBr70FJKpa_pOhLFwTBz6RB39tg9-ki5x4PmtM,5774
74
- edsl/jobs/interviews/Interview.py,sha256=mnup2JIDlFrwE23pLBXzPDZpX9tJFp5hw2DqBoFoN0U,9750
73
+ edsl/jobs/buckets/TokenBucket.py,sha256=d9W464eYWoYeN1PmS6Nv2Agq3hSuU4S7cN7NnUYy7aQ,6023
74
+ edsl/jobs/interviews/Interview.py,sha256=FF4jgqHVoa9cXDkgU2JkJfr5XJsozswBkHUoTxnj3TA,11434
75
75
  edsl/jobs/interviews/InterviewStatistic.py,sha256=hY5d2EkIJ96NilPpZAvZZzZoxLXM7ss3xx5MIcKtTPs,1856
76
76
  edsl/jobs/interviews/InterviewStatisticsCollection.py,sha256=_ZZ0fnZBQiIywP9Q_wWjpWhlfcPe2cn32GKut10t5RI,788
77
77
  edsl/jobs/interviews/InterviewStatusDictionary.py,sha256=MSyys4hOWe1d8gfsUvAPbcKrs8YiPnz8jpufBSJL7SU,2485
78
78
  edsl/jobs/interviews/InterviewStatusLog.py,sha256=6u0F8gf5tha39VQL-IK_QPkCsQAYVOx_IesX7TDDX_A,3252
79
79
  edsl/jobs/interviews/InterviewStatusMixin.py,sha256=VV0Pel-crUsLoGpTifeIIkXsLGj0bfuO--UtpRnH-dU,1251
80
- edsl/jobs/interviews/InterviewTaskBuildingMixin.py,sha256=6F-sOyccoi5RRzE_NBMBIv0oHROeMYMSle7jJMUtSD8,11658
80
+ edsl/jobs/interviews/InterviewTaskBuildingMixin.py,sha256=sqbimDTBD5W6vOAJ_WUpyGTtOzOqmVbutKEFZ0C-bnk,11149
81
81
  edsl/jobs/interviews/ReportErrors.py,sha256=RSzDU2rWwtjfztj7sqaMab0quCiY-X2bG3AEOxhTim8,1745
82
82
  edsl/jobs/interviews/interview_exception_tracking.py,sha256=tIcX92udnkE5fcM5_WXjRF9xgTq2P0uaDXxZf3NQGG0,3271
83
83
  edsl/jobs/interviews/interview_status_enum.py,sha256=KJ-1yLAHdX-p8TiFnM0M3v1tnBwkq4aMCuBX6-ytrI8,229
@@ -85,7 +85,7 @@ edsl/jobs/interviews/retry_management.py,sha256=9Efn4B3aV45vbocnF6J5WQt88i2FgFjo
85
85
  edsl/jobs/runners/JobsRunnerAsyncio.py,sha256=L48NdyDiKrgUMQVwvd1wr2uOzT99oYfwXnlStDLHU9I,11934
86
86
  edsl/jobs/runners/JobsRunnerStatusData.py,sha256=-mxcmX0a38GGO9DQ-ItTmj6mvCUk5uC-UudT77lXTG4,10327
87
87
  edsl/jobs/runners/JobsRunnerStatusMixin.py,sha256=yxnXuOovwHgfDokNuluH_qulBcM0gCcbpCQibqVKXFI,3137
88
- edsl/jobs/tasks/QuestionTaskCreator.py,sha256=myiRwTJ48HERazdZRdqb7lRGlSSGYcXEVpTHcKho2I0,10483
88
+ edsl/jobs/tasks/QuestionTaskCreator.py,sha256=CCEx8k6LK12uXOy_XnvXZJPLvgtN3ig552lgSJEXlsI,10411
89
89
  edsl/jobs/tasks/TaskCreators.py,sha256=DbCt5BzJ0CsMSquqLyLdk8el031Wst7vCszVW5EltX8,2418
90
90
  edsl/jobs/tasks/TaskHistory.py,sha256=ZVellGW1cvwqdHt98dYPl0FYhk3VqRGHAZETDOxEkqg,10939
91
91
  edsl/jobs/tasks/TaskStatusLog.py,sha256=bqH36a32F12fjX-M-4lNOhHaK2-WLFzKE-r0PxZPRjI,546
@@ -93,7 +93,7 @@ edsl/jobs/tasks/task_management.py,sha256=KMToZuXzMlnHRHUF_VHL0-lHMTGhklf2GHVuwE
93
93
  edsl/jobs/tasks/task_status_enum.py,sha256=DOyrz61YlIS8R1W7izJNphcLrJ7I_ReUlfdRmk23h0Q,5333
94
94
  edsl/jobs/tokens/InterviewTokenUsage.py,sha256=u_6-IHpGFwZ6qMEXr24-jyLVUSSp4dSs_4iAZsBv7O4,1100
95
95
  edsl/jobs/tokens/TokenUsage.py,sha256=odj2-wDNEbHl9noyFAQ0DSKV0D9cv3aDOpmXufKZ8O4,1323
96
- edsl/language_models/LanguageModel.py,sha256=r2wLIaF2vTlCNjEhDrDRKOZozmWvw6KkCEANLnZXOPE,18916
96
+ edsl/language_models/LanguageModel.py,sha256=9orpce5VxYRlB3UdN-RJxjpnzy4fsTfYah_l5-Rf3uA,19043
97
97
  edsl/language_models/ModelList.py,sha256=DLeAq7o8uniZkP_-z8vJDMwf4JXksqLoPqOeeLI3QBE,2687
98
98
  edsl/language_models/RegisterLanguageModelsMeta.py,sha256=2bvWrVau2BRo-Bb1aO-QATH8xxuW_tF7NmqBMGDOfSg,8191
99
99
  edsl/language_models/__init__.py,sha256=bvY7Gy6VkX1gSbNkRbGPS-M1kUnb0EohL0FSagaEaTs,109
@@ -124,9 +124,9 @@ edsl/questions/QuestionBudget.py,sha256=K8cc1YOfoLWRoZBAkWO7WsMDZne0a5oAJMSxv2Jz
124
124
  edsl/questions/QuestionCheckBox.py,sha256=YHS-LEvR_1CWyg4usOlWfj9Gb_cCQlfIWIWhYRWn7Wo,6129
125
125
  edsl/questions/QuestionExtract.py,sha256=fjnsNLS2fNW6dfFuRyc2EgKEHx8ujjONmg2nSRynje4,3988
126
126
  edsl/questions/QuestionFreeText.py,sha256=ASj1s0EQYcZerJp476fscu_xEME8mKzVK3sPL6egiuU,3289
127
- edsl/questions/QuestionFunctional.py,sha256=s49mQBVGc7B4-3sX49_a_mgVZsR9bdPra2VYe4m8XoY,3961
127
+ edsl/questions/QuestionFunctional.py,sha256=rThYy1x_5_6blp7okSUlbHWXNa_4nd477TdkxxWW204,4984
128
128
  edsl/questions/QuestionList.py,sha256=Wf7xDXJsQBsAD_yOrzZ_GstKGT7aZjimTkU6qyqOhhM,4051
129
- edsl/questions/QuestionMultipleChoice.py,sha256=Oin_qOJz5wfZdcFopI6dyvlUn2LGocAvbNwSTmxWcQA,4491
129
+ edsl/questions/QuestionMultipleChoice.py,sha256=Rvmi4YgOU9SquH4WIv4RiccTQWpqMTvcIQYmiyIu10o,6603
130
130
  edsl/questions/QuestionNumerical.py,sha256=QArFDhP9Adb4l6y-udnUqPNk2Q6vT4pGsY13TkHsLGs,3631
131
131
  edsl/questions/QuestionRank.py,sha256=NEAwDt1at0zEM2S-E7jXMjglnlB0WhUlxSVJkzH4xSs,5876
132
132
  edsl/questions/RegisterQuestionsMeta.py,sha256=unON0CKpW-mveyhg9V3_BF_GYYwytMYP9h2ZZPetVNM,1994
@@ -138,13 +138,13 @@ edsl/questions/derived/QuestionLinearScale.py,sha256=7tybIaMaDTMkTFC6CymusW69so-
138
138
  edsl/questions/derived/QuestionTopK.py,sha256=TouXKZt_h6Jd-2rAjkEOCJOzzei4Wa-3hjYq9CLxWws,2744
139
139
  edsl/questions/derived/QuestionYesNo.py,sha256=KtMGuAPYWv-7-9WGa0fIS2UBxf6KKjFpuTk_h8FPZbg,2076
140
140
  edsl/questions/derived/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
141
- edsl/questions/descriptors.py,sha256=kqjV3pRbyq0ch-fSg4vkTz1ZH3ckud5Pd1HzR-R7pdE,13310
141
+ edsl/questions/descriptors.py,sha256=kCy7poJc_eo2ELfvI1AhDm6a-ge-PeLEeBoaViSVjYM,14812
142
142
  edsl/questions/question_registry.py,sha256=ZD7Y_towDdlnnmLq12vVewgQ3fEk9Ur0tCTWK8-WqeQ,5241
143
143
  edsl/questions/settings.py,sha256=er_z0ZW_dgmC5CHLWkaqBJiuWgAYzIund85M5YZFQAI,291
144
144
  edsl/results/Dataset.py,sha256=DZgb3vIj69ON7APQ6DimjBwAS1xZvZiXOg68CjW9E3I,8662
145
- edsl/results/DatasetExportMixin.py,sha256=QZEuSxJPisgJ5GvFkKbuqayCSgJzblclea1CFwsBZ2w,17959
146
- edsl/results/Result.py,sha256=X1qAACs9E6XhRmlIsb3CguDs6_laKkVyxE0JJpOJDZQ,12729
147
- edsl/results/Results.py,sha256=BgJHfOKDhvxMR8_ej6v-Lpjm0XkRL-UTTQBzFzvKuZc,36393
145
+ edsl/results/DatasetExportMixin.py,sha256=1n67zXiDPifahEEy34AgBimRnv8taevFfNLnUxfYK5c,21361
146
+ edsl/results/Result.py,sha256=TrsYmNFydMqk5CqsZ3dbNC4Zdzk_jPvCCldN3Oym780,14436
147
+ edsl/results/Results.py,sha256=wgHxPvXJEzAF7HJuy8z_IL8f_5Q9d4z1Mk6wiUfOqA0,38155
148
148
  edsl/results/ResultsDBMixin.py,sha256=Vs95zbSB4G7ENY4lU7OBdekg9evwTrtPH0IIL2NAFTk,7936
149
149
  edsl/results/ResultsExportMixin.py,sha256=XizBsPNxziyffirMA4kS7UHpYM1WIE4s1K-B7TqTfDw,1266
150
150
  edsl/results/ResultsFetchMixin.py,sha256=VEa0TKDcXbnTinSKs9YaE4WjOSLmlp9Po1_9kklFvSo,848
@@ -163,14 +163,14 @@ edsl/shared.py,sha256=lgLa-mCk2flIhxXarXLtfXZjXG_6XHhC2A3O8yRTjXc,20
163
163
  edsl/study/ObjectEntry.py,sha256=e3xRPH8wCN8Pum5HZsQRYgnSoauSvjXunIEH79wu5A8,5788
164
164
  edsl/study/ProofOfWork.py,sha256=FaqYtLgeiTEQXWKukPgPUTWMcIN5t1FR7h7Re8QEtgc,3433
165
165
  edsl/study/SnapShot.py,sha256=-5zoP4uTvnqtu3zRNMD-fKsNAVYX9psoKRADfotsF9E,2439
166
- edsl/study/Study.py,sha256=5yv5jT1uFxQD0oi_eODcKv_K6qTDbyAdMrAqBNNwOtE,16947
166
+ edsl/study/Study.py,sha256=zLy2mMvsX_QgZ6D4dcgYJoEacyajkRnARYjIFvyCO1o,16939
167
167
  edsl/study/__init__.py,sha256=YAvPLTPG3hK_eN9Ar3d1_d-E3laXpSya879A25-JAxU,170
168
168
  edsl/surveys/DAG.py,sha256=ozQuHo9ZQ8Eet5nDXtp7rFpiSocvvfxIHtyTnztvodg,2380
169
169
  edsl/surveys/Memory.py,sha256=-ikOtkkQldGB_BkPCW3o7AYwV5B_pIwlREw7aVCSHaQ,1113
170
170
  edsl/surveys/MemoryPlan.py,sha256=BeLuqS5Q8G2jSluHYFCAxVmj7cNPK-rDQ3mUsuDjikQ,7979
171
171
  edsl/surveys/Rule.py,sha256=ddZyZSObs4gsKtFSmcXkPigXDX8rrh1NFvAplP02TcA,11092
172
172
  edsl/surveys/RuleCollection.py,sha256=sN7aYDQJG3HmE-WxohgpctcQbHewjwE6NAqEVTxvFP8,13359
173
- edsl/surveys/Survey.py,sha256=oir5fnZD2chtUH1qsVSmTdVdXLRWcB0wkkby_Lcguu8,47227
173
+ edsl/surveys/Survey.py,sha256=uj8i76UOuSH0kXHncipq_4hvLIrt0QmtnodlI1iP6EA,48553
174
174
  edsl/surveys/SurveyCSS.py,sha256=NjJezs2sTlgFprN6IukjGKwNYmNdXnLjzV2w5K4z4RI,8415
175
175
  edsl/surveys/SurveyExportMixin.py,sha256=vj9bZReHx0wBK9sVuS0alzPIUDdg6AFFMd7bl1RKWKI,6555
176
176
  edsl/surveys/SurveyFlowVisualizationMixin.py,sha256=Z-YqeedMqWOtCFy003YJ9aneJ1n4bn70lDoILwLtTc0,3966
@@ -197,7 +197,7 @@ edsl/utilities/interface.py,sha256=AaKpWiwWBwP2swNXmnFlIf3ZFsjfsR5bjXQAW47tD-8,1
197
197
  edsl/utilities/repair_functions.py,sha256=tftmklAqam6LOQQu_-9U44N-llycffhW8LfO63vBmNw,929
198
198
  edsl/utilities/restricted_python.py,sha256=5-_zUhrNbos7pLhDl9nr8d24auRlquR6w-vKkmNjPiA,2060
199
199
  edsl/utilities/utilities.py,sha256=oU5Gg6szTGqsJ2yBOS0aC3XooezLE8By3SdrQLLpqvA,10107
200
- edsl-0.1.30.dev4.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
201
- edsl-0.1.30.dev4.dist-info/METADATA,sha256=StzD_Wua0pqicToraGkUrdMlwlCtVqmxHVUQa1VhhvM,4103
202
- edsl-0.1.30.dev4.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
203
- edsl-0.1.30.dev4.dist-info/RECORD,,
200
+ edsl-0.1.30.dev5.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
201
+ edsl-0.1.30.dev5.dist-info/METADATA,sha256=ZMHio4ApnpJuGkuSFhbeWi9r4mr6dTb4HNFUuZUTLfc,4103
202
+ edsl-0.1.30.dev5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
203
+ edsl-0.1.30.dev5.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 1.8.1
2
+ Generator: poetry-core 1.9.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any