edsl 0.1.30__py3-none-any.whl → 0.1.30.dev1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
edsl/__version__.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.30"
+__version__ = "0.1.30.dev1"
edsl/agents/Agent.py CHANGED
@@ -4,8 +4,7 @@ from __future__ import annotations
 import copy
 import inspect
 import types
-from typing import Callable, Optional, Union
-from uuid import uuid4
+from typing import Any, Callable, Optional, Union, Dict, Sequence
 from edsl.Base import Base

 from edsl.exceptions.agents import (
@@ -689,14 +688,13 @@ class Agent(Base):
         return table

     @classmethod
-    def example(cls, randomize: bool = False) -> Agent:
-        """
-        Returns an example Agent instance.
+    def example(cls) -> Agent:
+        """Return an example agent.

-        :param randomize: If True, adds a random string to the value of an example key.
+        >>> Agent.example()
+        Agent(traits = {'age': 22, 'hair': 'brown', 'height': 5.5})
         """
-        addition = "" if not randomize else str(uuid4())
-        return cls(traits={"age": 22, "hair": f"brown{addition}", "height": 5.5})
+        return cls(traits={"age": 22, "hair": "brown", "height": 5.5})

     def code(self) -> str:
         """Return the code for the agent.
edsl/agents/AgentList.py CHANGED
@@ -11,15 +11,23 @@ Example usage:
 """

 from __future__ import annotations
-import csv
-import json
 from collections import UserList
-from typing import Any, List, Optional, Union
+from typing import Optional, Union, Sequence, List, Any
 from rich import print_json
 from rich.table import Table
+import json
+import csv
+
+
 from simpleeval import EvalWithCompoundTypes
+
 from edsl.Base import Base
-from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
+
+# from edsl.agents import Agent
+from edsl.utilities.decorators import (
+    add_edsl_version,
+    remove_edsl_version,
+)


 class AgentList(UserList, Base):
@@ -231,15 +239,17 @@ class AgentList(UserList, Base):
         return cls(agents)

     @classmethod
-    def example(cls, randomize: bool = False) -> AgentList:
-        """
-        Returns an example AgentList instance.
+    def example(cls) -> "AgentList":
+        """Return an example AgentList.
+
+        >>> al = AgentList.example()
+        >>> len(al)
+        2

-        :param randomize: If True, uses Agent's randomize method.
         """
         from edsl.agents.Agent import Agent

-        return cls([Agent.example(randomize), Agent.example(randomize)])
+        return cls([Agent.example(), Agent.example()])

     @classmethod
     def from_list(self, trait_name: str, values: List[Any]):
@@ -74,14 +74,15 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):

         This cleans up the raw response to make it suitable to pass to AgentResponseDict.
         """
+        # not actually used, but this removes the temptation to delete agent from the signature
         _ = agent
         try:
             response = question._validate_answer(raw_response)
         except Exception as e:
-            """If the response is invalid, remove it from the cache and raise the exception."""
             self._remove_from_cache(raw_response)
             raise e

+        # breakpoint()
         question_dict = self.survey.question_names_to_questions()
         for other_question, answer in self.current_answers.items():
             if other_question in question_dict:
@@ -94,10 +95,12 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):
                 question_dict[new_question].comment = answer

         combined_dict = {**question_dict, **scenario}
+        # print("combined_dict: ", combined_dict)
+        # print("response: ", response)
+        # breakpoint()
         answer = question._translate_answer_code_to_answer(
             response["answer"], combined_dict
         )
-        # breakpoint()
         data = {
             "answer": answer,
             "comment": response.get(
@@ -108,8 +111,6 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):
             "cached_response": raw_response.get("cached_response", None),
             "usage": raw_response.get("usage", {}),
             "raw_model_response": raw_model_response,
-            "cache_used": raw_response.get("cache_used", False),
-            "cache_key": raw_response.get("cache_key", None),
         }
         return AgentResponseDict(**data)

@@ -30,7 +30,7 @@ c1 = Conversation(agent_list=AgentList([a1, a3, a2]), max_turns=5, verbose=True)
 c2 = Conversation(agent_list=AgentList([a1, a2]), max_turns=5, verbose=True)

 c = Cache.load("car_talk.json.gz")
-# breakpoint()
+breakpoint()
 combo = ConversationList([c1, c2], cache=c)
 combo.run()
 results = combo.to_results()
edsl/data/Cache.py CHANGED
@@ -7,10 +7,17 @@ import json
 import os
 import warnings
 from typing import Optional, Union
-from edsl.Base import Base
+import time
+from edsl.config import CONFIG
 from edsl.data.CacheEntry import CacheEntry
+
+# from edsl.data.SQLiteDict import SQLiteDict
+from edsl.Base import Base
 from edsl.utilities.utilities import dict_hash
-from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
+from edsl.utilities.decorators import (
+    add_edsl_version,
+    remove_edsl_version,
+)


 class Cache(Base):
@@ -34,7 +41,6 @@ class Cache(Base):
         data: Optional[Union["SQLiteDict", dict]] = None,
         immediate_write: bool = True,
         method=None,
-        verbose=False,
     ):
         """
         Create two dictionaries to store the cache data.
@@ -53,7 +59,6 @@ class Cache(Base):
         self.new_entries = {}
         self.new_entries_to_write_later = {}
         self.coop = None
-        self.verbose = verbose

         self.filename = filename
         if filename and data:
@@ -117,7 +122,7 @@ class Cache(Base):
         system_prompt: str,
         user_prompt: str,
         iteration: int,
-    ) -> tuple(Union[None, str], str):
+    ) -> Union[None, str]:
         """
         Fetch a value (LLM output) from the cache.

@@ -130,7 +135,7 @@ class Cache(Base):
         Return None if the response is not found.

         >>> c = Cache()
-        >>> c.fetch(model="gpt-3", parameters="default", system_prompt="Hello", user_prompt="Hi", iteration=1)[0] is None
+        >>> c.fetch(model="gpt-3", parameters="default", system_prompt="Hello", user_prompt="Hi", iteration=1) is None
         True


@@ -146,13 +151,8 @@ class Cache(Base):
         )
         entry = self.data.get(key, None)
         if entry is not None:
-            if self.verbose:
-                print(f"Cache hit for key: {key}")
             self.fetched_data[key] = entry
-        else:
-            if self.verbose:
-                print(f"Cache miss for key: {key}")
-        return None if entry is None else entry.output, key
+        return None if entry is None else entry.output

     def store(
         self,
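A note on the fetch() hunks above: the 0.1.30 side returns an (output, key) tuple, while the 0.1.30.dev1 side returns the output alone. A hedged sketch of caller code under each version, reusing the doctest's illustrative arguments:

    c = Cache()
    args = dict(model="gpt-3", parameters="default",
                system_prompt="Hello", user_prompt="Hi", iteration=1)
    # 0.1.30 side:  value, key = c.fetch(**args)
    value = c.fetch(**args)  # 0.1.30.dev1 side: output only; None on a cache miss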
@@ -354,9 +354,6 @@
         for key, entry in self.new_entries_to_write_later.items():
             self.data[key] = entry

-        if self.filename:
-            self.write(self.filename)
-
     ####################
     # DUNDER / USEFUL
     ####################
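With the lines above removed, entries deferred by immediate_write=False are still copied into self.data at this point but are no longer flushed to self.filename automatically. A hedged sketch of flushing by hand, assuming only the write() method that the removed lines themselves called (the filename is hypothetical):

    c = Cache(filename="my_cache.json", immediate_write=False)
    # ... store some entries ...
    if c.filename:
        c.write(c.filename)  # explicit flush; the 0.1.30 side did this automatically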
@@ -473,18 +470,12 @@
         webbrowser.open("file://" + filepath)

     @classmethod
-    def example(cls, randomize: bool = False) -> Cache:
+    def example(cls) -> Cache:
         """
-        Returns an example Cache instance.
-
-        :param randomize: If True, uses CacheEntry's randomize method.
+        Return an example Cache.
+        The example Cache has one entry.
         """
-        return cls(
-            data={
-                CacheEntry.example(randomize).key: CacheEntry.example(),
-                CacheEntry.example(randomize).key: CacheEntry.example(),
-            }
-        )
+        return cls(data={CacheEntry.example().key: CacheEntry.example()})


 if __name__ == "__main__":
edsl/data/CacheEntry.py CHANGED
@@ -2,8 +2,11 @@ from __future__ import annotations
 import json
 import datetime
 import hashlib
+import random
 from typing import Optional
-from uuid import uuid4
+
+
+# TODO: Timestamp should probably be float?


 class CacheEntry:
@@ -148,12 +151,10 @@
     @classmethod
     def example(cls, randomize: bool = False) -> CacheEntry:
         """
-        Returns an example CacheEntry instance.
-
-        :param randomize: If True, adds a random string to the system prompt.
+        Returns a CacheEntry example.
         """
-        # if random, create a uuid
-        addition = "" if not randomize else str(uuid4())
+        # if random, create a random number for 0-100
+        addition = "" if not randomize else str(random.randint(0, 1000))
         return CacheEntry(
             model="gpt-3.5-turbo",
             parameters={"temperature": 0.5},
@@ -17,8 +17,6 @@ class AgentResponseDict(UserDict):
         cached_response=None,
         raw_model_response=None,
         simple_model_raw_response=None,
-        cache_used=None,
-        cache_key=None,
     ):
         """Initialize the AgentResponseDict object."""
         usage = usage or {"prompt_tokens": 0, "completion_tokens": 0}
@@ -32,7 +30,5 @@ class AgentResponseDict(UserDict):
                 "cached_response": cached_response,
                 "raw_model_response": raw_model_response,
                 "simple_model_raw_response": simple_model_raw_response,
-                "cache_used": cache_used,
-                "cache_key": cache_key,
             }
         )
edsl/jobs/Jobs.py CHANGED
@@ -461,13 +461,6 @@
             remote_inference = False

         if remote_inference:
-            from edsl.agents.Agent import Agent
-            from edsl.language_models.registry import Model
-            from edsl.results.Result import Result
-            from edsl.results.Results import Results
-            from edsl.scenarios.Scenario import Scenario
-            from edsl.surveys.Survey import Survey
-
             self._output("Remote inference activated. Sending job to server...")
             if remote_cache:
                 self._output(
@@ -687,9 +680,7 @@
     # Example methods
     #######################
     @classmethod
-    def example(
-        cls, throw_exception_probability: int = 0, randomize: bool = False
-    ) -> Jobs:
+    def example(cls, throw_exception_probability=0) -> Jobs:
         """Return an example Jobs instance.

         :param throw_exception_probability: the probability that an exception will be thrown when answering a question. This is useful for testing error handling.
@@ -699,13 +690,10 @@

         """
         import random
-        from uuid import uuid4
         from edsl.questions import QuestionMultipleChoice
         from edsl.agents.Agent import Agent
         from edsl.scenarios.Scenario import Scenario

-        addition = "" if not randomize else str(uuid4())
-
         # (status, question, period)
         agent_answers = {
             ("Joyful", "how_feeling", "morning"): "OK",
@@ -748,10 +736,7 @@
         base_survey = Survey(questions=[q1, q2])

         scenario_list = ScenarioList(
-            [
-                Scenario({"period": f"morning{addition}"}),
-                Scenario({"period": "afternoon"}),
-            ]
+            [Scenario({"period": "morning"}), Scenario({"period": "afternoon"})]
         )
         job = base_survey.by(scenario_list).by(joy_agent, sad_agent)

@@ -25,16 +25,6 @@ class ModelBuckets:
             tokens_bucket=self.tokens_bucket + other.tokens_bucket,
         )

-    def turbo_mode_on(self):
-        """Set the refill rate to infinity for both buckets."""
-        self.requests_bucket.turbo_mode_on()
-        self.tokens_bucket.turbo_mode_on()
-
-    def turbo_mode_off(self):
-        """Restore the refill rate to its original value for both buckets."""
-        self.requests_bucket.turbo_mode_off()
-        self.tokens_bucket.turbo_mode_off()
-
     @classmethod
     def infinity_bucket(cls, model_name: str = "not_specified") -> "ModelBuckets":
         """Create a bucket with infinite capacity and refill rate."""
@@ -17,29 +17,11 @@ class TokenBucket:
         self.bucket_name = bucket_name
         self.bucket_type = bucket_type
         self.capacity = capacity  # Maximum number of tokens
-        self._old_capacity = capacity
         self.tokens = capacity  # Current number of available tokens
        self.refill_rate = refill_rate  # Rate at which tokens are refilled
-        self._old_refill_rate = refill_rate
         self.last_refill = time.monotonic()  # Last refill time
+
         self.log: List[Any] = []
-        self.turbo_mode = False
-
-    def turbo_mode_on(self):
-        """Set the refill rate to infinity."""
-        if self.turbo_mode:
-            pass
-        else:
-            # pass
-            self.turbo_mode = True
-            self.capacity = float("inf")
-            self.refill_rate = float("inf")
-
-    def turbo_mode_off(self):
-        """Restore the refill rate to its original value."""
-        self.turbo_mode = False
-        self.capacity = self._old_capacity
-        self.refill_rate = self._old_refill_rate

     def __add__(self, other) -> "TokenBucket":
         """Combine two token buckets.
@@ -73,17 +55,7 @@ class TokenBucket:
         self.log.append((time.monotonic(), self.tokens))

     def refill(self) -> None:
-        """Refill the bucket with new tokens based on elapsed time.
-
-
-
-        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
-        >>> bucket.tokens = 0
-        >>> bucket.refill()
-        >>> bucket.tokens > 0
-        True
-
-        """
+        """Refill the bucket with new tokens based on elapsed time."""
         now = time.monotonic()
         elapsed = now - self.last_refill
         refill_amount = elapsed * self.refill_rate
@@ -126,7 +98,7 @@
             raise ValueError(msg)
         while self.tokens < amount:
             self.refill()
-            await asyncio.sleep(0.01)  # Sleep briefly to prevent busy waiting
+            await asyncio.sleep(0.1)  # Sleep briefly to prevent busy waiting
         self.tokens -= amount

         now = time.monotonic()
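The sleep change above coarsens the polling interval of this acquire loop from 10 ms to 100 ms: the coroutine refills, then parks until enough tokens accumulate, so a longer poll trades acquisition latency for fewer wakeups. A self-contained sketch of the pattern (not the edsl class itself):

    import asyncio

    async def acquire(bucket, amount: int, poll: float = 0.1) -> None:
        # Wait until the bucket holds `amount` tokens, refilling as time passes.
        while bucket.tokens < amount:
            bucket.refill()
            await asyncio.sleep(poll)  # the 0.1.30 side polled every 0.01 s
        bucket.tokens -= amount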
@@ -20,12 +20,6 @@ from edsl.jobs.interviews.retry_management import retry_strategy
 from edsl.jobs.interviews.InterviewTaskBuildingMixin import InterviewTaskBuildingMixin
 from edsl.jobs.interviews.InterviewStatusMixin import InterviewStatusMixin

-import asyncio
-
-
-def run_async(coro):
-    return asyncio.run(coro)
-

 class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
     """
@@ -36,14 +30,14 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):

     def __init__(
         self,
-        agent: "Agent",
-        survey: "Survey",
-        scenario: "Scenario",
-        model: Type["LanguageModel"],
-        debug: Optional[bool] = False,
+        agent: Agent,
+        survey: Survey,
+        scenario: Scenario,
+        model: Type[LanguageModel],
+        debug: bool = False,
         iteration: int = 0,
-        cache: Optional["Cache"] = None,
-        sidecar_model: Optional["LanguageModel"] = None,
+        cache: "Cache" = None,
+        sidecar_model: LanguageModel = None,
     ):
         """Initialize the Interview instance.

@@ -51,24 +45,6 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         :param survey: the survey being administered to the agent.
         :param scenario: the scenario that populates the survey questions.
         :param model: the language model used to answer the questions.
-        :param debug: if True, run without calls to the language model.
-        :param iteration: the iteration number of the interview.
-        :param cache: the cache used to store the answers.
-        :param sidecar_model: a sidecar model used to answer questions.
-
-        >>> i = Interview.example()
-        >>> i.task_creators
-        {}
-
-        >>> i.exceptions
-        {}
-
-        >>> _ = asyncio.run(i.async_conduct_interview())
-        >>> i.task_status_logs['q0']
-        [{'log_time': ..., 'value': <TaskStatus.NOT_STARTED: 1>}, {'log_time': ..., 'value': <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>}, {'log_time': ..., 'value': <TaskStatus.API_CALL_IN_PROGRESS: 7>}, {'log_time': ..., 'value': <TaskStatus.SUCCESS: 8>}]
-
-        >>> i.to_index
-        {'q0': 0, 'q1': 1, 'q2': 2}

         """
         self.agent = agent
@@ -88,7 +64,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         self.exceptions = InterviewExceptionCollection()
         self._task_status_log_dict = InterviewStatusLog()

-        # dictionary mapping question names to their index in the survey.
+        # dictionary mapping question names to their index in the survey."""
         self.to_index = {
             question_name: index
             for index, question_name in enumerate(self.survey.question_names)
@@ -100,16 +76,14 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         model_buckets: ModelBuckets = None,
         debug: bool = False,
         stop_on_exception: bool = False,
-        sidecar_model: Optional["LanguageModel"] = None,
+        sidecar_model: Optional[LanguageModel] = None,
     ) -> tuple["Answers", List[dict[str, Any]]]:
         """
         Conduct an Interview asynchronously.
-        It returns a tuple with the answers and a list of valid results.

         :param model_buckets: a dictionary of token buckets for the model.
         :param debug: run without calls to LLM.
         :param stop_on_exception: if True, stops the interview if an exception is raised.
-        :param sidecar_model: a sidecar model used to answer questions.

         Example usage:

@@ -117,37 +91,16 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         >>> result, _ = asyncio.run(i.async_conduct_interview())
         >>> result['q0']
         'yes'
-
-        >>> i = Interview.example(throw_exception = True)
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        Attempt 1 failed with exception:This is a test error now waiting 1.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-        Attempt 2 failed with exception:This is a test error now waiting 2.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-        Attempt 3 failed with exception:This is a test error now waiting 4.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-        Attempt 4 failed with exception:This is a test error now waiting 8.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-
-        >>> i.exceptions
-        {'q0': [{'exception': "Exception('This is a test error')", 'time': ..., 'traceback': ...
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview(stop_on_exception = True))
-        Traceback (most recent call last):
-        ...
-        asyncio.exceptions.CancelledError
         """
         self.sidecar_model = sidecar_model

         # if no model bucket is passed, create an 'infinity' bucket with no rate limits
+        # print("model_buckets", model_buckets)
         if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
             model_buckets = ModelBuckets.infinity_bucket()

+        # model_buckets = ModelBuckets.infinity_bucket()
+
         ## build the tasks using the InterviewTaskBuildingMixin
         ## This is the key part---it creates a task for each question,
         ## with dependencies on the questions that must be answered before this one can be answered.
@@ -169,14 +122,6 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         It iterates through the tasks and invigilators, and yields the results of the tasks that are done.
         If a task is not done, it raises a ValueError.
         If an exception is raised in the task, it records the exception in the Interview instance except if the task was cancelled, which is expected behavior.
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> results = list(i._extract_valid_results())
-        >>> len(results) == len(i.survey)
-        True
-        >>> type(results[0])
-        <class 'edsl.data_transfer_models.AgentResponseDict'>
         """
         assert len(self.tasks) == len(self.invigilators)

@@ -194,18 +139,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
             yield result

     def _record_exception(self, task, exception: Exception) -> None:
-        """Record an exception in the Interview instance.
-
-        It records the exception in the Interview instance, with the task name and the exception entry.
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> i.exceptions
-        {}
-        >>> i._record_exception(i.tasks[0], Exception("An exception occurred."))
-        >>> i.exceptions
-        {'q0': [{'exception': "Exception('An exception occurred.')", 'time': ..., 'traceback': 'NoneType: None\\n'}]}
-        """
+        """Record an exception in the Interview instance."""
         exception_entry = InterviewExceptionEntry(
             exception=repr(exception),
             time=time.time(),
@@ -221,10 +155,6 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         It is used to determine the order in which questions should be answered.
         This reflects both agent 'memory' considerations and 'skip' logic.
         The 'textify' parameter is set to True, so that the question names are returned as strings rather than integer indices.
-
-        >>> i = Interview.example()
-        >>> i.dag == {'q2': {'q0'}, 'q1': {'q0'}}
-        True
         """
         return self.survey.dag(textify=True)

@@ -235,15 +165,8 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         """Return a string representation of the Interview instance."""
         return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"

-    def duplicate(self, iteration: int, cache: "Cache") -> Interview:
-        """Duplicate the interview, but with a new iteration number and cache.
-
-        >>> i = Interview.example()
-        >>> i2 = i.duplicate(1, None)
-        >>> i.iteration + 1 == i2.iteration
-        True
-
-        """
+    def duplicate(self, iteration: int, cache: Cache) -> Interview:
+        """Duplicate the interview, but with a new iteration number and cache."""
         return Interview(
             agent=self.agent,
             survey=self.survey,
@@ -254,7 +177,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         )

     @classmethod
-    def example(self, throw_exception: bool = False) -> Interview:
+    def example(self):
         """Return an example Interview instance."""
         from edsl.agents import Agent
         from edsl.surveys import Survey
@@ -269,15 +192,66 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         survey = Survey.example()
         scenario = Scenario.example()
         model = LanguageModel.example()
-        if throw_exception:
-            model = LanguageModel.example(test_model=True, throw_exception=True)
-            agent = Agent.example()
-            return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
         return Interview(agent=agent, survey=survey, scenario=scenario, model=model)


 if __name__ == "__main__":
     import doctest

-    # add ellipsis
-    doctest.testmod(optionflags=doctest.ELLIPSIS)
+    doctest.testmod()
+    # from edsl import Model
+    # from edsl.agents import Agent
+    # from edsl.surveys import Survey
+    # from edsl.scenarios import Scenario
+    # from edsl.questions import QuestionMultipleChoice
+
+    # # from edsl.jobs.Interview import Interview
+
+    # # a survey with skip logic
+    # q0 = QuestionMultipleChoice(
+    #     question_text="Do you like school?",
+    #     question_options=["yes", "no"],
+    #     question_name="q0",
+    # )
+    # q1 = QuestionMultipleChoice(
+    #     question_text="Why not?",
+    #     question_options=["killer bees in cafeteria", "other"],
+    #     question_name="q1",
+    # )
+    # q2 = QuestionMultipleChoice(
+    #     question_text="Why?",
+    #     question_options=["**lack*** of killer bees in cafeteria", "other"],
+    #     question_name="q2",
+    # )
+    # s = Survey(questions=[q0, q1, q2])
+    # s = s.add_rule(q0, "q0 == 'yes'", q2)
+
+    # # create an interview
+    # a = Agent(traits=None)
+
+    # def direct_question_answering_method(self, question, scenario):
+    #     """Answer a question directly."""
+    #     raise Exception("Error!")
+    #     # return "yes"
+
+    # a.add_direct_question_answering_method(direct_question_answering_method)
+    # scenario = Scenario()
+    # m = Model()
+    # I = Interview(agent=a, survey=s, scenario=scenario, model=m)
+
+    # result = asyncio.run(I.async_conduct_interview())
+    # # # conduct five interviews
+    # # for _ in range(5):
+    # #     I.conduct_interview(debug=True)
+
+    # # # replace missing answers
+    # # I
+    # # repr(I)
+    # # eval(repr(I))
+    # # print(I.task_status_logs.status_matrix(20))
+    # status_matrix = I.task_status_logs.status_matrix(20)
+    # numerical_matrix = I.task_status_logs.numerical_matrix(20)
+    # I.task_status_logs.visualize()
+
+    # I.exceptions.print()
+    # I.exceptions.ascii_table()
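Pieced together from the doctest that survives in async_conduct_interview above, a minimal run of an example interview looks the same on both sides of this diff (the module path is assumed, not shown in the hunks):

    import asyncio
    from edsl.jobs.interviews.Interview import Interview  # assumed path

    i = Interview.example()
    result, _ = asyncio.run(i.async_conduct_interview())
    assert result["q0"] == "yes"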