edsl 0.1.30__py3-none-any.whl → 0.1.30.dev2__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
edsl/__version__.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.30"
+__version__ = "0.1.30.dev2"
edsl/agents/Agent.py CHANGED
@@ -4,8 +4,7 @@ from __future__ import annotations
 import copy
 import inspect
 import types
-from typing import Callable, Optional, Union
-from uuid import uuid4
+from typing import Any, Callable, Optional, Union, Dict, Sequence
 from edsl.Base import Base
 
 from edsl.exceptions.agents import (
@@ -689,14 +688,13 @@ class Agent(Base):
         return table
 
     @classmethod
-    def example(cls, randomize: bool = False) -> Agent:
-        """
-        Returns an example Agent instance.
+    def example(cls) -> Agent:
+        """Return an example agent.
 
-        :param randomize: If True, adds a random string to the value of an example key.
+        >>> Agent.example()
+        Agent(traits = {'age': 22, 'hair': 'brown', 'height': 5.5})
 
         """
-        addition = "" if not randomize else str(uuid4())
-        return cls(traits={"age": 22, "hair": f"brown{addition}", "height": 5.5})
+        return cls(traits={"age": 22, "hair": "brown", "height": 5.5})
 
     def code(self) -> str:
         """Return the code for the agent.
edsl/agents/AgentList.py CHANGED
@@ -11,15 +11,23 @@ Example usage:
 """
 
 from __future__ import annotations
-import csv
-import json
 from collections import UserList
-from typing import Any, List, Optional, Union
+from typing import Optional, Union, Sequence, List, Any
 from rich import print_json
 from rich.table import Table
+import json
+import csv
+
+
 from simpleeval import EvalWithCompoundTypes
+
 from edsl.Base import Base
-from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
+
+# from edsl.agents import Agent
+from edsl.utilities.decorators import (
+    add_edsl_version,
+    remove_edsl_version,
+)
 
 
 class AgentList(UserList, Base):
@@ -231,15 +239,17 @@ class AgentList(UserList, Base):
         return cls(agents)
 
     @classmethod
-    def example(cls, randomize: bool = False) -> AgentList:
-        """
-        Returns an example AgentList instance.
+    def example(cls) -> "AgentList":
+        """Return an example AgentList.
+
+        >>> al = AgentList.example()
+        >>> len(al)
+        2
 
-        :param randomize: If True, uses Agent's randomize method.
         """
         from edsl.agents.Agent import Agent
 
-        return cls([Agent.example(randomize), Agent.example(randomize)])
+        return cls([Agent.example(), Agent.example()])
 
     @classmethod
     def from_list(self, trait_name: str, values: List[Any]):
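The new doctest pins `AgentList.example()` to two copies of the example agent. An equivalent check outside doctest form, as a sketch assuming `edsl` is installed and `Agent` exposes a `traits` dict (as the Agent diff above suggests):

    from edsl.agents.AgentList import AgentList

    al = AgentList.example()
    assert len(al) == 2
    assert al[0].traits == {"age": 22, "hair": "brown", "height": 5.5}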
@@ -97,7 +97,7 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):
         answer = question._translate_answer_code_to_answer(
             response["answer"], combined_dict
         )
-        # breakpoint()
+        #breakpoint()
         data = {
             "answer": answer,
             "comment": response.get(
@@ -30,7 +30,7 @@ c1 = Conversation(agent_list=AgentList([a1, a3, a2]), max_turns=5, verbose=True)
 c2 = Conversation(agent_list=AgentList([a1, a2]), max_turns=5, verbose=True)
 
 c = Cache.load("car_talk.json.gz")
-# breakpoint()
+breakpoint()
 combo = ConversationList([c1, c2], cache=c)
 combo.run()
 results = combo.to_results()
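Unlike the commented-out `#breakpoint()` in the previous hunk, this change re-enables a live `breakpoint()`, so the script drops into pdb on every run. If such a line ships, it can still be neutralized without editing the file, because the default `sys.breakpointhook` consults `PYTHONBREAKPOINT` at each call (the script name below is illustrative):

    # PYTHONBREAKPOINT=0 python car_talk_script.py   # disables breakpoint() entirely
    import os
    os.environ["PYTHONBREAKPOINT"] = "0"  # checked at call time by sys.breakpointhook
    breakpoint()  # now a no-op
    print("execution continues without entering pdb")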
edsl/data/Cache.py CHANGED
@@ -7,10 +7,17 @@ import json
 import os
 import warnings
 from typing import Optional, Union
-from edsl.Base import Base
+import time
+from edsl.config import CONFIG
 from edsl.data.CacheEntry import CacheEntry
+
+# from edsl.data.SQLiteDict import SQLiteDict
+from edsl.Base import Base
 from edsl.utilities.utilities import dict_hash
-from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
+from edsl.utilities.decorators import (
+    add_edsl_version,
+    remove_edsl_version,
+)
 
 
 class Cache(Base):
@@ -34,7 +41,7 @@ class Cache(Base):
         data: Optional[Union["SQLiteDict", dict]] = None,
         immediate_write: bool = True,
         method=None,
-        verbose=False,
+        verbose = False
     ):
         """
         Create two dictionaries to store the cache data.
@@ -473,18 +480,12 @@ class Cache(Base):
         webbrowser.open("file://" + filepath)
 
     @classmethod
-    def example(cls, randomize: bool = False) -> Cache:
+    def example(cls) -> Cache:
         """
-        Returns an example Cache instance.
-
-        :param randomize: If True, uses CacheEntry's randomize method.
+        Return an example Cache.
+        The example Cache has one entry.
         """
-        return cls(
-            data={
-                CacheEntry.example(randomize).key: CacheEntry.example(),
-                CacheEntry.example(randomize).key: CacheEntry.example(),
-            }
-        )
+        return cls(data={CacheEntry.example().key: CacheEntry.example()})
 
 
 if __name__ == "__main__":
edsl/data/CacheEntry.py CHANGED
@@ -2,8 +2,11 @@ from __future__ import annotations
 import json
 import datetime
 import hashlib
+import random
 from typing import Optional
-from uuid import uuid4
+
+
+# TODO: Timestamp should probably be float?
 
 
 class CacheEntry:
@@ -148,12 +151,10 @@ class CacheEntry:
     @classmethod
     def example(cls, randomize: bool = False) -> CacheEntry:
         """
-        Returns an example CacheEntry instance.
-
-        :param randomize: If True, adds a random string to the system prompt.
+        Returns a CacheEntry example.
         """
-        # if random, create a uuid
-        addition = "" if not randomize else str(uuid4())
+        # if random, create a random number for 0-100
+        addition = "" if not randomize else str(random.randint(0, 1000))
        return CacheEntry(
            model="gpt-3.5-turbo",
            parameters={"temperature": 0.5},
@@ -17,7 +17,7 @@ class AgentResponseDict(UserDict):
17
17
  cached_response=None,
18
18
  raw_model_response=None,
19
19
  simple_model_raw_response=None,
20
- cache_used=None,
20
+ cache_used=None,
21
21
  cache_key=None,
22
22
  ):
23
23
  """Initialize the AgentResponseDict object."""
edsl/jobs/Jobs.py CHANGED
@@ -687,9 +687,7 @@ class Jobs(Base):
     # Example methods
     #######################
     @classmethod
-    def example(
-        cls, throw_exception_probability: int = 0, randomize: bool = False
-    ) -> Jobs:
+    def example(cls, throw_exception_probability=0) -> Jobs:
         """Return an example Jobs instance.
 
         :param throw_exception_probability: the probability that an exception will be thrown when answering a question. This is useful for testing error handling.
@@ -699,13 +697,10 @@ class Jobs(Base):
 
         """
         import random
-        from uuid import uuid4
         from edsl.questions import QuestionMultipleChoice
         from edsl.agents.Agent import Agent
         from edsl.scenarios.Scenario import Scenario
 
-        addition = "" if not randomize else str(uuid4())
-
         # (status, question, period)
         agent_answers = {
             ("Joyful", "how_feeling", "morning"): "OK",
@@ -748,10 +743,7 @@ class Jobs(Base):
         base_survey = Survey(questions=[q1, q2])
 
         scenario_list = ScenarioList(
-            [
-                Scenario({"period": f"morning{addition}"}),
-                Scenario({"period": "afternoon"}),
-            ]
+            [Scenario({"period": "morning"}), Scenario({"period": "afternoon"})]
         )
         job = base_survey.by(scenario_list).by(joy_agent, sad_agent)
 
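With `randomize` gone, `Jobs.example()` is fully deterministic; the only remaining knob is the exception probability used for testing error handling. A usage sketch (assuming `edsl` is installed; the docstring above does not state the units of the probability, so the nonzero value here is illustrative):

    from edsl.jobs.Jobs import Jobs

    job = Jobs.example()  # deterministic survey, agents, and scenarios
    flaky_job = Jobs.example(throw_exception_probability=1)  # simulated failures, per the docstring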
@@ -24,7 +24,7 @@ class ModelBuckets:
             requests_bucket=self.requests_bucket + other.requests_bucket,
             tokens_bucket=self.tokens_bucket + other.tokens_bucket,
         )
-
+
     def turbo_mode_on(self):
         """Set the refill rate to infinity for both buckets."""
         self.requests_bucket.turbo_mode_on()
@@ -30,17 +30,16 @@ class TokenBucket:
         if self.turbo_mode:
             pass
         else:
-            # pass
             self.turbo_mode = True
-            self.capacity = float("inf")
-            self.refill_rate = float("inf")
+            self.capacity=float("inf")
+            self.refill_rate=float("inf")
 
     def turbo_mode_off(self):
         """Restore the refill rate to its original value."""
         self.turbo_mode = False
         self.capacity = self._old_capacity
         self.refill_rate = self._old_refill_rate
-
+
     def __add__(self, other) -> "TokenBucket":
         """Combine two token buckets.
 
@@ -73,17 +72,7 @@ class TokenBucket:
         self.log.append((time.monotonic(), self.tokens))
 
     def refill(self) -> None:
-        """Refill the bucket with new tokens based on elapsed time.
-
-
-
-        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
-        >>> bucket.tokens = 0
-        >>> bucket.refill()
-        >>> bucket.tokens > 0
-        True
-
-        """
+        """Refill the bucket with new tokens based on elapsed time."""
         now = time.monotonic()
         elapsed = now - self.last_refill
         refill_amount = elapsed * self.refill_rate
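The deleted doctest checked the core refill invariant: after draining the bucket, `refill()` restores tokens in proportion to elapsed time. A self-contained sketch of that arithmetic, independent of the edsl class (`MiniBucket` is ours, not the library's):

    import time

    class MiniBucket:
        def __init__(self, capacity: float, refill_rate: float):
            self.capacity = capacity
            self.refill_rate = refill_rate      # tokens added per second
            self.tokens = capacity
            self.last_refill = time.monotonic()

        def refill(self) -> None:
            # tokens grow by elapsed * refill_rate, clamped at capacity --
            # the invariant the removed doctest asserted.
            now = time.monotonic()
            elapsed = now - self.last_refill
            self.tokens = min(self.capacity, self.tokens + elapsed * self.refill_rate)
            self.last_refill = now

    bucket = MiniBucket(capacity=10, refill_rate=1)
    bucket.tokens = 0
    time.sleep(0.01)
    bucket.refill()
    assert bucket.tokens > 0  # same property the removed doctest checked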
@@ -20,12 +20,6 @@ from edsl.jobs.interviews.retry_management import retry_strategy
 from edsl.jobs.interviews.InterviewTaskBuildingMixin import InterviewTaskBuildingMixin
 from edsl.jobs.interviews.InterviewStatusMixin import InterviewStatusMixin
 
-import asyncio
-
-
-def run_async(coro):
-    return asyncio.run(coro)
-
 
 class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
     """
@@ -36,14 +30,14 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
 
     def __init__(
         self,
-        agent: "Agent",
-        survey: "Survey",
-        scenario: "Scenario",
-        model: Type["LanguageModel"],
+        agent: 'Agent',
+        survey: 'Survey',
+        scenario: 'Scenario',
+        model: Type['LanguageModel'],
         debug: Optional[bool] = False,
         iteration: int = 0,
-        cache: Optional["Cache"] = None,
-        sidecar_model: Optional["LanguageModel"] = None,
+        cache: "Cache" = None,
+        sidecar_model: 'LanguageModel' = None,
     ):
         """Initialize the Interview instance.
 
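Beyond the quote-style churn, the `+` side drops `Optional[...]` while keeping the `= None` defaults. Under PEP 484, a `None` default does not imply an optional type, so strict checkers flag signatures like `cache: "Cache" = None`. An illustrative reduction (local class, not the library's):

    from typing import Optional

    class Cache: ...

    def old_style(cache: Optional[Cache] = None) -> None: ...  # accepted by mypy --strict
    def new_style(cache: Cache = None) -> None: ...            # flagged: implicit Optional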
@@ -51,24 +45,6 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         :param survey: the survey being administered to the agent.
         :param scenario: the scenario that populates the survey questions.
         :param model: the language model used to answer the questions.
-        :param debug: if True, run without calls to the language model.
-        :param iteration: the iteration number of the interview.
-        :param cache: the cache used to store the answers.
-        :param sidecar_model: a sidecar model used to answer questions.
-
-        >>> i = Interview.example()
-        >>> i.task_creators
-        {}
-
-        >>> i.exceptions
-        {}
-
-        >>> _ = asyncio.run(i.async_conduct_interview())
-        >>> i.task_status_logs['q0']
-        [{'log_time': ..., 'value': <TaskStatus.NOT_STARTED: 1>}, {'log_time': ..., 'value': <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>}, {'log_time': ..., 'value': <TaskStatus.API_CALL_IN_PROGRESS: 7>}, {'log_time': ..., 'value': <TaskStatus.SUCCESS: 8>}]
-
-        >>> i.to_index
-        {'q0': 0, 'q1': 1, 'q2': 2}
 
         """
         self.agent = agent
@@ -88,7 +64,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         self.exceptions = InterviewExceptionCollection()
         self._task_status_log_dict = InterviewStatusLog()
 
-        # dictionary mapping question names to their index in the survey.
+        # dictionary mapping question names to their index in the survey."""
         self.to_index = {
             question_name: index
             for index, question_name in enumerate(self.survey.question_names)
@@ -100,16 +76,14 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         model_buckets: ModelBuckets = None,
         debug: bool = False,
         stop_on_exception: bool = False,
-        sidecar_model: Optional["LanguageModel"] = None,
+        sidecar_model: Optional[LanguageModel] = None,
     ) -> tuple["Answers", List[dict[str, Any]]]:
         """
         Conduct an Interview asynchronously.
-        It returns a tuple with the answers and a list of valid results.
 
         :param model_buckets: a dictionary of token buckets for the model.
         :param debug: run without calls to LLM.
         :param stop_on_exception: if True, stops the interview if an exception is raised.
-        :param sidecar_model: a sidecar model used to answer questions.
 
         Example usage:
 
@@ -117,37 +91,17 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         >>> result, _ = asyncio.run(i.async_conduct_interview())
         >>> result['q0']
         'yes'
-
-        >>> i = Interview.example(throw_exception = True)
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        Attempt 1 failed with exception:This is a test error now waiting 1.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-        Attempt 2 failed with exception:This is a test error now waiting 2.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-        Attempt 3 failed with exception:This is a test error now waiting 4.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-        Attempt 4 failed with exception:This is a test error now waiting 8.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
-        <BLANKLINE>
-        <BLANKLINE>
-
-        >>> i.exceptions
-        {'q0': [{'exception': "Exception('This is a test error')", 'time': ..., 'traceback': ...
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview(stop_on_exception = True))
-        Traceback (most recent call last):
-        ...
-        asyncio.exceptions.CancelledError
         """
         self.sidecar_model = sidecar_model
 
         # if no model bucket is passed, create an 'infinity' bucket with no rate limits
+        # print("model_buckets", model_buckets)
         if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
             model_buckets = ModelBuckets.infinity_bucket()
 
+        # FOR TESTING
+        # model_buckets = ModelBuckets.infinity_bucket()
+
         ## build the tasks using the InterviewTaskBuildingMixin
         ## This is the key part---it creates a task for each question,
         ## with dependencies on the questions that must be answered before this one can be answered.
@@ -169,14 +123,6 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         It iterates through the tasks and invigilators, and yields the results of the tasks that are done.
         If a task is not done, it raises a ValueError.
         If an exception is raised in the task, it records the exception in the Interview instance except if the task was cancelled, which is expected behavior.
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> results = list(i._extract_valid_results())
-        >>> len(results) == len(i.survey)
-        True
-        >>> type(results[0])
-        <class 'edsl.data_transfer_models.AgentResponseDict'>
         """
         assert len(self.tasks) == len(self.invigilators)
 
@@ -194,18 +140,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
             yield result
 
     def _record_exception(self, task, exception: Exception) -> None:
-        """Record an exception in the Interview instance.
-
-        It records the exception in the Interview instance, with the task name and the exception entry.
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> i.exceptions
-        {}
-        >>> i._record_exception(i.tasks[0], Exception("An exception occurred."))
-        >>> i.exceptions
-        {'q0': [{'exception': "Exception('An exception occurred.')", 'time': ..., 'traceback': 'NoneType: None\\n'}]}
-        """
+        """Record an exception in the Interview instance."""
         exception_entry = InterviewExceptionEntry(
             exception=repr(exception),
             time=time.time(),
@@ -221,10 +156,6 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         It is used to determine the order in which questions should be answered.
         This reflects both agent 'memory' considerations and 'skip' logic.
         The 'textify' parameter is set to True, so that the question names are returned as strings rather than integer indices.
-
-        >>> i = Interview.example()
-        >>> i.dag == {'q2': {'q0'}, 'q1': {'q0'}}
-        True
         """
         return self.survey.dag(textify=True)
 
@@ -235,15 +166,8 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         """Return a string representation of the Interview instance."""
         return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"
 
-    def duplicate(self, iteration: int, cache: "Cache") -> Interview:
-        """Duplicate the interview, but with a new iteration number and cache.
-
-        >>> i = Interview.example()
-        >>> i2 = i.duplicate(1, None)
-        >>> i.iteration + 1 == i2.iteration
-        True
-
-        """
+    def duplicate(self, iteration: int, cache: Cache) -> Interview:
+        """Duplicate the interview, but with a new iteration number and cache."""
         return Interview(
             agent=self.agent,
             survey=self.survey,
@@ -254,7 +178,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         )
 
     @classmethod
-    def example(self, throw_exception: bool = False) -> Interview:
+    def example(self):
         """Return an example Interview instance."""
         from edsl.agents import Agent
         from edsl.surveys import Survey
@@ -269,15 +193,66 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         survey = Survey.example()
         scenario = Scenario.example()
         model = LanguageModel.example()
-        if throw_exception:
-            model = LanguageModel.example(test_model=True, throw_exception=True)
-            agent = Agent.example()
-            return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
         return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
 
 
 if __name__ == "__main__":
     import doctest
 
-    # add ellipsis
-    doctest.testmod(optionflags=doctest.ELLIPSIS)
+    doctest.testmod()
+    # from edsl import Model
+    # from edsl.agents import Agent
+    # from edsl.surveys import Survey
+    # from edsl.scenarios import Scenario
+    # from edsl.questions import QuestionMultipleChoice
+
+    # # from edsl.jobs.Interview import Interview
+
+    # # a survey with skip logic
+    # q0 = QuestionMultipleChoice(
+    #     question_text="Do you like school?",
+    #     question_options=["yes", "no"],
+    #     question_name="q0",
+    # )
+    # q1 = QuestionMultipleChoice(
+    #     question_text="Why not?",
+    #     question_options=["killer bees in cafeteria", "other"],
+    #     question_name="q1",
+    # )
+    # q2 = QuestionMultipleChoice(
+    #     question_text="Why?",
+    #     question_options=["**lack*** of killer bees in cafeteria", "other"],
+    #     question_name="q2",
+    # )
+    # s = Survey(questions=[q0, q1, q2])
+    # s = s.add_rule(q0, "q0 == 'yes'", q2)
+
+    # # create an interview
+    # a = Agent(traits=None)
+
+    # def direct_question_answering_method(self, question, scenario):
+    #     """Answer a question directly."""
+    #     raise Exception("Error!")
+    #     # return "yes"
+
+    # a.add_direct_question_answering_method(direct_question_answering_method)
+    # scenario = Scenario()
+    # m = Model()
+    # I = Interview(agent=a, survey=s, scenario=scenario, model=m)
+
+    # result = asyncio.run(I.async_conduct_interview())
+    # # # conduct five interviews
+    # # for _ in range(5):
+    # #     I.conduct_interview(debug=True)
+
+    # # # replace missing answers
+    # # I
+    # # repr(I)
+    # # eval(repr(I))
+    # # print(I.task_status_logs.status_matrix(20))
+    # status_matrix = I.task_status_logs.status_matrix(20)
+    # numerical_matrix = I.task_status_logs.numerical_matrix(20)
+    # I.task_status_logs.visualize()
+
+    # I.exceptions.print()
+    # I.exceptions.ascii_table()
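The `-` side ran this module's doctests with `doctest.ELLIPSIS` because the removed examples embed `...` placeholders (e.g. `'time': ...` in the exception dicts); a plain `doctest.testmod()` treats those literally and fails. A minimal, self-contained reproduction of why the flag mattered:

    import doctest

    def stamped() -> dict:
        """
        >>> stamped()
        {'exception': 'boom', 'time': ...}
        """
        return {"exception": "boom", "time": 1718000000.0}

    # Passes only because ELLIPSIS lets '...' match the varying timestamp:
    doctest.testmod(optionflags=doctest.ELLIPSIS)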
@@ -25,7 +25,7 @@ TIMEOUT = float(CONFIG.get("EDSL_API_TIMEOUT"))
 class InterviewTaskBuildingMixin:
     def _build_invigilators(
         self, debug: bool
-    ) -> Generator["InvigilatorBase", None, None]:
+    ) -> Generator[InvigilatorBase, None, None]:
         """Create an invigilator for each question.
 
         :param debug: whether to use debug mode, in which case `InvigilatorDebug` is used.
@@ -35,7 +35,7 @@ class InterviewTaskBuildingMixin:
         for question in self.survey.questions:
             yield self._get_invigilator(question=question, debug=debug)
 
-    def _get_invigilator(self, question: "QuestionBase", debug: bool) -> "Invigilator":
+    def _get_invigilator(self, question: QuestionBase, debug: bool) -> "Invigilator":
         """Return an invigilator for the given question.
 
         :param question: the question to be answered
@@ -84,7 +84,7 @@ class InterviewTaskBuildingMixin:
         return tuple(tasks)  # , invigilators
 
     def _get_tasks_that_must_be_completed_before(
-        self, *, tasks: list[asyncio.Task], question: "QuestionBase"
+        self, *, tasks: list[asyncio.Task], question: QuestionBase
     ) -> Generator[asyncio.Task, None, None]:
         """Return the tasks that must be completed before the given question can be answered.
 
@@ -100,7 +100,7 @@ class InterviewTaskBuildingMixin:
     def _create_question_task(
         self,
         *,
-        question: "QuestionBase",
+        question: QuestionBase,
         tasks_that_must_be_completed_before: list[asyncio.Task],
         model_buckets: ModelBuckets,
         debug: bool,
@@ -175,14 +175,24 @@ class InterviewTaskBuildingMixin:
 
             self._add_answer(response=response, question=question)
 
+            # With the answer to the question, we can now cancel any skipped questions
             self._cancel_skipped_questions(question)
             return AgentResponseDict(**response)
         except Exception as e:
             raise e
-
-    def _add_answer(
-        self, response: "AgentResponseDict", question: "QuestionBase"
-    ) -> None:
+            # import traceback
+            # print("Exception caught:")
+            # traceback.print_exc()
+
+            # # Extract and print the traceback info
+            # tb = e.__traceback__
+            # while tb is not None:
+            #     print(f"File {tb.tb_frame.f_code.co_filename}, line {tb.tb_lineno}, in {tb.tb_frame.f_code.co_name}")
+            #     tb = tb.tb_next
+            # breakpoint()
+            # raise e
+
+    def _add_answer(self, response: AgentResponseDict, question: QuestionBase) -> None:
         """Add the answer to the answers dictionary.
 
         :param response: the response to the question.
@@ -190,7 +200,7 @@ class InterviewTaskBuildingMixin:
         """
         self.answers.add_answer(response=response, question=question)
 
-    def _skip_this_question(self, current_question: "QuestionBase") -> bool:
+    def _skip_this_question(self, current_question: QuestionBase) -> bool:
         """Determine if the current question should be skipped.
 
         :param current_question: the question to be answered.
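These hunks consistently unquote forward references (`"QuestionBase"` → `QuestionBase`, `"InvigilatorBase"` → `InvigilatorBase`). An unquoted name must resolve when the module is imported, or it raises `NameError`; the quoted form defers resolution to type-check time. The two standard ways to keep unquoted annotations safe (the import path below is our guess at the edsl layout):

    from __future__ import annotations      # PEP 563: annotations are not evaluated at runtime
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:                       # import visible to type checkers only
        from edsl.questions import QuestionBase

    def _get_invigilator(question: QuestionBase, debug: bool) -> None:
        ...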
@@ -89,7 +89,6 @@ class JobsRunnerAsyncio(JobsRunnerStatusMixin):
 
     async def run_async(self, cache=None) -> Results:
         from edsl.results.Results import Results
-
         if cache is None:
             self.cache = Cache()
         else:
@@ -101,7 +100,6 @@ class JobsRunnerAsyncio(JobsRunnerStatusMixin):
 
     def simple_run(self):
         from edsl.results.Results import Results
-
         data = asyncio.run(self.run_async())
         return Results(survey=self.jobs.survey, data=data)
 
@@ -144,14 +144,18 @@ class QuestionTaskCreator(UserList):
             self.task_status = TaskStatus.FAILED
             raise e
 
-        if results.get("cache_used", False):
+        ## This isn't working
+        #breakpoint()
+        if results.get('cache_used', False):
             self.tokens_bucket.add_tokens(requested_tokens)
             self.requests_bucket.add_tokens(1)
             self.from_cache = True
-            # Turbo mode means that we don't wait for tokens or requests.
+            #print("Turning on turbo!")
             self.tokens_bucket.turbo_mode_on()
             self.requests_bucket.turbo_mode_on()
         else:
+            #breakpoint()
+            #print("Turning off turbo!")
             self.tokens_bucket.turbo_mode_off()
             self.requests_bucket.turbo_mode_off()
 
@@ -159,6 +163,7 @@ class QuestionTaskCreator(UserList):
 
         tracker = self.cached_token_usage if self.from_cache else self.new_token_usage
 
+
         # TODO: This is hacky. The 'func' call should return an object that definitely has a 'usage' key.
         usage = results.get("usage", {"prompt_tokens": 0, "completion_tokens": 0})
         prompt_tokens = usage.get("prompt_tokens", 0)
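Stripped of the debug noise, the branch above implements a refund-and-accelerate policy: a cache hit returns the reserved tokens and request slot to their buckets and lifts rate limits (turbo mode); a miss keeps them enforced. A condensed sketch of that control flow (the function and parameter names are ours, and the bucket objects are duck-typed stand-ins for the edsl buckets):

    def settle_budget(results: dict, tokens_bucket, requests_bucket, requested_tokens: int) -> bool:
        """Return True when the response came from cache (mirrors from_cache)."""
        if results.get("cache_used", False):
            tokens_bucket.add_tokens(requested_tokens)  # refund the reserved tokens
            requests_bucket.add_tokens(1)               # refund the request slot
            tokens_bucket.turbo_mode_on()               # cached answers need no throttling
            requests_bucket.turbo_mode_on()
            return True
        tokens_bucket.turbo_mode_off()
        requests_bucket.turbo_mode_off()
        return False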