edsl 0.1.29.dev2__py3-none-any.whl → 0.1.29.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
edsl/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.1.29.dev2"
1
+ __version__ = "0.1.29.dev5"
edsl/agents/Agent.py CHANGED
@@ -273,6 +273,7 @@ class Agent(Base):
273
273
  *,
274
274
  question: QuestionBase,
275
275
  cache,
276
+ survey: Optional["Survey"] = None,
276
277
  scenario: Optional[Scenario] = None,
277
278
  model: Optional[LanguageModel] = None,
278
279
  debug: bool = False,
@@ -301,6 +302,7 @@ class Agent(Base):
301
302
  invigilator = self._create_invigilator(
302
303
  question=question,
303
304
  scenario=scenario,
305
+ survey=survey,
304
306
  model=model,
305
307
  debug=debug,
306
308
  memory_plan=memory_plan,
@@ -317,6 +319,7 @@ class Agent(Base):
317
319
  question: QuestionBase,
318
320
  cache: Cache,
319
321
  scenario: Optional[Scenario] = None,
322
+ survey: Optional["Survey"] = None,
320
323
  model: Optional[LanguageModel] = None,
321
324
  debug: bool = False,
322
325
  memory_plan: Optional[MemoryPlan] = None,
@@ -349,6 +352,7 @@ class Agent(Base):
349
352
  question=question,
350
353
  cache=cache,
351
354
  scenario=scenario,
355
+ survey=survey,
352
356
  model=model,
353
357
  debug=debug,
354
358
  memory_plan=memory_plan,
@@ -366,6 +370,7 @@ class Agent(Base):
366
370
  cache: Optional[Cache] = None,
367
371
  scenario: Optional[Scenario] = None,
368
372
  model: Optional[LanguageModel] = None,
373
+ survey: Optional["Survey"] = None,
369
374
  debug: bool = False,
370
375
  memory_plan: Optional[MemoryPlan] = None,
371
376
  current_answers: Optional[dict] = None,
@@ -404,6 +409,7 @@ class Agent(Base):
404
409
  self,
405
410
  question=question,
406
411
  scenario=scenario,
412
+ survey=survey,
407
413
  model=model,
408
414
  memory_plan=memory_plan,
409
415
  current_answers=current_answers,
@@ -479,6 +485,12 @@ class Agent(Base):
479
485
  """
480
486
  return self.data == other.data
481
487
 
488
+ def __getattr__(self, name):
489
+ # This will be called only if 'name' is not found in the usual places
490
+ if name in self.traits:
491
+ return self.traits[name]
492
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
493
+
482
494
  def print(self) -> None:
483
495
  from rich import print_json
484
496
  import json
edsl/agents/AgentList.py CHANGED
@@ -241,16 +241,16 @@ class AgentList(UserList, Base):
241
241
 
242
242
  """
243
243
  return cls([Agent.example(), Agent.example()])
244
-
244
+
245
245
  @classmethod
246
- def from_list(self, trait_name:str, values: List[Any]):
246
+ def from_list(self, trait_name: str, values: List[Any]):
247
247
  """Create an AgentList from a list of values.
248
248
 
249
249
  :param trait_name: The name of the trait.
250
250
  :param values: A list of values.
251
251
  """
252
252
  return AgentList([Agent({trait_name: value}) for value in values])
253
-
253
+
254
254
  def __mul__(self, other: AgentList) -> AgentList:
255
255
  """Takes the cross product of two AgentLists."""
256
256
  from itertools import product
@@ -260,7 +260,6 @@ class AgentList(UserList, Base):
260
260
  new_sl.append(s1 + s2)
261
261
  return AgentList(new_sl)
262
262
 
263
-
264
263
  def code(self, string=True) -> Union[str, list[str]]:
265
264
  """Return code to construct an AgentList.
266
265
 
@@ -46,6 +46,7 @@ class InvigilatorBase(ABC):
46
46
  model: LanguageModel,
47
47
  memory_plan: MemoryPlan,
48
48
  current_answers: dict,
49
+ survey: Optional["Survey"],
49
50
  cache: Optional[Cache] = None,
50
51
  iteration: Optional[int] = 1,
51
52
  additional_prompt_data: Optional[dict] = None,
@@ -57,11 +58,12 @@ class InvigilatorBase(ABC):
57
58
  self.scenario = scenario
58
59
  self.model = model
59
60
  self.memory_plan = memory_plan
60
- self.current_answers = current_answers
61
+ self.current_answers = current_answers or {}
61
62
  self.iteration = iteration
62
63
  self.additional_prompt_data = additional_prompt_data
63
64
  self.cache = cache
64
65
  self.sidecar_model = sidecar_model
66
+ self.survey = survey
65
67
 
66
68
  def __repr__(self) -> str:
67
69
  """Return a string representation of the Invigilator.
@@ -76,7 +78,7 @@ class InvigilatorBase(ABC):
76
78
  """Return an AgentResponseDict used in case the question-asking fails.
77
79
 
78
80
  >>> InvigilatorBase.example().get_failed_task_result()
79
- {'answer': None, 'comment': 'Failed to get response', 'question_name': 'how_feeling', ...}
81
+ {'answer': None, 'comment': 'Failed to get response', ...}
80
82
  """
81
83
  return AgentResponseDict(
82
84
  answer=None,
@@ -86,11 +88,8 @@ class InvigilatorBase(ABC):
86
88
  )
87
89
 
88
90
  def get_prompts(self) -> Dict[str, Prompt]:
89
- """Return the prompt used.
91
+ """Return the prompt used."""
90
92
 
91
- >>> InvigilatorBase.example().get_prompts()
92
- {'user_prompt': Prompt(text=\"""NA\"""), 'system_prompt': Prompt(text=\"""NA\""")}
93
- """
94
93
  return {
95
94
  "user_prompt": Prompt("NA"),
96
95
  "system_prompt": Prompt("NA"),
@@ -129,7 +128,7 @@ class InvigilatorBase(ABC):
129
128
  )
130
129
 
131
130
  @classmethod
132
- def example(cls, throw_an_exception=False):
131
+ def example(cls, throw_an_exception=False, question=None, scenario=None):
133
132
  """Return an example invigilator.
134
133
 
135
134
  >>> InvigilatorBase.example()
@@ -167,15 +166,20 @@ class InvigilatorBase(ABC):
167
166
  if throw_an_exception:
168
167
  model.throw_an_exception = True
169
168
  agent = Agent.example()
170
- question = QuestionMultipleChoice.example()
171
- scenario = Scenario.example()
169
+ # question = QuestionMultipleChoice.example()
170
+ from edsl.surveys import Survey
171
+
172
+ survey = Survey.example()
173
+ question = question or survey.questions[0]
174
+ scenario = scenario or Scenario.example()
172
175
  # memory_plan = None #memory_plan = MemoryPlan()
173
176
  from edsl import Survey
174
177
 
175
178
  memory_plan = MemoryPlan(survey=Survey.example())
176
179
  current_answers = None
180
+ from edsl.agents.PromptConstructionMixin import PromptConstructorMixin
177
181
 
178
- class InvigilatorExample(InvigilatorBase):
182
+ class InvigilatorExample(PromptConstructorMixin, InvigilatorBase):
179
183
  """An example invigilator."""
180
184
 
181
185
  async def async_answer_question(self):
@@ -188,6 +192,7 @@ class InvigilatorBase(ABC):
188
192
  agent=agent,
189
193
  question=question,
190
194
  scenario=scenario,
195
+ survey=survey,
191
196
  model=model,
192
197
  memory_plan=memory_plan,
193
198
  current_answers=current_answers,
@@ -1,134 +1,374 @@
1
- from typing import Dict, Any
1
+ from typing import Dict, Any, Optional
2
+ from collections import UserList
2
3
 
4
+ # from functools import reduce
3
5
  from edsl.prompts.Prompt import Prompt
4
- from edsl.utilities.decorators import sync_wrapper, jupyter_nb_handler
6
+
7
+ # from edsl.utilities.decorators import sync_wrapper, jupyter_nb_handler
5
8
  from edsl.prompts.registry import get_classes as prompt_lookup
6
9
  from edsl.exceptions import QuestionScenarioRenderError
7
10
 
11
+ import enum
8
12
 
9
- class PromptConstructorMixin:
10
- def construct_system_prompt(self) -> Prompt:
11
- """Construct the system prompt for the LLM call."""
12
13
 
13
- agent_instructions = self._get_agent_instructions_prompt()
14
- persona_prompt = self._get_persona_prompt()
14
+ class PromptComponent(enum.Enum):
15
+ AGENT_INSTRUCTIONS = "agent_instructions"
16
+ AGENT_PERSONA = "agent_persona"
17
+ QUESTION_INSTRUCTIONS = "question_instructions"
18
+ PRIOR_QUESTION_MEMORY = "prior_question_memory"
15
19
 
16
- return (
17
- agent_instructions
18
- + " " * int(len(persona_prompt.text) > 0)
19
- + persona_prompt
20
- )
21
20
 
22
- def _get_persona_prompt(self) -> Prompt:
23
- """Get the persona prompt.
21
+ class PromptList(UserList):
22
+ separator = Prompt(" ")
23
+
24
+ def reduce(self):
25
+ """Reduce the list of prompts to a single prompt.
24
26
 
25
- The is the description of the agent to the LLM.
27
+ >>> p = PromptList([Prompt("You are a happy-go lucky agent."), Prompt("You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}")])
28
+ >>> p.reduce()
29
+ Prompt(text=\"""You are a happy-go lucky agent. You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
26
30
 
27
- The agent_persona is constructed when the Agent is created.
28
- If the agent is passed a template for "agent_trait_presentation_template" that is used to construct the persona.
29
- If it does not exist, the persona is looked up in the prompt registry
30
31
  """
31
- if not hasattr(self.agent, "agent_persona"):
32
- applicable_prompts = prompt_lookup(
33
- component_type="agent_persona",
34
- model=self.model.model,
32
+ p = self[0]
33
+ for prompt in self[1:]:
34
+ if len(prompt) > 0:
35
+ p = p + self.separator + prompt
36
+ return p
37
+
38
+
39
+ class PromptPlan:
40
+ """A plan for constructing prompts for the LLM call.
41
+ Every prompt plan has a user prompt order and a system prompt order.
42
+ It must contain each of the values in the PromptComponent enum.
43
+
44
+
45
+ >>> p = PromptPlan(user_prompt_order=(PromptComponent.AGENT_INSTRUCTIONS, PromptComponent.AGENT_PERSONA),system_prompt_order=(PromptComponent.QUESTION_INSTRUCTIONS, PromptComponent.PRIOR_QUESTION_MEMORY))
46
+ >>> p._is_valid_plan()
47
+ True
48
+
49
+ >>> p.arrange_components(agent_instructions=1, agent_persona=2, question_instructions=3, prior_question_memory=4)
50
+ {'user_prompt': ..., 'system_prompt': ...}
51
+
52
+ >>> p = PromptPlan(user_prompt_order=("agent_instructions", ), system_prompt_order=("question_instructions", "prior_question_memory"))
53
+ Traceback (most recent call last):
54
+ ...
55
+ ValueError: Invalid plan: must contain each value of PromptComponent exactly once.
56
+
57
+ """
58
+
59
+ def __init__(
60
+ self,
61
+ user_prompt_order: Optional[tuple] = None,
62
+ system_prompt_order: Optional[tuple] = None,
63
+ ):
64
+ """Initialize the PromptPlan."""
65
+
66
+ if user_prompt_order is None:
67
+ user_prompt_order = (
68
+ PromptComponent.QUESTION_INSTRUCTIONS,
69
+ PromptComponent.PRIOR_QUESTION_MEMORY,
70
+ )
71
+ if system_prompt_order is None:
72
+ system_prompt_order = (
73
+ PromptComponent.AGENT_INSTRUCTIONS,
74
+ PromptComponent.AGENT_PERSONA,
35
75
  )
36
- persona_prompt_template = applicable_prompts[0]()
37
- else:
38
- persona_prompt_template = self.agent.agent_persona
39
-
40
- # TODO: This multiple passing of agent traits - not sure if it is necessary. Not harmful.
41
- if undefined := persona_prompt_template.undefined_template_variables(
42
- self.agent.traits
43
- | {"traits": self.agent.traits}
44
- | {"codebook": self.agent.codebook}
45
- | {"traits": self.agent.traits}
46
- ):
47
- raise QuestionScenarioRenderError(
48
- f"Agent persona still has variables that were not rendered: {undefined}"
76
+
77
+ # very common way to screw this up given how python treats single strings as iterables
78
+ if isinstance(user_prompt_order, str):
79
+ user_prompt_order = (user_prompt_order,)
80
+
81
+ if isinstance(system_prompt_order, str):
82
+ system_prompt_order = (system_prompt_order,)
83
+
84
+ if not isinstance(user_prompt_order, tuple):
85
+ raise TypeError(
86
+ f"Expected a tuple, but got {type(user_prompt_order).__name__}"
49
87
  )
50
88
 
51
- persona_prompt = persona_prompt_template.render(
52
- self.agent.traits | {"traits": self.agent.traits},
53
- codebook=self.agent.codebook,
54
- traits=self.agent.traits,
89
+ if not isinstance(system_prompt_order, tuple):
90
+ raise TypeError(
91
+ f"Expected a tuple, but got {type(system_prompt_order).__name__}"
92
+ )
93
+
94
+ self.user_prompt_order = self._convert_to_enum(user_prompt_order)
95
+ self.system_prompt_order = self._convert_to_enum(system_prompt_order)
96
+ if not self._is_valid_plan():
97
+ raise ValueError(
98
+ "Invalid plan: must contain each value of PromptComponent exactly once."
99
+ )
100
+
101
+ def _convert_to_enum(self, prompt_order: tuple):
102
+ """Convert string names to PromptComponent enum values."""
103
+ return tuple(
104
+ PromptComponent(component) if isinstance(component, str) else component
105
+ for component in prompt_order
55
106
  )
56
107
 
57
- if persona_prompt.has_variables:
58
- raise QuestionScenarioRenderError(
59
- "Agent persona still has variables that were not rendered."
108
+ def _is_valid_plan(self):
109
+ """Check if the plan is valid."""
110
+ combined = self.user_prompt_order + self.system_prompt_order
111
+ return set(combined) == set(PromptComponent)
112
+
113
+ def arrange_components(self, **kwargs) -> Dict[PromptComponent, Prompt]:
114
+ """Arrange the components in the order specified by the plan."""
115
+ # check is valid components passed
116
+ component_strings = set([pc.value for pc in PromptComponent])
117
+ if not set(kwargs.keys()) == component_strings:
118
+ raise ValueError(
119
+ f"Invalid components passed: {set(kwargs.keys())} but expected {PromptComponent}"
60
120
  )
61
- return persona_prompt
62
121
 
63
- def _get_agent_instructions_prompt(self) -> Prompt:
64
- """Get the agent instructions prompt."""
65
- applicable_prompts = prompt_lookup(
66
- component_type="agent_instructions",
67
- model=self.model.model,
122
+ user_prompt = PromptList(
123
+ [kwargs[component.value] for component in self.user_prompt_order]
68
124
  )
69
- if len(applicable_prompts) == 0:
70
- raise Exception("No applicable prompts found")
71
- return applicable_prompts[0](text=self.agent.instruction)
72
-
73
- def _get_question_instructions(self) -> Prompt:
74
- """Get the instructions for the question."""
75
- # applicable_prompts = prompt_lookup(
76
- # component_type="question_instructions",
77
- # question_type=self.question.question_type,
78
- # model=self.model.model,
79
- # )
80
- ## Get the question instructions and renders with the scenario & question.data
81
- # question_prompt = applicable_prompts[0]()
82
- question_prompt = self.question.get_instructions(model=self.model.model)
83
-
84
- undefined_template_variables = question_prompt.undefined_template_variables(
85
- self.question.data | self.scenario
125
+ system_prompt = PromptList(
126
+ [kwargs[component.value] for component in self.system_prompt_order]
86
127
  )
87
- if undefined_template_variables:
88
- print(undefined_template_variables)
89
- raise QuestionScenarioRenderError(
90
- "Question instructions still has variables."
128
+ return {"user_prompt": user_prompt, "system_prompt": system_prompt}
129
+
130
+ def get_prompts(self, **kwargs) -> Dict[str, Prompt]:
131
+ """Get both prompts for the LLM call."""
132
+ prompts = self.arrange_components(**kwargs)
133
+ return {
134
+ "user_prompt": prompts["user_prompt"].reduce(),
135
+ "system_prompt": prompts["system_prompt"].reduce(),
136
+ }
137
+
138
+
139
+ class PromptConstructorMixin:
140
+ """Mixin for constructing prompts for the LLM call.
141
+
142
+ The pieces of a prompt are:
143
+ - The agent instructions - "You are answering questions as if you were a human. Do not break character."
144
+ - The persona prompt - "You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}"
145
+ - The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
146
+ - The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
147
+
148
+ This is mixed into the Invigilator class.
149
+ """
150
+
151
+ prompt_plan = PromptPlan()
152
+
153
+ @property
154
+ def agent_instructions_prompt(self) -> Prompt:
155
+ """
156
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
157
+ >>> i = InvigilatorBase.example()
158
+ >>> i.agent_instructions_prompt
159
+ Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
160
+ """
161
+ if not hasattr(self, "_agent_instructions_prompt"):
162
+ applicable_prompts = prompt_lookup(
163
+ component_type="agent_instructions",
164
+ model=self.model.model,
165
+ )
166
+ if len(applicable_prompts) == 0:
167
+ raise Exception("No applicable prompts found")
168
+ self._agent_instructions_prompt = applicable_prompts[0](
169
+ text=self.agent.instruction
91
170
  )
171
+ return self._agent_instructions_prompt
92
172
 
93
- return question_prompt.render(self.question.data | self.scenario)
173
+ @property
174
+ def agent_persona_prompt(self) -> Prompt:
175
+ """
176
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
177
+ >>> i = InvigilatorBase.example()
178
+ >>> i.agent_persona_prompt
179
+ Prompt(text=\"""You are an agent with the following persona:
180
+ {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
181
+
182
+ """
183
+ if not hasattr(self, "_agent_persona_prompt"):
184
+ if not hasattr(self.agent, "agent_persona"):
185
+ applicable_prompts = prompt_lookup(
186
+ component_type="agent_persona",
187
+ model=self.model.model,
188
+ )
189
+ persona_prompt_template = applicable_prompts[0]()
190
+ else:
191
+ persona_prompt_template = self.agent.agent_persona
192
+
193
+ # TODO: This multiple passing of agent traits - not sure if it is necessary. Not harmful.
194
+ if undefined := persona_prompt_template.undefined_template_variables(
195
+ self.agent.traits
196
+ | {"traits": self.agent.traits}
197
+ | {"codebook": self.agent.codebook}
198
+ | {"traits": self.agent.traits}
199
+ ):
200
+ raise QuestionScenarioRenderError(
201
+ f"Agent persona still has variables that were not rendered: {undefined}"
202
+ )
203
+
204
+ persona_prompt = persona_prompt_template.render(
205
+ self.agent.traits | {"traits": self.agent.traits},
206
+ codebook=self.agent.codebook,
207
+ traits=self.agent.traits,
208
+ )
209
+ if persona_prompt.has_variables:
210
+ raise QuestionScenarioRenderError(
211
+ "Agent persona still has variables that were not rendered."
212
+ )
213
+ self._agent_persona_prompt = persona_prompt
214
+
215
+ return self._agent_persona_prompt
216
+
217
+ @property
218
+ def question_instructions_prompt(self) -> Prompt:
219
+ """
220
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
221
+ >>> i = InvigilatorBase.example()
222
+ >>> i.question_instructions_prompt
223
+ Prompt(text=\"""You are being asked the following question: Do you like school?
224
+ The options are
225
+ <BLANKLINE>
226
+ 0: yes
227
+ <BLANKLINE>
228
+ 1: no
229
+ <BLANKLINE>
230
+ Return a valid JSON formatted like this, selecting only the number of the option:
231
+ {"answer": <put answer code here>, "comment": "<put explanation here>"}
232
+ Only 1 option may be selected.\""")
233
+
234
+ >>> from edsl import QuestionFreeText
235
+ >>> q = QuestionFreeText(question_text = "Consider {{ X }}. What is your favorite color?", question_name = "q_color")
236
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
237
+ >>> i = InvigilatorBase.example(question = q)
238
+ >>> i.question_instructions_prompt
239
+ Traceback (most recent call last):
240
+ ...
241
+ edsl.exceptions.questions.QuestionScenarioRenderError: Question instructions still has variables: ['X'].
242
+
243
+
244
+ >>> from edsl import QuestionFreeText
245
+ >>> q = QuestionFreeText(question_text = "You were asked the question '{{ q0.question_text }}'. What is your favorite color?", question_name = "q_color")
246
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
247
+ >>> i = InvigilatorBase.example(question = q)
248
+ >>> i.question_instructions_prompt
249
+ Prompt(text=\"""You are being asked the following question: You were asked the question 'Do you like school?'. What is your favorite color?
250
+ Return a valid JSON formatted like this:
251
+ {"answer": "<put free text answer here>"}\""")
252
+
253
+ >>> from edsl import QuestionFreeText
254
+ >>> q = QuestionFreeText(question_text = "You stated '{{ q0.answer }}'. What is your favorite color?", question_name = "q_color")
255
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
256
+ >>> i = InvigilatorBase.example(question = q)
257
+ >>> i.current_answers = {"q0": "I like school"}
258
+ >>> i.question_instructions_prompt
259
+ Prompt(text=\"""You are being asked the following question: You stated 'I like school'. What is your favorite color?
260
+ Return a valid JSON formatted like this:
261
+ {"answer": "<put free text answer here>"}\""")
262
+
263
+
264
+ """
265
+ if not hasattr(self, "_question_instructions_prompt"):
266
+ question_prompt = self.question.get_instructions(model=self.model.model)
267
+
268
+ # TODO: Try to populate the answers in the question object if they are available
269
+ d = self.survey.question_names_to_questions()
270
+ for question, answer in self.current_answers.items():
271
+ if question in d:
272
+ d[question].answer = answer
273
+ else:
274
+ # adds a comment to the question
275
+ if (new_question := question.split("_comment")[0]) in d:
276
+ d[new_question].comment = answer
277
+
278
+ rendered_instructions = question_prompt.render(self.question.data | self.scenario | d | {'agent': self.agent})
279
+
280
+ undefined_template_variables = (
281
+ rendered_instructions.undefined_template_variables({})
282
+ )
283
+
284
+ # Check if it's the name of a question in the survey
285
+ for question_name in self.survey.question_names:
286
+ if question_name in undefined_template_variables:
287
+ print(
288
+ "Question name found in undefined_template_variables: ",
289
+ question_name,
290
+ )
291
+
292
+ if undefined_template_variables:
293
+ print(undefined_template_variables)
294
+ raise QuestionScenarioRenderError(
295
+ f"Question instructions still has variables: {undefined_template_variables}."
296
+ )
297
+
298
+ self._question_instructions_prompt = rendered_instructions
299
+ return self._question_instructions_prompt
300
+
301
+ @property
302
+ def prior_question_memory_prompt(self) -> Prompt:
303
+ if not hasattr(self, "_prior_question_memory_prompt"):
304
+ from edsl.prompts.Prompt import Prompt
305
+
306
+ memory_prompt = Prompt(text="")
307
+ if self.memory_plan is not None:
308
+ memory_prompt += self.create_memory_prompt(
309
+ self.question.question_name
310
+ ).render(self.scenario)
311
+ self._prior_question_memory_prompt = memory_prompt
312
+ return self._prior_question_memory_prompt
313
+
314
+ def construct_system_prompt(self) -> Prompt:
315
+ """Construct the system prompt for the LLM call."""
316
+ import warnings
317
+
318
+ warnings.warn(
319
+ "This method is deprecated. Use get_prompts instead.", DeprecationWarning
320
+ )
321
+ return self.get_prompts()["system_prompt"]
94
322
 
95
323
  def construct_user_prompt(self) -> Prompt:
96
324
  """Construct the user prompt for the LLM call."""
97
- user_prompt = self._get_question_instructions()
98
- if self.memory_plan is not None:
99
- user_prompt += self.create_memory_prompt(
100
- self.question.question_name
101
- ).render(self.scenario)
102
- return user_prompt
325
+ import warnings
326
+
327
+ warnings.warn(
328
+ "This method is deprecated. Use get_prompts instead.", DeprecationWarning
329
+ )
330
+ return self.get_prompts()["user_prompt"]
103
331
 
104
332
  def get_prompts(self) -> Dict[str, Prompt]:
105
- """Get both prompts for the LLM call."""
106
- system_prompt = self.construct_system_prompt()
107
- user_prompt = self.construct_user_prompt()
108
- prompts = {
109
- "user_prompt": user_prompt,
110
- "system_prompt": system_prompt,
111
- }
333
+ """Get both prompts for the LLM call.
334
+
335
+ >>> from edsl import QuestionFreeText
336
+ >>> from edsl.agents.InvigilatorBase import InvigilatorBase
337
+ >>> q = QuestionFreeText(question_text="How are you today?", question_name="q0")
338
+ >>> i = InvigilatorBase.example(question = q)
339
+ >>> i.get_prompts()
340
+ {'user_prompt': ..., 'system_prompt': ...}
341
+ >>> scenario = i._get_scenario_with_image()
342
+ >>> scenario.has_image
343
+ True
344
+ >>> q = QuestionFreeText(question_text="How are you today?", question_name="q0")
345
+ >>> i = InvigilatorBase.example(question = q, scenario = scenario)
346
+ >>> i.get_prompts()
347
+ {'user_prompt': ..., 'system_prompt': ..., 'encoded_image': ...'}
348
+ """
349
+ prompts = self.prompt_plan.get_prompts(
350
+ agent_instructions=self.agent_instructions_prompt,
351
+ agent_persona=self.agent_persona_prompt,
352
+ question_instructions=self.question_instructions_prompt,
353
+ prior_question_memory=self.prior_question_memory_prompt,
354
+ )
355
+
112
356
  if hasattr(self.scenario, "has_image") and self.scenario.has_image:
113
357
  prompts["encoded_image"] = self.scenario["encoded_image"]
114
358
  return prompts
115
359
 
360
+ def _get_scenario_with_image(self) -> Dict[str, Any]:
361
+ """This is a helper function to get a scenario with an image, for testing purposes."""
362
+ from edsl import Scenario
363
+
364
+ try:
365
+ scenario = Scenario.from_image("../../static/logo.png")
366
+ except FileNotFoundError:
367
+ scenario = Scenario.from_image("static/logo.png")
368
+ return scenario
369
+
116
370
 
117
371
  if __name__ == "__main__":
118
- from edsl import Model
119
- from edsl import Agent
120
-
121
- a = Agent(
122
- instruction="You are a happy-go lucky agent.",
123
- traits={"feeling": "happy", "age": "Young at heart"},
124
- codebook={"feeling": "Feelings right now", "age": "Age in years"},
125
- trait_presentation_template="",
126
- )
127
- p = PromptConstructorMixin()
128
- p.model = Model(Model.available()[0])
129
- p.agent = a
130
- instructions = p._get_agent_instructions_prompt()
131
- repr(instructions)
132
-
133
- persona = p._get_persona_prompt()
134
- repr(persona)
372
+ import doctest
373
+
374
+ doctest.testmod(optionflags=doctest.ELLIPSIS)