edsl-0.1.58-py3-none-any.whl → edsl-0.1.60-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (37)
  1. edsl/__version__.py +1 -1
  2. edsl/agents/agent.py +23 -4
  3. edsl/agents/agent_list.py +36 -6
  4. edsl/base/data_transfer_models.py +5 -0
  5. edsl/base/enums.py +7 -2
  6. edsl/coop/coop.py +103 -1
  7. edsl/dataset/dataset.py +74 -0
  8. edsl/dataset/dataset_operations_mixin.py +69 -64
  9. edsl/inference_services/services/__init__.py +3 -1
  10. edsl/inference_services/services/open_ai_service_v2.py +243 -0
  11. edsl/inference_services/services/test_service.py +1 -1
  12. edsl/interviews/exception_tracking.py +66 -20
  13. edsl/invigilators/invigilators.py +5 -1
  14. edsl/invigilators/prompt_constructor.py +299 -136
  15. edsl/jobs/data_structures.py +3 -0
  16. edsl/jobs/html_table_job_logger.py +18 -1
  17. edsl/jobs/jobs_pricing_estimation.py +6 -2
  18. edsl/jobs/jobs_remote_inference_logger.py +2 -0
  19. edsl/jobs/remote_inference.py +34 -7
  20. edsl/key_management/key_lookup_builder.py +25 -3
  21. edsl/language_models/language_model.py +41 -3
  22. edsl/language_models/raw_response_handler.py +126 -7
  23. edsl/prompts/prompt.py +1 -0
  24. edsl/questions/question_list.py +76 -20
  25. edsl/results/result.py +37 -0
  26. edsl/results/results.py +9 -1
  27. edsl/scenarios/file_store.py +8 -12
  28. edsl/scenarios/scenario.py +50 -2
  29. edsl/scenarios/scenario_list.py +34 -12
  30. edsl/surveys/survey.py +4 -0
  31. edsl/tasks/task_history.py +180 -6
  32. edsl/utilities/wikipedia.py +194 -0
  33. {edsl-0.1.58.dist-info → edsl-0.1.60.dist-info}/METADATA +5 -4
  34. {edsl-0.1.58.dist-info → edsl-0.1.60.dist-info}/RECORD +37 -35
  35. {edsl-0.1.58.dist-info → edsl-0.1.60.dist-info}/LICENSE +0 -0
  36. {edsl-0.1.58.dist-info → edsl-0.1.60.dist-info}/WHEEL +0 -0
  37. {edsl-0.1.58.dist-info → edsl-0.1.60.dist-info}/entry_points.txt +0 -0
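To reproduce a comparison like this locally, the sketch below uses only the Python standard library (wheels are zip archives). The wheel filenames and the pip download commands in the comments are illustrative assumptions, not part of this diff.

# Minimal sketch: diff one file between two wheels with the standard library.
# Download the wheels first, e.g. `pip download edsl==0.1.58 --no-deps` and
# `pip download edsl==0.1.60 --no-deps` (paths below are assumed local files).
import difflib
import zipfile

OLD_WHEEL = "edsl-0.1.58-py3-none-any.whl"  # assumed local path
NEW_WHEEL = "edsl-0.1.60-py3-none-any.whl"  # assumed local path

def read_member(wheel_path: str, member: str) -> list[str]:
    """Return the decoded lines of one file inside a wheel archive."""
    with zipfile.ZipFile(wheel_path) as zf:
        with zf.open(member) as fh:
            return fh.read().decode("utf-8", errors="replace").splitlines(keepends=True)

def diff_member(member: str) -> str:
    """Produce a unified diff for one file present in both wheels."""
    old_lines = read_member(OLD_WHEEL, member)
    new_lines = read_member(NEW_WHEEL, member)
    return "".join(
        difflib.unified_diff(
            old_lines,
            new_lines,
            fromfile=f"0.1.58/{member}",
            tofile=f"0.1.60/{member}",
        )
    )

if __name__ == "__main__":
    print(diff_member("edsl/invigilators/prompt_constructor.py"))

Only the diff for edsl/invigilators/prompt_constructor.py (+299 -136) is reproduced below.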
@@ -23,26 +23,27 @@ if TYPE_CHECKING:
 
  logger = logging.getLogger(__name__)
 
+
  class BasePlaceholder:
  """
  Base class for placeholder values used when a question is not yet answered.
-
+
  This class provides a mechanism for handling references to previous question
  answers that don't yet exist or are unavailable. It serves as a marker or
  placeholder in prompts and template processing, ensuring that the system can
  gracefully handle dependencies on missing answers.
-
+
  Attributes:
  value: The default value to use when the placeholder is accessed directly.
  comment: Description of the placeholder's purpose.
  _type: The type of placeholder (e.g., "answer", "comment").
-
+
  Technical Design:
  - Implements __getitem__ to act like an empty collection when indexed
  - Provides clear string representation for debugging and logging
  - Serves as a base for specific placeholder types like PlaceholderAnswer
  - Used during template rendering to handle missing or future answers
-
+
  Implementation Notes:
  - This is important for template-based question logic where not all answers
  may be available at template rendering time
@@ -53,7 +54,7 @@ class BasePlaceholder:
  def __init__(self, placeholder_type: str = "answer"):
  """
  Initialize a new BasePlaceholder.
-
+
  Args:
  placeholder_type: The type of placeholder (e.g., "answer", "comment").
  """
@@ -64,13 +65,13 @@ class BasePlaceholder:
  def __getitem__(self, index: Any) -> str:
  """
  Allow indexing into the placeholder, always returning an empty string.
-
+
  This method makes placeholders act like empty collections when indexed,
  preventing errors when templates try to access specific items.
-
+
  Args:
  index: The index being accessed (ignored).
-
+
  Returns:
  An empty string.
  """
@@ -79,7 +80,7 @@ class BasePlaceholder:
  def __str__(self) -> str:
  """
  Get a string representation of the placeholder for display.
-
+
  Returns:
  A string identifying this as a placeholder of a specific type.
  """
@@ -88,7 +89,7 @@ class BasePlaceholder:
  def __repr__(self) -> str:
  """
  Get a string representation for debugging purposes.
-
+
  Returns:
  Same string as __str__.
  """
@@ -113,66 +114,65 @@ class PlaceholderGeneratedTokens(BasePlaceholder):
  class PromptConstructor:
  """
  Constructs structured prompts for language models based on questions, agents, and context.
-
+
  The PromptConstructor is a critical component in the invigilator architecture that
  assembles the various elements needed to form effective prompts for language models.
  It handles the complex task of combining question content, agent characteristics,
  response requirements, and contextual information into coherent prompts that elicit
  well-structured responses.
-
+
  Prompt Architecture:
- The constructor builds prompts with several distinct components:
-
- 1. Agent Instructions:
- - Core instructions about the agent's role and behavior
- - Example: "You are answering questions as if you were a human. Do not break character."
-
- 2. Persona Prompt:
- - Details about the agent's characteristics and traits
- - Example: "You are an agent with the following persona: {'age': 22, 'hair': 'brown'}"
-
- 3. Question Instructions:
- - The question itself with instructions on how to answer
- - Example: "You are being asked: Do you like school? The options are 0: yes 1: no
- Return a valid JSON with your answer code and explanation."
-
- 4. Memory Prompt:
- - Information about previous questions and answers in the sequence
- - Example: "Before this question, you answered: Question: Do you like school? Answer: Yes"
-
+ The constructor builds prompts with several distinct components:
+
+ 1. Agent Instructions:
+ Core instructions about the agent's role and behavior
+ Example: "You are answering questions as if you were a human. Do not break character."
+
+ 2. Persona Prompt:
+ Details about the agent's characteristics and traits
+ Example: "You are an agent with the following persona: {'age': 22, 'hair': 'brown'}"
+
+ 3. Question Instructions:
+ The question itself with instructions on how to answer
+ Example: "You are being asked: Do you like school? The options are 0: yes 1: no
+ Return a valid JSON with your answer code and explanation."
+
+ 4. Memory Prompt:
+ Information about previous questions and answers in the sequence
+ Example: "Before this question, you answered: Question: Do you like school? Answer: Yes"
+
  Technical Design:
- - Uses a template-based approach for flexibility and consistency
- - Processes question options to present them clearly to the model
- - Handles template variable replacements for scenarios and previous answers
- - Supports both system and user prompts with appropriate content separation
- - Caches computed properties for efficiency
-
+ - Uses a template-based approach for flexibility and consistency
+ - Processes question options to present them clearly to the model
+ - Handles template variable replacements for scenarios and previous answers
+ - Supports both system and user prompts with appropriate content separation
+ - Caches computed properties for efficiency
+
  Implementation Notes:
- - The class performs no direct I/O or model calls
- - It focuses solely on prompt construction, adhering to single responsibility principle
- - Various helper classes handle specialized aspects of prompt construction
- - Extensive use of cached_property for computational efficiency with complex prompts
+ - The class performs no direct I/O or model calls
+ - It focuses solely on prompt construction, adhering to single responsibility principle
+ - Various helper classes handle specialized aspects of prompt construction
+ - Extensive use of cached_property for computational efficiency with complex prompts
  """
+
  @classmethod
  def from_invigilator(
- cls,
- invigilator: "InvigilatorBase",
- prompt_plan: Optional["PromptPlan"] = None
+ cls, invigilator: "InvigilatorBase", prompt_plan: Optional["PromptPlan"] = None
  ) -> "PromptConstructor":
  """
  Create a PromptConstructor from an invigilator instance.
-
+
  This factory method extracts the necessary components from an invigilator
  and creates a PromptConstructor instance. This is the primary way to create
  a PromptConstructor in the context of administering questions.
-
+
  Args:
- invigilator: The invigilator instance containing all necessary components.
- prompt_plan: Optional custom prompt plan. If None, uses the invigilator's plan.
-
+ invigilator: The invigilator instance containing all necessary components
+ prompt_plan: Optional custom prompt plan. If None, uses the invigilator's plan
+
  Returns:
- A new PromptConstructor instance configured with the invigilator's components.
-
+ A new PromptConstructor instance configured with the invigilator's components
+
  Technical Notes:
  - This method simplifies the creation of a PromptConstructor with all necessary context
  - It extracts all required components from the invigilator
@@ -187,7 +187,7 @@ class PromptConstructor:
  model=invigilator.model,
  current_answers=invigilator.current_answers,
  memory_plan=invigilator.memory_plan,
- prompt_plan=prompt_plan or invigilator.prompt_plan
+ prompt_plan=prompt_plan or invigilator.prompt_plan,
  )
 
  def __init__(
@@ -199,26 +199,26 @@ class PromptConstructor:
  model: "LanguageModel",
  current_answers: dict,
  memory_plan: "MemoryPlan",
- prompt_plan: Optional["PromptPlan"] = None
+ prompt_plan: Optional["PromptPlan"] = None,
  ):
  """
  Initialize a new PromptConstructor with all necessary components.
-
+
  This constructor sets up a prompt constructor with references to all the
  components needed to build effective prompts for language models. It establishes
  the context for constructing prompts that are specific to the given question,
  agent, scenario, and other context.
-
+
  Args:
- agent: The agent for which to construct prompts.
- question: The question being asked.
- scenario: The scenario providing context for the question.
- survey: The survey containing the question.
- model: The language model that will process the prompts.
- current_answers: Dictionary of answers to previous questions.
- memory_plan: Plan for managing memory across questions.
- prompt_plan: Configuration for how to structure the prompts.
-
+ agent: The agent for which to construct prompts
+ question: The question being asked
+ scenario: The scenario providing context for the question
+ survey: The survey containing the question
+ model: The language model that will process the prompts
+ current_answers: Dictionary of answers to previous questions
+ memory_plan: Plan for managing memory across questions
+ prompt_plan: Configuration for how to structure the prompts
+
  Technical Notes:
  - All components are stored as instance attributes for use in prompt construction
  - The prompt_plan determines which components are included in the prompts and how
@@ -241,17 +241,28 @@ class PromptConstructor:
  def get_question_options(self, question_data: dict) -> list[str]:
  """
  Get formatted options for a question based on its data.
-
+
  This method delegates to a QuestionOptionProcessor to transform raw question
  option data into a format appropriate for inclusion in prompts. It handles
  various question types and their specific option formatting requirements.
-
+
  Args:
- question_data: Dictionary containing the question data, including options.
-
+ question_data: Dictionary containing the question data, including options
+
  Returns:
- A list of formatted option strings ready for inclusion in prompts.
-
+ list[str]: A list of formatted option strings ready for inclusion in prompts
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> question_data = {"options": ["yes", "no"], "option_codes": [0, 1]}
+ >>> i.prompt_constructor.get_question_options(question_data)
+ ['0: yes', '1: no']
+
+ >>> question_data = {"options": ["strongly agree", "agree", "disagree"]}
+ >>> i.prompt_constructor.get_question_options(question_data)
+ ['0: strongly agree', '1: agree', '2: disagree']
+
  Technical Notes:
  - Delegates the actual option processing to the QuestionOptionProcessor
  - The processor has specialized logic for different question types
@@ -259,18 +270,23 @@ class PromptConstructor:
  - This separation of concerns keeps the PromptConstructor focused on
  overall prompt construction rather than option formatting details
  """
- return (QuestionOptionProcessor
- .from_prompt_constructor(self)
- .get_question_options(question_data)
- )
+ return QuestionOptionProcessor.from_prompt_constructor(
+ self
+ ).get_question_options(question_data)
 
  @cached_property
  def agent_instructions_prompt(self) -> Prompt:
  """
- >>> from .invigilators import InvigilatorBase
- >>> i = InvigilatorBase.example()
- >>> i.prompt_constructor.agent_instructions_prompt
- Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
+ Get the agent's core instruction prompt.
+
+ Returns:
+ Prompt: A prompt containing the agent's core instructions
+
+ Examples:
+ >>> from .invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> i.prompt_constructor.agent_instructions_prompt
+ Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
  """
  from ..agents import Agent
 
@@ -282,10 +298,16 @@ class PromptConstructor:
  @cached_property
  def agent_persona_prompt(self) -> Prompt:
  """
- >>> from edsl.invigilators.invigilators import InvigilatorBase
- >>> i = InvigilatorBase.example()
- >>> i.prompt_constructor.agent_persona_prompt
- Prompt(text=\"""Your traits: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
+ Get the agent's persona characteristics prompt.
+
+ Returns:
+ Prompt: A prompt containing the agent's traits and characteristics
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> i.prompt_constructor.agent_persona_prompt
+ Prompt(text=\"""Your traits: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
  """
  from ..agents import Agent
 
@@ -295,12 +317,17 @@ class PromptConstructor:
  return self.agent.prompt()
 
  def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
- """This is a dictionary of prior answers, if they exist.
-
- >>> from edsl.invigilators.invigilators import InvigilatorBase
- >>> i = InvigilatorBase.example()
- >>> i.prompt_constructor.prior_answers_dict()
- {'q0': ..., 'q1': ...}
+ """
+ Get a dictionary of prior answers if they exist.
+
+ Returns:
+ dict[str, QuestionBase]: A dictionary mapping question names to their answered instances
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> i.prompt_constructor.prior_answers_dict()
+ {'q0': ..., 'q1': ...}
  """
  return self._add_answers(
  self.survey.question_names_to_questions(), self.current_answers
@@ -309,16 +336,23 @@ class PromptConstructor:
  @staticmethod
  def _extract_question_and_entry_type(key_entry) -> tuple[str, str]:
  """
- Extracts the question name and type for the current answer dictionary key entry.
+ Extract the question name and type from a dictionary key entry.
 
- >>> PromptConstructor._extract_question_and_entry_type("q0")
- ('q0', 'answer')
- >>> PromptConstructor._extract_question_and_entry_type("q0_comment")
- ('q0', 'comment')
- >>> PromptConstructor._extract_question_and_entry_type("q0_alternate_generated_tokens")
- ('q0_alternate', 'generated_tokens')
- >>> PromptConstructor._extract_question_and_entry_type("q0_alt_comment")
- ('q0_alt', 'comment')
+ Args:
+ key_entry: The key from the answers dictionary to parse
+
+ Returns:
+ tuple[str, str]: A tuple of (question_name, entry_type)
+
+ Examples:
+ >>> PromptConstructor._extract_question_and_entry_type("q0")
+ ('q0', 'answer')
+ >>> PromptConstructor._extract_question_and_entry_type("q0_comment")
+ ('q0', 'comment')
+ >>> PromptConstructor._extract_question_and_entry_type("q0_alternate_generated_tokens")
+ ('q0_alternate', 'generated_tokens')
+ >>> PromptConstructor._extract_question_and_entry_type("q0_alt_comment")
+ ('q0_alt', 'comment')
  """
  split_list = key_entry.rsplit("_", maxsplit=1)
  if len(split_list) == 1:
@@ -339,18 +373,29 @@ class PromptConstructor:
  @staticmethod
  def _augmented_answers_dict(current_answers: dict) -> dict:
  """
- Creates a nested dictionary of the current answers to question dictionaries; those question dictionaries have the answer, comment, and generated_tokens as keys.
+ Create a nested dictionary of current answers organized by question.
 
- >>> PromptConstructor._augmented_answers_dict({"q0": "LOVE IT!", "q0_comment": "I love school!"})
- {'q0': {'answer': 'LOVE IT!', 'comment': 'I love school!'}}
+ Creates a dictionary where each question's answers, comments, and generated tokens
+ are grouped together in a sub-dictionary.
+
+ Args:
+ current_answers: The flat dictionary of current answers
+
+ Returns:
+ dict: A nested dictionary with answers organized by question
+
+ Examples:
+ >>> PromptConstructor._augmented_answers_dict({"q0": "LOVE IT!", "q0_comment": "I love school!"})
+ {'q0': {'answer': 'LOVE IT!', 'comment': 'I love school!'}}
  """
  from collections import defaultdict
 
  d = defaultdict(dict)
  for key, value in current_answers.items():
- question_name, entry_type = (
- PromptConstructor._extract_question_and_entry_type(key)
- )
+ (
+ question_name,
+ entry_type,
+ ) = PromptConstructor._extract_question_and_entry_type(key)
  d[question_name][entry_type] = value
  return dict(d)
 
@@ -359,13 +404,21 @@ class PromptConstructor:
  answer_dict: dict, current_answers: dict
  ) -> dict[str, "QuestionBase"]:
  """
- Adds the current answers to the answer dictionary.
+ Add current answers to the answer dictionary, handling missing answers with placeholders.
+
+ Args:
+ answer_dict: The base dictionary of questions
+ current_answers: The dictionary of current answers to add
 
- >>> from edsl import QuestionFreeText
- >>> d = {"q0": QuestionFreeText(question_text="Do you like school?", question_name = "q0")}
- >>> current_answers = {"q0": "LOVE IT!"}
- >>> PromptConstructor._add_answers(d, current_answers)['q0'].answer
- 'LOVE IT!'
+ Returns:
+ dict[str, QuestionBase]: The updated dictionary with answers added
+
+ Examples:
+ >>> from edsl import QuestionFreeText
+ >>> d = {"q0": QuestionFreeText(question_text="Do you like school?", question_name = "q0")}
+ >>> current_answers = {"q0": "LOVE IT!"}
+ >>> PromptConstructor._add_answers(d, current_answers)['q0'].answer
+ 'LOVE IT!'
  """
  augmented_answers = PromptConstructor._augmented_answers_dict(current_answers)
 
@@ -381,11 +434,17 @@ class PromptConstructor:
 
  @cached_property
  def file_keys_from_question(self) -> list:
- """Extracts the file keys from the question text.
-
- It checks if the variables in the question text are in the scenario file keys.
  """
- return QuestionTemplateReplacementsBuilder.from_prompt_constructor(self).question_file_keys()
+ Extract file keys referenced in the question text.
+
+ Checks if variables in the question text correspond to scenario file keys.
+
+ Returns:
+ list: A list of file keys found in the question text
+ """
+ return QuestionTemplateReplacementsBuilder.from_prompt_constructor(
+ self
+ ).question_file_keys()
 
  @cached_property
  def question_instructions_prompt(self) -> Prompt:
@@ -399,17 +458,52 @@ class PromptConstructor:
  return self.build_question_instructions_prompt()
 
  def build_question_instructions_prompt(self) -> Prompt:
- """Buils the question instructions prompt."""
- from .question_instructions_prompt_builder import QuestionInstructionPromptBuilder
+ """
+ Builds the question instructions prompt by combining question text, options, and formatting.
+
+ This method uses the QuestionInstructionPromptBuilder to construct a complete
+ prompt that includes the question text, available options, and any necessary
+ formatting or additional instructions for the model.
+
+ Returns:
+ Prompt: A Prompt object containing the fully constructed question instructions
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> prompt = i.prompt_constructor.build_question_instructions_prompt()
+ >>> "Do you like school?" in prompt.text
+ True
+ >>> "0: yes" in prompt.text
+ True
+ >>> "1: no" in prompt.text
+ True
+
+ Technical Notes:
+ - Uses QuestionInstructionPromptBuilder for consistent prompt formatting
+ - Captures any variables set during prompt construction
+ - Updates the captured_variables dictionary with any new variables
+ - Returns a complete Prompt object ready for rendering
+ """
+ from .question_instructions_prompt_builder import (
+ QuestionInstructionPromptBuilder,
+ )
+
  qipb = QuestionInstructionPromptBuilder.from_prompt_constructor(self)
  prompt = qipb.build()
  if prompt.captured_variables:
  self.captured_variables.update(prompt.captured_variables)
-
+
  return prompt
-
+
  @cached_property
  def prior_question_memory_prompt(self) -> Prompt:
+ """
+ Get the prompt containing memory of prior questions and answers.
+
+ Returns:
+ Prompt: A prompt containing the relevant prior question memory
+ """
  memory_prompt = Prompt(text="")
  if self.memory_plan is not None:
  memory_prompt += self.create_memory_prompt(
@@ -418,40 +512,83 @@ class PromptConstructor:
  return memory_prompt
 
  def create_memory_prompt(self, question_name: str) -> Prompt:
- """Create a memory for the agent.
+ """
+ Create a memory prompt containing previous question answers for the agent.
 
- The returns a memory prompt for the agent.
+ Args:
+ question_name: The name of the current question
 
- >>> from edsl.invigilators.invigilators import InvigilatorBase
- >>> i = InvigilatorBase.example()
- >>> i.current_answers = {"q0": "Prior answer"}
- >>> i.memory_plan.add_single_memory("q1", "q0")
- >>> p = i.prompt_constructor.create_memory_prompt("q1")
- >>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
- 'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
+ Returns:
+ Prompt: A memory prompt containing relevant prior answers
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> i.current_answers = {"q0": "Prior answer"}
+ >>> i.memory_plan.add_single_memory("q1", "q0")
+ >>> p = i.prompt_constructor.create_memory_prompt("q1")
+ >>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
+ 'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
  """
  return self.memory_plan.get_memory_prompt_fragment(
  question_name, self.current_answers
  )
 
  def get_prompts(self) -> Dict[str, Any]:
- """Get the prompts for the question."""
+ """
+ Get all prompts needed for the question, properly formatted and organized.
+
+ This method assembles all the different components of the prompt system:
+ - Agent instructions
+ - Agent persona
+ - Question instructions
+ - Prior question memory
+ And combines them according to the prompt plan's specifications.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing the formatted prompts and any associated files.
+ The dictionary typically includes:
+ - 'system_prompt': Instructions for the model's behavior
+ - 'user_prompt': The actual question and context
+ - 'files_list': Any relevant files (if file keys are present)
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> prompts = i.prompt_constructor.get_prompts()
+ >>> "Do not break character" in prompts['system_prompt']
+ True
+ >>> "Do you like school?" in prompts['user_prompt']
+ True
+
+ # Test with file keys
+ >>> i.prompt_constructor.file_keys_from_question = ['code.py']
+ >>> i.prompt_constructor.scenario = {'code.py': 'print("Hello")'}
+ >>> prompts = i.prompt_constructor.get_prompts()
+ >>> prompts['files_list']
+ ['print("Hello")']
+
+ Technical Notes:
+ - Builds all prompt components first
+ - Uses the prompt plan to organize components
+ - Handles file attachments if specified in the question
+ - Returns a complete dictionary ready for use with the language model
+ """
  # Build all the components
  agent_instructions = self.agent_instructions_prompt
  agent_persona = self.agent_persona_prompt
  question_instructions = self.question_instructions_prompt
  prior_question_memory = self.prior_question_memory_prompt
-
+
  # Get components dict
  components = {
  "agent_instructions": agent_instructions.text,
  "agent_persona": agent_persona.text,
  "question_instructions": question_instructions.text,
  "prior_question_memory": prior_question_memory.text,
- }
-
+ }
+
  prompts = self.prompt_plan.get_prompts(**components)
-
  # Handle file keys if present
  file_keys = self.file_keys_from_question
  if file_keys:
@@ -459,14 +596,40 @@ class PromptConstructor:
  for key in file_keys:
  files_list.append(self.scenario[key])
  prompts["files_list"] = files_list
-
+
  return prompts
-
+
  def get_captured_variables(self) -> dict:
- """Get the captured variables."""
+ """
+ Get all variables that were captured during prompt construction and rendering.
+
+ This method returns any variables that were set during the template rendering
+ process. These variables can be used for tracking state, storing intermediate
+ values, or capturing information about the prompt construction process.
+
+ Returns:
+ dict: A dictionary containing all captured variables and their values
+
+ Examples:
+ >>> from edsl.invigilators.invigilators import InvigilatorBase
+ >>> i = InvigilatorBase.example()
+ >>> i.prompt_constructor.captured_variables = {'answer_count': 5, 'last_response': 'yes'}
+ >>> vars = i.prompt_constructor.get_captured_variables()
+ >>> vars['answer_count']
+ 5
+ >>> vars['last_response']
+ 'yes'
+
+ Technical Notes:
+ - Variables are captured during template rendering
+ - The dictionary is updated throughout the prompt construction process
+ - Useful for debugging and tracking template variable usage
+ - Can be used to pass information between different parts of the system
+ """
  return self.captured_variables
 
 
- if __name__ == '__main__':
+ if __name__ == "__main__":
  import doctest
- doctest.testmod(optionflags=doctest.ELLIPSIS)
+
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
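For reference, the sketch below exercises the PromptConstructor API documented in the new docstrings. It is a minimal example that mirrors the doctests added in this diff and assumes edsl 0.1.60 is installed; the printed values depend on the built-in example survey.

# Sketch based on the doctests added in this diff.
from edsl.invigilators.invigilators import InvigilatorBase

invigilator = InvigilatorBase.example()        # example invigilator with an example question
constructor = invigilator.prompt_constructor   # the class changed in this file

prompts = constructor.get_prompts()
print(prompts["system_prompt"])                # instructions for the model's behavior
print(prompts["user_prompt"])                  # the actual question and context
print(constructor.get_captured_variables())    # variables captured during template rendering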