edsl 0.1.14__py3-none-any.whl → 0.1.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (407)
  1. edsl/Base.py +348 -38
  2. edsl/BaseDiff.py +260 -0
  3. edsl/TemplateLoader.py +24 -0
  4. edsl/__init__.py +46 -10
  5. edsl/__version__.py +1 -0
  6. edsl/agents/Agent.py +842 -144
  7. edsl/agents/AgentList.py +521 -25
  8. edsl/agents/Invigilator.py +250 -374
  9. edsl/agents/InvigilatorBase.py +257 -0
  10. edsl/agents/PromptConstructor.py +272 -0
  11. edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
  12. edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
  13. edsl/agents/descriptors.py +43 -13
  14. edsl/agents/prompt_helpers.py +129 -0
  15. edsl/agents/question_option_processor.py +172 -0
  16. edsl/auto/AutoStudy.py +130 -0
  17. edsl/auto/StageBase.py +243 -0
  18. edsl/auto/StageGenerateSurvey.py +178 -0
  19. edsl/auto/StageLabelQuestions.py +125 -0
  20. edsl/auto/StagePersona.py +61 -0
  21. edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
  22. edsl/auto/StagePersonaDimensionValues.py +74 -0
  23. edsl/auto/StagePersonaDimensions.py +69 -0
  24. edsl/auto/StageQuestions.py +74 -0
  25. edsl/auto/SurveyCreatorPipeline.py +21 -0
  26. edsl/auto/utilities.py +218 -0
  27. edsl/base/Base.py +279 -0
  28. edsl/config.py +121 -104
  29. edsl/conversation/Conversation.py +290 -0
  30. edsl/conversation/car_buying.py +59 -0
  31. edsl/conversation/chips.py +95 -0
  32. edsl/conversation/mug_negotiation.py +81 -0
  33. edsl/conversation/next_speaker_utilities.py +93 -0
  34. edsl/coop/CoopFunctionsMixin.py +15 -0
  35. edsl/coop/ExpectedParrotKeyHandler.py +125 -0
  36. edsl/coop/PriceFetcher.py +54 -0
  37. edsl/coop/__init__.py +1 -0
  38. edsl/coop/coop.py +1029 -134
  39. edsl/coop/utils.py +131 -0
  40. edsl/data/Cache.py +560 -89
  41. edsl/data/CacheEntry.py +230 -0
  42. edsl/data/CacheHandler.py +168 -0
  43. edsl/data/RemoteCacheSync.py +186 -0
  44. edsl/data/SQLiteDict.py +292 -0
  45. edsl/data/__init__.py +5 -3
  46. edsl/data/orm.py +6 -33
  47. edsl/data_transfer_models.py +74 -27
  48. edsl/enums.py +165 -8
  49. edsl/exceptions/BaseException.py +21 -0
  50. edsl/exceptions/__init__.py +52 -46
  51. edsl/exceptions/agents.py +33 -15
  52. edsl/exceptions/cache.py +5 -0
  53. edsl/exceptions/coop.py +8 -0
  54. edsl/exceptions/general.py +34 -0
  55. edsl/exceptions/inference_services.py +5 -0
  56. edsl/exceptions/jobs.py +15 -0
  57. edsl/exceptions/language_models.py +46 -1
  58. edsl/exceptions/questions.py +80 -5
  59. edsl/exceptions/results.py +16 -5
  60. edsl/exceptions/scenarios.py +29 -0
  61. edsl/exceptions/surveys.py +13 -10
  62. edsl/inference_services/AnthropicService.py +106 -0
  63. edsl/inference_services/AvailableModelCacheHandler.py +184 -0
  64. edsl/inference_services/AvailableModelFetcher.py +215 -0
  65. edsl/inference_services/AwsBedrock.py +118 -0
  66. edsl/inference_services/AzureAI.py +215 -0
  67. edsl/inference_services/DeepInfraService.py +18 -0
  68. edsl/inference_services/GoogleService.py +143 -0
  69. edsl/inference_services/GroqService.py +20 -0
  70. edsl/inference_services/InferenceServiceABC.py +80 -0
  71. edsl/inference_services/InferenceServicesCollection.py +138 -0
  72. edsl/inference_services/MistralAIService.py +120 -0
  73. edsl/inference_services/OllamaService.py +18 -0
  74. edsl/inference_services/OpenAIService.py +236 -0
  75. edsl/inference_services/PerplexityService.py +160 -0
  76. edsl/inference_services/ServiceAvailability.py +135 -0
  77. edsl/inference_services/TestService.py +90 -0
  78. edsl/inference_services/TogetherAIService.py +172 -0
  79. edsl/inference_services/data_structures.py +134 -0
  80. edsl/inference_services/models_available_cache.py +118 -0
  81. edsl/inference_services/rate_limits_cache.py +25 -0
  82. edsl/inference_services/registry.py +41 -0
  83. edsl/inference_services/write_available.py +10 -0
  84. edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
  85. edsl/jobs/Answers.py +21 -20
  86. edsl/jobs/FetchInvigilator.py +47 -0
  87. edsl/jobs/InterviewTaskManager.py +98 -0
  88. edsl/jobs/InterviewsConstructor.py +50 -0
  89. edsl/jobs/Jobs.py +684 -204
  90. edsl/jobs/JobsChecks.py +172 -0
  91. edsl/jobs/JobsComponentConstructor.py +189 -0
  92. edsl/jobs/JobsPrompts.py +270 -0
  93. edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
  94. edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
  95. edsl/jobs/RequestTokenEstimator.py +30 -0
  96. edsl/jobs/async_interview_runner.py +138 -0
  97. edsl/jobs/buckets/BucketCollection.py +104 -0
  98. edsl/jobs/buckets/ModelBuckets.py +65 -0
  99. edsl/jobs/buckets/TokenBucket.py +283 -0
  100. edsl/jobs/buckets/TokenBucketAPI.py +211 -0
  101. edsl/jobs/buckets/TokenBucketClient.py +191 -0
  102. edsl/jobs/check_survey_scenario_compatibility.py +85 -0
  103. edsl/jobs/data_structures.py +120 -0
  104. edsl/jobs/decorators.py +35 -0
  105. edsl/jobs/interviews/Interview.py +392 -0
  106. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -0
  107. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -0
  108. edsl/jobs/interviews/InterviewStatistic.py +63 -0
  109. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -0
  110. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -0
  111. edsl/jobs/interviews/InterviewStatusLog.py +92 -0
  112. edsl/jobs/interviews/ReportErrors.py +66 -0
  113. edsl/jobs/interviews/interview_status_enum.py +9 -0
  114. edsl/jobs/jobs_status_enums.py +9 -0
  115. edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
  116. edsl/jobs/results_exceptions_handler.py +98 -0
  117. edsl/jobs/runners/JobsRunnerAsyncio.py +151 -110
  118. edsl/jobs/runners/JobsRunnerStatus.py +298 -0
  119. edsl/jobs/tasks/QuestionTaskCreator.py +244 -0
  120. edsl/jobs/tasks/TaskCreators.py +64 -0
  121. edsl/jobs/tasks/TaskHistory.py +470 -0
  122. edsl/jobs/tasks/TaskStatusLog.py +23 -0
  123. edsl/jobs/tasks/task_status_enum.py +161 -0
  124. edsl/jobs/tokens/InterviewTokenUsage.py +27 -0
  125. edsl/jobs/tokens/TokenUsage.py +34 -0
  126. edsl/language_models/ComputeCost.py +63 -0
  127. edsl/language_models/LanguageModel.py +507 -386
  128. edsl/language_models/ModelList.py +164 -0
  129. edsl/language_models/PriceManager.py +127 -0
  130. edsl/language_models/RawResponseHandler.py +106 -0
  131. edsl/language_models/RegisterLanguageModelsMeta.py +184 -0
  132. edsl/language_models/__init__.py +1 -8
  133. edsl/language_models/fake_openai_call.py +15 -0
  134. edsl/language_models/fake_openai_service.py +61 -0
  135. edsl/language_models/key_management/KeyLookup.py +63 -0
  136. edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
  137. edsl/language_models/key_management/KeyLookupCollection.py +38 -0
  138. edsl/language_models/key_management/__init__.py +0 -0
  139. edsl/language_models/key_management/models.py +131 -0
  140. edsl/language_models/model.py +256 -0
  141. edsl/language_models/repair.py +109 -41
  142. edsl/language_models/utilities.py +65 -0
  143. edsl/notebooks/Notebook.py +263 -0
  144. edsl/notebooks/NotebookToLaTeX.py +142 -0
  145. edsl/notebooks/__init__.py +1 -0
  146. edsl/prompts/Prompt.py +222 -93
  147. edsl/prompts/__init__.py +1 -1
  148. edsl/questions/ExceptionExplainer.py +77 -0
  149. edsl/questions/HTMLQuestion.py +103 -0
  150. edsl/questions/QuestionBase.py +518 -0
  151. edsl/questions/QuestionBasePromptsMixin.py +221 -0
  152. edsl/questions/QuestionBudget.py +164 -67
  153. edsl/questions/QuestionCheckBox.py +281 -62
  154. edsl/questions/QuestionDict.py +343 -0
  155. edsl/questions/QuestionExtract.py +136 -50
  156. edsl/questions/QuestionFreeText.py +79 -55
  157. edsl/questions/QuestionFunctional.py +138 -41
  158. edsl/questions/QuestionList.py +184 -57
  159. edsl/questions/QuestionMatrix.py +265 -0
  160. edsl/questions/QuestionMultipleChoice.py +293 -69
  161. edsl/questions/QuestionNumerical.py +109 -56
  162. edsl/questions/QuestionRank.py +244 -49
  163. edsl/questions/Quick.py +41 -0
  164. edsl/questions/SimpleAskMixin.py +74 -0
  165. edsl/questions/__init__.py +9 -6
  166. edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +153 -38
  167. edsl/questions/compose_questions.py +13 -7
  168. edsl/questions/data_structures.py +20 -0
  169. edsl/questions/decorators.py +21 -0
  170. edsl/questions/derived/QuestionLikertFive.py +28 -26
  171. edsl/questions/derived/QuestionLinearScale.py +41 -28
  172. edsl/questions/derived/QuestionTopK.py +34 -26
  173. edsl/questions/derived/QuestionYesNo.py +40 -27
  174. edsl/questions/descriptors.py +228 -74
  175. edsl/questions/loop_processor.py +149 -0
  176. edsl/questions/prompt_templates/question_budget.jinja +13 -0
  177. edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
  178. edsl/questions/prompt_templates/question_extract.jinja +11 -0
  179. edsl/questions/prompt_templates/question_free_text.jinja +3 -0
  180. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
  181. edsl/questions/prompt_templates/question_list.jinja +17 -0
  182. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
  183. edsl/questions/prompt_templates/question_numerical.jinja +37 -0
  184. edsl/questions/question_base_gen_mixin.py +168 -0
  185. edsl/questions/question_registry.py +130 -46
  186. edsl/questions/register_questions_meta.py +71 -0
  187. edsl/questions/response_validator_abc.py +188 -0
  188. edsl/questions/response_validator_factory.py +34 -0
  189. edsl/questions/settings.py +5 -2
  190. edsl/questions/templates/__init__.py +0 -0
  191. edsl/questions/templates/budget/__init__.py +0 -0
  192. edsl/questions/templates/budget/answering_instructions.jinja +7 -0
  193. edsl/questions/templates/budget/question_presentation.jinja +7 -0
  194. edsl/questions/templates/checkbox/__init__.py +0 -0
  195. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
  196. edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
  197. edsl/questions/templates/dict/__init__.py +0 -0
  198. edsl/questions/templates/dict/answering_instructions.jinja +21 -0
  199. edsl/questions/templates/dict/question_presentation.jinja +1 -0
  200. edsl/questions/templates/extract/__init__.py +0 -0
  201. edsl/questions/templates/extract/answering_instructions.jinja +7 -0
  202. edsl/questions/templates/extract/question_presentation.jinja +1 -0
  203. edsl/questions/templates/free_text/__init__.py +0 -0
  204. edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
  205. edsl/questions/templates/free_text/question_presentation.jinja +1 -0
  206. edsl/questions/templates/likert_five/__init__.py +0 -0
  207. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
  208. edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
  209. edsl/questions/templates/linear_scale/__init__.py +0 -0
  210. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
  211. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
  212. edsl/questions/templates/list/__init__.py +0 -0
  213. edsl/questions/templates/list/answering_instructions.jinja +4 -0
  214. edsl/questions/templates/list/question_presentation.jinja +5 -0
  215. edsl/questions/templates/matrix/__init__.py +1 -0
  216. edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
  217. edsl/questions/templates/matrix/question_presentation.jinja +20 -0
  218. edsl/questions/templates/multiple_choice/__init__.py +0 -0
  219. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
  220. edsl/questions/templates/multiple_choice/html.jinja +0 -0
  221. edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
  222. edsl/questions/templates/numerical/__init__.py +0 -0
  223. edsl/questions/templates/numerical/answering_instructions.jinja +7 -0
  224. edsl/questions/templates/numerical/question_presentation.jinja +7 -0
  225. edsl/questions/templates/rank/__init__.py +0 -0
  226. edsl/questions/templates/rank/answering_instructions.jinja +11 -0
  227. edsl/questions/templates/rank/question_presentation.jinja +15 -0
  228. edsl/questions/templates/top_k/__init__.py +0 -0
  229. edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
  230. edsl/questions/templates/top_k/question_presentation.jinja +22 -0
  231. edsl/questions/templates/yes_no/__init__.py +0 -0
  232. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
  233. edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
  234. edsl/results/CSSParameterizer.py +108 -0
  235. edsl/results/Dataset.py +550 -19
  236. edsl/results/DatasetExportMixin.py +594 -0
  237. edsl/results/DatasetTree.py +295 -0
  238. edsl/results/MarkdownToDocx.py +122 -0
  239. edsl/results/MarkdownToPDF.py +111 -0
  240. edsl/results/Result.py +477 -173
  241. edsl/results/Results.py +987 -269
  242. edsl/results/ResultsExportMixin.py +28 -125
  243. edsl/results/ResultsGGMixin.py +83 -15
  244. edsl/results/TableDisplay.py +125 -0
  245. edsl/results/TextEditor.py +50 -0
  246. edsl/results/__init__.py +1 -1
  247. edsl/results/file_exports.py +252 -0
  248. edsl/results/results_fetch_mixin.py +33 -0
  249. edsl/results/results_selector.py +145 -0
  250. edsl/results/results_tools_mixin.py +98 -0
  251. edsl/results/smart_objects.py +96 -0
  252. edsl/results/table_data_class.py +12 -0
  253. edsl/results/table_display.css +78 -0
  254. edsl/results/table_renderers.py +118 -0
  255. edsl/results/tree_explore.py +115 -0
  256. edsl/scenarios/ConstructDownloadLink.py +109 -0
  257. edsl/scenarios/DocumentChunker.py +102 -0
  258. edsl/scenarios/DocxScenario.py +16 -0
  259. edsl/scenarios/FileStore.py +543 -0
  260. edsl/scenarios/PdfExtractor.py +40 -0
  261. edsl/scenarios/Scenario.py +431 -62
  262. edsl/scenarios/ScenarioHtmlMixin.py +65 -0
  263. edsl/scenarios/ScenarioList.py +1415 -45
  264. edsl/scenarios/ScenarioListExportMixin.py +45 -0
  265. edsl/scenarios/ScenarioListPdfMixin.py +239 -0
  266. edsl/scenarios/__init__.py +2 -0
  267. edsl/scenarios/directory_scanner.py +96 -0
  268. edsl/scenarios/file_methods.py +85 -0
  269. edsl/scenarios/handlers/__init__.py +13 -0
  270. edsl/scenarios/handlers/csv.py +49 -0
  271. edsl/scenarios/handlers/docx.py +76 -0
  272. edsl/scenarios/handlers/html.py +37 -0
  273. edsl/scenarios/handlers/json.py +111 -0
  274. edsl/scenarios/handlers/latex.py +5 -0
  275. edsl/scenarios/handlers/md.py +51 -0
  276. edsl/scenarios/handlers/pdf.py +68 -0
  277. edsl/scenarios/handlers/png.py +39 -0
  278. edsl/scenarios/handlers/pptx.py +105 -0
  279. edsl/scenarios/handlers/py.py +294 -0
  280. edsl/scenarios/handlers/sql.py +313 -0
  281. edsl/scenarios/handlers/sqlite.py +149 -0
  282. edsl/scenarios/handlers/txt.py +33 -0
  283. edsl/scenarios/scenario_join.py +131 -0
  284. edsl/scenarios/scenario_selector.py +156 -0
  285. edsl/shared.py +1 -0
  286. edsl/study/ObjectEntry.py +173 -0
  287. edsl/study/ProofOfWork.py +113 -0
  288. edsl/study/SnapShot.py +80 -0
  289. edsl/study/Study.py +521 -0
  290. edsl/study/__init__.py +4 -0
  291. edsl/surveys/ConstructDAG.py +92 -0
  292. edsl/surveys/DAG.py +92 -11
  293. edsl/surveys/EditSurvey.py +221 -0
  294. edsl/surveys/InstructionHandler.py +100 -0
  295. edsl/surveys/Memory.py +9 -4
  296. edsl/surveys/MemoryManagement.py +72 -0
  297. edsl/surveys/MemoryPlan.py +156 -35
  298. edsl/surveys/Rule.py +221 -74
  299. edsl/surveys/RuleCollection.py +241 -61
  300. edsl/surveys/RuleManager.py +172 -0
  301. edsl/surveys/Simulator.py +75 -0
  302. edsl/surveys/Survey.py +1079 -339
  303. edsl/surveys/SurveyCSS.py +273 -0
  304. edsl/surveys/SurveyExportMixin.py +235 -40
  305. edsl/surveys/SurveyFlowVisualization.py +181 -0
  306. edsl/surveys/SurveyQualtricsImport.py +284 -0
  307. edsl/surveys/SurveyToApp.py +141 -0
  308. edsl/surveys/__init__.py +4 -2
  309. edsl/surveys/base.py +19 -3
  310. edsl/surveys/descriptors.py +17 -6
  311. edsl/surveys/instructions/ChangeInstruction.py +48 -0
  312. edsl/surveys/instructions/Instruction.py +56 -0
  313. edsl/surveys/instructions/InstructionCollection.py +82 -0
  314. edsl/surveys/instructions/__init__.py +0 -0
  315. edsl/templates/error_reporting/base.html +24 -0
  316. edsl/templates/error_reporting/exceptions_by_model.html +35 -0
  317. edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
  318. edsl/templates/error_reporting/exceptions_by_type.html +17 -0
  319. edsl/templates/error_reporting/interview_details.html +116 -0
  320. edsl/templates/error_reporting/interviews.html +19 -0
  321. edsl/templates/error_reporting/overview.html +5 -0
  322. edsl/templates/error_reporting/performance_plot.html +2 -0
  323. edsl/templates/error_reporting/report.css +74 -0
  324. edsl/templates/error_reporting/report.html +118 -0
  325. edsl/templates/error_reporting/report.js +25 -0
  326. edsl/tools/__init__.py +1 -0
  327. edsl/tools/clusters.py +192 -0
  328. edsl/tools/embeddings.py +27 -0
  329. edsl/tools/embeddings_plotting.py +118 -0
  330. edsl/tools/plotting.py +112 -0
  331. edsl/tools/summarize.py +18 -0
  332. edsl/utilities/PrettyList.py +56 -0
  333. edsl/utilities/SystemInfo.py +5 -0
  334. edsl/utilities/__init__.py +21 -20
  335. edsl/utilities/ast_utilities.py +3 -0
  336. edsl/utilities/data/Registry.py +2 -0
  337. edsl/utilities/decorators.py +41 -0
  338. edsl/utilities/gcp_bucket/__init__.py +0 -0
  339. edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
  340. edsl/utilities/interface.py +310 -60
  341. edsl/utilities/is_notebook.py +18 -0
  342. edsl/utilities/is_valid_variable_name.py +11 -0
  343. edsl/utilities/naming_utilities.py +263 -0
  344. edsl/utilities/remove_edsl_version.py +24 -0
  345. edsl/utilities/repair_functions.py +28 -0
  346. edsl/utilities/restricted_python.py +70 -0
  347. edsl/utilities/utilities.py +203 -13
  348. edsl-0.1.40.dist-info/METADATA +111 -0
  349. edsl-0.1.40.dist-info/RECORD +362 -0
  350. {edsl-0.1.14.dist-info → edsl-0.1.40.dist-info}/WHEEL +1 -1
  351. edsl/agents/AgentListExportMixin.py +0 -24
  352. edsl/coop/old.py +0 -31
  353. edsl/data/Database.py +0 -141
  354. edsl/data/crud.py +0 -121
  355. edsl/jobs/Interview.py +0 -417
  356. edsl/jobs/JobsRunner.py +0 -63
  357. edsl/jobs/JobsRunnerStatusMixin.py +0 -115
  358. edsl/jobs/base.py +0 -47
  359. edsl/jobs/buckets.py +0 -166
  360. edsl/jobs/runners/JobsRunnerDryRun.py +0 -19
  361. edsl/jobs/runners/JobsRunnerStreaming.py +0 -54
  362. edsl/jobs/task_management.py +0 -218
  363. edsl/jobs/token_tracking.py +0 -78
  364. edsl/language_models/DeepInfra.py +0 -69
  365. edsl/language_models/OpenAI.py +0 -98
  366. edsl/language_models/model_interfaces/GeminiPro.py +0 -66
  367. edsl/language_models/model_interfaces/LanguageModelOpenAIFour.py +0 -8
  368. edsl/language_models/model_interfaces/LanguageModelOpenAIThreeFiveTurbo.py +0 -8
  369. edsl/language_models/model_interfaces/LlamaTwo13B.py +0 -21
  370. edsl/language_models/model_interfaces/LlamaTwo70B.py +0 -21
  371. edsl/language_models/model_interfaces/Mixtral8x7B.py +0 -24
  372. edsl/language_models/registry.py +0 -81
  373. edsl/language_models/schemas.py +0 -15
  374. edsl/language_models/unused/ReplicateBase.py +0 -83
  375. edsl/prompts/QuestionInstructionsBase.py +0 -6
  376. edsl/prompts/library/agent_instructions.py +0 -29
  377. edsl/prompts/library/agent_persona.py +0 -17
  378. edsl/prompts/library/question_budget.py +0 -26
  379. edsl/prompts/library/question_checkbox.py +0 -32
  380. edsl/prompts/library/question_extract.py +0 -19
  381. edsl/prompts/library/question_freetext.py +0 -14
  382. edsl/prompts/library/question_linear_scale.py +0 -20
  383. edsl/prompts/library/question_list.py +0 -22
  384. edsl/prompts/library/question_multiple_choice.py +0 -44
  385. edsl/prompts/library/question_numerical.py +0 -31
  386. edsl/prompts/library/question_rank.py +0 -21
  387. edsl/prompts/prompt_config.py +0 -33
  388. edsl/prompts/registry.py +0 -185
  389. edsl/questions/Question.py +0 -240
  390. edsl/report/InputOutputDataTypes.py +0 -134
  391. edsl/report/RegressionMixin.py +0 -28
  392. edsl/report/ReportOutputs.py +0 -1228
  393. edsl/report/ResultsFetchMixin.py +0 -106
  394. edsl/report/ResultsOutputMixin.py +0 -14
  395. edsl/report/demo.ipynb +0 -645
  396. edsl/results/ResultsDBMixin.py +0 -184
  397. edsl/surveys/SurveyFlowVisualizationMixin.py +0 -92
  398. edsl/trackers/Tracker.py +0 -91
  399. edsl/trackers/TrackerAPI.py +0 -196
  400. edsl/trackers/TrackerTasks.py +0 -70
  401. edsl/utilities/pastebin.py +0 -141
  402. edsl-0.1.14.dist-info/METADATA +0 -69
  403. edsl-0.1.14.dist-info/RECORD +0 -141
  404. /edsl/{language_models/model_interfaces → inference_services}/__init__.py +0 -0
  405. /edsl/{report/__init__.py → jobs/runners/JobsRunnerStatusData.py} +0 -0
  406. /edsl/{trackers/__init__.py → language_models/ServiceDataSources.py} +0 -0
  407. {edsl-0.1.14.dist-info → edsl-0.1.40.dist-info}/LICENSE +0 -0
edsl/agents/Invigilator.py (+250 -374)
@@ -1,409 +1,285 @@
-from abc import ABC, abstractmethod
-import asyncio
-import json
-from typing import Coroutine, Dict, Any
-from collections import UserDict
-
-from edsl.exceptions import AgentRespondedWithBadJSONError
-from edsl.prompts.Prompt import Prompt
-from edsl.utilities.decorators import sync_wrapper, jupyter_nb_handler
-from edsl.prompts.registry import get_classes
-from edsl.exceptions import QuestionScenarioRenderError
-
-from edsl.data_transfer_models import AgentResponseDict
-from edsl.exceptions.agents import FailedTaskException
-
-
-class InvigilatorBase(ABC):
-    """An invigiator (someone who administers an exam) is a class that is responsible for administering a question to an agent."""
-
-    def __init__(
-        self, agent, question, scenario, model, memory_plan, current_answers: dict
-    ):
-        self.agent = agent
-        self.question = question
-        self.scenario = scenario
-        self.model = model
-        self.memory_plan = memory_plan
-        self.current_answers = current_answers
-
-    def get_failed_task_result(self):
-        return AgentResponseDict(
-            answer=None,
-            comment="Failed to get response",
-            question_name=self.question.question_name,
-            prompts=self.get_prompts(),
-        )
-
-    def get_prompts(self) -> Dict[str, Prompt]:
-        return {
-            "user_prompt": Prompt("NA").text,
-            "system_prompt": Prompt("NA").text,
-        }
+"""Module for creating Invigilators, which are objects to administer a question to an Agent."""
 
-    @classmethod
-    def example(cls):
-        """Returns an example invigilator."""
-        from edsl.agents.Agent import Agent
-        from edsl.questions import QuestionMultipleChoice
-        from edsl.scenarios.Scenario import Scenario
-        from edsl.language_models import LanguageModel
-
-        from edsl.enums import LanguageModelType, InferenceServiceType
-
-        class TestLanguageModelGood(LanguageModel):
-            _model_ = LanguageModelType.TEST.value
-            _parameters_ = {"temperature": 0.5}
-            _inference_service_ = InferenceServiceType.TEST.value
-
-            async def async_execute_model_call(
-                self, user_prompt: str, system_prompt: str
-            ) -> dict[str, Any]:
-                await asyncio.sleep(0.1)
-                return {"message": """{"answer": "SPAM!"}"""}
-
-            def parse_response(self, raw_response: dict[str, Any]) -> str:
-                return raw_response["message"]
-
-        model = TestLanguageModelGood()
-        agent = Agent.example()
-        question = QuestionMultipleChoice.example()
-        scenario = Scenario.example()
-        # model = LanguageModel.example()
-        memory_plan = None
-        current_answers = None
-        return cls(
-            agent=agent,
-            question=question,
-            scenario=scenario,
-            model=model,
-            memory_plan=memory_plan,
-            current_answers=current_answers,
-        )
+from typing import Dict, Any, Optional, TYPE_CHECKING
 
-    @abstractmethod
-    async def async_answer_question(self):
-        "This is the async method that actually answers the question."
-        pass
+from edsl.utilities.decorators import sync_wrapper
+from edsl.exceptions.questions import QuestionAnswerValidationError
+from edsl.agents.InvigilatorBase import InvigilatorBase
+from edsl.data_transfer_models import AgentResponseDict, EDSLResultObjectInput
 
-    @jupyter_nb_handler
-    def answer_question(self) -> Coroutine:
-        async def main():
-            results = await asyncio.gather(self.async_answer_question())
-            return results[0]  # Since there's only one task, return its result
+if TYPE_CHECKING:
+    from edsl.prompts.Prompt import Prompt
+    from edsl.scenarios.Scenario import Scenario
+    from edsl.surveys.Survey import Survey
 
-        return main()
 
-    def create_memory_prompt(self, question_name):
-        """Creates a memory for the agent."""
-        return self.memory_plan.get_memory_prompt_fragment(
-            question_name, self.current_answers
-        )
+NA = "Not Applicable"
 
 
 class InvigilatorAI(InvigilatorBase):
     """An invigilator that uses an AI model to answer questions."""
 
-    async def async_answer_question(self, failed=False) -> AgentResponseDict:
-        data = {
-            "agent": self.agent,
-            "question": self.question,
-            "scenario": self.scenario,
-        }
-        # This calls the self.async_get_response method w/ the prompts
-        # The raw response is a dictionary.
-        raw_response = await self.async_get_response(**self.get_prompts())
-        assert "raw_model_response" in raw_response
-        response = self._format_raw_response(
-            **(
-                data
-                | {
-                    "raw_response": raw_response,
-                    "raw_model_response": raw_response["raw_model_response"],
-                }
-            )
-        )
-        return response
+    def get_prompts(self) -> Dict[str, "Prompt"]:
+        """Return the prompts used."""
+        return self.prompt_constructor.get_prompts()
 
-    async def async_get_response(self, user_prompt: Prompt, system_prompt: Prompt):
-        """Calls the LLM and gets a response. Used in the `answer_question` method."""
-        try:
-            response = await self.model.async_get_response(
-                user_prompt.text, system_prompt.text
-            )
-        except json.JSONDecodeError as e:
-            raise AgentRespondedWithBadJSONError(
-                f"Returned bad JSON: {e}"
-                f"Prompt: {user_prompt}"
-                f"System Prompt: {system_prompt}"
-            )
-
-        return response
-
-    def _format_raw_response(
-        self, agent, question, scenario, raw_response, raw_model_response
-    ) -> AgentResponseDict:
-        response = question.validate_answer(raw_response)
-        comment = response.get("comment", "")
-        answer_code = response["answer"]
-        answer = question.translate_answer_code_to_answer(answer_code, scenario)
-        raw_model_response = raw_model_response
-        data = {
-            "answer": answer,
-            "comment": comment,
-            "question_name": question.question_name,
-            "prompts": {k: v.to_dict() for k, v in self.get_prompts().items()},
-            "cached_response": raw_response["cached_response"],
-            "usage": raw_response.get("usage", {}),
-            "raw_model_response": raw_model_response,
+    async def async_get_agent_response(self) -> AgentResponseDict:
+        prompts = self.get_prompts()
+        params = {
+            "user_prompt": prompts["user_prompt"].text,
+            "system_prompt": prompts["system_prompt"].text,
         }
-        return AgentResponseDict(**data)
-
-    get_response = sync_wrapper(async_get_response)
-
-    def construct_system_prompt(self) -> Prompt:
-        """Constructs the system prompt for the LLM call."""
-        applicable_prompts = get_classes(
-            component_type="agent_instructions",
-            model=self.model.model,
-        )
-        if len(applicable_prompts) == 0:
-            raise Exception("No applicable prompts found")
-
-        agent_instructions = applicable_prompts[0](text=self.agent.instruction)
-
-        if not hasattr(self.agent, "agent_persona"):
-            applicable_prompts = get_classes(
-                component_type="agent_persona",
-                model=self.model.model,
-            )
-            persona_prompt_template = applicable_prompts[0]()
-        else:
-            persona_prompt_template = self.agent.agent_persona
-
-        if undefined := persona_prompt_template.undefined_template_variables(
-            self.agent.traits
-            | {"traits": self.agent.traits}
-            | {"codebook": self.agent.codebook}
-            | {"traits": self.agent.traits}
-        ):
-            raise QuestionScenarioRenderError(
-                f"Agent persona still has variables that were not rendered: {undefined}"
-            )
-
-        persona_prompt = persona_prompt_template.render(
-            self.agent.traits | {"traits": self.agent.traits},
-            codebook=self.agent.codebook,
-            traits=self.agent.traits,
-        )
-
-        if persona_prompt.has_variables:
-            raise QuestionScenarioRenderError(
-                "Agent persona still has variables that were not rendered."
-            )
-
-        return (
-            agent_instructions
-            + " " * int(len(persona_prompt.text) > 0)
-            + persona_prompt
+        if "encoded_image" in prompts:
+            params["encoded_image"] = prompts["encoded_image"]
+            raise NotImplementedError("encoded_image not implemented")
+
+        if "files_list" in prompts:
+            params["files_list"] = prompts["files_list"]
+
+        params.update({"iteration": self.iteration, "cache": self.cache})
+        params.update({"invigilator": self})
+
+        if self.key_lookup:
+            self.model.set_key_lookup(self.key_lookup)
+
+        return await self.model.async_get_response(**params)
+
+    def store_response(self, agent_response_dict: AgentResponseDict) -> None:
+        """Store the response in the invigilator, in case it is needed later because of validation failure."""
+        self.raw_model_response = agent_response_dict.model_outputs.response
+        self.generated_tokens = agent_response_dict.edsl_dict.generated_tokens
+        self.cache_key = agent_response_dict.model_outputs.cache_key
+
+    async def async_answer_question(self) -> EDSLResultObjectInput:
+        """Answer a question using the AI model.
+
+        >>> i = InvigilatorAI.example()
+        """
+        agent_response_dict: AgentResponseDict = await self.async_get_agent_response()
+        self.store_response(agent_response_dict)
+        return self._extract_edsl_result_entry_and_validate(agent_response_dict)
+
+    def _remove_from_cache(self, cache_key) -> None:
+        """Remove an entry from the cache."""
+        if cache_key:
+            del self.cache.data[cache_key]
+
+    def _determine_answer(self, raw_answer: str) -> Any:
+        """Determine the answer from the raw answer.
+
+        >>> i = InvigilatorAI.example()
+        >>> i._determine_answer("SPAM!")
+        'SPAM!'
+
+        >>> from edsl.questions import QuestionMultipleChoice
+        >>> q = QuestionMultipleChoice(question_text = "How are you?", question_name = "how_are_you", question_options = ["Good", "Bad"], use_code = True)
+        >>> i = InvigilatorAI.example(question = q)
+        >>> i._determine_answer("1")
+        'Bad'
+        >>> i._determine_answer("0")
+        'Good'
+
+        This shows how the answer can depend on scenario details
+
+        >>> from edsl import Scenario
+        >>> s = Scenario({'feeling_options':['Good', 'Bad']})
+        >>> q = QuestionMultipleChoice(question_text = "How are you?", question_name = "how_are_you", question_options = "{{ feeling_options }}", use_code = True)
+        >>> i = InvigilatorAI.example(question = q, scenario = s)
+        >>> i._determine_answer("1")
+        'Bad'
+
+        >>> from edsl import QuestionList, QuestionMultipleChoice, Survey
+        >>> q1 = QuestionList(question_name = "favs", question_text = "What are your top 3 colors?")
+        >>> q2 = QuestionMultipleChoice(question_text = "What is your favorite color?", question_name = "best", question_options = "{{ favs.answer }}", use_code = True)
+        >>> survey = Survey([q1, q2])
+        >>> i = InvigilatorAI.example(question = q2, scenario = s, survey = survey)
+        >>> i.current_answers = {"favs": ["Green", "Blue", "Red"]}
+        >>> i._determine_answer("2")
+        'Red'
+        """
+        substitution_dict = self._prepare_substitution_dict(
+            self.survey, self.current_answers, self.scenario
         )
-
-    def get_question_instructions(self) -> Prompt:
-        """Gets the instructions for the question."""
-        applicable_prompts = get_classes(
-            component_type="question_instructions",
-            question_type=self.question.question_type,
-            model=self.model.model,
+        return self.question._translate_answer_code_to_answer(
+            raw_answer, substitution_dict
         )
-        ## Get the question instructions and renders with the scenario & question.data
-        question_prompt = applicable_prompts[0]()
 
-        undefined_template_variables = question_prompt.undefined_template_variables(
-            self.question.data | self.scenario
-        )
-        if undefined_template_variables:
-            print(undefined_template_variables)
-            raise QuestionScenarioRenderError(
-                "Question instructions still has variables"
-            )
-
-        return question_prompt.render(self.question.data | self.scenario)
-
-    def construct_user_prompt(self) -> Prompt:
-        """Gets the user prompt for the LLM call."""
-        user_prompt = self.get_question_instructions()
-        if self.memory_plan is not None:
-            user_prompt += self.create_memory_prompt(self.question.question_name)
-        return user_prompt
-
-    def get_prompts(self) -> Dict[str, Prompt]:
-        """Gets the prompts for the LLM call."""
-        system_prompt = self.construct_system_prompt()
-        user_prompt = self.construct_user_prompt()
-        return {
-            "user_prompt": user_prompt,
-            "system_prompt": system_prompt,
-        }
+    @staticmethod
+    def _prepare_substitution_dict(
+        survey: "Survey", current_answers: dict, scenario: "Scenario"
+    ) -> Dict[str, Any]:
+        """Prepares a substitution dictionary for the question based on the survey, current answers, and scenario.
+
+        This is necessary beause sometimes the model's answer to a question could depend on details in
+        the prompt that were provided by the answer to a previous question or a scenario detail.
+
+        Note that the question object is getting the answer & a the comment appended to it, as the
+        jinja2 template might be referencing these values with a dot notation.
+
+        """
+        question_dict = survey.duplicate().question_names_to_questions()
+
+        # iterates through the current answers and updates the question_dict (which is all questions)
+        for other_question, answer in current_answers.items():
+            if other_question in question_dict:
+                question_dict[other_question].answer = answer
+            else:
+                # it might be a comment
+                if (
+                    new_question := other_question.split("_comment")[0]
+                ) in question_dict:
+                    question_dict[new_question].comment = answer
+
+        return {**question_dict, **scenario}
+
+    def _extract_edsl_result_entry_and_validate(
+        self, agent_response_dict: AgentResponseDict
+    ) -> EDSLResultObjectInput:
+        """Extract the EDSL result entry and validate it."""
+        edsl_dict = agent_response_dict.edsl_dict._asdict()
+        exception_occurred = None
+        validated = False
+        try:
+            # if the question has jinja parameters, it is easier to make a new question with the parameters
+            if self.question.parameters:
+                prior_answers_dict = self.prompt_constructor.prior_answers_dict()
+
+                # question options have be treated differently because of dynamic question
+                # this logic is all in the prompt constructor
+                if "question_options" in self.question.data:
+                    new_question_options = self.prompt_constructor.get_question_options(
+                        self.question.data
+                    )
+                    if new_question_options != self.question.data["question_options"]:
+                        # I don't love this direct writing but it seems to work
+                        self.question.question_options = new_question_options
+
+                question_with_validators = self.question.render(
+                    self.scenario | prior_answers_dict
+                )
+                question_with_validators.use_code = self.question.use_code
+            else:
+                question_with_validators = self.question
+
+            validated_edsl_dict = question_with_validators._validate_answer(edsl_dict)
+            answer = self._determine_answer(validated_edsl_dict["answer"])
+            comment = validated_edsl_dict.get("comment", "")
+            validated = True
+        except QuestionAnswerValidationError as e:
+            answer = None
+            comment = "The response was not valid."
+            # if self.raise_validation_errors:
+            exception_occurred = e
+        except Exception as non_validation_error:
+            answer = None
+            comment = "Some other error occurred."
+            exception_occurred = non_validation_error
+        finally:
+            # even if validation failes, we still return the result
+            data = {
+                "answer": answer,
+                "comment": comment,
+                "generated_tokens": agent_response_dict.edsl_dict.generated_tokens,
+                "question_name": self.question.question_name,
+                "prompts": self.get_prompts(),
+                "cached_response": agent_response_dict.model_outputs.cached_response,
+                "raw_model_response": agent_response_dict.model_outputs.response,
+                "cache_used": agent_response_dict.model_outputs.cache_used,
+                "cache_key": agent_response_dict.model_outputs.cache_key,
+                "validated": validated,
+                "exception_occurred": exception_occurred,
+                "cost": agent_response_dict.model_outputs.cost,
+            }
+            result = EDSLResultObjectInput(**data)
+            return result
 
     answer_question = sync_wrapper(async_answer_question)
 
 
-class InvigilatorDebug(InvigilatorBase):
-    async def async_answer_question(self) -> AgentResponseDict:
-        results = self.question.simulate_answer(human_readable=True)
-        results["prompts"] = self.get_prompts()
-        results["question_name"] = self.question.question_name
-        results["comment"] = "Debug comment"
-        return AgentResponseDict(**results)
+class InvigilatorHuman(InvigilatorBase):
+    """An invigilator for when a human is answering the question."""
 
-    def get_prompts(self) -> Dict[str, Prompt]:
-        return {
-            "user_prompt": Prompt("NA").text,
-            "system_prompt": Prompt("NA").text,
-        }
+    validate_response: bool = False
+    translate_response: bool = False
 
+    async def async_answer_question(self, iteration: int = 0) -> AgentResponseDict:
+        """Return the answer to the question."""
+        comment = "This is a real survey response from a human."
 
-class InvigilatorHuman(InvigilatorBase):
-    async def async_answer_question(self) -> AgentResponseDict:
-        data = {
-            "comment": "This is a real survey response from a human.",
-            "answer": None,
-            "prompts": self.get_prompts(),
-            "question_name": self.question.question_name,
-        }
+        def __repr__(self):
+            return f"{self.literal}"
+
+        exception_occurred = None
+        validated = False
         try:
             answer = self.agent.answer_question_directly(self.question, self.scenario)
-            return AgentResponseDict(**(data | {"answer": answer}))
+            self.raw_model_response = answer
+
+            if self.validate_response:
+                _ = self.question._validate_answer({"answer": answer})
+            if self.translate_response:
+                answer = self.question._translate_answer_code_to_answer(
+                    answer, self.scenario
+                )
+            validated = True
+        except QuestionAnswerValidationError as e:
+            answer = None
+            if self.raise_validation_errors:
+                exception_occurred = e
         except Exception as e:
-            agent_response_dict = AgentResponseDict(
-                **(data | {"answer": None, "comment": str(e)})
-            )
-            raise FailedTaskException(
-                f"Failed to get response. The exception is {str(e)}",
-                agent_response_dict,
-            ) from e
+            answer = None
+            if self.raise_validation_errors:
+                exception_occurred = e
+        finally:
+            data = {
+                "generated_tokens": NA,  # NotApplicable(),
+                "question_name": self.question.question_name,
+                "prompts": self.get_prompts(),
+                "cached_response": NA,
+                "raw_model_response": NA,
+                "cache_used": NA,
+                "cache_key": NA,
+                "answer": answer,
+                "comment": comment,
+                "validated": validated,
+                "exception_occurred": exception_occurred,
+            }
+            return EDSLResultObjectInput(**data)
 
 
 class InvigilatorFunctional(InvigilatorBase):
-    async def async_answer_question(self) -> AgentResponseDict:
+    """A Invigilator for when the question has a answer_question_directly function."""
+
+    async def async_answer_question(self, iteration: int = 0) -> AgentResponseDict:
+        """Return the answer to the question."""
         func = self.question.answer_question_directly
-        data = {
-            "comment": "Functional.",
-            "prompts": self.get_prompts(),
-            "question_name": self.question.question_name,
-        }
-        try:
-            answer = func(scenario=self.scenario, agent_traits=self.agent.traits)
-            return AgentResponseDict(**(data | {"answer": answer}))
-        except Exception as e:
-            agent_response_dict = AgentResponseDict(
-                **(data | {"answer": None, "comment": str(e)})
-            )
-            raise FailedTaskException(
-                f"Failed to get response. The exception is {str(e)}",
-                agent_response_dict,
-            ) from e
-
-    def get_prompts(self) -> Dict[str, Prompt]:
+        answer = func(scenario=self.scenario, agent_traits=self.agent.traits)
+
+        return EDSLResultObjectInput(
+            generated_tokens=str(answer),
+            question_name=self.question.question_name,
+            prompts=self.get_prompts(),
+            cached_response=NA,
+            raw_model_response=NA,
+            cache_used=NA,
+            cache_key=NA,
+            answer=answer["answer"],
+            comment="This is the result of a functional question.",
+            validated=True,
+            exception_occurred=None,
+        )
+
+    def get_prompts(self) -> Dict[str, "Prompt"]:
+        from edsl.prompts.Prompt import Prompt
+
+        """Return the prompts used."""
         return {
-            "user_prompt": Prompt("NA").text,
-            "system_prompt": Prompt("NA").text,
+            "user_prompt": Prompt("NA"),
+            "system_prompt": Prompt("NA"),
         }
 
 
 if __name__ == "__main__":
-    from edsl.enums import LanguageModelType
-
-    from edsl.agents.Agent import Agent
-
-    a = Agent(
-        instruction="You are a happy-go lucky agent.",
-        traits={"feeling": "happy", "age": "Young at heart"},
-        codebook={"feeling": "Feelings right now", "age": "Age in years"},
-        trait_presentation_template="",
-    )
-
-    class MockModel:
-        model = LanguageModelType.GPT_4.value
-
-    class MockQuestion:
-        question_type = "free_text"
-        question_text = "How are you feeling?"
-        question_name = "feelings_question"
-        data = {
-            "question_name": "feelings",
-            "question_text": "How are you feeling?",
-            "question_type": "feelings_question",
-        }
+    import doctest
 
-    i = InvigilatorAI(
-        agent=a,
-        question=MockQuestion(),
-        scenario={},
-        model=MockModel(),
-        memory_plan=None,
-        current_answers=None,
-    )
-    print(i.get_prompts()["system_prompt"])
-    assert i.get_prompts()["system_prompt"].text == "You are a happy-go lucky agent."
-
-    ###############
-    ## Render one
-    ###############
-
-    a = Agent(
-        instruction="You are a happy-go lucky agent.",
-        traits={"feeling": "happy", "age": "Young at heart"},
-        codebook={"feeling": "Feelings right now", "age": "Age in years"},
-        trait_presentation_template="You are feeling {{ feeling }}.",
-    )
-
-    i = InvigilatorAI(
-        agent=a,
-        question=MockQuestion(),
-        scenario={},
-        model=MockModel(),
-        memory_plan=None,
-        current_answers=None,
-    )
-    print(i.get_prompts()["system_prompt"])
-
-    assert (
-        i.get_prompts()["system_prompt"].text
-        == "You are a happy-go lucky agent. You are feeling happy."
-    )
-    try:
-        assert i.get_prompts()["system_prompt"].unused_traits(a.traits) == ["age"]
-    except AssertionError:
-        unused_traits = i.get_prompts()["system_prompt"].unused_traits(a.traits)
-        print(f"System prompt: {i.get_prompts()['system_prompt']}")
-        print(f"Agent traits: {a.traits}")
-        print(f"Unused_traits: {unused_traits}")
-        # breakpoint()
-
-    ###############
-    ## Render one
-    ###############
-
-    a = Agent(
-        instruction="You are a happy-go lucky agent.",
-        traits={"feeling": "happy", "age": "Young at heart"},
-        codebook={"feeling": "Feelings right now", "age": "Age in years"},
-        trait_presentation_template="You are feeling {{ feeling }}. You eat lots of {{ food }}.",
-    )
-
-    i = InvigilatorAI(
-        agent=a,
-        question=MockQuestion(),
-        scenario={},
-        model=MockModel(),
-        memory_plan=None,
-        current_answers=None,
-    )
-    print(i.get_prompts()["system_prompt"])
-
-    ## Should raise a QuestionScenarioRenderError
-    assert (
-        i.get_prompts()["system_prompt"].text
-        == "You are a happy-go lucky agent. You are feeling happy."
-    )
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
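
For orientation, a minimal usage sketch (not part of the diff) of the reworked InvigilatorAI surface shown above. InvigilatorAI.example(), get_prompts(), and the answer_question sync wrapper all appear in the 0.1.40 code; the attribute names read off the returned EDSLResultObjectInput are assumptions based on the data dictionary built in _extract_edsl_result_entry_and_validate.

# Sketch only, assuming edsl 0.1.40 as diffed above; not taken from the package itself.
from edsl.agents.Invigilator import InvigilatorAI

invigilator = InvigilatorAI.example()    # example() is exercised by the module's doctests
prompts = invigilator.get_prompts()      # now delegated to the PromptConstructor
result = invigilator.answer_question()   # sync_wrapper around async_answer_question
print(result.answer, result.validated)   # assumed fields, mirroring the keys passed to EDSLResultObjectInput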