edsl 0.1.39.dev2__py3-none-any.whl → 0.1.39.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (334)
  1. edsl/Base.py +332 -385
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -57
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -1079
  7. edsl/agents/AgentList.py +413 -551
  8. edsl/agents/Invigilator.py +233 -285
  9. edsl/agents/InvigilatorBase.py +270 -254
  10. edsl/agents/PromptConstructor.py +354 -252
  11. edsl/agents/__init__.py +3 -2
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +157 -177
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -59
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -1090
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -562
  37. edsl/data/CacheEntry.py +233 -230
  38. edsl/data/CacheHandler.py +149 -170
  39. edsl/data/RemoteCacheSync.py +78 -78
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -5
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -74
  44. edsl/enums.py +175 -195
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -54
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -109
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -29
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -84
  61. edsl/inference_services/AwsBedrock.py +120 -118
  62. edsl/inference_services/AzureAI.py +217 -215
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -139
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -80
  67. edsl/inference_services/InferenceServicesCollection.py +97 -122
  68. edsl/inference_services/MistralAIService.py +123 -120
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -221
  71. edsl/inference_services/PerplexityService.py +163 -160
  72. edsl/inference_services/TestService.py +89 -92
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -41
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -43
  79. edsl/jobs/Jobs.py +898 -757
  80. edsl/jobs/JobsChecks.py +147 -172
  81. edsl/jobs/JobsPrompts.py +268 -270
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -287
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -104
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -283
  87. edsl/jobs/interviews/Interview.py +661 -358
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -421
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -330
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -244
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -449
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -161
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -0
  106. edsl/language_models/LanguageModel.py +668 -571
  107. edsl/language_models/ModelList.py +155 -153
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -2
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -180
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -0
  115. edsl/language_models/utilities.py +64 -65
  116. edsl/notebooks/Notebook.py +258 -263
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -352
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -334
  121. edsl/questions/QuestionBase.py +664 -509
  122. edsl/questions/QuestionBaseGenMixin.py +161 -165
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -221
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -182
  127. edsl/questions/QuestionFreeText.py +114 -113
  128. edsl/questions/QuestionFunctional.py +166 -166
  129. edsl/questions/QuestionList.py +231 -229
  130. edsl/questions/QuestionMultipleChoice.py +286 -330
  131. edsl/questions/QuestionNumerical.py +153 -151
  132. edsl/questions/QuestionRank.py +324 -314
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -200
  136. edsl/questions/SimpleAskMixin.py +73 -74
  137. edsl/questions/__init__.py +26 -27
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -90
  142. edsl/questions/derived/QuestionTopK.py +93 -93
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -427
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -177
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -108
  177. edsl/results/Dataset.py +424 -587
  178. edsl/results/DatasetExportMixin.py +731 -653
  179. edsl/results/DatasetTree.py +275 -295
  180. edsl/results/Result.py +465 -451
  181. edsl/results/Results.py +1165 -1172
  182. edsl/results/ResultsDBMixin.py +238 -0
  183. edsl/results/ResultsExportMixin.py +43 -45
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -145
  188. edsl/results/TableDisplay.py +198 -125
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +77 -77
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -511
  193. edsl/scenarios/Scenario.py +601 -498
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -65
  195. edsl/scenarios/ScenarioJoin.py +127 -131
  196. edsl/scenarios/ScenarioList.py +1287 -1430
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -45
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -239
  199. edsl/scenarios/__init__.py +4 -3
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -521
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -327
  210. edsl/surveys/RuleCollection.py +387 -385
  211. edsl/surveys/Survey.py +1801 -1229
  212. edsl/surveys/SurveyCSS.py +261 -273
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/{SurveyFlowVisualization.py → SurveyFlowVisualizationMixin.py} +179 -181
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -5
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -60
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -48
  220. edsl/surveys/instructions/Instruction.py +65 -56
  221. edsl/surveys/instructions/InstructionCollection.py +77 -82
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -19
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/utilities/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -436
  252. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev3.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev3.dist-info}/METADATA +10 -12
  254. edsl-0.1.39.dev3.dist-info/RECORD +277 -0
  255. edsl/agents/QuestionInstructionPromptBuilder.py +0 -128
  256. edsl/agents/QuestionOptionProcessor.py +0 -172
  257. edsl/agents/QuestionTemplateReplacementsBuilder.py +0 -137
  258. edsl/coop/CoopFunctionsMixin.py +0 -15
  259. edsl/coop/ExpectedParrotKeyHandler.py +0 -125
  260. edsl/exceptions/inference_services.py +0 -5
  261. edsl/inference_services/AvailableModelCacheHandler.py +0 -184
  262. edsl/inference_services/AvailableModelFetcher.py +0 -209
  263. edsl/inference_services/ServiceAvailability.py +0 -135
  264. edsl/inference_services/data_structures.py +0 -62
  265. edsl/jobs/AnswerQuestionFunctionConstructor.py +0 -188
  266. edsl/jobs/FetchInvigilator.py +0 -40
  267. edsl/jobs/InterviewTaskManager.py +0 -98
  268. edsl/jobs/InterviewsConstructor.py +0 -48
  269. edsl/jobs/JobsComponentConstructor.py +0 -189
  270. edsl/jobs/JobsRemoteInferenceLogger.py +0 -239
  271. edsl/jobs/RequestTokenEstimator.py +0 -30
  272. edsl/jobs/buckets/TokenBucketAPI.py +0 -211
  273. edsl/jobs/buckets/TokenBucketClient.py +0 -191
  274. edsl/jobs/decorators.py +0 -35
  275. edsl/jobs/jobs_status_enums.py +0 -9
  276. edsl/jobs/loggers/HTMLTableJobLogger.py +0 -304
  277. edsl/language_models/ComputeCost.py +0 -63
  278. edsl/language_models/PriceManager.py +0 -127
  279. edsl/language_models/RawResponseHandler.py +0 -106
  280. edsl/language_models/ServiceDataSources.py +0 -0
  281. edsl/language_models/key_management/KeyLookup.py +0 -63
  282. edsl/language_models/key_management/KeyLookupBuilder.py +0 -273
  283. edsl/language_models/key_management/KeyLookupCollection.py +0 -38
  284. edsl/language_models/key_management/__init__.py +0 -0
  285. edsl/language_models/key_management/models.py +0 -131
  286. edsl/notebooks/NotebookToLaTeX.py +0 -142
  287. edsl/questions/ExceptionExplainer.py +0 -77
  288. edsl/questions/HTMLQuestion.py +0 -103
  289. edsl/questions/LoopProcessor.py +0 -149
  290. edsl/questions/QuestionMatrix.py +0 -265
  291. edsl/questions/ResponseValidatorFactory.py +0 -28
  292. edsl/questions/templates/matrix/__init__.py +0 -1
  293. edsl/questions/templates/matrix/answering_instructions.jinja +0 -5
  294. edsl/questions/templates/matrix/question_presentation.jinja +0 -20
  295. edsl/results/MarkdownToDocx.py +0 -122
  296. edsl/results/MarkdownToPDF.py +0 -111
  297. edsl/results/TextEditor.py +0 -50
  298. edsl/results/smart_objects.py +0 -96
  299. edsl/results/table_data_class.py +0 -12
  300. edsl/results/table_renderers.py +0 -118
  301. edsl/scenarios/ConstructDownloadLink.py +0 -109
  302. edsl/scenarios/DirectoryScanner.py +0 -96
  303. edsl/scenarios/DocumentChunker.py +0 -102
  304. edsl/scenarios/DocxScenario.py +0 -16
  305. edsl/scenarios/PdfExtractor.py +0 -40
  306. edsl/scenarios/ScenarioSelector.py +0 -156
  307. edsl/scenarios/file_methods.py +0 -85
  308. edsl/scenarios/handlers/__init__.py +0 -13
  309. edsl/scenarios/handlers/csv.py +0 -38
  310. edsl/scenarios/handlers/docx.py +0 -76
  311. edsl/scenarios/handlers/html.py +0 -37
  312. edsl/scenarios/handlers/json.py +0 -111
  313. edsl/scenarios/handlers/latex.py +0 -5
  314. edsl/scenarios/handlers/md.py +0 -51
  315. edsl/scenarios/handlers/pdf.py +0 -68
  316. edsl/scenarios/handlers/png.py +0 -39
  317. edsl/scenarios/handlers/pptx.py +0 -105
  318. edsl/scenarios/handlers/py.py +0 -294
  319. edsl/scenarios/handlers/sql.py +0 -313
  320. edsl/scenarios/handlers/sqlite.py +0 -149
  321. edsl/scenarios/handlers/txt.py +0 -33
  322. edsl/surveys/ConstructDAG.py +0 -92
  323. edsl/surveys/EditSurvey.py +0 -221
  324. edsl/surveys/InstructionHandler.py +0 -100
  325. edsl/surveys/MemoryManagement.py +0 -72
  326. edsl/surveys/RuleManager.py +0 -172
  327. edsl/surveys/Simulator.py +0 -75
  328. edsl/surveys/SurveyToApp.py +0 -141
  329. edsl/utilities/PrettyList.py +0 -56
  330. edsl/utilities/is_notebook.py +0 -18
  331. edsl/utilities/is_valid_variable_name.py +0 -11
  332. edsl/utilities/remove_edsl_version.py +0 -24
  333. edsl-0.1.39.dev2.dist-info/RECORD +0 -352
  334. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev3.dist-info}/WHEEL +0 -0
@@ -1,156 +1,156 @@
1
- import json
2
- import asyncio
3
- import warnings
4
-
5
-
6
- async def async_repair(
7
- bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
8
- ):
9
- from edsl.utilities.utilities import clean_json
10
-
11
- s = clean_json(bad_json)
12
-
13
- try:
14
- # this is the OpenAI version, but that's fine
15
- valid_dict = json.loads(s)
16
- success = True
17
- except json.JSONDecodeError:
18
- valid_dict = {}
19
- success = False
20
- # print("Replacing control characters didn't work. Trying extracting the sub-string.")
21
- else:
22
- return valid_dict, success
23
-
24
- try:
25
- from edsl.utilities.repair_functions import extract_json_from_string
26
-
27
- valid_dict = extract_json_from_string(s)
28
- success = True
29
- except ValueError:
30
- valid_dict = {}
31
- success = False
32
- else:
33
- return valid_dict, success
34
-
35
- from edsl.language_models.registry import Model
36
-
37
- m = Model()
38
-
39
- from edsl.questions.QuestionExtract import QuestionExtract
40
-
41
- with warnings.catch_warnings():
42
- warnings.simplefilter("ignore", UserWarning)
43
-
44
- q = QuestionExtract(
45
- question_text="""
46
- A language model was supposed to respond to a question.
47
- The response should have been JSON object with an answer to a question and some commentary.
48
-
49
- It should have retured a string like this:
50
-
51
- '{'answer': 'The answer to the question.', 'comment': 'Some commentary.'}'
52
-
53
- or:
54
-
55
- '{'answer': 'The answer to the question.'}'
56
-
57
- The answer field is very like an integer number. The comment field is always string.
58
-
59
- You job is to return just the repaired JSON object that the model should have returned, properly formatted.
60
-
61
- - It might have included some preliminary comments.
62
- - It might have included some control characters.
63
- - It might have included some extraneous text.
64
-
65
- DO NOT include any extraneous text in your response. Just return the repaired JSON object.
66
- Do not preface the JSON object with any text. Just return the JSON object.
67
-
68
- Bad answer: """
69
- + str(bad_json)
70
- + "The model received a user prompt of: '"
71
- + str(user_prompt)
72
- + """'
73
- The model received a system prompt of: ' """
74
- + str(system_prompt)
75
- + """
76
- '
77
- Please return the repaired JSON object, following the instructions the original model should have followed, though
78
- using 'new_answer' a nd 'new_comment' as the keys.""",
79
- answer_template={
80
- "new_answer": "<number, string, list, etc.>",
81
- "new_comment": "Model's comments",
82
- },
83
- question_name="model_repair",
84
- )
85
-
86
- results = await q.run_async(cache=cache)
87
-
88
- try:
89
- # this is the OpenAI version, but that's fine
90
- valid_dict = json.loads(json.dumps(results))
91
- success = True
92
- # this is to deal with the fact that the model returns the answer and comment as new_answer and new_comment
93
- valid_dict["answer"] = valid_dict.pop("new_answer")
94
- valid_dict["comment"] = valid_dict.pop("new_comment")
95
- except json.JSONDecodeError:
96
- valid_dict = {}
97
- success = False
98
- from rich import print
99
- from rich.console import Console
100
- from rich.syntax import Syntax
101
-
102
- console = Console()
103
- error_message = (
104
- f"All repairs. failed. LLM Model given [red]{str(bad_json)}[/red]"
105
- )
106
- console.print(" " + error_message)
107
- model_returned = results["choices"][0]["message"]["content"]
108
- console.print(f"LLM Model returned: [blue]{model_returned}[/blue]")
109
-
110
- return valid_dict, success
111
-
112
-
113
- def repair_wrapper(
114
- bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
115
- ):
116
- try:
117
- loop = asyncio.get_event_loop()
118
- if loop.is_running():
119
- # Add repair as a task to the running loop
120
- task = loop.create_task(
121
- async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
122
- )
123
- return task
124
- else:
125
- # Run a new event loop for repair
126
- return loop.run_until_complete(
127
- async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
128
- )
129
- except RuntimeError:
130
- # Create a new event loop if one is not already available
131
- loop = asyncio.new_event_loop()
132
- asyncio.set_event_loop(loop)
133
- return loop.run_until_complete(
134
- async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
135
- )
136
-
137
-
138
- def repair(
139
- bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
140
- ):
141
- return repair_wrapper(bad_json, error_message, user_prompt, system_prompt, cache)
142
-
143
-
144
- if __name__ == "__main__":
145
- bad_json = """
146
- {
147
- 'answer': "The problematic phrase in the excerpt is \'typically\'. This word is vague and can lead to different interpretations. An alternative phrasing that would be less problematic is:
148
- 'On average, how long do you cook scrambled eggs?}
149
- """
150
- try:
151
- json.loads(bad_json)
152
- print("Loaded")
153
- except json.JSONDecodeError as e:
154
- error_message = str(e)
155
- repaired, success = repair(bad_json, error_message)
156
- print(f"Repaired: {repaired}")
1
+ import json
2
+ import asyncio
3
+ import warnings
4
+
5
+
6
+ async def async_repair(
7
+ bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
8
+ ):
9
+ from edsl.utilities.utilities import clean_json
10
+
11
+ s = clean_json(bad_json)
12
+
13
+ try:
14
+ # this is the OpenAI version, but that's fine
15
+ valid_dict = json.loads(s)
16
+ success = True
17
+ except json.JSONDecodeError:
18
+ valid_dict = {}
19
+ success = False
20
+ # print("Replacing control characters didn't work. Trying extracting the sub-string.")
21
+ else:
22
+ return valid_dict, success
23
+
24
+ try:
25
+ from edsl.utilities.repair_functions import extract_json_from_string
26
+
27
+ valid_dict = extract_json_from_string(s)
28
+ success = True
29
+ except ValueError:
30
+ valid_dict = {}
31
+ success = False
32
+ else:
33
+ return valid_dict, success
34
+
35
+ from edsl import Model
36
+
37
+ m = Model()
38
+
39
+ from edsl import QuestionExtract
40
+
41
+ with warnings.catch_warnings():
42
+ warnings.simplefilter("ignore", UserWarning)
43
+
44
+ q = QuestionExtract(
45
+ question_text="""
46
+ A language model was supposed to respond to a question.
47
+ The response should have been JSON object with an answer to a question and some commentary.
48
+
49
+ It should have retured a string like this:
50
+
51
+ '{'answer': 'The answer to the question.', 'comment': 'Some commentary.'}'
52
+
53
+ or:
54
+
55
+ '{'answer': 'The answer to the question.'}'
56
+
57
+ The answer field is very like an integer number. The comment field is always string.
58
+
59
+ You job is to return just the repaired JSON object that the model should have returned, properly formatted.
60
+
61
+ - It might have included some preliminary comments.
62
+ - It might have included some control characters.
63
+ - It might have included some extraneous text.
64
+
65
+ DO NOT include any extraneous text in your response. Just return the repaired JSON object.
66
+ Do not preface the JSON object with any text. Just return the JSON object.
67
+
68
+ Bad answer: """
69
+ + str(bad_json)
70
+ + "The model received a user prompt of: '"
71
+ + str(user_prompt)
72
+ + """'
73
+ The model received a system prompt of: ' """
74
+ + str(system_prompt)
75
+ + """
76
+ '
77
+ Please return the repaired JSON object, following the instructions the original model should have followed, though
78
+ using 'new_answer' a nd 'new_comment' as the keys.""",
79
+ answer_template={
80
+ "new_answer": "<number, string, list, etc.>",
81
+ "new_comment": "Model's comments",
82
+ },
83
+ question_name="model_repair",
84
+ )
85
+
86
+ results = await q.run_async(cache=cache)
87
+
88
+ try:
89
+ # this is the OpenAI version, but that's fine
90
+ valid_dict = json.loads(json.dumps(results))
91
+ success = True
92
+ # this is to deal with the fact that the model returns the answer and comment as new_answer and new_comment
93
+ valid_dict["answer"] = valid_dict.pop("new_answer")
94
+ valid_dict["comment"] = valid_dict.pop("new_comment")
95
+ except json.JSONDecodeError:
96
+ valid_dict = {}
97
+ success = False
98
+ from rich import print
99
+ from rich.console import Console
100
+ from rich.syntax import Syntax
101
+
102
+ console = Console()
103
+ error_message = (
104
+ f"All repairs. failed. LLM Model given [red]{str(bad_json)}[/red]"
105
+ )
106
+ console.print(" " + error_message)
107
+ model_returned = results["choices"][0]["message"]["content"]
108
+ console.print(f"LLM Model returned: [blue]{model_returned}[/blue]")
109
+
110
+ return valid_dict, success
111
+
112
+
113
+ def repair_wrapper(
114
+ bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
115
+ ):
116
+ try:
117
+ loop = asyncio.get_event_loop()
118
+ if loop.is_running():
119
+ # Add repair as a task to the running loop
120
+ task = loop.create_task(
121
+ async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
122
+ )
123
+ return task
124
+ else:
125
+ # Run a new event loop for repair
126
+ return loop.run_until_complete(
127
+ async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
128
+ )
129
+ except RuntimeError:
130
+ # Create a new event loop if one is not already available
131
+ loop = asyncio.new_event_loop()
132
+ asyncio.set_event_loop(loop)
133
+ return loop.run_until_complete(
134
+ async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
135
+ )
136
+
137
+
138
+ def repair(
139
+ bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
140
+ ):
141
+ return repair_wrapper(bad_json, error_message, user_prompt, system_prompt, cache)
142
+
143
+
144
+ if __name__ == "__main__":
145
+ bad_json = """
146
+ {
147
+ 'answer': "The problematic phrase in the excerpt is \'typically\'. This word is vague and can lead to different interpretations. An alternative phrasing that would be less problematic is:
148
+ 'On average, how long do you cook scrambled eggs?}
149
+ """
150
+ try:
151
+ json.loads(bad_json)
152
+ print("Loaded")
153
+ except json.JSONDecodeError as e:
154
+ error_message = str(e)
155
+ repaired, success = repair(bad_json, error_message)
156
+ print(f"Repaired: {repaired}")
@@ -0,0 +1,83 @@
1
+ import asyncio
2
+ import aiohttp
3
+ import json
4
+ from typing import Any
5
+
6
+ from edsl import CONFIG
7
+
8
+ from edsl.language_models.LanguageModel import LanguageModel
9
+
10
+
11
+ def replicate_model_factory(model_name, base_url, api_token):
12
+ class ReplicateLanguageModelBase(LanguageModel):
13
+ _model_ = (
14
+ model_name # Example model name, replace with actual model name if needed
15
+ )
16
+ _parameters_ = {
17
+ "temperature": 0.1,
18
+ "topK": 50,
19
+ "topP": 0.9,
20
+ "max_new_tokens": 500,
21
+ "min_new_tokens": -1,
22
+ "repetition_penalty": 1.15,
23
+ # "version": "5fe0a3d7ac2852264a25279d1dfb798acbc4d49711d126646594e212cb821749",
24
+ "use_cache": True,
25
+ }
26
+ _api_token = api_token
27
+ _base_url = base_url
28
+
29
+ async def async_execute_model_call(
30
+ self, user_prompt: str, system_prompt: str = ""
31
+ ) -> dict[str, Any]:
32
+ self.api_token = self._api_token
33
+ self.headers = {
34
+ "Authorization": f"Token {self.api_token}",
35
+ "Content-Type": "application/json",
36
+ }
37
+ # combined_prompt = f"{system_prompt} {user_prompt}".strip()
38
+ # print(f"Prompt: {combined_prompt}")
39
+ data = {
40
+ # "version": self._parameters_["version"],
41
+ "input": {
42
+ "debug": False,
43
+ "top_k": self._parameters_["topK"],
44
+ "top_p": self._parameters_["topP"],
45
+ "prompt": user_prompt,
46
+ "system_prompt": system_prompt,
47
+ "temperature": self._parameters_["temperature"],
48
+ "max_new_tokens": self._parameters_["max_new_tokens"],
49
+ "min_new_tokens": self._parameters_["min_new_tokens"],
50
+ "prompt_template": "{prompt}",
51
+ "repetition_penalty": self._parameters_["repetition_penalty"],
52
+ },
53
+ }
54
+
55
+ async with aiohttp.ClientSession() as session:
56
+ async with session.post(
57
+ self._base_url, headers=self.headers, data=json.dumps(data)
58
+ ) as response:
59
+ raw_response_text = await response.text()
60
+ data = json.loads(raw_response_text)
61
+ print(f"This was the data returned by the model:{data}")
62
+ prediction_url = data["urls"]["get"]
63
+
64
+ while True:
65
+ async with session.get(
66
+ prediction_url, headers=self.headers
67
+ ) as get_response:
68
+ if get_response.status != 200:
69
+ # Handle non-success status codes appropriately
70
+ return None
71
+
72
+ get_data = await get_response.text()
73
+ get_data = json.loads(get_data)
74
+ if get_data["status"] == "succeeded":
75
+ return get_data
76
+ await asyncio.sleep(1)
77
+
78
+ def parse_response(self, raw_response: dict[str, Any]) -> str:
79
+ data = "".join(raw_response["output"])
80
+ print(f"This is what the model returned: {data}")
81
+ return data
82
+
83
+ return ReplicateLanguageModelBase
@@ -1,65 +1,64 @@
1
- import asyncio
2
- from typing import Any, Optional, List
3
- from edsl.enums import InferenceServiceType
4
-
5
-
6
- def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
7
- from edsl.surveys.Survey import Survey
8
- from edsl.questions.QuestionFreeText import QuestionFreeText
9
-
10
- survey = Survey()
11
- for i in range(num_questions):
12
- if take_scenario:
13
- q = QuestionFreeText(
14
- question_text=f"XX{i}XX and {{scenario_value }}",
15
- question_name=f"question_{i}",
16
- )
17
- else:
18
- q = QuestionFreeText(
19
- question_text=f"XX{i}XX", question_name=f"question_{i}"
20
- )
21
- survey.add_question(q)
22
- if i > 0 and chained:
23
- survey.add_targeted_memory(f"question_{i}", f"question_{i-1}")
24
- return survey
25
-
26
-
27
- def create_language_model(
28
- exception: Exception, fail_at_number: int, never_ending=False
29
- ):
30
- from edsl.language_models.LanguageModel import LanguageModel
31
-
32
- class LanguageModelFromUtilities(LanguageModel):
33
- _model_ = "test"
34
- _parameters_ = {"temperature": 0.5}
35
- _inference_service_ = InferenceServiceType.TEST.value
36
- key_sequence = ["message", 0, "text"]
37
- usage_sequence = ["usage"]
38
- input_token_name = "prompt_tokens"
39
- output_token_name = "completion_tokens"
40
- _rpm = 1000000000000
41
- _tpm = 1000000000000
42
-
43
- async def async_execute_model_call(
44
- self,
45
- user_prompt: str,
46
- system_prompt: str,
47
- files_list: Optional[List[Any]] = None,
48
- ) -> dict[str, Any]:
49
- question_number = int(
50
- user_prompt.split("XX")[1]
51
- ) ## grabs the question number from the prompt
52
- await asyncio.sleep(0.1)
53
- if never_ending: ## you're not going anywhere buddy
54
- await asyncio.sleep(float("inf"))
55
- if question_number == fail_at_number:
56
- if asyncio.iscoroutinefunction(exception):
57
- await exception()
58
- else:
59
- raise exception
60
- return {
61
- "message": [{"text": "SPAM!"}],
62
- "usage": {"prompt_tokens": 1, "completion_tokens": 1},
63
- }
64
-
65
- return LanguageModelFromUtilities
1
+ import asyncio
2
+ from typing import Any, Optional, List
3
+ from edsl import Survey
4
+ from edsl.config import CONFIG
5
+ from edsl.enums import InferenceServiceType
6
+ from edsl.language_models.LanguageModel import LanguageModel
7
+ from edsl.questions import QuestionFreeText
8
+
9
+
10
+ def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
11
+ survey = Survey()
12
+ for i in range(num_questions):
13
+ if take_scenario:
14
+ q = QuestionFreeText(
15
+ question_text=f"XX{i}XX and {{scenario_value }}",
16
+ question_name=f"question_{i}",
17
+ )
18
+ else:
19
+ q = QuestionFreeText(
20
+ question_text=f"XX{i}XX", question_name=f"question_{i}"
21
+ )
22
+ survey.add_question(q)
23
+ if i > 0 and chained:
24
+ survey.add_targeted_memory(f"question_{i}", f"question_{i-1}")
25
+ return survey
26
+
27
+
28
+ def create_language_model(
29
+ exception: Exception, fail_at_number: int, never_ending=False
30
+ ):
31
+ class LanguageModelFromUtilities(LanguageModel):
32
+ _model_ = "test"
33
+ _parameters_ = {"temperature": 0.5}
34
+ _inference_service_ = InferenceServiceType.TEST.value
35
+ key_sequence = ["message", 0, "text"]
36
+ usage_sequence = ["usage"]
37
+ input_token_name = "prompt_tokens"
38
+ output_token_name = "completion_tokens"
39
+ _rpm = 1000000000000
40
+ _tpm = 1000000000000
41
+
42
+ async def async_execute_model_call(
43
+ self,
44
+ user_prompt: str,
45
+ system_prompt: str,
46
+ files_list: Optional[List[Any]] = None,
47
+ ) -> dict[str, Any]:
48
+ question_number = int(
49
+ user_prompt.split("XX")[1]
50
+ ) ## grabs the question number from the prompt
51
+ await asyncio.sleep(0.1)
52
+ if never_ending: ## you're not going anywhere buddy
53
+ await asyncio.sleep(float("inf"))
54
+ if question_number == fail_at_number:
55
+ if asyncio.iscoroutinefunction(exception):
56
+ await exception()
57
+ else:
58
+ raise exception
59
+ return {
60
+ "message": [{"text": "SPAM!"}],
61
+ "usage": {"prompt_tokens": 1, "completion_tokens": 1},
62
+ }
63
+
64
+ return LanguageModelFromUtilities