edsl 0.1.38.dev1__py3-none-any.whl → 0.1.38.dev3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
Files changed (263)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +858 -855
  7. edsl/agents/AgentList.py +362 -350
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -284
  10. edsl/agents/PromptConstructor.py +353 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -289
  26. edsl/config.py +149 -149
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +961 -958
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +530 -527
  37. edsl/data/CacheEntry.py +228 -228
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +97 -97
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +173 -173
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -38
  48. edsl/exceptions/cache.py +5 -0
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +156 -156
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/TestService.py +89 -89
  72. edsl/inference_services/TogetherAIService.py +170 -170
  73. edsl/inference_services/models_available_cache.py +118 -118
  74. edsl/inference_services/rate_limits_cache.py +25 -25
  75. edsl/inference_services/registry.py +39 -39
  76. edsl/inference_services/write_available.py +10 -10
  77. edsl/jobs/Answers.py +56 -56
  78. edsl/jobs/Jobs.py +1358 -1347
  79. edsl/jobs/__init__.py +1 -1
  80. edsl/jobs/buckets/BucketCollection.py +63 -63
  81. edsl/jobs/buckets/ModelBuckets.py +65 -65
  82. edsl/jobs/buckets/TokenBucket.py +251 -248
  83. edsl/jobs/interviews/Interview.py +661 -661
  84. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  85. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  86. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  87. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  88. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  89. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  90. edsl/jobs/interviews/ReportErrors.py +66 -66
  91. edsl/jobs/interviews/interview_status_enum.py +9 -9
  92. edsl/jobs/runners/JobsRunnerAsyncio.py +361 -338
  93. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  94. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  95. edsl/jobs/tasks/TaskCreators.py +64 -64
  96. edsl/jobs/tasks/TaskHistory.py +451 -442
  97. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  98. edsl/jobs/tasks/task_status_enum.py +163 -163
  99. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  100. edsl/jobs/tokens/TokenUsage.py +34 -34
  101. edsl/language_models/KeyLookup.py +30 -30
  102. edsl/language_models/LanguageModel.py +708 -706
  103. edsl/language_models/ModelList.py +109 -102
  104. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  105. edsl/language_models/__init__.py +3 -3
  106. edsl/language_models/fake_openai_call.py +15 -15
  107. edsl/language_models/fake_openai_service.py +61 -61
  108. edsl/language_models/registry.py +137 -137
  109. edsl/language_models/repair.py +156 -156
  110. edsl/language_models/unused/ReplicateBase.py +83 -83
  111. edsl/language_models/utilities.py +64 -64
  112. edsl/notebooks/Notebook.py +258 -259
  113. edsl/notebooks/__init__.py +1 -1
  114. edsl/prompts/Prompt.py +357 -357
  115. edsl/prompts/__init__.py +2 -2
  116. edsl/questions/AnswerValidatorMixin.py +289 -289
  117. edsl/questions/QuestionBase.py +660 -656
  118. edsl/questions/QuestionBaseGenMixin.py +161 -161
  119. edsl/questions/QuestionBasePromptsMixin.py +217 -234
  120. edsl/questions/QuestionBudget.py +227 -227
  121. edsl/questions/QuestionCheckBox.py +359 -359
  122. edsl/questions/QuestionExtract.py +183 -183
  123. edsl/questions/QuestionFreeText.py +114 -114
  124. edsl/questions/QuestionFunctional.py +166 -159
  125. edsl/questions/QuestionList.py +231 -231
  126. edsl/questions/QuestionMultipleChoice.py +286 -286
  127. edsl/questions/QuestionNumerical.py +153 -153
  128. edsl/questions/QuestionRank.py +324 -324
  129. edsl/questions/Quick.py +41 -41
  130. edsl/questions/RegisterQuestionsMeta.py +71 -71
  131. edsl/questions/ResponseValidatorABC.py +174 -174
  132. edsl/questions/SimpleAskMixin.py +73 -73
  133. edsl/questions/__init__.py +26 -26
  134. edsl/questions/compose_questions.py +98 -98
  135. edsl/questions/decorators.py +21 -21
  136. edsl/questions/derived/QuestionLikertFive.py +76 -76
  137. edsl/questions/derived/QuestionLinearScale.py +87 -87
  138. edsl/questions/derived/QuestionTopK.py +93 -91
  139. edsl/questions/derived/QuestionYesNo.py +82 -82
  140. edsl/questions/descriptors.py +413 -413
  141. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  142. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  143. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  144. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  145. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  146. edsl/questions/prompt_templates/question_list.jinja +17 -17
  147. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  148. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  149. edsl/questions/question_registry.py +147 -147
  150. edsl/questions/settings.py +12 -12
  151. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  152. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  153. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  154. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  155. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  157. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  158. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  159. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  160. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  161. edsl/questions/templates/list/question_presentation.jinja +5 -5
  162. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  163. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  164. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  165. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  166. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  167. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  168. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  169. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  170. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  171. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  172. edsl/results/Dataset.py +293 -293
  173. edsl/results/DatasetExportMixin.py +717 -717
  174. edsl/results/DatasetTree.py +145 -145
  175. edsl/results/Result.py +456 -450
  176. edsl/results/Results.py +1071 -1071
  177. edsl/results/ResultsDBMixin.py +238 -238
  178. edsl/results/ResultsExportMixin.py +43 -43
  179. edsl/results/ResultsFetchMixin.py +33 -33
  180. edsl/results/ResultsGGMixin.py +121 -121
  181. edsl/results/ResultsToolsMixin.py +98 -98
  182. edsl/results/Selector.py +135 -135
  183. edsl/results/__init__.py +2 -2
  184. edsl/results/tree_explore.py +115 -115
  185. edsl/scenarios/FileStore.py +458 -458
  186. edsl/scenarios/Scenario.py +544 -546
  187. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  188. edsl/scenarios/ScenarioList.py +1112 -1112
  189. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  190. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  191. edsl/scenarios/__init__.py +4 -4
  192. edsl/shared.py +1 -1
  193. edsl/study/ObjectEntry.py +173 -173
  194. edsl/study/ProofOfWork.py +113 -113
  195. edsl/study/SnapShot.py +80 -80
  196. edsl/study/Study.py +528 -528
  197. edsl/study/__init__.py +4 -4
  198. edsl/surveys/DAG.py +148 -148
  199. edsl/surveys/Memory.py +31 -31
  200. edsl/surveys/MemoryPlan.py +244 -244
  201. edsl/surveys/Rule.py +326 -330
  202. edsl/surveys/RuleCollection.py +387 -387
  203. edsl/surveys/Survey.py +1787 -1795
  204. edsl/surveys/SurveyCSS.py +261 -261
  205. edsl/surveys/SurveyExportMixin.py +259 -259
  206. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  207. edsl/surveys/SurveyQualtricsImport.py +284 -284
  208. edsl/surveys/__init__.py +3 -3
  209. edsl/surveys/base.py +53 -53
  210. edsl/surveys/descriptors.py +56 -56
  211. edsl/surveys/instructions/ChangeInstruction.py +49 -47
  212. edsl/surveys/instructions/Instruction.py +53 -51
  213. edsl/surveys/instructions/InstructionCollection.py +77 -77
  214. edsl/templates/error_reporting/base.html +23 -23
  215. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  216. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  217. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  218. edsl/templates/error_reporting/interview_details.html +115 -115
  219. edsl/templates/error_reporting/interviews.html +9 -9
  220. edsl/templates/error_reporting/overview.html +4 -4
  221. edsl/templates/error_reporting/performance_plot.html +1 -1
  222. edsl/templates/error_reporting/report.css +73 -73
  223. edsl/templates/error_reporting/report.html +117 -117
  224. edsl/templates/error_reporting/report.js +25 -25
  225. edsl/tools/__init__.py +1 -1
  226. edsl/tools/clusters.py +192 -192
  227. edsl/tools/embeddings.py +27 -27
  228. edsl/tools/embeddings_plotting.py +118 -118
  229. edsl/tools/plotting.py +112 -112
  230. edsl/tools/summarize.py +18 -18
  231. edsl/utilities/SystemInfo.py +28 -28
  232. edsl/utilities/__init__.py +22 -22
  233. edsl/utilities/ast_utilities.py +25 -25
  234. edsl/utilities/data/Registry.py +6 -6
  235. edsl/utilities/data/__init__.py +1 -1
  236. edsl/utilities/data/scooter_results.json +1 -1
  237. edsl/utilities/decorators.py +77 -77
  238. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  239. edsl/utilities/interface.py +627 -627
  240. edsl/{conjure → utilities}/naming_utilities.py +263 -263
  241. edsl/utilities/repair_functions.py +28 -28
  242. edsl/utilities/restricted_python.py +70 -70
  243. edsl/utilities/utilities.py +409 -409
  244. {edsl-0.1.38.dev1.dist-info → edsl-0.1.38.dev3.dist-info}/LICENSE +21 -21
  245. {edsl-0.1.38.dev1.dist-info → edsl-0.1.38.dev3.dist-info}/METADATA +1 -1
  246. edsl-0.1.38.dev3.dist-info/RECORD +269 -0
  247. edsl/conjure/AgentConstructionMixin.py +0 -160
  248. edsl/conjure/Conjure.py +0 -62
  249. edsl/conjure/InputData.py +0 -659
  250. edsl/conjure/InputDataCSV.py +0 -48
  251. edsl/conjure/InputDataMixinQuestionStats.py +0 -182
  252. edsl/conjure/InputDataPyRead.py +0 -91
  253. edsl/conjure/InputDataSPSS.py +0 -8
  254. edsl/conjure/InputDataStata.py +0 -8
  255. edsl/conjure/QuestionOptionMixin.py +0 -76
  256. edsl/conjure/QuestionTypeMixin.py +0 -23
  257. edsl/conjure/RawQuestion.py +0 -65
  258. edsl/conjure/SurveyResponses.py +0 -7
  259. edsl/conjure/__init__.py +0 -9
  260. edsl/conjure/examples/placeholder.txt +0 -0
  261. edsl/conjure/utilities.py +0 -201
  262. edsl-0.1.38.dev1.dist-info/RECORD +0 -283
  263. {edsl-0.1.38.dev1.dist-info → edsl-0.1.38.dev3.dist-info}/WHEEL +0 -0
--- a/edsl/language_models/repair.py
+++ b/edsl/language_models/repair.py
@@ -1,156 +1,156 @@
-import json
-import asyncio
-import warnings
-
-
-async def async_repair(
-    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
-):
-    from edsl.utilities.utilities import clean_json
-
-    s = clean_json(bad_json)
-
-    try:
-        # this is the OpenAI version, but that's fine
-        valid_dict = json.loads(s)
-        success = True
-    except json.JSONDecodeError:
-        valid_dict = {}
-        success = False
-        # print("Replacing control characters didn't work. Trying extracting the sub-string.")
-    else:
-        return valid_dict, success
-
-    try:
-        from edsl.utilities.repair_functions import extract_json_from_string
-
-        valid_dict = extract_json_from_string(s)
-        success = True
-    except ValueError:
-        valid_dict = {}
-        success = False
-    else:
-        return valid_dict, success
-
-    from edsl import Model
-
-    m = Model()
-
-    from edsl import QuestionExtract
-
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", UserWarning)
-
-        q = QuestionExtract(
-            question_text="""
-A language model was supposed to respond to a question.
-The response should have been a JSON object with an answer to the question and some commentary.
-
-It should have returned a string like this:
-
-'{'answer': 'The answer to the question.', 'comment': 'Some commentary.'}'
-
-or:
-
-'{'answer': 'The answer to the question.'}'
-
-The answer field is very likely an integer. The comment field is always a string.
-
-Your job is to return just the repaired JSON object that the model should have returned, properly formatted.
-
-- It might have included some preliminary comments.
-- It might have included some control characters.
-- It might have included some extraneous text.
-
-DO NOT include any extraneous text in your response. Just return the repaired JSON object.
-Do not preface the JSON object with any text. Just return the JSON object.
-
-Bad answer: """
-            + str(bad_json)
-            + " The model received a user prompt of: '"
-            + str(user_prompt)
-            + """'
-The model received a system prompt of: ' """
-            + str(system_prompt)
-            + """
-'
-Please return the repaired JSON object, following the instructions the original model should have followed, though
-using 'new_answer' and 'new_comment' as the keys.""",
-            answer_template={
-                "new_answer": "<number, string, list, etc.>",
-                "new_comment": "Model's comments",
-            },
-            question_name="model_repair",
-        )
-
-    results = await q.run_async(cache=cache)
-
-    try:
-        # this is the OpenAI version, but that's fine
-        valid_dict = json.loads(json.dumps(results))
-        success = True
-        # this is to deal with the fact that the model returns the answer and comment as new_answer and new_comment
-        valid_dict["answer"] = valid_dict.pop("new_answer")
-        valid_dict["comment"] = valid_dict.pop("new_comment")
-    except json.JSONDecodeError:
-        valid_dict = {}
-        success = False
-        from rich import print
-        from rich.console import Console
-        from rich.syntax import Syntax
-
-        console = Console()
-        error_message = (
-            f"All repairs failed. LLM Model given [red]{str(bad_json)}[/red]"
-        )
-        console.print(" " + error_message)
-        model_returned = results["choices"][0]["message"]["content"]
-        console.print(f"LLM Model returned: [blue]{model_returned}[/blue]")
-
-    return valid_dict, success
-
-
-def repair_wrapper(
-    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
-):
-    try:
-        loop = asyncio.get_event_loop()
-        if loop.is_running():
-            # Add repair as a task to the running loop
-            task = loop.create_task(
-                async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
-            )
-            return task
-        else:
-            # Run a new event loop for repair
-            return loop.run_until_complete(
-                async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
-            )
-    except RuntimeError:
-        # Create a new event loop if one is not already available
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-        return loop.run_until_complete(
-            async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
-        )
-
-
-def repair(
-    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
-):
-    return repair_wrapper(bad_json, error_message, user_prompt, system_prompt, cache)
-
-
-if __name__ == "__main__":
-    bad_json = """
-    {
-    'answer': "The problematic phrase in the excerpt is \'typically\'. This word is vague and can lead to different interpretations. An alternative phrasing that would be less problematic is:
-    'On average, how long do you cook scrambled eggs?}
-    """
-    try:
-        json.loads(bad_json)
-        print("Loaded")
-    except json.JSONDecodeError as e:
-        error_message = str(e)
-        repaired, success = repair(bad_json, error_message)
-        print(f"Repaired: {repaired}")
+import json
+import asyncio
+import warnings
+
+
+async def async_repair(
+    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
+):
+    from edsl.utilities.utilities import clean_json
+
+    s = clean_json(bad_json)
+
+    try:
+        # this is the OpenAI version, but that's fine
+        valid_dict = json.loads(s)
+        success = True
+    except json.JSONDecodeError:
+        valid_dict = {}
+        success = False
+        # print("Replacing control characters didn't work. Trying extracting the sub-string.")
+    else:
+        return valid_dict, success
+
+    try:
+        from edsl.utilities.repair_functions import extract_json_from_string
+
+        valid_dict = extract_json_from_string(s)
+        success = True
+    except ValueError:
+        valid_dict = {}
+        success = False
+    else:
+        return valid_dict, success
+
+    from edsl import Model
+
+    m = Model()
+
+    from edsl import QuestionExtract
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", UserWarning)
+
+        q = QuestionExtract(
+            question_text="""
+A language model was supposed to respond to a question.
+The response should have been a JSON object with an answer to the question and some commentary.
+
+It should have returned a string like this:
+
+'{'answer': 'The answer to the question.', 'comment': 'Some commentary.'}'
+
+or:
+
+'{'answer': 'The answer to the question.'}'
+
+The answer field is very likely an integer. The comment field is always a string.
+
+Your job is to return just the repaired JSON object that the model should have returned, properly formatted.
+
+- It might have included some preliminary comments.
+- It might have included some control characters.
+- It might have included some extraneous text.
+
+DO NOT include any extraneous text in your response. Just return the repaired JSON object.
+Do not preface the JSON object with any text. Just return the JSON object.
+
+Bad answer: """
+            + str(bad_json)
+            + " The model received a user prompt of: '"
+            + str(user_prompt)
+            + """'
+The model received a system prompt of: ' """
+            + str(system_prompt)
+            + """
+'
+Please return the repaired JSON object, following the instructions the original model should have followed, though
+using 'new_answer' and 'new_comment' as the keys.""",
+            answer_template={
+                "new_answer": "<number, string, list, etc.>",
+                "new_comment": "Model's comments",
+            },
+            question_name="model_repair",
+        )
+
+    results = await q.run_async(cache=cache)
+
+    try:
+        # this is the OpenAI version, but that's fine
+        valid_dict = json.loads(json.dumps(results))
+        success = True
+        # this is to deal with the fact that the model returns the answer and comment as new_answer and new_comment
+        valid_dict["answer"] = valid_dict.pop("new_answer")
+        valid_dict["comment"] = valid_dict.pop("new_comment")
+    except json.JSONDecodeError:
+        valid_dict = {}
+        success = False
+        from rich import print
+        from rich.console import Console
+        from rich.syntax import Syntax
+
+        console = Console()
+        error_message = (
+            f"All repairs failed. LLM Model given [red]{str(bad_json)}[/red]"
+        )
+        console.print(" " + error_message)
+        model_returned = results["choices"][0]["message"]["content"]
+        console.print(f"LLM Model returned: [blue]{model_returned}[/blue]")
+
+    return valid_dict, success
+
+
+def repair_wrapper(
+    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
+):
+    try:
+        loop = asyncio.get_event_loop()
+        if loop.is_running():
+            # Add repair as a task to the running loop
+            task = loop.create_task(
+                async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
+            )
+            return task
+        else:
+            # Run a new event loop for repair
+            return loop.run_until_complete(
+                async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
+            )
+    except RuntimeError:
+        # Create a new event loop if one is not already available
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        return loop.run_until_complete(
+            async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
+        )
+
+
+def repair(
+    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
+):
+    return repair_wrapper(bad_json, error_message, user_prompt, system_prompt, cache)
+
+
+if __name__ == "__main__":
+    bad_json = """
+    {
+    'answer': "The problematic phrase in the excerpt is \'typically\'. This word is vague and can lead to different interpretations. An alternative phrasing that would be less problematic is:
+    'On average, how long do you cook scrambled eggs?}
+    """
+    try:
+        json.loads(bad_json)
+        print("Loaded")
+    except json.JSONDecodeError as e:
+        error_message = str(e)
+        repaired, success = repair(bad_json, error_message)
+        print(f"Repaired: {repaired}")
--- a/edsl/language_models/unused/ReplicateBase.py
+++ b/edsl/language_models/unused/ReplicateBase.py
@@ -1,83 +1,83 @@
-import asyncio
-import aiohttp
-import json
-from typing import Any
-
-from edsl import CONFIG
-
-from edsl.language_models.LanguageModel import LanguageModel
-
-
-def replicate_model_factory(model_name, base_url, api_token):
-    class ReplicateLanguageModelBase(LanguageModel):
-        _model_ = (
-            model_name  # Example model name, replace with actual model name if needed
-        )
-        _parameters_ = {
-            "temperature": 0.1,
-            "topK": 50,
-            "topP": 0.9,
-            "max_new_tokens": 500,
-            "min_new_tokens": -1,
-            "repetition_penalty": 1.15,
-            # "version": "5fe0a3d7ac2852264a25279d1dfb798acbc4d49711d126646594e212cb821749",
-            "use_cache": True,
-        }
-        _api_token = api_token
-        _base_url = base_url
-
-        async def async_execute_model_call(
-            self, user_prompt: str, system_prompt: str = ""
-        ) -> dict[str, Any]:
-            self.api_token = self._api_token
-            self.headers = {
-                "Authorization": f"Token {self.api_token}",
-                "Content-Type": "application/json",
-            }
-            # combined_prompt = f"{system_prompt} {user_prompt}".strip()
-            # print(f"Prompt: {combined_prompt}")
-            data = {
-                # "version": self._parameters_["version"],
-                "input": {
-                    "debug": False,
-                    "top_k": self._parameters_["topK"],
-                    "top_p": self._parameters_["topP"],
-                    "prompt": user_prompt,
-                    "system_prompt": system_prompt,
-                    "temperature": self._parameters_["temperature"],
-                    "max_new_tokens": self._parameters_["max_new_tokens"],
-                    "min_new_tokens": self._parameters_["min_new_tokens"],
-                    "prompt_template": "{prompt}",
-                    "repetition_penalty": self._parameters_["repetition_penalty"],
-                },
-            }
-
-            async with aiohttp.ClientSession() as session:
-                async with session.post(
-                    self._base_url, headers=self.headers, data=json.dumps(data)
-                ) as response:
-                    raw_response_text = await response.text()
-                    data = json.loads(raw_response_text)
-                    print(f"This was the data returned by the model:{data}")
-                    prediction_url = data["urls"]["get"]
-
-                while True:
-                    async with session.get(
-                        prediction_url, headers=self.headers
-                    ) as get_response:
-                        if get_response.status != 200:
-                            # Handle non-success status codes appropriately
-                            return None
-
-                        get_data = await get_response.text()
-                        get_data = json.loads(get_data)
-                        if get_data["status"] == "succeeded":
-                            return get_data
-                    await asyncio.sleep(1)
-
-        def parse_response(self, raw_response: dict[str, Any]) -> str:
-            data = "".join(raw_response["output"])
-            print(f"This is what the model returned: {data}")
-            return data
-
-    return ReplicateLanguageModelBase
+import asyncio
+import aiohttp
+import json
+from typing import Any
+
+from edsl import CONFIG
+
+from edsl.language_models.LanguageModel import LanguageModel
+
+
+def replicate_model_factory(model_name, base_url, api_token):
+    class ReplicateLanguageModelBase(LanguageModel):
+        _model_ = (
+            model_name  # Example model name, replace with actual model name if needed
+        )
+        _parameters_ = {
+            "temperature": 0.1,
+            "topK": 50,
+            "topP": 0.9,
+            "max_new_tokens": 500,
+            "min_new_tokens": -1,
+            "repetition_penalty": 1.15,
+            # "version": "5fe0a3d7ac2852264a25279d1dfb798acbc4d49711d126646594e212cb821749",
+            "use_cache": True,
+        }
+        _api_token = api_token
+        _base_url = base_url
+
+        async def async_execute_model_call(
+            self, user_prompt: str, system_prompt: str = ""
+        ) -> dict[str, Any]:
+            self.api_token = self._api_token
+            self.headers = {
+                "Authorization": f"Token {self.api_token}",
+                "Content-Type": "application/json",
+            }
+            # combined_prompt = f"{system_prompt} {user_prompt}".strip()
+            # print(f"Prompt: {combined_prompt}")
+            data = {
+                # "version": self._parameters_["version"],
+                "input": {
+                    "debug": False,
+                    "top_k": self._parameters_["topK"],
+                    "top_p": self._parameters_["topP"],
+                    "prompt": user_prompt,
+                    "system_prompt": system_prompt,
+                    "temperature": self._parameters_["temperature"],
+                    "max_new_tokens": self._parameters_["max_new_tokens"],
+                    "min_new_tokens": self._parameters_["min_new_tokens"],
+                    "prompt_template": "{prompt}",
+                    "repetition_penalty": self._parameters_["repetition_penalty"],
+                },
+            }
+
+            async with aiohttp.ClientSession() as session:
+                async with session.post(
+                    self._base_url, headers=self.headers, data=json.dumps(data)
+                ) as response:
+                    raw_response_text = await response.text()
+                    data = json.loads(raw_response_text)
+                    print(f"This was the data returned by the model:{data}")
+                    prediction_url = data["urls"]["get"]
+
+                while True:
+                    async with session.get(
+                        prediction_url, headers=self.headers
+                    ) as get_response:
+                        if get_response.status != 200:
+                            # Handle non-success status codes appropriately
+                            return None
+
+                        get_data = await get_response.text()
+                        get_data = json.loads(get_data)
+                        if get_data["status"] == "succeeded":
+                            return get_data
+                    await asyncio.sleep(1)
+
+        def parse_response(self, raw_response: dict[str, Any]) -> str:
+            data = "".join(raw_response["output"])
+            print(f"This is what the model returned: {data}")
+            return data
+
+    return ReplicateLanguageModelBase
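The hunk above is edsl/language_models/unused/ReplicateBase.py. The factory returns a LanguageModel subclass bound to a Replicate-style prediction endpoint: it POSTs the prompt, then polls the returned prediction URL until the status is "succeeded". A sketch of how the factory might be driven, with placeholder model name, URL, and token; the module sits under unused/, so this is an illustration rather than a supported call path, and it assumes the generated class needs no constructor arguments:

import asyncio

from edsl.language_models.unused.ReplicateBase import replicate_model_factory

# All three arguments are placeholders, not working credentials.
ReplicateModel = replicate_model_factory(
    model_name="meta/llama-2-13b-chat",  # hypothetical model name
    base_url="https://api.replicate.com/v1/predictions",
    api_token="r8_...",  # hypothetical token
)

model = ReplicateModel()  # assumption: no required constructor args
# Polls until the prediction succeeds; parse_response() joins the output chunks.
raw = asyncio.run(model.async_execute_model_call("Hello", system_prompt=""))
print(model.parse_response(raw))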
--- a/edsl/language_models/utilities.py
+++ b/edsl/language_models/utilities.py
@@ -1,64 +1,64 @@
-import asyncio
-from typing import Any, Optional, List
-from edsl import Survey
-from edsl.config import CONFIG
-from edsl.enums import InferenceServiceType
-from edsl.language_models.LanguageModel import LanguageModel
-from edsl.questions import QuestionFreeText
-
-
-def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
-    survey = Survey()
-    for i in range(num_questions):
-        if take_scenario:
-            q = QuestionFreeText(
-                question_text=f"XX{i}XX and {{scenario_value }}",
-                question_name=f"question_{i}",
-            )
-        else:
-            q = QuestionFreeText(
-                question_text=f"XX{i}XX", question_name=f"question_{i}"
-            )
-        survey.add_question(q)
-        if i > 0 and chained:
-            survey.add_targeted_memory(f"question_{i}", f"question_{i-1}")
-    return survey
-
-
-def create_language_model(
-    exception: Exception, fail_at_number: int, never_ending=False
-):
-    class LanguageModelFromUtilities(LanguageModel):
-        _model_ = "test"
-        _parameters_ = {"temperature": 0.5}
-        _inference_service_ = InferenceServiceType.TEST.value
-        key_sequence = ["message", 0, "text"]
-        usage_sequence = ["usage"]
-        input_token_name = "prompt_tokens"
-        output_token_name = "completion_tokens"
-        _rpm = 1000000000000
-        _tpm = 1000000000000
-
-        async def async_execute_model_call(
-            self,
-            user_prompt: str,
-            system_prompt: str,
-            files_list: Optional[List[Any]] = None,
-        ) -> dict[str, Any]:
-            question_number = int(
-                user_prompt.split("XX")[1]
-            )  ## grabs the question number from the prompt
-            await asyncio.sleep(0.1)
-            if never_ending:  ## you're not going anywhere buddy
-                await asyncio.sleep(float("inf"))
-            if question_number == fail_at_number:
-                if asyncio.iscoroutinefunction(exception):
-                    await exception()
-                else:
-                    raise exception
-            return {
-                "message": [{"text": "SPAM!"}],
-                "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-            }
-
-    return LanguageModelFromUtilities
+import asyncio
+from typing import Any, Optional, List
+from edsl import Survey
+from edsl.config import CONFIG
+from edsl.enums import InferenceServiceType
+from edsl.language_models.LanguageModel import LanguageModel
+from edsl.questions import QuestionFreeText
+
+
+def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
+    survey = Survey()
+    for i in range(num_questions):
+        if take_scenario:
+            q = QuestionFreeText(
+                question_text=f"XX{i}XX and {{scenario_value }}",
+                question_name=f"question_{i}",
+            )
+        else:
+            q = QuestionFreeText(
+                question_text=f"XX{i}XX", question_name=f"question_{i}"
+            )
+        survey.add_question(q)
+        if i > 0 and chained:
+            survey.add_targeted_memory(f"question_{i}", f"question_{i-1}")
+    return survey
+
+
+def create_language_model(
+    exception: Exception, fail_at_number: int, never_ending=False
+):
+    class LanguageModelFromUtilities(LanguageModel):
+        _model_ = "test"
+        _parameters_ = {"temperature": 0.5}
+        _inference_service_ = InferenceServiceType.TEST.value
+        key_sequence = ["message", 0, "text"]
+        usage_sequence = ["usage"]
+        input_token_name = "prompt_tokens"
+        output_token_name = "completion_tokens"
+        _rpm = 1000000000000
+        _tpm = 1000000000000
+
+        async def async_execute_model_call(
+            self,
+            user_prompt: str,
+            system_prompt: str,
+            files_list: Optional[List[Any]] = None,
+        ) -> dict[str, Any]:
+            question_number = int(
+                user_prompt.split("XX")[1]
+            )  ## grabs the question number from the prompt
+            await asyncio.sleep(0.1)
+            if never_ending:  ## you're not going anywhere buddy
+                await asyncio.sleep(float("inf"))
+            if question_number == fail_at_number:
+                if asyncio.iscoroutinefunction(exception):
+                    await exception()
+                else:
+                    raise exception
+            return {
+                "message": [{"text": "SPAM!"}],
+                "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+            }
+
+    return LanguageModelFromUtilities
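The hunk above is edsl/language_models/utilities.py, a pair of test helpers: create_survey() builds a chained free-text survey whose question texts embed their own index (XX0XX, XX1XX, ...), and create_language_model() builds a fake model that parses that index back out of the prompt and raises the given exception when it reaches fail_at_number. A hedged sketch of how a test might combine them; the exact run() options depend on the edsl version:

from edsl.language_models.utilities import create_language_model, create_survey

# Three chained questions; the fake model raises at question index 1.
survey = create_survey(num_questions=3, chained=True)
FailingModel = create_language_model(ValueError("boom"), fail_at_number=1)

# Hypothetical test usage: questions 0 and 2 answer "SPAM!", question 1 fails,
# and the failure should surface in the job's exception report.
results = survey.by(FailingModel()).run(cache=False)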