edsl 0.1.36.dev6__py3-none-any.whl → 0.1.37.dev2__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- edsl/Base.py +303 -303
- edsl/BaseDiff.py +260 -260
- edsl/TemplateLoader.py +24 -24
- edsl/__init__.py +48 -47
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +804 -804
- edsl/agents/AgentList.py +345 -337
- edsl/agents/Invigilator.py +222 -222
- edsl/agents/InvigilatorBase.py +305 -294
- edsl/agents/PromptConstructor.py +312 -312
- edsl/agents/__init__.py +3 -3
- edsl/agents/descriptors.py +86 -86
- edsl/agents/prompt_helpers.py +129 -129
- edsl/auto/AutoStudy.py +117 -117
- edsl/auto/StageBase.py +230 -230
- edsl/auto/StageGenerateSurvey.py +178 -178
- edsl/auto/StageLabelQuestions.py +125 -125
- edsl/auto/StagePersona.py +61 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
- edsl/auto/StagePersonaDimensionValues.py +74 -74
- edsl/auto/StagePersonaDimensions.py +69 -69
- edsl/auto/StageQuestions.py +73 -73
- edsl/auto/SurveyCreatorPipeline.py +21 -21
- edsl/auto/utilities.py +224 -224
- edsl/base/Base.py +289 -289
- edsl/config.py +149 -149
- edsl/conjure/AgentConstructionMixin.py +152 -152
- edsl/conjure/Conjure.py +62 -62
- edsl/conjure/InputData.py +659 -659
- edsl/conjure/InputDataCSV.py +48 -48
- edsl/conjure/InputDataMixinQuestionStats.py +182 -182
- edsl/conjure/InputDataPyRead.py +91 -91
- edsl/conjure/InputDataSPSS.py +8 -8
- edsl/conjure/InputDataStata.py +8 -8
- edsl/conjure/QuestionOptionMixin.py +76 -76
- edsl/conjure/QuestionTypeMixin.py +23 -23
- edsl/conjure/RawQuestion.py +65 -65
- edsl/conjure/SurveyResponses.py +7 -7
- edsl/conjure/__init__.py +9 -9
- edsl/conjure/naming_utilities.py +263 -263
- edsl/conjure/utilities.py +201 -201
- edsl/conversation/Conversation.py +238 -238
- edsl/conversation/car_buying.py +58 -58
- edsl/conversation/mug_negotiation.py +81 -81
- edsl/conversation/next_speaker_utilities.py +93 -93
- edsl/coop/PriceFetcher.py +54 -54
- edsl/coop/__init__.py +2 -2
- edsl/coop/coop.py +824 -849
- edsl/coop/utils.py +131 -131
- edsl/data/Cache.py +527 -527
- edsl/data/CacheEntry.py +228 -228
- edsl/data/CacheHandler.py +149 -149
- edsl/data/RemoteCacheSync.py +97 -84
- edsl/data/SQLiteDict.py +292 -292
- edsl/data/__init__.py +4 -4
- edsl/data/orm.py +10 -10
- edsl/data_transfer_models.py +73 -73
- edsl/enums.py +173 -173
- edsl/exceptions/__init__.py +50 -50
- edsl/exceptions/agents.py +40 -40
- edsl/exceptions/configuration.py +16 -16
- edsl/exceptions/coop.py +10 -10
- edsl/exceptions/data.py +14 -14
- edsl/exceptions/general.py +34 -34
- edsl/exceptions/jobs.py +33 -33
- edsl/exceptions/language_models.py +63 -63
- edsl/exceptions/prompts.py +15 -15
- edsl/exceptions/questions.py +91 -91
- edsl/exceptions/results.py +26 -26
- edsl/exceptions/surveys.py +34 -34
- edsl/inference_services/AnthropicService.py +87 -87
- edsl/inference_services/AwsBedrock.py +115 -115
- edsl/inference_services/AzureAI.py +217 -217
- edsl/inference_services/DeepInfraService.py +18 -18
- edsl/inference_services/GoogleService.py +156 -156
- edsl/inference_services/GroqService.py +20 -20
- edsl/inference_services/InferenceServiceABC.py +147 -147
- edsl/inference_services/InferenceServicesCollection.py +74 -72
- edsl/inference_services/MistralAIService.py +123 -123
- edsl/inference_services/OllamaService.py +18 -18
- edsl/inference_services/OpenAIService.py +224 -224
- edsl/inference_services/TestService.py +89 -89
- edsl/inference_services/TogetherAIService.py +170 -170
- edsl/inference_services/models_available_cache.py +118 -118
- edsl/inference_services/rate_limits_cache.py +25 -25
- edsl/inference_services/registry.py +39 -39
- edsl/inference_services/write_available.py +10 -10
- edsl/jobs/Answers.py +56 -56
- edsl/jobs/Jobs.py +1112 -1112
- edsl/jobs/__init__.py +1 -1
- edsl/jobs/buckets/BucketCollection.py +63 -63
- edsl/jobs/buckets/ModelBuckets.py +65 -65
- edsl/jobs/buckets/TokenBucket.py +248 -248
- edsl/jobs/interviews/Interview.py +661 -651
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
- edsl/jobs/interviews/InterviewExceptionEntry.py +182 -182
- edsl/jobs/interviews/InterviewStatistic.py +63 -63
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
- edsl/jobs/interviews/InterviewStatusLog.py +92 -92
- edsl/jobs/interviews/ReportErrors.py +66 -66
- edsl/jobs/interviews/interview_status_enum.py +9 -9
- edsl/jobs/runners/JobsRunnerAsyncio.py +338 -337
- edsl/jobs/runners/JobsRunnerStatus.py +332 -332
- edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
- edsl/jobs/tasks/TaskCreators.py +64 -64
- edsl/jobs/tasks/TaskHistory.py +441 -441
- edsl/jobs/tasks/TaskStatusLog.py +23 -23
- edsl/jobs/tasks/task_status_enum.py +163 -163
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
- edsl/jobs/tokens/TokenUsage.py +34 -34
- edsl/language_models/LanguageModel.py +718 -718
- edsl/language_models/ModelList.py +102 -102
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
- edsl/language_models/__init__.py +2 -2
- edsl/language_models/fake_openai_call.py +15 -15
- edsl/language_models/fake_openai_service.py +61 -61
- edsl/language_models/registry.py +137 -137
- edsl/language_models/repair.py +156 -156
- edsl/language_models/unused/ReplicateBase.py +83 -83
- edsl/language_models/utilities.py +64 -64
- edsl/notebooks/Notebook.py +259 -259
- edsl/notebooks/__init__.py +1 -1
- edsl/prompts/Prompt.py +353 -358
- edsl/prompts/__init__.py +2 -2
- edsl/questions/AnswerValidatorMixin.py +289 -289
- edsl/questions/QuestionBase.py +616 -616
- edsl/questions/QuestionBaseGenMixin.py +161 -161
- edsl/questions/QuestionBasePromptsMixin.py +266 -266
- edsl/questions/QuestionBudget.py +227 -227
- edsl/questions/QuestionCheckBox.py +359 -359
- edsl/questions/QuestionExtract.py +183 -183
- edsl/questions/QuestionFreeText.py +114 -113
- edsl/questions/QuestionFunctional.py +159 -159
- edsl/questions/QuestionList.py +231 -231
- edsl/questions/QuestionMultipleChoice.py +286 -286
- edsl/questions/QuestionNumerical.py +153 -153
- edsl/questions/QuestionRank.py +324 -324
- edsl/questions/Quick.py +41 -41
- edsl/questions/RegisterQuestionsMeta.py +71 -71
- edsl/questions/ResponseValidatorABC.py +174 -174
- edsl/questions/SimpleAskMixin.py +73 -73
- edsl/questions/__init__.py +26 -26
- edsl/questions/compose_questions.py +98 -98
- edsl/questions/decorators.py +21 -21
- edsl/questions/derived/QuestionLikertFive.py +76 -76
- edsl/questions/derived/QuestionLinearScale.py +87 -87
- edsl/questions/derived/QuestionTopK.py +91 -91
- edsl/questions/derived/QuestionYesNo.py +82 -82
- edsl/questions/descriptors.py +418 -418
- edsl/questions/prompt_templates/question_budget.jinja +13 -13
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
- edsl/questions/prompt_templates/question_extract.jinja +11 -11
- edsl/questions/prompt_templates/question_free_text.jinja +3 -3
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
- edsl/questions/prompt_templates/question_list.jinja +17 -17
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
- edsl/questions/prompt_templates/question_numerical.jinja +36 -36
- edsl/questions/question_registry.py +147 -147
- edsl/questions/settings.py +12 -12
- edsl/questions/templates/budget/answering_instructions.jinja +7 -7
- edsl/questions/templates/budget/question_presentation.jinja +7 -7
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
- edsl/questions/templates/extract/answering_instructions.jinja +7 -7
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
- edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
- edsl/questions/templates/list/answering_instructions.jinja +3 -3
- edsl/questions/templates/list/question_presentation.jinja +5 -5
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
- edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
- edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
- edsl/questions/templates/numerical/question_presentation.jinja +6 -6
- edsl/questions/templates/rank/answering_instructions.jinja +11 -11
- edsl/questions/templates/rank/question_presentation.jinja +15 -15
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
- edsl/questions/templates/top_k/question_presentation.jinja +22 -22
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
- edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
- edsl/results/Dataset.py +293 -293
- edsl/results/DatasetExportMixin.py +693 -693
- edsl/results/DatasetTree.py +145 -145
- edsl/results/Result.py +435 -433
- edsl/results/Results.py +1160 -1158
- edsl/results/ResultsDBMixin.py +238 -238
- edsl/results/ResultsExportMixin.py +43 -43
- edsl/results/ResultsFetchMixin.py +33 -33
- edsl/results/ResultsGGMixin.py +121 -121
- edsl/results/ResultsToolsMixin.py +98 -98
- edsl/results/Selector.py +118 -118
- edsl/results/__init__.py +2 -2
- edsl/results/tree_explore.py +115 -115
- edsl/scenarios/FileStore.py +458 -443
- edsl/scenarios/Scenario.py +510 -507
- edsl/scenarios/ScenarioHtmlMixin.py +59 -59
- edsl/scenarios/ScenarioList.py +1101 -1101
- edsl/scenarios/ScenarioListExportMixin.py +52 -52
- edsl/scenarios/ScenarioListPdfMixin.py +261 -261
- edsl/scenarios/__init__.py +4 -2
- edsl/shared.py +1 -1
- edsl/study/ObjectEntry.py +173 -173
- edsl/study/ProofOfWork.py +113 -113
- edsl/study/SnapShot.py +80 -80
- edsl/study/Study.py +528 -528
- edsl/study/__init__.py +4 -4
- edsl/surveys/DAG.py +148 -148
- edsl/surveys/Memory.py +31 -31
- edsl/surveys/MemoryPlan.py +244 -244
- edsl/surveys/Rule.py +324 -324
- edsl/surveys/RuleCollection.py +387 -387
- edsl/surveys/Survey.py +1772 -1772
- edsl/surveys/SurveyCSS.py +261 -261
- edsl/surveys/SurveyExportMixin.py +259 -259
- edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
- edsl/surveys/SurveyQualtricsImport.py +284 -284
- edsl/surveys/__init__.py +3 -3
- edsl/surveys/base.py +53 -53
- edsl/surveys/descriptors.py +56 -56
- edsl/surveys/instructions/ChangeInstruction.py +47 -47
- edsl/surveys/instructions/Instruction.py +51 -51
- edsl/surveys/instructions/InstructionCollection.py +77 -77
- edsl/templates/error_reporting/base.html +23 -23
- edsl/templates/error_reporting/exceptions_by_model.html +34 -34
- edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
- edsl/templates/error_reporting/exceptions_by_type.html +16 -16
- edsl/templates/error_reporting/interview_details.html +115 -115
- edsl/templates/error_reporting/interviews.html +9 -9
- edsl/templates/error_reporting/overview.html +4 -4
- edsl/templates/error_reporting/performance_plot.html +1 -1
- edsl/templates/error_reporting/report.css +73 -73
- edsl/templates/error_reporting/report.html +117 -117
- edsl/templates/error_reporting/report.js +25 -25
- edsl/tools/__init__.py +1 -1
- edsl/tools/clusters.py +192 -192
- edsl/tools/embeddings.py +27 -27
- edsl/tools/embeddings_plotting.py +118 -118
- edsl/tools/plotting.py +112 -112
- edsl/tools/summarize.py +18 -18
- edsl/utilities/SystemInfo.py +28 -28
- edsl/utilities/__init__.py +22 -22
- edsl/utilities/ast_utilities.py +25 -25
- edsl/utilities/data/Registry.py +6 -6
- edsl/utilities/data/__init__.py +1 -1
- edsl/utilities/data/scooter_results.json +1 -1
- edsl/utilities/decorators.py +77 -77
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
- edsl/utilities/interface.py +627 -627
- edsl/utilities/repair_functions.py +28 -28
- edsl/utilities/restricted_python.py +70 -70
- edsl/utilities/utilities.py +391 -391
- {edsl-0.1.36.dev6.dist-info → edsl-0.1.37.dev2.dist-info}/LICENSE +21 -21
- {edsl-0.1.36.dev6.dist-info → edsl-0.1.37.dev2.dist-info}/METADATA +1 -1
- edsl-0.1.37.dev2.dist-info/RECORD +279 -0
- edsl-0.1.36.dev6.dist-info/RECORD +0 -279
- {edsl-0.1.36.dev6.dist-info → edsl-0.1.37.dev2.dist-info}/WHEEL +0 -0
edsl/language_models/repair.py
CHANGED
@@ -1,156 +1,156 @@
The removed and re-added hunks are textually identical, so the file was rewritten with no visible content change. The file:

```python
import json
import asyncio
import warnings


async def async_repair(
    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
):
    from edsl.utilities.utilities import clean_json

    s = clean_json(bad_json)

    try:
        # this is the OpenAI version, but that's fine
        valid_dict = json.loads(s)
        success = True
    except json.JSONDecodeError:
        valid_dict = {}
        success = False
        # print("Replacing control characters didn't work. Trying extracting the sub-string.")
    else:
        return valid_dict, success

    try:
        from edsl.utilities.repair_functions import extract_json_from_string

        valid_dict = extract_json_from_string(s)
        success = True
    except ValueError:
        valid_dict = {}
        success = False
    else:
        return valid_dict, success

    from edsl import Model

    m = Model()

    from edsl import QuestionExtract

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)

        q = QuestionExtract(
            question_text="""
            A language model was supposed to respond to a question.
            The response should have been a JSON object with an answer to a question and some commentary.

            It should have returned a string like this:

            '{'answer': 'The answer to the question.', 'comment': 'Some commentary.'}'

            or:

            '{'answer': 'The answer to the question.'}'

            The answer field is very likely a number. The comment field is always a string.

            Your job is to return just the repaired JSON object that the model should have returned, properly formatted.

            - It might have included some preliminary comments.
            - It might have included some control characters.
            - It might have included some extraneous text.

            DO NOT include any extraneous text in your response. Just return the repaired JSON object.
            Do not preface the JSON object with any text. Just return the JSON object.

            Bad answer: """
            + str(bad_json)
            + "The model received a user prompt of: '"
            + str(user_prompt)
            + """'
            The model received a system prompt of: ' """
            + str(system_prompt)
            + """
            '
            Please return the repaired JSON object, following the instructions the original model should have followed, though
            using 'new_answer' and 'new_comment' as the keys.""",
            answer_template={
                "new_answer": "<number, string, list, etc.>",
                "new_comment": "Model's comments",
            },
            question_name="model_repair",
        )

    results = await q.run_async(cache=cache)

    try:
        # this is the OpenAI version, but that's fine
        valid_dict = json.loads(json.dumps(results))
        success = True
        # this is to deal with the fact that the model returns the answer and comment as new_answer and new_comment
        valid_dict["answer"] = valid_dict.pop("new_answer")
        valid_dict["comment"] = valid_dict.pop("new_comment")
    except json.JSONDecodeError:
        valid_dict = {}
        success = False
        from rich import print
        from rich.console import Console
        from rich.syntax import Syntax

        console = Console()
        error_message = (
            f"All repairs failed. LLM Model given [red]{str(bad_json)}[/red]"
        )
        console.print(" " + error_message)
        model_returned = results["choices"][0]["message"]["content"]
        console.print(f"LLM Model returned: [blue]{model_returned}[/blue]")

    return valid_dict, success


def repair_wrapper(
    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
):
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            # Add repair as a task to the running loop
            task = loop.create_task(
                async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
            )
            return task
        else:
            # Run a new event loop for repair
            return loop.run_until_complete(
                async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
            )
    except RuntimeError:
        # Create a new event loop if one is not already available
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop.run_until_complete(
            async_repair(bad_json, error_message, user_prompt, system_prompt, cache)
        )


def repair(
    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
):
    return repair_wrapper(bad_json, error_message, user_prompt, system_prompt, cache)


if __name__ == "__main__":
    bad_json = """
    {
      'answer': "The problematic phrase in the excerpt is \'typically\'. This word is vague and can lead to different interpretations. An alternative phrasing that would be less problematic is:
    'On average, how long do you cook scrambled eggs?}
    """
    try:
        json.loads(bad_json)
        print("Loaded")
    except json.JSONDecodeError as e:
        error_message = str(e)
        repaired, success = repair(bad_json, error_message)
        print(f"Repaired: {repaired}")
```
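A note on usage: `repair_wrapper` changes its return type depending on the caller's context, which is easy to miss. The sketch below is a minimal illustration, assuming an installed edsl with a default model and API key configured; the malformed JSON literal is invented for the example.

```python
from edsl.language_models.repair import repair

# Illustrative malformed JSON: single quotes and a missing closing brace.
bad = "{'answer': 'yes'"

# Called with no event loop running, repair() executes synchronously
# and returns the (valid_dict, success) tuple directly.
repaired, ok = repair(bad)

# Called while a loop is already running (e.g. inside a Jupyter cell or
# another coroutine), it returns an asyncio.Task instead, which must be
# awaited:
#
#     task = repair(bad)
#     repaired, ok = await task
```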
edsl/language_models/unused/ReplicateBase.py
CHANGED
@@ -1,83 +1,83 @@
Here too the removed and re-added hunks are textually identical. The file:

```python
import asyncio
import aiohttp
import json
from typing import Any

from edsl import CONFIG

from edsl.language_models.LanguageModel import LanguageModel


def replicate_model_factory(model_name, base_url, api_token):
    class ReplicateLanguageModelBase(LanguageModel):
        _model_ = (
            model_name  # Example model name, replace with actual model name if needed
        )
        _parameters_ = {
            "temperature": 0.1,
            "topK": 50,
            "topP": 0.9,
            "max_new_tokens": 500,
            "min_new_tokens": -1,
            "repetition_penalty": 1.15,
            # "version": "5fe0a3d7ac2852264a25279d1dfb798acbc4d49711d126646594e212cb821749",
            "use_cache": True,
        }
        _api_token = api_token
        _base_url = base_url

        async def async_execute_model_call(
            self, user_prompt: str, system_prompt: str = ""
        ) -> dict[str, Any]:
            self.api_token = self._api_token
            self.headers = {
                "Authorization": f"Token {self.api_token}",
                "Content-Type": "application/json",
            }
            # combined_prompt = f"{system_prompt} {user_prompt}".strip()
            # print(f"Prompt: {combined_prompt}")
            data = {
                # "version": self._parameters_["version"],
                "input": {
                    "debug": False,
                    "top_k": self._parameters_["topK"],
                    "top_p": self._parameters_["topP"],
                    "prompt": user_prompt,
                    "system_prompt": system_prompt,
                    "temperature": self._parameters_["temperature"],
                    "max_new_tokens": self._parameters_["max_new_tokens"],
                    "min_new_tokens": self._parameters_["min_new_tokens"],
                    "prompt_template": "{prompt}",
                    "repetition_penalty": self._parameters_["repetition_penalty"],
                },
            }

            async with aiohttp.ClientSession() as session:
                async with session.post(
                    self._base_url, headers=self.headers, data=json.dumps(data)
                ) as response:
                    raw_response_text = await response.text()
                    data = json.loads(raw_response_text)
                    print(f"This was the data returned by the model:{data}")
                    prediction_url = data["urls"]["get"]

                while True:
                    async with session.get(
                        prediction_url, headers=self.headers
                    ) as get_response:
                        if get_response.status != 200:
                            # Handle non-success status codes appropriately
                            return None

                        get_data = await get_response.text()
                        get_data = json.loads(get_data)
                        if get_data["status"] == "succeeded":
                            return get_data
                        await asyncio.sleep(1)

        def parse_response(self, raw_response: dict[str, Any]) -> str:
            data = "".join(raw_response["output"])
            print(f"This is what the model returned: {data}")
            return data

    return ReplicateLanguageModelBase
```
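For orientation, a minimal sketch of how this factory might be used; the model name, endpoint URL, and environment-variable handling below are placeholders, not values taken from the package:

```python
import os
from edsl.language_models.unused.ReplicateBase import replicate_model_factory

# All three arguments are illustrative placeholders.
ReplicateModel = replicate_model_factory(
    model_name="meta/llama-2-13b-chat",                   # hypothetical model
    base_url="https://api.replicate.com/v1/predictions",  # assumed endpoint
    api_token=os.environ.get("REPLICATE_API_TOKEN", ""),
)

# The generated class POSTs the prompt to base_url, reads the prediction
# URL from Replicate's response, then polls it once per second until the
# status is "succeeded". Instantiation details depend on
# LanguageModel.__init__:
#
#     model = ReplicateModel()
```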
edsl/language_models/utilities.py
CHANGED
@@ -1,64 +1,64 @@
Again the removed and re-added hunks are textually identical. The file:

```python
import asyncio
from typing import Any, Optional, List
from edsl import Survey
from edsl.config import CONFIG
from edsl.enums import InferenceServiceType
from edsl.language_models.LanguageModel import LanguageModel
from edsl.questions import QuestionFreeText


def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
    survey = Survey()
    for i in range(num_questions):
        if take_scenario:
            q = QuestionFreeText(
                question_text=f"XX{i}XX and {{scenario_value }}",
                question_name=f"question_{i}",
            )
        else:
            q = QuestionFreeText(
                question_text=f"XX{i}XX", question_name=f"question_{i}"
            )
        survey.add_question(q)
        if i > 0 and chained:
            survey.add_targeted_memory(f"question_{i}", f"question_{i-1}")
    return survey


def create_language_model(
    exception: Exception, fail_at_number: int, never_ending=False
):
    class LanguageModelFromUtilities(LanguageModel):
        _model_ = "test"
        _parameters_ = {"temperature": 0.5}
        _inference_service_ = InferenceServiceType.TEST.value
        key_sequence = ["message", 0, "text"]
        usage_sequence = ["usage"]
        input_token_name = "prompt_tokens"
        output_token_name = "completion_tokens"
        _rpm = 1000000000000
        _tpm = 1000000000000

        async def async_execute_model_call(
            self,
            user_prompt: str,
            system_prompt: str,
            files_list: Optional[List[Any]] = None,
        ) -> dict[str, Any]:
            question_number = int(
                user_prompt.split("XX")[1]
            )  ## grabs the question number from the prompt
            await asyncio.sleep(0.1)
            if never_ending:  ## you're not going anywhere buddy
                await asyncio.sleep(float("inf"))
            if question_number == fail_at_number:
                if asyncio.iscoroutinefunction(exception):
                    await exception()
                else:
                    raise exception
            return {
                "message": [{"text": "SPAM!"}],
                "usage": {"prompt_tokens": 1, "completion_tokens": 1},
            }

    return LanguageModelFromUtilities
```
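These helpers form a small test harness: `create_survey` embeds each question's index in its text as `XX<i>XX`, and `create_language_model` builds a fake model that reads that index back out of the prompt and fails on cue. A minimal sketch of how they fit together; the `.by(...).run()` call at the end is shown only as an assumption about the surrounding edsl API:

```python
from edsl.language_models.utilities import create_survey, create_language_model

# Three chained questions; question i's text contains "XX{i}XX".
survey = create_survey(num_questions=3, chained=True)

# A model class that answers "SPAM!" until it reaches question 1, where
# it raises the supplied exception.
FailingModel = create_language_model(ValueError("boom"), fail_at_number=1)

# Typical driver (assumed API):
#
#     results = survey.by(FailingModel()).run()
```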