edsl 0.1.32__py3-none-any.whl → 0.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +9 -3
- edsl/TemplateLoader.py +24 -0
- edsl/__init__.py +8 -3
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +40 -8
- edsl/agents/AgentList.py +43 -0
- edsl/agents/Invigilator.py +135 -219
- edsl/agents/InvigilatorBase.py +148 -59
- edsl/agents/{PromptConstructionMixin.py → PromptConstructor.py} +138 -89
- edsl/agents/__init__.py +1 -0
- edsl/auto/AutoStudy.py +117 -0
- edsl/auto/StageBase.py +230 -0
- edsl/auto/StageGenerateSurvey.py +178 -0
- edsl/auto/StageLabelQuestions.py +125 -0
- edsl/auto/StagePersona.py +61 -0
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
- edsl/auto/StagePersonaDimensionValues.py +74 -0
- edsl/auto/StagePersonaDimensions.py +69 -0
- edsl/auto/StageQuestions.py +73 -0
- edsl/auto/SurveyCreatorPipeline.py +21 -0
- edsl/auto/utilities.py +224 -0
- edsl/config.py +47 -56
- edsl/coop/PriceFetcher.py +58 -0
- edsl/coop/coop.py +50 -7
- edsl/data/Cache.py +35 -1
- edsl/data_transfer_models.py +73 -38
- edsl/enums.py +4 -0
- edsl/exceptions/language_models.py +25 -1
- edsl/exceptions/questions.py +62 -5
- edsl/exceptions/results.py +4 -0
- edsl/inference_services/AnthropicService.py +13 -11
- edsl/inference_services/AwsBedrock.py +19 -17
- edsl/inference_services/AzureAI.py +37 -20
- edsl/inference_services/GoogleService.py +16 -12
- edsl/inference_services/GroqService.py +2 -0
- edsl/inference_services/InferenceServiceABC.py +58 -3
- edsl/inference_services/MistralAIService.py +120 -0
- edsl/inference_services/OpenAIService.py +48 -54
- edsl/inference_services/TestService.py +80 -0
- edsl/inference_services/TogetherAIService.py +170 -0
- edsl/inference_services/models_available_cache.py +0 -6
- edsl/inference_services/registry.py +6 -0
- edsl/jobs/Answers.py +10 -12
- edsl/jobs/FailedQuestion.py +78 -0
- edsl/jobs/Jobs.py +37 -22
- edsl/jobs/buckets/BucketCollection.py +24 -15
- edsl/jobs/buckets/TokenBucket.py +93 -14
- edsl/jobs/interviews/Interview.py +366 -78
- edsl/jobs/interviews/{interview_exception_tracking.py → InterviewExceptionCollection.py} +14 -68
- edsl/jobs/interviews/InterviewExceptionEntry.py +85 -19
- edsl/jobs/runners/JobsRunnerAsyncio.py +146 -175
- edsl/jobs/runners/JobsRunnerStatus.py +331 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +30 -23
- edsl/jobs/tasks/TaskHistory.py +148 -213
- edsl/language_models/LanguageModel.py +261 -156
- edsl/language_models/ModelList.py +2 -2
- edsl/language_models/RegisterLanguageModelsMeta.py +14 -29
- edsl/language_models/fake_openai_call.py +15 -0
- edsl/language_models/fake_openai_service.py +61 -0
- edsl/language_models/registry.py +23 -6
- edsl/language_models/repair.py +0 -19
- edsl/language_models/utilities.py +61 -0
- edsl/notebooks/Notebook.py +20 -2
- edsl/prompts/Prompt.py +52 -2
- edsl/questions/AnswerValidatorMixin.py +23 -26
- edsl/questions/QuestionBase.py +330 -249
- edsl/questions/QuestionBaseGenMixin.py +133 -0
- edsl/questions/QuestionBasePromptsMixin.py +266 -0
- edsl/questions/QuestionBudget.py +99 -41
- edsl/questions/QuestionCheckBox.py +227 -35
- edsl/questions/QuestionExtract.py +98 -27
- edsl/questions/QuestionFreeText.py +52 -29
- edsl/questions/QuestionFunctional.py +7 -0
- edsl/questions/QuestionList.py +141 -22
- edsl/questions/QuestionMultipleChoice.py +159 -65
- edsl/questions/QuestionNumerical.py +88 -46
- edsl/questions/QuestionRank.py +182 -24
- edsl/questions/Quick.py +41 -0
- edsl/questions/RegisterQuestionsMeta.py +31 -12
- edsl/questions/ResponseValidatorABC.py +170 -0
- edsl/questions/__init__.py +3 -4
- edsl/questions/decorators.py +21 -0
- edsl/questions/derived/QuestionLikertFive.py +10 -5
- edsl/questions/derived/QuestionLinearScale.py +15 -2
- edsl/questions/derived/QuestionTopK.py +10 -1
- edsl/questions/derived/QuestionYesNo.py +24 -3
- edsl/questions/descriptors.py +43 -7
- edsl/questions/prompt_templates/question_budget.jinja +13 -0
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
- edsl/questions/prompt_templates/question_extract.jinja +11 -0
- edsl/questions/prompt_templates/question_free_text.jinja +3 -0
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
- edsl/questions/prompt_templates/question_list.jinja +17 -0
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
- edsl/questions/prompt_templates/question_numerical.jinja +37 -0
- edsl/questions/question_registry.py +6 -2
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +7 -0
- edsl/questions/templates/budget/question_presentation.jinja +7 -0
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +7 -0
- edsl/questions/templates/extract/question_presentation.jinja +1 -0
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +1 -0
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
- edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +4 -0
- edsl/questions/templates/list/question_presentation.jinja +5 -0
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +8 -0
- edsl/questions/templates/numerical/question_presentation.jinja +7 -0
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +11 -0
- edsl/questions/templates/rank/question_presentation.jinja +15 -0
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
- edsl/questions/templates/top_k/question_presentation.jinja +22 -0
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
- edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
- edsl/results/Dataset.py +20 -0
- edsl/results/DatasetExportMixin.py +46 -48
- edsl/results/DatasetTree.py +145 -0
- edsl/results/Result.py +32 -5
- edsl/results/Results.py +135 -46
- edsl/results/ResultsDBMixin.py +3 -3
- edsl/results/Selector.py +118 -0
- edsl/results/tree_explore.py +115 -0
- edsl/scenarios/FileStore.py +71 -10
- edsl/scenarios/Scenario.py +96 -25
- edsl/scenarios/ScenarioImageMixin.py +2 -2
- edsl/scenarios/ScenarioList.py +361 -39
- edsl/scenarios/ScenarioListExportMixin.py +9 -0
- edsl/scenarios/ScenarioListPdfMixin.py +150 -4
- edsl/study/SnapShot.py +8 -1
- edsl/study/Study.py +32 -0
- edsl/surveys/Rule.py +10 -1
- edsl/surveys/RuleCollection.py +21 -5
- edsl/surveys/Survey.py +637 -311
- edsl/surveys/SurveyExportMixin.py +71 -9
- edsl/surveys/SurveyFlowVisualizationMixin.py +2 -1
- edsl/surveys/SurveyQualtricsImport.py +75 -4
- edsl/surveys/instructions/ChangeInstruction.py +47 -0
- edsl/surveys/instructions/Instruction.py +34 -0
- edsl/surveys/instructions/InstructionCollection.py +77 -0
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +24 -0
- edsl/templates/error_reporting/exceptions_by_model.html +35 -0
- edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
- edsl/templates/error_reporting/exceptions_by_type.html +17 -0
- edsl/templates/error_reporting/interview_details.html +116 -0
- edsl/templates/error_reporting/interviews.html +10 -0
- edsl/templates/error_reporting/overview.html +5 -0
- edsl/templates/error_reporting/performance_plot.html +2 -0
- edsl/templates/error_reporting/report.css +74 -0
- edsl/templates/error_reporting/report.html +118 -0
- edsl/templates/error_reporting/report.js +25 -0
- edsl/utilities/utilities.py +9 -1
- {edsl-0.1.32.dist-info → edsl-0.1.33.dist-info}/METADATA +5 -2
- edsl-0.1.33.dist-info/RECORD +295 -0
- edsl/jobs/interviews/InterviewTaskBuildingMixin.py +0 -286
- edsl/jobs/interviews/retry_management.py +0 -37
- edsl/jobs/runners/JobsRunnerStatusMixin.py +0 -333
- edsl/utilities/gcp_bucket/simple_example.py +0 -9
- edsl-0.1.32.dist-info/RECORD +0 -209
- {edsl-0.1.32.dist-info → edsl-0.1.33.dist-info}/LICENSE +0 -0
- {edsl-0.1.32.dist-info → edsl-0.1.33.dist-info}/WHEEL +0 -0
edsl/language_models/fake_openai_service.py ADDED
@@ -0,0 +1,61 @@
+import threading
+import asyncio
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse
+import uvicorn
+import json
+from typing import Any
+
+app = FastAPI()
+
+
+async def generate_response(question_number: int) -> dict:
+    # Simulate some asynchronous work
+    await asyncio.sleep(1)
+    return {
+        "id": "chatcmpl-123",
+        "object": "chat.completion",
+        "created": 1677652288,
+        "model": "gpt-3.5-turbo-0613",
+        "choices": [
+            {
+                "index": 0,
+                "message": {
+                    "role": "assistant",
+                    "content": json.dumps(
+                        {"answer": f"SPAM for question {question_number}!"}
+                    ),
+                },
+                "finish_reason": "stop",
+            }
+        ],
+        "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
+    }
+
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request):
+    body = await request.json()
+    user_prompt = body["messages"][-1]["content"]
+    question_number = int(user_prompt.split("XX")[1])
+
+    response = await generate_response(question_number)
+    return JSONResponse(content=response)
+
+
+def run_server():
+    uvicorn.run(app, host="127.0.0.1", port=8000)
+
+
+if __name__ == "__main__":
+    # Start the server in a separate thread
+    server_thread = threading.Thread(target=run_server)
+    server_thread.start()
+
+    # Your main code here
+    # ...
+
+    # To use this with the OpenAI SDK:
+    # from openai import AsyncOpenAI
+    # client = AsyncOpenAI(base_url="http://127.0.0.1:8000/v1", api_key="fake_key")
+    # response = await client.chat.completions.create(model="gpt-3.5-turbo", messages=[...])
edsl/language_models/registry.py CHANGED
@@ -1,5 +1,11 @@
 import textwrap
 from random import random
+from edsl.config import CONFIG
+
+# if "EDSL_DEFAULT_MODEL" not in CONFIG:
+#     default_model = "test"
+# else:
+#     default_model = CONFIG.get("EDSL_DEFAULT_MODEL")
 
 
 def get_model_class(model_name, registry=None):
@@ -27,20 +33,24 @@ class Meta(type):
 
 
 class Model(metaclass=Meta):
-    default_model = "
+    default_model = CONFIG.get("EDSL_DEFAULT_MODEL")
 
-    def __new__(
+    def __new__(
+        cls, model_name=None, registry=None, service_name=None, *args, **kwargs
+    ):
         # Map index to the respective subclass
         if model_name is None:
-            model_name =
+            model_name = (
+                cls.default_model
+            )  # when model_name is None, use the default model, set in the config file
        from edsl.inference_services.registry import default
 
         registry = registry or default
 
-        if isinstance(model_name, int):
+        if isinstance(model_name, int):  # can refer to a model by index
             model_name = cls.available(name_only=True)[model_name]
 
-        factory = registry.create_model_factory(model_name)
+        factory = registry.create_model_factory(model_name, service_name=service_name)
         return factory(*args, **kwargs)
 
     @classmethod
@@ -58,11 +68,18 @@ class Model(metaclass=Meta):
         return [r._inference_service_ for r in registry.services]
 
     @classmethod
-    def available(cls, search_term=None, name_only=False, registry=None):
+    def available(cls, search_term=None, name_only=False, registry=None, service=None):
         from edsl.inference_services.registry import default
 
         registry = registry or default
         full_list = registry.available()
+
+        if service is not None:
+            if service not in cls.services(registry=registry):
+                raise ValueError(f"Service {service} not found in available services.")
+
+            full_list = [m for m in full_list if m[1] == service]
+
         if search_term is None:
             if name_only:
                 return [m[0] for m in full_list]
edsl/language_models/repair.py CHANGED
@@ -141,25 +141,6 @@ def repair(
     return repair_wrapper(bad_json, error_message, user_prompt, system_prompt, cache)
 
 
-# Example usage:
-# result, success = repair_wrapper('{"name": "John Doe", "age": 30,}') # example bad JSON
-
-
-# def repair_wrapper(bad_json, error_message=""):
-#     loop = asyncio.get_event_loop()
-#     if loop.is_running():
-#         # Add repair as a task to the running loop
-#         task = loop.create_task(repair(bad_json, error_message))
-#         return task
-#     else:
-#         # Run a new event loop for repair
-#         return loop.run_until_complete(repair(bad_json, error_message))
-
-
-# Example usage:
-# result, success = repair_wrapper('{"name": "John Doe", "age": 30,}') # example bad JSON
-
-
 if __name__ == "__main__":
     bad_json = """
     {
edsl/language_models/utilities.py ADDED
@@ -0,0 +1,61 @@
+import asyncio
+from typing import Any
+from edsl import Survey
+from edsl.config import CONFIG
+from edsl.enums import InferenceServiceType
+from edsl.language_models.LanguageModel import LanguageModel
+from edsl.questions import QuestionFreeText
+
+
+def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
+    survey = Survey()
+    for i in range(num_questions):
+        if take_scenario:
+            q = QuestionFreeText(
+                question_text=f"XX{i}XX and {{scenario_value }}",
+                question_name=f"question_{i}",
+            )
+        else:
+            q = QuestionFreeText(
+                question_text=f"XX{i}XX", question_name=f"question_{i}"
+            )
+        survey.add_question(q)
+        if i > 0 and chained:
+            survey.add_targeted_memory(f"question_{i}", f"question_{i-1}")
+    return survey
+
+
+def create_language_model(
+    exception: Exception, fail_at_number: int, never_ending=False
+):
+    class LanguageModelFromUtilities(LanguageModel):
+        _model_ = "test"
+        _parameters_ = {"temperature": 0.5}
+        _inference_service_ = InferenceServiceType.TEST.value
+        key_sequence = ["message", 0, "text"]
+        usage_sequence = ["usage"]
+        input_token_name = "prompt_tokens"
+        output_token_name = "completion_tokens"
+        _rpm = 1000000000000
+        _tpm = 1000000000000
+
+        async def async_execute_model_call(
+            self, user_prompt: str, system_prompt: str
+        ) -> dict[str, Any]:
+            question_number = int(
+                user_prompt.split("XX")[1]
+            )  ## grabs the question number from the prompt
+            await asyncio.sleep(0.1)
+            if never_ending:  ## you're not going anywhere buddy
+                await asyncio.sleep(float("inf"))
+            if question_number == fail_at_number:
+                if asyncio.iscoroutinefunction(exception):
+                    await exception()
+                else:
+                    raise exception
+            return {
+                "message": [{"text": "SPAM!"}],
+                "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+            }
+
+    return LanguageModelFromUtilities
edsl/notebooks/Notebook.py CHANGED
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 import json
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 from uuid import uuid4
 from edsl.Base import Base
 from edsl.utilities.decorators import add_edsl_version, remove_edsl_version
@@ -56,6 +56,8 @@ class Notebook(Base):
 
     @classmethod
     def from_script(cls, path: str, name: Optional[str] = None) -> "Notebook":
+        import nbformat
+
         # Read the script file
         with open(path, "r") as script_file:
             script_content = script_file.read()
@@ -92,12 +94,26 @@ class Notebook(Base):
         """
         return self.data == other.data
 
+    def __hash__(self) -> int:
+        """
+        Allow the model to be used as a key in a dictionary.
+        """
+        from edsl.utilities.utilities import dict_hash
+
+        return dict_hash(self.data["cells"])
+
+    def _to_dict(self) -> dict:
+        """
+        Serialize to a dictionary.
+        """
+        return {"name": self.name, "data": self.data}
+
     @add_edsl_version
     def to_dict(self) -> dict:
         """
         Convert a Notebook to a dictionary.
         """
-        return
+        return self._to_dict()
 
     @classmethod
     @remove_edsl_version
@@ -111,6 +127,8 @@ class Notebook(Base):
         """
         Save the notebook at the specified filepath.
         """
+        import nbformat
+
         nbformat.write(nbformat.from_dict(self.data), fp=path)
 
     def print(self):
edsl/prompts/Prompt.py CHANGED
@@ -3,13 +3,17 @@ from typing import Optional
 from abc import ABC
 from typing import Any, List
 
+from jinja2 import Environment, FileSystemLoader
+from typing import Union, Dict
+from pathlib import Path
+
 from rich.table import Table
 from jinja2 import Template, Environment, meta, TemplateSyntaxError, Undefined
 
 
 class PreserveUndefined(Undefined):
     def __str__(self):
-        return "{{ " + self._undefined_name + " }}"
+        return "{{ " + str(self._undefined_name) + " }}"
 
 
 from edsl.exceptions.prompts import TemplateRenderError
@@ -70,6 +74,50 @@ class PromptBase(
             text = f.read()
         return cls(text=text)
 
+    @classmethod
+    def from_template(
+        cls,
+        file_name: str,
+        path_to_folder: Optional[Union[str, Path]] = None,
+        **kwargs: Dict[str, Any],
+    ) -> "PromptBase":
+        """Create a `PromptBase` from a Jinja template.
+
+        Args:
+            file_name (str): The name of the Jinja template file.
+            path_to_folder (Union[str, Path]): The path to the folder containing the template.
+                Can be absolute or relative.
+            **kwargs: Variables to be passed to the template for rendering.
+
+        Returns:
+            PromptBase: An instance of PromptBase with the rendered template as text.
+        """
+        # if file_name lacks the .j2 extension, add it
+        if not file_name.endswith(".jinja"):
+            file_name += ".jinja"
+
+        # Convert path_to_folder to a Path object if it's a string
+        if path_to_folder is None:
+            from importlib import resources
+            import os
+
+            path_to_folder = resources.path("edsl.questions", "prompt_templates")
+
+        try:
+            folder_path = Path(path_to_folder)
+        except Exception as e:
+            raise ValueError(f"Invalid path: {path_to_folder}. Error: {e}")
+
+        with open(folder_path.joinpath(file_name), "r") as f:
+            text = f.read()
+        return cls(text=text)
+        # Resolve the path to get the absolute path
+        # absolute_path = folder_path.resolve()
+        # env = Environment(loader=FileSystemLoader(absolute_path))
+        # template = env.get_template(file_name)
+        # rendered_text = template.render({})
+        # return cls(text=rendered_text)
+
     @property
     def text(self):
         """Return the `Prompt` text."""
@@ -247,7 +295,9 @@ class PromptBase(
                 "Too much nesting - you created an infinite loop here, pal"
             )
         except TemplateSyntaxError as e:
-            raise TemplateRenderError(
+            raise TemplateRenderError(
+                f"Template syntax error: {e}. Bad template: {text}"
+            )
 
     def to_dict(self) -> dict[str, Any]:
         """Return the `Prompt` as a dictionary.
edsl/questions/AnswerValidatorMixin.py CHANGED
@@ -16,19 +16,27 @@ class AnswerValidatorMixin:
     - Question specific validation: validators for specific question types
     """
 
+    def failing_job(self):
+        from edsl import Agent
+
+        a = Agent()
+
+        def f(self, question, scenario):
+            return []
+
+        a.add_direct_question_answering_method(f, validate_response=True)
+        from edsl import QuestionNumerical
+
+        q = QuestionNumerical.example()
+        results = q.by(a).run()
+        return results
+
     #####################
     # TEMPLATE VALIDATION
     #####################
     def _validate_answer_template_basic(self, answer: Any) -> None:
         """Check that the answer (i) is a dictionary (ii) has an 'answer' key.
 
-        >>> avm = AnswerValidatorMixin()
-        >>> avm._validate_answer_template_basic({'answer': 1})
-        >>> avm._validate_answer_template_basic([])
-        Traceback (most recent call last):
-        ...
-        edsl.exceptions.questions.QuestionAnswerValidationError: Answer must be a dictionary (got []).
-
         - E.g., both {'answer': 1} and {'answer': {'a': 1}, 'other_key'=[1,2,3]} are valid
         """
         if not isinstance(answer, dict):
@@ -56,14 +64,9 @@ class AnswerValidatorMixin:
     def _validate_answer_key_value_numeric(
         self, answer: dict[str, Any], key: str
     ) -> None:
-        """Check that the value
-
-
-        >>> avm._validate_answer_key_value_numeric({'answer': 1}, 'answer')
-        >>> avm._validate_answer_key_value_numeric({'answer': 'poo'}, 'answer')
-        Traceback (most recent call last):
-        ...
-        edsl.exceptions.questions.QuestionAnswerValidationError: Answer should be numerical (int or float). Got 'poo'
+        """Check that the value is numeric (int or float).
+        Can also deal with strings that contain commas and other characters.
+
         """
         value = answer.get(key)
         initial_value = value
@@ -128,15 +131,6 @@ class AnswerValidatorMixin:
 
         :param answer: Answer to validate
 
-        >>> avm = AnswerValidatorMixin()
-        >>> avm.question_options = ["a", "b", "c"]
-        >>> avm.min_selections = 1
-        >>> avm.max_selections = 2
-        >>> avm._validate_answer_checkbox({"answer": ["0", "1"]})
-        >>> avm._validate_answer_checkbox({"answer": []})
-        Traceback (most recent call last):
-        ...
-        edsl.exceptions.questions.QuestionAnswerValidationError:...
 
         Check that answer["answer"]:
         - has elements that are strings, bytes-like objects or real numbers evaluating to integers
@@ -287,6 +281,9 @@ class AnswerValidatorMixin:
 
 
 if __name__ == "__main__":
-
+    pass
+    # import doctest
+
+    # doctest.testmod(optionflags=doctest.ELLIPSIS)
 
-
+    # results = AnswerValidatorMixin().failing_job()