edsl 0.1.35__py3-none-any.whl → 0.1.36.dev1__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +38 -14
- edsl/agents/Invigilator.py +2 -1
- edsl/agents/PromptConstructor.py +6 -51
- edsl/jobs/Jobs.py +146 -48
- edsl/jobs/interviews/Interview.py +42 -15
- edsl/jobs/interviews/InterviewExceptionEntry.py +0 -3
- edsl/jobs/tasks/QuestionTaskCreator.py +1 -5
- edsl/language_models/LanguageModel.py +3 -0
- edsl/prompts/Prompt.py +24 -38
- edsl/prompts/__init__.py +1 -1
- edsl/questions/QuestionBasePromptsMixin.py +18 -18
- edsl/questions/descriptors.py +24 -24
- {edsl-0.1.35.dist-info → edsl-0.1.36.dev1.dist-info}/METADATA +1 -1
- {edsl-0.1.35.dist-info → edsl-0.1.36.dev1.dist-info}/RECORD +17 -34
- edsl/jobs/FailedQuestion.py +0 -78
- edsl/jobs/interviews/InterviewStatusMixin.py +0 -33
- edsl/jobs/tasks/task_management.py +0 -13
- edsl/prompts/QuestionInstructionsBase.py +0 -10
- edsl/prompts/library/agent_instructions.py +0 -38
- edsl/prompts/library/agent_persona.py +0 -21
- edsl/prompts/library/question_budget.py +0 -30
- edsl/prompts/library/question_checkbox.py +0 -38
- edsl/prompts/library/question_extract.py +0 -23
- edsl/prompts/library/question_freetext.py +0 -18
- edsl/prompts/library/question_linear_scale.py +0 -24
- edsl/prompts/library/question_list.py +0 -26
- edsl/prompts/library/question_multiple_choice.py +0 -54
- edsl/prompts/library/question_numerical.py +0 -35
- edsl/prompts/library/question_rank.py +0 -25
- edsl/prompts/prompt_config.py +0 -37
- edsl/prompts/registry.py +0 -202
- {edsl-0.1.35.dist-info → edsl-0.1.36.dev1.dist-info}/LICENSE +0 -0
- {edsl-0.1.35.dist-info → edsl-0.1.36.dev1.dist-info}/WHEEL +0 -0
edsl/prompts/library/question_linear_scale.py
DELETED
@@ -1,24 +0,0 @@
-"""Linear scale question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class LinearScale(QuestionInstuctionsBase):
-    """Linear scale question type."""
-
-    question_type = "linear_scale"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the code of the option (codes start at 0):
-        {"answer": <put answer code here>, "comment": "<put explanation here>"}
-        Only 1 option may be selected.
-        """
-    )
edsl/prompts/library/question_list.py
DELETED
@@ -1,26 +0,0 @@
-"""List question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class ListQuestion(QuestionInstuctionsBase):
-    """List question type."""
-
-    question_type = "list"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        {{question_text}}
-
-        Your response should be only a valid JSON in the following format:
-        {
-            "answer": [<comma-separated list of responsive words or phrases as independent strings>],
-            "comment": "<put comment here>"
-        }
-        {% if max_list_items is not none %}
-        The list must not contain more than {{ max_list_items }} items.
-        {% endif %}
-        """
-    )
edsl/prompts/library/question_multiple_choice.py
DELETED
@@ -1,54 +0,0 @@
-"""Multiple choice question type."""
-
-import textwrap
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class MultipleChoiceTurbo(QuestionInstuctionsBase):
-    """Multiple choice question type."""
-
-    question_type = "multiple_choice"
-    model = "gpt-3.5-turbo"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the number of the option:
-        {"answer": <put answer code here>, "comment": "<put explanation here>"}
-        Only 1 option may be selected.
-        """
-    )
-
-
-class MultipleChoice(QuestionInstuctionsBase):
-    """Multiple choice question type."""
-
-    question_type = "multiple_choice"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the number of the option:
-        {"answer": <put answer code here>, "comment": "<put explanation here>"}
-        Only 1 option may be selected.
-        """
-    )
-
-
-class LikertFive(MultipleChoice):
-    """Likert five question type."""
-
-    question_type = "likert_five"
-
-
-class YesNo(MultipleChoice):
-    """Yes/No question type."""
-
-    question_type = "yes_no"
edsl/prompts/library/question_numerical.py
DELETED
@@ -1,35 +0,0 @@
-"""Numerical question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class Numerical(QuestionInstuctionsBase):
-    """Numerical question type."""
-
-    question_type = "numerical"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked a question that requires a numerical response
-        in the form of an integer or decimal (e.g., -12, 0, 1, 2, 3.45, ...).
-        Your response must be in the following format:
-        {"answer": "<your numerical answer here>", "comment": "<your explanation here"}
-        You must only include an integer or decimal in the quoted "answer" part of your response.
-        Here is an example of a valid response:
-        {"answer": "100", "comment": "This is my explanation..."}
-        Here is an example of a response that is invalid because the "answer" includes words:
-        {"answer": "I don't know.", "comment": "This is my explanation..."}
-        If your response is equivalent to zero, your formatted response should look like this:
-        {"answer": "0", "comment": "This is my explanation..."}
-
-        You are being asked the following question: {{question_text}}
-        {% if min_value is not none %}
-        Minimum answer value: {{min_value}}
-        {% endif %}
-        {% if max_value is not none %}
-        Maximum answer value: {{max_value}}
-        {% endif %}
-        """
-    )
edsl/prompts/library/question_rank.py
DELETED
@@ -1,25 +0,0 @@
-"""Rank question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class Rank(QuestionInstuctionsBase):
-    """Rank question type."""
-
-    question_type = "rank"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting the numbers of the options in order of preference,
-        with the most preferred option first, and the least preferred option last:
-        {"answer": [<put comma-separated list of answer codes here>], "comment": "<put explanation here>"}
-        Exactly {{num_selections}} options must be selected.
-        """
-    )
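Note on the deleted template files above: each default_instructions string is a plain Jinja2 template, rendered against the question's fields before being sent to the model. A minimal sketch of that rendering, using the option loop shared by the templates above (the trim_blocks flag is our addition to keep the output tidy; edsl's own rendering path may differ):

from jinja2 import Template

# The option-loop pattern from the deleted question templates above.
template = Template(
    "You are being asked the following question: {{question_text}}\n"
    "The options are\n"
    "{% for option in question_options %}\n"
    "{{ loop.index0 }}: {{option}}\n"
    "{% endfor %}",
    trim_blocks=True,
)

print(
    template.render(
        question_text="How do you feel today?",
        question_options=["Bad", "OK", "Good"],
    )
)
# You are being asked the following question: How do you feel today?
# The options are
# 0: Bad
# 1: OK
# 2: Good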
edsl/prompts/prompt_config.py
DELETED
@@ -1,37 +0,0 @@
-"""This file contains the configuration for the prompt generation."""
-
-from enum import Enum
-
-NEGATIVE_INFINITY = float("-inf")
-
-
-class AttributeTypes(Enum):
-    """The types of attributes that a prompt can have."""
-
-    COMPONENT_TYPE = "component_type"
-    MODEL = "model"
-    QUESTION_TYPE = "question_type"
-
-
-class ComponentTypes(Enum):
-    """The types of attributes that a prompt can have."""
-
-    TEST = "test"
-    GENERIC = "generic"
-    QUESTION_DATA = "question_data"
-    QUESTION_INSTRUCTIONS = "question_instructions"
-    AGENT_INSTRUCTIONS = "agent_instructions"
-    AGENT_PERSONA = "agent_persona"
-    SURVEY_INSTRUCTIONS = "survey_instructions"
-    SURVEY_DATA = "survey_data"
-
-
-names_to_component_types = {v.value: v for k, v in ComponentTypes.__members__.items()}
-
-C2A = {
-    ComponentTypes.QUESTION_INSTRUCTIONS: [
-        AttributeTypes.QUESTION_TYPE,
-        AttributeTypes.MODEL,
-    ],
-    ComponentTypes.AGENT_INSTRUCTIONS: [AttributeTypes.MODEL],
-}
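For context: names_to_component_types mapped user-facing strings (e.g. "question_instructions") back to ComponentTypes members, and C2A listed which prompt attributes were relevant when matching prompts of a given component type. A minimal, self-contained sketch of that lookup, reconstructed from the deleted definitions above and trimmed to two members (illustration only, not edsl API):

from enum import Enum

class AttributeTypes(Enum):
    COMPONENT_TYPE = "component_type"
    MODEL = "model"
    QUESTION_TYPE = "question_type"

class ComponentTypes(Enum):
    QUESTION_INSTRUCTIONS = "question_instructions"
    AGENT_INSTRUCTIONS = "agent_instructions"

# Same shape as the deleted prompt_config.py, trimmed for brevity.
names_to_component_types = {v.value: v for v in ComponentTypes}
C2A = {
    ComponentTypes.QUESTION_INSTRUCTIONS: [
        AttributeTypes.QUESTION_TYPE,
        AttributeTypes.MODEL,
    ],
    ComponentTypes.AGENT_INSTRUCTIONS: [AttributeTypes.MODEL],
}

# Resolve a user-facing string to the attributes used for matching.
component = names_to_component_types["question_instructions"]
print([a.value for a in C2A[component]])  # ['question_type', 'model']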
edsl/prompts/registry.py
DELETED
@@ -1,202 +0,0 @@
-"""This module contains the RegisterPromptsMeta metaclass, which is used to register prompts."""
-
-import traceback
-from collections import defaultdict
-from typing import List, Any
-
-from abc import ABCMeta, abstractmethod
-
-from edsl.prompts.prompt_config import (
-    C2A,
-    names_to_component_types,
-    ComponentTypes,
-    NEGATIVE_INFINITY,
-)
-
-from edsl.enums import QuestionType  # , LanguageModelType
-
-from edsl.exceptions.prompts import (
-    PromptBadQuestionTypeError,
-    PromptBadLanguageModelTypeError,
-)
-
-
-class RegisterPromptsMeta(ABCMeta):
-    """Metaclass to register prompts."""
-
-    _registry = defaultdict(list)  # Initialize the registry as a dictionary
-    _prompts_by_component_type = defaultdict(list)
-    # _instances = {}
-
-    # def __new__(mcs, name, bases, dct):
-    #     if mcs not in mcs._instances:
-    #         mcs._instances[mcs] = super(RegisterPromptsMeta, mcs).__new__(
-    #             mcs, name, bases, dct
-    #         )
-    #     return mcs._instances[mcs]
-
-    def __init__(cls, name, bases, dct):
-        """
-        We can only have one prompt class per name.
-
-        Each prompt class must have a component type from the ComponentTypes enum.
-
-        Example usage:
-        >>> class Prompt1(PromptBase):
-        ...     component_type = ComponentTypes.TEST
-
-        >>> class Prompt1(PromptBase):
-        ...     component_type = ComponentTypes.TEST
-        Traceback (most recent call last):
-        ...
-        Exception: We already have a Prompt class named Prompt1.
-        """
-        super(RegisterPromptsMeta, cls).__init__(name, bases, dct)
-        # print(f"Current state of registry: {RegisterPromptsMeta._registry}")
-        # print(f"Registry called with {name}")
-        if "Base" in name or name == "Prompt":
-            # print("Exiting")
-            return None  # We don't want to register the base class
-
-        if name in RegisterPromptsMeta._registry:
-            if RegisterPromptsMeta._registry[name] != cls:
-                raise Exception(f"We already have a Prompt class named {name}.")
-            else:
-                # print("It's the same thing - it's fine.")
-                return None
-
-        RegisterPromptsMeta._registry[name] = cls
-        # print(f"Current registry: {RegisterPromptsMeta._registry}")
-        if (
-            component_type := getattr(cls, "component_type", None)
-        ) not in ComponentTypes:
-            raise Exception(f"Prompt {name} is not in the list of component types")
-
-        ## Make sure that the prompt has a question_type class attribute & it's valid
-        if component_type == ComponentTypes.QUESTION_INSTRUCTIONS:
-            if not hasattr(cls, "question_type"):
-                raise PromptBadQuestionTypeError(
-                    "A QuestionInstructions prompt must has a question_type value"
-                )
-            if not QuestionType.is_value_valid(cls.question_type):
-                acceptable_values = [item.value for item in QuestionType]
-                raise PromptBadQuestionTypeError(
-                    f"""
-                A Prompt's question_type must be one of {QuestionType} values, which are
-                currently {acceptable_values}. You passed {cls.question_type}."""
-                )
-
-        ## Make sure that if the prompt has a model class attribute, it's valid
-        # if hasattr(cls, "model"):
-        #     if not LanguageModelType.is_value_valid(cls.model):
-        #         acceptable_values = [item.value for item in LanguageModelType]
-        #         raise PromptBadLanguageModelTypeError(
-        #             f"""
-        #             A Prompt's model must be one of {LanguageModelType} values, which are
-        #             currently {acceptable_values}. You passed {cls.model}."""
-        #         )
-
-        key = cls._create_prompt_class_key(dct, component_type)
-        cls.data = key
-        RegisterPromptsMeta._prompts_by_component_type[component_type].append(cls)
-
-    @classmethod
-    def _create_prompt_class_key(cls, dct, component_type) -> tuple[tuple[str, Any]]:
-        """Create a key for the prompt class.
-
-        This is a helper function.
-        """
-        attributes = [attribute.value for attribute in C2A.get(component_type, [])]
-        cls_data = {key: value for key, value in dct.items() if key in attributes}
-        return tuple(cls_data.items())
-
-    @classmethod
-    def _get_classes_with_scores(cls, **kwargs) -> List[tuple[float, "PromptBase"]]:
-        """
-        Find matching prompts.
-
-        NB that _get_classes_with_scores returns a list of tuples.
-        The first element of the tuple is the score, and the second element is the prompt class.
-        There is a public-facing function called get_classes that returns only the prompt classes.
-
-        The kwargs are the attributes that we want to match on. E.g., supposed you
-        wanted a prompt with component_type = "question_instructions" and question_type = "multiple_choice".
-        You would run:
-
-        >>> get_classes(component_type="question_instructions", question_type="multiple_choice", model="gpt-4-1106-preview")
-        [<class '__main__.MultipleChoice'>, <class '__main__.MultipleChoiceTurbo'>]
-
-        In the above example, we have two prompts that match. Note that the order of the prompts is determined by the score and the regular MultipleChoice
-        is ranked higher because it matches on the model as well.
-
-        Scores are computed by the _score method. The score is the number of attributes that match, with their weights.
-        However, if a required attribute doesn't match, then the score is -inf and it can never be selected.
-
-        The function will throw an exception if you don't specify a component type that's in the ComponentTypes enum.
-
-        >>> get_classes(component_type="chicken_tenders", question_type="multiple_choice")
-        Traceback (most recent call last):
-        ...
-        Exception: You must specify a component type. It must be one of dict_keys([...])
-
-        >>> get_classes(component_type="generic")
-        []
-        """
-        component_type_string = kwargs.get("component_type", None)
-        component_type = names_to_component_types.get(component_type_string, None)
-
-        if component_type is None:
-            raise Exception(
-                f"You must specify a component type. It must be one of {names_to_component_types.keys()}"
-            )
-
-        try:
-            prompts = cls._prompts_by_component_type[component_type]
-        except KeyError:
-            raise Exception(f"No prompts for component type {component_type}")
-
-        with_scores = [(cls._score(kwargs, prompt), prompt) for prompt in prompts]
-        with_scores = sorted(with_scores, key=lambda x: -x[0])
-        # filter out the ones with -inf
-        matches_with_scores = cls._filter_out_non_matches(with_scores)
-        return matches_with_scores
-
-    @classmethod
-    def _filter_out_non_matches(cls, prompts_with_scores):
-        """Filter out the prompts that have a score of -inf."""
-        return [
-            (score, prompt)
-            for score, prompt in prompts_with_scores
-            if score > NEGATIVE_INFINITY
-        ]
-
-    @classmethod
-    def get_classes(cls, **kwargs):
-        """Return only the prompt classes and not the scores.
-
-        Public-facing function.
-        """
-        with_scores = cls._get_classes_with_scores(**kwargs)
-        return [prompt for _, prompt in with_scores]
-        # return with_scores
-
-    @classmethod
-    def _score(cls, kwargs, prompt):
-        """Score the prompt based on the attributes that match."""
-        required_list = ["question_type"]
-        score = 0
-        for key, value in kwargs.items():
-            if prompt_value := getattr(prompt, key, None) == value:
-                score += 1
-            else:
-                if key in required_list:
-                    score += NEGATIVE_INFINITY
-        return score
-
-    @classmethod
-    def get_registered_classes(cls):
-        """Return the registry."""
-        return cls._registry
-
-
-get_classes = RegisterPromptsMeta.get_classes
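Two details of the deleted registry are worth noting. First, in _score, `if prompt_value := getattr(prompt, key, None) == value:` binds prompt_value to the result of the comparison rather than the attribute, because `==` binds tighter than an unparenthesized `:=`; the branch behaves the same either way, since the walrus yields the boolean and prompt_value is never used. Second, the -inf sentinel guarantees that a prompt missing a required attribute can never be selected, no matter how many other attributes match. A standalone sketch of that scoring rule (reconstructed for illustration; the class below is a hypothetical stand-in, not part of edsl):

NEGATIVE_INFINITY = float("-inf")

def score(kwargs: dict, prompt_cls, required=("question_type",)) -> float:
    """Count matching attributes; any non-matching *required* attribute
    drags the total to -inf so the class is filtered out downstream."""
    total = 0.0
    for key, value in kwargs.items():
        if getattr(prompt_cls, key, None) == value:
            total += 1
        elif key in required:
            total += NEGATIVE_INFINITY
    return total

class FakeMultipleChoice:  # hypothetical stand-in for a registered prompt class
    question_type = "multiple_choice"
    model = "gpt-4-1106-preview"

print(score({"question_type": "multiple_choice", "model": "gpt-4-1106-preview"}, FakeMultipleChoice))  # 2.0
print(score({"question_type": "numerical", "model": "gpt-4-1106-preview"}, FakeMultipleChoice))  # -inf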
{edsl-0.1.35.dist-info → edsl-0.1.36.dev1.dist-info}/LICENSE
File without changes
{edsl-0.1.35.dist-info → edsl-0.1.36.dev1.dist-info}/WHEEL
File without changes