edsl 0.1.35__py3-none-any.whl → 0.1.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. edsl/Base.py +5 -0
  2. edsl/__init__.py +1 -0
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +37 -9
  5. edsl/agents/Invigilator.py +2 -1
  6. edsl/agents/InvigilatorBase.py +5 -1
  7. edsl/agents/PromptConstructor.py +31 -67
  8. edsl/conversation/Conversation.py +1 -1
  9. edsl/coop/PriceFetcher.py +14 -18
  10. edsl/coop/coop.py +42 -8
  11. edsl/data/RemoteCacheSync.py +97 -0
  12. edsl/exceptions/coop.py +8 -0
  13. edsl/inference_services/InferenceServiceABC.py +28 -0
  14. edsl/inference_services/InferenceServicesCollection.py +10 -4
  15. edsl/inference_services/models_available_cache.py +25 -1
  16. edsl/inference_services/registry.py +24 -16
  17. edsl/jobs/Jobs.py +327 -206
  18. edsl/jobs/interviews/Interview.py +65 -10
  19. edsl/jobs/interviews/InterviewExceptionCollection.py +9 -0
  20. edsl/jobs/interviews/InterviewExceptionEntry.py +31 -9
  21. edsl/jobs/runners/JobsRunnerAsyncio.py +8 -13
  22. edsl/jobs/tasks/QuestionTaskCreator.py +1 -5
  23. edsl/jobs/tasks/TaskHistory.py +23 -7
  24. edsl/language_models/LanguageModel.py +3 -0
  25. edsl/prompts/Prompt.py +24 -38
  26. edsl/prompts/__init__.py +1 -1
  27. edsl/questions/QuestionBasePromptsMixin.py +18 -18
  28. edsl/questions/QuestionFunctional.py +7 -3
  29. edsl/questions/descriptors.py +24 -24
  30. edsl/results/Dataset.py +12 -0
  31. edsl/results/Result.py +2 -0
  32. edsl/results/Results.py +13 -1
  33. edsl/scenarios/FileStore.py +20 -5
  34. edsl/scenarios/Scenario.py +15 -1
  35. edsl/scenarios/__init__.py +2 -0
  36. edsl/surveys/Survey.py +3 -0
  37. edsl/surveys/instructions/Instruction.py +20 -3
  38. {edsl-0.1.35.dist-info → edsl-0.1.36.dist-info}/METADATA +1 -1
  39. {edsl-0.1.35.dist-info → edsl-0.1.36.dist-info}/RECORD +41 -57
  40. edsl/jobs/FailedQuestion.py +0 -78
  41. edsl/jobs/interviews/InterviewStatusMixin.py +0 -33
  42. edsl/jobs/tasks/task_management.py +0 -13
  43. edsl/prompts/QuestionInstructionsBase.py +0 -10
  44. edsl/prompts/library/agent_instructions.py +0 -38
  45. edsl/prompts/library/agent_persona.py +0 -21
  46. edsl/prompts/library/question_budget.py +0 -30
  47. edsl/prompts/library/question_checkbox.py +0 -38
  48. edsl/prompts/library/question_extract.py +0 -23
  49. edsl/prompts/library/question_freetext.py +0 -18
  50. edsl/prompts/library/question_linear_scale.py +0 -24
  51. edsl/prompts/library/question_list.py +0 -26
  52. edsl/prompts/library/question_multiple_choice.py +0 -54
  53. edsl/prompts/library/question_numerical.py +0 -35
  54. edsl/prompts/library/question_rank.py +0 -25
  55. edsl/prompts/prompt_config.py +0 -37
  56. edsl/prompts/registry.py +0 -202
  57. {edsl-0.1.35.dist-info → edsl-0.1.36.dist-info}/LICENSE +0 -0
  58. {edsl-0.1.35.dist-info → edsl-0.1.36.dist-info}/WHEEL +0 -0
edsl/prompts/library/agent_instructions.py DELETED
@@ -1,38 +0,0 @@
-"""Agent instructions for a human agent."""
-
-import textwrap
-
-from edsl.prompts.Prompt import PromptBase
-from edsl.prompts.prompt_config import ComponentTypes
-
-# from edsl.enums import LanguageModelType
-
-
-class AgentInstruction(PromptBase):
-    """Agent instructions for a human agent."""
-
-    # model = LanguageModelType.GPT_3_5_Turbo.value
-    # from edsl import Model
-    # model = Model().model
-    model = "gpt-3.5-turbo"
-    component_type = ComponentTypes.AGENT_INSTRUCTIONS
-    default_instructions = textwrap.dedent(
-        """\
-        You are playing the role of a human answering survey questions.
-        Do not break character.
-        """
-    )
-
-
-# class AgentInstructionLlama(PromptBase):
-#     """Agent instructions for a human agent."""
-
-#     model = LanguageModelType.LLAMA_2_70B_CHAT_HF.value
-#     component_type = ComponentTypes.AGENT_INSTRUCTIONS
-#     default_instructions = textwrap.dedent(
-#         """\
-#         You are playing the role of a human answering questions.
-#         Do not break character.
-#         Only respond in JSON, with one answer formatted as specified.
-#         """
-#     )
edsl/prompts/library/agent_persona.py DELETED
@@ -1,21 +0,0 @@
-"""Agent persona for a human agent."""
-
-import textwrap
-
-from edsl.prompts.Prompt import PromptBase
-from edsl.prompts.prompt_config import ComponentTypes
-
-# from edsl.enums import LanguageModelType
-
-
-class AgentPersona(PromptBase):
-    """Agent persona for a human agent."""
-
-    model = "gpt-4-1106-preview"
-    component_type = ComponentTypes.AGENT_PERSONA
-    default_instructions = textwrap.dedent(
-        """\
-        You are an agent with the following persona:
-        {{ traits }}
-        """
-    )
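Note: these deleted prompt classes held Jinja-style templates (the {{ traits }} and {% for %} syntax above) that were rendered with agent or question data at prompt-construction time. As a minimal sketch of how such a template resolves, assuming only the jinja2 package and an illustrative traits value (not an edsl API call):

    from jinja2 import Template

    # Template text taken verbatim from AgentPersona.default_instructions above;
    # the traits dict is a hypothetical stand-in.
    persona = Template("You are an agent with the following persona:\n{{ traits }}")
    print(persona.render(traits={"age": 30, "occupation": "nurse"}))
    # You are an agent with the following persona:
    # {'age': 30, 'occupation': 'nurse'}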
edsl/prompts/library/question_budget.py DELETED
@@ -1,30 +0,0 @@
-"""Budget question instructions."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class Budget(QuestionInstuctionsBase):
-    """Budget question instructions."""
-
-    question_type = "budget"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted as follows, with a dictionary for your "answer"
-        where the keys are the option numbers and the values are the amounts you want
-        to allocate to the options, and the sum of the values is {{budget_sum}}:
-        {"answer": {<put dict of option numbers and allocation amounts here>},
-        "comment": "<put explanation here>"}
-        Example response for a budget of 100 and 4 options:
-        {"answer": {"0": 25, "1": 25, "2": 25, "3": 25},
-        "comment": "I allocated 25 to each option."}
-        There must be an allocation listed for each item (including 0).
-        """
-    )
edsl/prompts/library/question_checkbox.py DELETED
@@ -1,38 +0,0 @@
-"""Checkbox question type."""
-
-import textwrap
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class CheckBox(QuestionInstuctionsBase):
-    """Checkbox question type."""
-
-    question_type = "checkbox"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the number of the option:
-        {"answer": [<put comma-separated list of answer codes here>], "comment": "<put explanation here>"}
-        {% if min_selections != None and max_selections != None and min_selections == max_selections %}
-        You must select exactly {{min_selections}} options.
-        {% elif min_selections != None and max_selections != None %}
-        Minimum number of options that must be selected: {{min_selections}}.
-        Maximum number of options that must be selected: {{max_selections}}.
-        {% elif min_selections != None %}
-        Minimum number of options that must be selected: {{min_selections}}.
-        {% elif max_selections != None %}
-        Maximum number of options that must be selected: {{max_selections}}.
-        {% endif %}
-        """
-    )
-
-
-class TopK(CheckBox):
-    """Top K question type."""
-
-    question_type = "top_k"
edsl/prompts/library/question_extract.py DELETED
@@ -1,23 +0,0 @@
-"""Extract question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class Extract(QuestionInstuctionsBase):
-    """Extract question type."""
-
-    question_type = "extract"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
        """\
-        You are given the following input: "{{question_text}}".
-        Create an ANSWER should be formatted like this: "{{ answer_template }}",
-        and it should have the same keys but values extracted from the input.
-        If the value of a key is not present in the input, fill with "null".
-        Return a valid JSON formatted like this:
-        {"answer": <put your ANSWER here>}
-        ONLY RETURN THE JSON, AND NOTHING ELSE.
-        """
-    )
edsl/prompts/library/question_freetext.py DELETED
@@ -1,18 +0,0 @@
-"""Free text question type."""
-
-import textwrap
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class FreeText(QuestionInstuctionsBase):
-    """Free text question type."""
-
-    question_type = "free_text"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        Return a valid JSON formatted like this:
-        {"answer": "<put free text answer here>"}
-        """
-    )
edsl/prompts/library/question_linear_scale.py DELETED
@@ -1,24 +0,0 @@
-"""Linear scale question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class LinearScale(QuestionInstuctionsBase):
-    """Linear scale question type."""
-
-    question_type = "linear_scale"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the code of the option (codes start at 0):
-        {"answer": <put answer code here>, "comment": "<put explanation here>"}
-        Only 1 option may be selected.
-        """
-    )
edsl/prompts/library/question_list.py DELETED
@@ -1,26 +0,0 @@
-"""List question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class ListQuestion(QuestionInstuctionsBase):
-    """List question type."""
-
-    question_type = "list"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        {{question_text}}
-
-        Your response should be only a valid JSON in the following format:
-        {
-            "answer": [<comma-separated list of responsive words or phrases as independent strings>],
-            "comment": "<put comment here>"
-        }
-        {% if max_list_items is not none %}
-        The list must not contain more than {{ max_list_items }} items.
-        {% endif %}
-        """
-    )
edsl/prompts/library/question_multiple_choice.py DELETED
@@ -1,54 +0,0 @@
-"""Multiple choice question type."""
-
-import textwrap
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class MultipleChoiceTurbo(QuestionInstuctionsBase):
-    """Multiple choice question type."""
-
-    question_type = "multiple_choice"
-    model = "gpt-3.5-turbo"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the number of the option:
-        {"answer": <put answer code here>, "comment": "<put explanation here>"}
-        Only 1 option may be selected.
-        """
-    )
-
-
-class MultipleChoice(QuestionInstuctionsBase):
-    """Multiple choice question type."""
-
-    question_type = "multiple_choice"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting only the number of the option:
-        {"answer": <put answer code here>, "comment": "<put explanation here>"}
-        Only 1 option may be selected.
-        """
-    )
-
-
-class LikertFive(MultipleChoice):
-    """Likert five question type."""
-
-    question_type = "likert_five"
-
-
-class YesNo(MultipleChoice):
-    """Yes/No question type."""
-
-    question_type = "yes_no"
edsl/prompts/library/question_numerical.py DELETED
@@ -1,35 +0,0 @@
-"""Numerical question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class Numerical(QuestionInstuctionsBase):
-    """Numerical question type."""
-
-    question_type = "numerical"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked a question that requires a numerical response
-        in the form of an integer or decimal (e.g., -12, 0, 1, 2, 3.45, ...).
-        Your response must be in the following format:
-        {"answer": "<your numerical answer here>", "comment": "<your explanation here"}
-        You must only include an integer or decimal in the quoted "answer" part of your response.
-        Here is an example of a valid response:
-        {"answer": "100", "comment": "This is my explanation..."}
-        Here is an example of a response that is invalid because the "answer" includes words:
-        {"answer": "I don't know.", "comment": "This is my explanation..."}
-        If your response is equivalent to zero, your formatted response should look like this:
-        {"answer": "0", "comment": "This is my explanation..."}
-
-        You are being asked the following question: {{question_text}}
-        {% if min_value is not none %}
-        Minimum answer value: {{min_value}}
-        {% endif %}
-        {% if max_value is not none %}
-        Maximum answer value: {{max_value}}
-        {% endif %}
-        """
-    )
edsl/prompts/library/question_rank.py DELETED
@@ -1,25 +0,0 @@
-"""Rank question type."""
-
-import textwrap
-
-from edsl.prompts.QuestionInstructionsBase import QuestionInstuctionsBase
-
-
-class Rank(QuestionInstuctionsBase):
-    """Rank question type."""
-
-    question_type = "rank"
-    model = "gpt-4-1106-preview"
-    default_instructions = textwrap.dedent(
-        """\
-        You are being asked the following question: {{question_text}}
-        The options are
-        {% for option in question_options %}
-        {{ loop.index0 }}: {{option}}
-        {% endfor %}
-        Return a valid JSON formatted like this, selecting the numbers of the options in order of preference,
-        with the most preferred option first, and the least preferred option last:
-        {"answer": [<put comma-separated list of answer codes here>], "comment": "<put explanation here>"}
-        Exactly {{num_selections}} options must be selected.
-        """
-    )
edsl/prompts/prompt_config.py DELETED
@@ -1,37 +0,0 @@
-"""This file contains the configuration for the prompt generation."""
-
-from enum import Enum
-
-NEGATIVE_INFINITY = float("-inf")
-
-
-class AttributeTypes(Enum):
-    """The types of attributes that a prompt can have."""
-
-    COMPONENT_TYPE = "component_type"
-    MODEL = "model"
-    QUESTION_TYPE = "question_type"
-
-
-class ComponentTypes(Enum):
-    """The types of attributes that a prompt can have."""
-
-    TEST = "test"
-    GENERIC = "generic"
-    QUESTION_DATA = "question_data"
-    QUESTION_INSTRUCTIONS = "question_instructions"
-    AGENT_INSTRUCTIONS = "agent_instructions"
-    AGENT_PERSONA = "agent_persona"
-    SURVEY_INSTRUCTIONS = "survey_instructions"
-    SURVEY_DATA = "survey_data"
-
-
-names_to_component_types = {v.value: v for k, v in ComponentTypes.__members__.items()}
-
-C2A = {
-    ComponentTypes.QUESTION_INSTRUCTIONS: [
-        AttributeTypes.QUESTION_TYPE,
-        AttributeTypes.MODEL,
-    ],
-    ComponentTypes.AGENT_INSTRUCTIONS: [AttributeTypes.MODEL],
-}
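Note: prompt_config.py existed to feed the registry below. names_to_component_types maps a component-type string back to its enum member, and C2A lists which class attributes matter when matching prompts of that component type. A minimal, self-contained sketch of that lookup, reusing the definitions above (nothing here is the edsl API; the dict comprehension is an equivalent spelling of the one in the deleted file):

    from enum import Enum

    class AttributeTypes(Enum):
        COMPONENT_TYPE = "component_type"
        MODEL = "model"
        QUESTION_TYPE = "question_type"

    class ComponentTypes(Enum):
        QUESTION_INSTRUCTIONS = "question_instructions"
        AGENT_INSTRUCTIONS = "agent_instructions"

    # Map enum values back to members, as in the deleted file.
    names_to_component_types = {v.value: v for v in ComponentTypes}

    C2A = {
        ComponentTypes.QUESTION_INSTRUCTIONS: [
            AttributeTypes.QUESTION_TYPE,
            AttributeTypes.MODEL,
        ],
        ComponentTypes.AGENT_INSTRUCTIONS: [AttributeTypes.MODEL],
    }

    # Which attributes identify a "question_instructions" prompt class?
    component = names_to_component_types["question_instructions"]
    print([a.value for a in C2A[component]])  # ['question_type', 'model']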
edsl/prompts/registry.py DELETED
@@ -1,202 +0,0 @@
1
- """This module contains the RegisterPromptsMeta metaclass, which is used to register prompts."""
2
-
3
- import traceback
4
- from collections import defaultdict
5
- from typing import List, Any
6
-
7
- from abc import ABCMeta, abstractmethod
8
-
9
- from edsl.prompts.prompt_config import (
10
- C2A,
11
- names_to_component_types,
12
- ComponentTypes,
13
- NEGATIVE_INFINITY,
14
- )
15
-
16
- from edsl.enums import QuestionType # , LanguageModelType
17
-
18
- from edsl.exceptions.prompts import (
19
- PromptBadQuestionTypeError,
20
- PromptBadLanguageModelTypeError,
21
- )
22
-
23
-
24
- class RegisterPromptsMeta(ABCMeta):
25
- """Metaclass to register prompts."""
26
-
27
- _registry = defaultdict(list) # Initialize the registry as a dictionary
28
- _prompts_by_component_type = defaultdict(list)
29
- # _instances = {}
30
-
31
- # def __new__(mcs, name, bases, dct):
32
- # if mcs not in mcs._instances:
33
- # mcs._instances[mcs] = super(RegisterPromptsMeta, mcs).__new__(
34
- # mcs, name, bases, dct
35
- # )
36
- # return mcs._instances[mcs]
37
-
38
- def __init__(cls, name, bases, dct):
39
- """
40
- We can only have one prompt class per name.
41
-
42
- Each prompt class must have a component type from the ComponentTypes enum.
43
-
44
- Example usage:
45
- >>> class Prompt1(PromptBase):
46
- ... component_type = ComponentTypes.TEST
47
-
48
- >>> class Prompt1(PromptBase):
49
- ... component_type = ComponentTypes.TEST
50
- Traceback (most recent call last):
51
- ...
52
- Exception: We already have a Prompt class named Prompt1.
53
- """
54
- super(RegisterPromptsMeta, cls).__init__(name, bases, dct)
55
- # print(f"Current state of registry: {RegisterPromptsMeta._registry}")
56
- # print(f"Registry called with {name}")
57
- if "Base" in name or name == "Prompt":
58
- # print("Exiting")
59
- return None # We don't want to register the base class
60
-
61
- if name in RegisterPromptsMeta._registry:
62
- if RegisterPromptsMeta._registry[name] != cls:
63
- raise Exception(f"We already have a Prompt class named {name}.")
64
- else:
65
- # print("It's the same thing - it's fine.")
66
- return None
67
-
68
- RegisterPromptsMeta._registry[name] = cls
69
- # print(f"Current registry: {RegisterPromptsMeta._registry}")
70
- if (
71
- component_type := getattr(cls, "component_type", None)
72
- ) not in ComponentTypes:
73
- raise Exception(f"Prompt {name} is not in the list of component types")
74
-
75
- ## Make sure that the prompt has a question_type class attribute & it's valid
76
- if component_type == ComponentTypes.QUESTION_INSTRUCTIONS:
77
- if not hasattr(cls, "question_type"):
78
- raise PromptBadQuestionTypeError(
79
- "A QuestionInstructions prompt must has a question_type value"
80
- )
81
- if not QuestionType.is_value_valid(cls.question_type):
82
- acceptable_values = [item.value for item in QuestionType]
83
- raise PromptBadQuestionTypeError(
84
- f"""
85
- A Prompt's question_type must be one of {QuestionType} values, which are
86
- currently {acceptable_values}. You passed {cls.question_type}."""
87
- )
88
-
89
- ## Make sure that if the prompt has a model class attribute, it's valid
90
- # if hasattr(cls, "model"):
91
- # if not LanguageModelType.is_value_valid(cls.model):
92
- # acceptable_values = [item.value for item in LanguageModelType]
93
- # raise PromptBadLanguageModelTypeError(
94
- # f"""
95
- # A Prompt's model must be one of {LanguageModelType} values, which are
96
- # currently {acceptable_values}. You passed {cls.model}."""
97
- # )
98
-
99
- key = cls._create_prompt_class_key(dct, component_type)
100
- cls.data = key
101
- RegisterPromptsMeta._prompts_by_component_type[component_type].append(cls)
102
-
103
- @classmethod
104
- def _create_prompt_class_key(cls, dct, component_type) -> tuple[tuple[str, Any]]:
105
- """Create a key for the prompt class.
106
-
107
- This is a helper function.
108
- """
109
- attributes = [attribute.value for attribute in C2A.get(component_type, [])]
110
- cls_data = {key: value for key, value in dct.items() if key in attributes}
111
- return tuple(cls_data.items())
112
-
113
- @classmethod
114
- def _get_classes_with_scores(cls, **kwargs) -> List[tuple[float, "PromptBase"]]:
115
- """
116
- Find matching prompts.
117
-
118
- NB that _get_classes_with_scores returns a list of tuples.
119
- The first element of the tuple is the score, and the second element is the prompt class.
120
- There is a public-facing function called get_classes that returns only the prompt classes.
121
-
122
- The kwargs are the attributes that we want to match on. E.g., supposed you
123
- wanted a prompt with component_type = "question_instructions" and question_type = "multiple_choice".
124
- You would run:
125
-
126
- >>> get_classes(component_type="question_instructions", question_type="multiple_choice", model="gpt-4-1106-preview")
127
- [<class '__main__.MultipleChoice'>, <class '__main__.MultipleChoiceTurbo'>]
128
-
129
- In the above example, we have two prompts that match. Note that the order of the prompts is determined by the score and the regular MultipleChoice
130
- is ranked higher because it matches on the model as well.
131
-
132
- Scores are computed by the _score method. The score is the number of attributes that match, with their weights.
133
- However, if a required attribute doesn't match, then the score is -inf and it can never be selected.
134
-
135
- The function will throw an exception if you don't specify a component type that's in the ComponentTypes enum.
136
-
137
- >>> get_classes(component_type="chicken_tenders", question_type="multiple_choice")
138
- Traceback (most recent call last):
139
- ...
140
- Exception: You must specify a component type. It must be one of dict_keys([...])
141
-
142
- >>> get_classes(component_type="generic")
143
- []
144
- """
145
- component_type_string = kwargs.get("component_type", None)
146
- component_type = names_to_component_types.get(component_type_string, None)
147
-
148
- if component_type is None:
149
- raise Exception(
150
- f"You must specify a component type. It must be one of {names_to_component_types.keys()}"
151
- )
152
-
153
- try:
154
- prompts = cls._prompts_by_component_type[component_type]
155
- except KeyError:
156
- raise Exception(f"No prompts for component type {component_type}")
157
-
158
- with_scores = [(cls._score(kwargs, prompt), prompt) for prompt in prompts]
159
- with_scores = sorted(with_scores, key=lambda x: -x[0])
160
- # filter out the ones with -inf
161
- matches_with_scores = cls._filter_out_non_matches(with_scores)
162
- return matches_with_scores
163
-
164
- @classmethod
165
- def _filter_out_non_matches(cls, prompts_with_scores):
166
- """Filter out the prompts that have a score of -inf."""
167
- return [
168
- (score, prompt)
169
- for score, prompt in prompts_with_scores
170
- if score > NEGATIVE_INFINITY
171
- ]
172
-
173
- @classmethod
174
- def get_classes(cls, **kwargs):
175
- """Return only the prompt classes and not the scores.
176
-
177
- Public-facing function.
178
- """
179
- with_scores = cls._get_classes_with_scores(**kwargs)
180
- return [prompt for _, prompt in with_scores]
181
- # return with_scores
182
-
183
- @classmethod
184
- def _score(cls, kwargs, prompt):
185
- """Score the prompt based on the attributes that match."""
186
- required_list = ["question_type"]
187
- score = 0
188
- for key, value in kwargs.items():
189
- if prompt_value := getattr(prompt, key, None) == value:
190
- score += 1
191
- else:
192
- if key in required_list:
193
- score += NEGATIVE_INFINITY
194
- return score
195
-
196
- @classmethod
197
- def get_registered_classes(cls):
198
- """Return the registry."""
199
- return cls._registry
200
-
201
-
202
- get_classes = RegisterPromptsMeta.get_classes
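Note: the core of the deleted selection logic is the _score/_get_classes_with_scores pair: +1 per keyword attribute that matches the candidate class, -inf (disqualification) when a required attribute such as question_type mismatches, then sort descending. Below is a condensed, self-contained sketch of that behavior using stand-in classes rather than the real prompt hierarchy. It also sidesteps the walrus expression in _score, where prompt_value ends up bound to the comparison's boolean rather than the attribute (the branch still tests the intended condition):

    NEGATIVE_INFINITY = float("-inf")
    REQUIRED = ["question_type"]  # a mismatch here disqualifies the candidate

    def score(query: dict, prompt_cls) -> float:
        """+1 per matching class attribute; -inf if a required attribute mismatches."""
        total = 0.0
        for key, value in query.items():
            if getattr(prompt_cls, key, None) == value:
                total += 1
            elif key in REQUIRED:
                total += NEGATIVE_INFINITY
        return total

    # Stand-ins mirroring the deleted question_multiple_choice.py classes.
    class MultipleChoice:
        question_type = "multiple_choice"
        model = "gpt-4-1106-preview"

    class MultipleChoiceTurbo:
        question_type = "multiple_choice"
        model = "gpt-3.5-turbo"

    query = {"question_type": "multiple_choice", "model": "gpt-4-1106-preview"}
    ranked = sorted([MultipleChoiceTurbo, MultipleChoice], key=lambda c: -score(query, c))
    print([c.__name__ for c in ranked])
    # ['MultipleChoice', 'MultipleChoiceTurbo'] -- matching the doctest in
    # _get_classes_with_scores: the model match breaks the tie.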