edsl 0.1.34.dev2__py3-none-any.whl → 0.1.36__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- edsl/Base.py +5 -0
- edsl/__init__.py +1 -0
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +37 -9
- edsl/agents/Invigilator.py +2 -1
- edsl/agents/InvigilatorBase.py +5 -1
- edsl/agents/PromptConstructor.py +39 -71
- edsl/conversation/Conversation.py +1 -1
- edsl/coop/PriceFetcher.py +14 -18
- edsl/coop/coop.py +42 -8
- edsl/data/RemoteCacheSync.py +97 -0
- edsl/exceptions/coop.py +8 -0
- edsl/inference_services/InferenceServiceABC.py +28 -0
- edsl/inference_services/InferenceServicesCollection.py +10 -4
- edsl/inference_services/OpenAIService.py +13 -2
- edsl/inference_services/models_available_cache.py +25 -1
- edsl/inference_services/registry.py +24 -16
- edsl/jobs/Jobs.py +327 -206
- edsl/jobs/interviews/Interview.py +66 -8
- edsl/jobs/interviews/InterviewExceptionCollection.py +9 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +31 -9
- edsl/jobs/runners/JobsRunnerAsyncio.py +8 -13
- edsl/jobs/tasks/QuestionTaskCreator.py +1 -5
- edsl/jobs/tasks/TaskHistory.py +23 -7
- edsl/language_models/LanguageModel.py +3 -0
- edsl/prompts/Prompt.py +24 -38
- edsl/prompts/__init__.py +1 -1
- edsl/questions/QuestionBasePromptsMixin.py +18 -18
- edsl/questions/QuestionFunctional.py +7 -3
- edsl/questions/descriptors.py +24 -24
- edsl/questions/templates/numerical/answering_instructions.jinja +0 -1
- edsl/results/Dataset.py +12 -0
- edsl/results/Result.py +2 -0
- edsl/results/Results.py +13 -1
- edsl/scenarios/FileStore.py +20 -5
- edsl/scenarios/Scenario.py +15 -1
- edsl/scenarios/ScenarioList.py +9 -0
- edsl/scenarios/__init__.py +2 -0
- edsl/surveys/Survey.py +3 -0
- edsl/surveys/base.py +4 -0
- edsl/surveys/instructions/Instruction.py +20 -3
- {edsl-0.1.34.dev2.dist-info → edsl-0.1.36.dist-info}/METADATA +1 -1
- {edsl-0.1.34.dev2.dist-info → edsl-0.1.36.dist-info}/RECORD +45 -61
- edsl/jobs/FailedQuestion.py +0 -78
- edsl/jobs/interviews/InterviewStatusMixin.py +0 -33
- edsl/jobs/tasks/task_management.py +0 -13
- edsl/prompts/QuestionInstructionsBase.py +0 -10
- edsl/prompts/library/agent_instructions.py +0 -38
- edsl/prompts/library/agent_persona.py +0 -21
- edsl/prompts/library/question_budget.py +0 -30
- edsl/prompts/library/question_checkbox.py +0 -38
- edsl/prompts/library/question_extract.py +0 -23
- edsl/prompts/library/question_freetext.py +0 -18
- edsl/prompts/library/question_linear_scale.py +0 -24
- edsl/prompts/library/question_list.py +0 -26
- edsl/prompts/library/question_multiple_choice.py +0 -54
- edsl/prompts/library/question_numerical.py +0 -35
- edsl/prompts/library/question_rank.py +0 -25
- edsl/prompts/prompt_config.py +0 -37
- edsl/prompts/registry.py +0 -202
- {edsl-0.1.34.dev2.dist-info → edsl-0.1.36.dist-info}/LICENSE +0 -0
- {edsl-0.1.34.dev2.dist-info → edsl-0.1.36.dist-info}/WHEEL +0 -0
@@ -56,13 +56,19 @@ class InferenceServicesCollection:
         self.services.append(service)

     def create_model_factory(self, model_name: str, service_name=None, index=None):
+        from edsl.inference_services.TestService import TestService
+
+        if model_name == "test":
+            return TestService.create_model(model_name)
+
+        if service_name:
+            for service in self.services:
+                if service_name == service._inference_service_:
+                    return service.create_model(model_name)
+
         for service in self.services:
             if model_name in self._get_service_available(service):
                 if service_name is None or service_name == service._inference_service_:
                     return service.create_model(model_name)

-        # if model_name == "test":
-        #     from edsl.language_models import LanguageModel
-        #     return LanguageModel(test = True)
-
         raise Exception(f"Model {model_name} not found in any of the services")
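The rewritten `create_model_factory` in `edsl/inference_services/InferenceServicesCollection.py` resolves a model in three steps: the `test` model short-circuits to `TestService`, an explicit `service_name` is honored before any availability lookup, and only then is each service's advertised model list scanned. A minimal sketch of that resolution order, with hypothetical stub services standing in for the real edsl classes:

```python
# Sketch of the three-step resolution order in create_model_factory.
# StubOpenAI and StubTest are illustrative stand-ins, not edsl classes.

class StubOpenAI:
    _inference_service_ = "openai"
    available_models = ["gpt-4o"]

    @classmethod
    def create_model(cls, name):
        return f"{cls._inference_service_}:{name}"

class StubTest:
    _inference_service_ = "test"
    available_models = ["test"]

    @classmethod
    def create_model(cls, name):
        return f"{cls._inference_service_}:{name}"

def create_model_factory(services, model_name, service_name=None):
    # 1. The "test" model bypasses the services list entirely.
    if model_name == "test":
        return StubTest.create_model(model_name)
    # 2. An explicit service_name wins over model discovery.
    if service_name:
        for service in services:
            if service_name == service._inference_service_:
                return service.create_model(model_name)
    # 3. Otherwise, pick the first service that advertises the model.
    for service in services:
        if model_name in service.available_models:
            if service_name is None or service_name == service._inference_service_:
                return service.create_model(model_name)
    raise Exception(f"Model {model_name} not found in any of the services")

print(create_model_factory([StubOpenAI, StubTest], "gpt-4o"))  # openai:gpt-4o
print(create_model_factory([StubOpenAI, StubTest], "test"))    # test:test
```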
@@ -188,12 +188,16 @@ class OpenAIService(InferenceServiceABC):
         else:
             content = user_prompt
         client = self.async_client()
+
         messages = [
             {"role": "system", "content": system_prompt},
             {"role": "user", "content": content},
         ]
-        if system_prompt == "" and self.omit_system_prompt_if_empty:
+        if (
+            system_prompt == "" and self.omit_system_prompt_if_empty
+        ) or "o1" in self.model:
             messages = messages[1:]
+
         params = {
             "model": self.model,
             "messages": messages,
@@ -205,7 +209,14 @@ class OpenAIService(InferenceServiceABC):
             "logprobs": self.logprobs,
             "top_logprobs": self.top_logprobs if self.logprobs else None,
         }
-        response = await client.chat.completions.create(**params)
+        if "o1" in self.model:
+            params.pop("max_tokens")
+            params["max_completion_tokens"] = self.max_tokens
+            params["temperature"] = 1
+        try:
+            response = await client.chat.completions.create(**params)
+        except Exception as e:
+            print(e)
         return response.model_dump()

     LLM.__name__ = "LanguageModel"
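Two behavior changes land in `OpenAIService`: the system message is now dropped when the system prompt is empty (or when the model is an o1-family model, which rejects system messages), and o1 models get `max_completion_tokens` in place of `max_tokens`, with temperature forced to the only supported value of 1. A hedged sketch of the same parameter rewrite outside edsl (`build_params` is a hypothetical helper, not edsl API):

```python
# Hypothetical helper mirroring the o1 parameter rewrite in the diff above:
# o1-family models take max_completion_tokens instead of max_tokens and
# only accept the default temperature of 1.

def build_params(model: str, messages: list, max_tokens: int, temperature: float) -> dict:
    params = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    if "o1" in model:
        params.pop("max_tokens")
        params["max_completion_tokens"] = max_tokens
        params["temperature"] = 1
    return params

msgs = [{"role": "user", "content": "Hello"}]
print(build_params("gpt-4o", msgs, 256, 0.5)["max_tokens"])              # 256
print(build_params("o1-mini", msgs, 256, 0.5)["max_completion_tokens"])  # 256
```

Note that the substring test `"o1" in self.model` matches any model name containing `o1`, not only the o1 family.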
@@ -65,7 +65,31 @@ models_available = {
         "meta-llama/Meta-Llama-3-70B-Instruct",
         "openchat/openchat_3.5",
     ],
-    "google": [
+    "google": [
+        "gemini-1.0-pro",
+        "gemini-1.0-pro-001",
+        "gemini-1.0-pro-latest",
+        "gemini-1.0-pro-vision-latest",
+        "gemini-1.5-flash",
+        "gemini-1.5-flash-001",
+        "gemini-1.5-flash-001-tuning",
+        "gemini-1.5-flash-002",
+        "gemini-1.5-flash-8b",
+        "gemini-1.5-flash-8b-001",
+        "gemini-1.5-flash-8b-exp-0827",
+        "gemini-1.5-flash-8b-exp-0924",
+        "gemini-1.5-flash-8b-latest",
+        "gemini-1.5-flash-exp-0827",
+        "gemini-1.5-flash-latest",
+        "gemini-1.5-pro",
+        "gemini-1.5-pro-001",
+        "gemini-1.5-pro-002",
+        "gemini-1.5-pro-exp-0801",
+        "gemini-1.5-pro-exp-0827",
+        "gemini-1.5-pro-latest",
+        "gemini-pro",
+        "gemini-pro-vision",
+    ],
     "bedrock": [
         "amazon.titan-tg1-large",
         "amazon.titan-text-lite-v1",
@@ -11,21 +11,29 @@ from edsl.inference_services.AwsBedrock import AwsBedrockService
 from edsl.inference_services.AzureAI import AzureAIService
 from edsl.inference_services.OllamaService import OllamaService
 from edsl.inference_services.TestService import TestService
-from edsl.inference_services.MistralAIService import MistralAIService
 from edsl.inference_services.TogetherAIService import TogetherAIService

-default = InferenceServicesCollection(
-    [
-        OpenAIService,
-        AnthropicService,
-        DeepInfraService,
-        GoogleService,
-        GroqService,
-        AwsBedrockService,
-        AzureAIService,
-        OllamaService,
-        TestService,
-        MistralAIService,
-        TogetherAIService,
-    ]
-)
+try:
+    from edsl.inference_services.MistralAIService import MistralAIService
+
+    mistral_available = True
+except Exception as e:
+    mistral_available = False
+
+services = [
+    OpenAIService,
+    AnthropicService,
+    DeepInfraService,
+    GoogleService,
+    GroqService,
+    AwsBedrockService,
+    AzureAIService,
+    OllamaService,
+    TestService,
+    TogetherAIService,
+]
+
+if mistral_available:
+    services.append(MistralAIService)
+
+default = InferenceServicesCollection(services)