edsl 0.1.27.dev2__py3-none-any.whl → 0.1.28__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- edsl/Base.py +99 -22
- edsl/BaseDiff.py +260 -0
- edsl/__init__.py +4 -0
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +26 -5
- edsl/agents/AgentList.py +62 -7
- edsl/agents/Invigilator.py +4 -9
- edsl/agents/InvigilatorBase.py +5 -5
- edsl/agents/descriptors.py +3 -1
- edsl/conjure/AgentConstructionMixin.py +152 -0
- edsl/conjure/Conjure.py +56 -0
- edsl/conjure/InputData.py +628 -0
- edsl/conjure/InputDataCSV.py +48 -0
- edsl/conjure/InputDataMixinQuestionStats.py +182 -0
- edsl/conjure/InputDataPyRead.py +91 -0
- edsl/conjure/InputDataSPSS.py +8 -0
- edsl/conjure/InputDataStata.py +8 -0
- edsl/conjure/QuestionOptionMixin.py +76 -0
- edsl/conjure/QuestionTypeMixin.py +23 -0
- edsl/conjure/RawQuestion.py +65 -0
- edsl/conjure/SurveyResponses.py +7 -0
- edsl/conjure/__init__.py +9 -4
- edsl/conjure/examples/placeholder.txt +0 -0
- edsl/conjure/naming_utilities.py +263 -0
- edsl/conjure/utilities.py +165 -28
- edsl/conversation/Conversation.py +238 -0
- edsl/conversation/car_buying.py +58 -0
- edsl/conversation/mug_negotiation.py +81 -0
- edsl/conversation/next_speaker_utilities.py +93 -0
- edsl/coop/coop.py +191 -12
- edsl/coop/utils.py +20 -2
- edsl/data/Cache.py +55 -17
- edsl/data/CacheHandler.py +10 -9
- edsl/inference_services/AnthropicService.py +1 -0
- edsl/inference_services/DeepInfraService.py +20 -13
- edsl/inference_services/GoogleService.py +7 -1
- edsl/inference_services/InferenceServicesCollection.py +33 -7
- edsl/inference_services/OpenAIService.py +17 -10
- edsl/inference_services/models_available_cache.py +69 -0
- edsl/inference_services/rate_limits_cache.py +25 -0
- edsl/inference_services/write_available.py +10 -0
- edsl/jobs/Jobs.py +240 -36
- edsl/jobs/buckets/BucketCollection.py +9 -3
- edsl/jobs/interviews/Interview.py +4 -1
- edsl/jobs/interviews/InterviewTaskBuildingMixin.py +24 -10
- edsl/jobs/interviews/retry_management.py +4 -4
- edsl/jobs/runners/JobsRunnerAsyncio.py +87 -45
- edsl/jobs/runners/JobsRunnerStatusData.py +3 -3
- edsl/jobs/tasks/QuestionTaskCreator.py +4 -2
- edsl/language_models/LanguageModel.py +37 -44
- edsl/language_models/ModelList.py +96 -0
- edsl/language_models/registry.py +14 -0
- edsl/language_models/repair.py +95 -24
- edsl/notebooks/Notebook.py +119 -31
- edsl/questions/QuestionBase.py +109 -12
- edsl/questions/descriptors.py +5 -2
- edsl/questions/question_registry.py +7 -0
- edsl/results/Result.py +20 -8
- edsl/results/Results.py +85 -11
- edsl/results/ResultsDBMixin.py +3 -6
- edsl/results/ResultsExportMixin.py +47 -16
- edsl/results/ResultsToolsMixin.py +5 -5
- edsl/scenarios/Scenario.py +59 -5
- edsl/scenarios/ScenarioList.py +97 -40
- edsl/study/ObjectEntry.py +97 -0
- edsl/study/ProofOfWork.py +110 -0
- edsl/study/SnapShot.py +77 -0
- edsl/study/Study.py +491 -0
- edsl/study/__init__.py +2 -0
- edsl/surveys/Survey.py +79 -31
- edsl/surveys/SurveyExportMixin.py +21 -3
- edsl/utilities/__init__.py +1 -0
- edsl/utilities/gcp_bucket/__init__.py +0 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
- edsl/utilities/gcp_bucket/simple_example.py +9 -0
- edsl/utilities/interface.py +24 -28
- edsl/utilities/repair_functions.py +28 -0
- edsl/utilities/utilities.py +57 -2
- {edsl-0.1.27.dev2.dist-info → edsl-0.1.28.dist-info}/METADATA +43 -17
- {edsl-0.1.27.dev2.dist-info → edsl-0.1.28.dist-info}/RECORD +83 -55
- edsl-0.1.28.dist-info/entry_points.txt +3 -0
- edsl/conjure/RawResponseColumn.py +0 -327
- edsl/conjure/SurveyBuilder.py +0 -308
- edsl/conjure/SurveyBuilderCSV.py +0 -78
- edsl/conjure/SurveyBuilderSPSS.py +0 -118
- edsl/data/RemoteDict.py +0 -103
- {edsl-0.1.27.dev2.dist-info → edsl-0.1.28.dist-info}/LICENSE +0 -0
- {edsl-0.1.27.dev2.dist-info → edsl-0.1.28.dist-info}/WHEEL +0 -0
--- a/edsl/inference_services/DeepInfraService.py
+++ b/edsl/inference_services/DeepInfraService.py
@@ -1,7 +1,7 @@
 import aiohttp
 import json
 import requests
-from typing import Any
+from typing import Any, List
 from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
 from edsl.language_models import LanguageModel
 
@@ -12,6 +12,8 @@ class DeepInfraService(InferenceServiceABC):
     _inference_service_ = "deep_infra"
     _env_key_name_ = "DEEP_INFRA_API_KEY"
 
+    _models_list_cache: List[str] = []
+
     @classmethod
     def available(cls):
         text_models = cls.full_details_available()
@@ -19,20 +21,25 @@ class DeepInfraService(InferenceServiceABC):
 
     @classmethod
     def full_details_available(cls, verbose=False):
-        url = "https://api.deepinfra.com/models/list"
-        response = requests.get(url)
-        if response.status_code == 200:
-            text_generation_models = [
-                r for r in response.json() if r["type"] == "text-generation"
-            ]
-            from rich import print_json
-            import json
+        if not cls._models_list_cache:
+            url = "https://api.deepinfra.com/models/list"
+            response = requests.get(url)
+            if response.status_code == 200:
+                text_generation_models = [
+                    r for r in response.json() if r["type"] == "text-generation"
+                ]
+                cls._models_list_cache = text_generation_models
+
+                from rich import print_json
+                import json
 
-            if verbose:
-                print_json(json.dumps(text_generation_models))
-            return text_generation_models
+                if verbose:
+                    print_json(json.dumps(text_generation_models))
+                return text_generation_models
+            else:
+                return f"Failed to fetch data: Status code {response.status_code}"
         else:
-            return
+            return cls._models_list_cache
 
     @classmethod
     def create_model(cls, model_name: str, model_class_name=None) -> LanguageModel:
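Net effect of the three DeepInfraService hunks above: the model list is now fetched once and memoized on the class, so repeated available() calls avoid refetching. A minimal self-contained sketch of the same pattern (hypothetical class name, not the shipped code):

    import requests

    class ModelListCache:
        # Class-level memo shared by all callers, populated on first use.
        _models_list_cache: list = []

        @classmethod
        def full_details_available(cls) -> list:
            if not cls._models_list_cache:
                # Only the first call performs an HTTP request.
                response = requests.get("https://api.deepinfra.com/models/list")
                response.raise_for_status()
                cls._models_list_cache = [
                    r for r in response.json() if r["type"] == "text-generation"
                ]
            return cls._models_list_cache

Unlike this sketch, the shipped branch returns an error string on a non-200 status instead of raising, so callers can receive a str rather than a list on failure.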
--- a/edsl/inference_services/GoogleService.py
+++ b/edsl/inference_services/GoogleService.py
@@ -60,7 +60,13 @@ class GoogleService(InferenceServiceABC):
 
             def parse_response(self, raw_response: dict[str, Any]) -> str:
                 data = raw_response
-                return data["candidates"][0]["content"]["parts"][0]["text"]
+                try:
+                    return data["candidates"][0]["content"]["parts"][0]["text"]
+                except KeyError as e:
+                    print(
+                        f"The data return was {data}, which was missing the key 'candidates'"
+                    )
+                    raise e
 
         LLM.__name__ = model_name
 
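The GoogleService change wraps what was a bare nested lookup in a try/except, so a malformed payload is printed before the KeyError propagates. An illustrative standalone version (the sample payload is invented):

    from typing import Any

    def parse_response(raw_response: dict[str, Any]) -> str:
        # Mirrors the new guard: surface the offending payload, then re-raise.
        try:
            return raw_response["candidates"][0]["content"]["parts"][0]["text"]
        except KeyError as e:
            print(f"The data return was {raw_response}, which was missing the key 'candidates'")
            raise e

    good = {"candidates": [{"content": {"parts": [{"text": "hello"}]}}]}
    assert parse_response(good) == "hello"  # a payload without "candidates" would print and raise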
--- a/edsl/inference_services/InferenceServicesCollection.py
+++ b/edsl/inference_services/InferenceServicesCollection.py
@@ -1,21 +1,47 @@
 from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+import warnings
 
 
 class InferenceServicesCollection:
+    added_models = {}
+
     def __init__(self, services: list[InferenceServiceABC] = None):
         self.services = services or []
 
+    @classmethod
+    def add_model(cls, service_name, model_name):
+        if service_name not in cls.added_models:
+            cls.added_models[service_name] = []
+        cls.added_models[service_name].append(model_name)
+
+    @staticmethod
+    def _get_service_available(service) -> list[str]:
+        from_api = True
+        try:
+            service_models = service.available()
+        except Exception as e:
+            warnings.warn(
+                f"Error getting models for {service._inference_service_}. Relying on cache.",
+                UserWarning,
+            )
+            from edsl.inference_services.models_available_cache import models_available
+
+            service_models = models_available.get(service._inference_service_, [])
+            # cache results
+            service._models_list_cache = service_models
+            from_api = False
+        return service_models  # , from_api
+
     def available(self):
         total_models = []
         for service in self.services:
-            try:
-                service_models = service.available()
-            except Exception as e:
-                print(f"Error getting models for {service._inference_service_}: {e}")
-                service_models = []
-                continue
+            service_models = self._get_service_available(service)
             for model in service_models:
                 total_models.append([model, service._inference_service_, -1])
+
+            for model in self.added_models.get(service._inference_service_, []):
+                total_models.append([model, service._inference_service_, -1])
+
         sorted_models = sorted(total_models)
         for i, model in enumerate(sorted_models):
             model[2] = i
@@ -27,7 +53,7 @@ class InferenceServicesCollection:
 
     def create_model_factory(self, model_name: str, service_name=None, index=None):
         for service in self.services:
-            if model_name in
+            if model_name in self._get_service_available(service):
                 if service_name is None or service_name == service._inference_service_:
                     return service.create_model(model_name)
 
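Two behaviors arrive in InferenceServicesCollection: a class-level registry (add_model) for models a service does not report itself, and a cached fallback (_get_service_available) that both available() and create_model_factory() now route through. A self-contained sketch of the fallback, with an invented dict standing in for the models_available_cache module:

    import warnings

    STATIC_CACHE = {"deep_infra": ["meta-llama/Llama-2-7b-chat-hf"]}  # stand-in snapshot

    def get_service_available(service) -> list[str]:
        try:
            service_models = service.available()  # prefer the live API
        except Exception:
            warnings.warn(
                f"Error getting models for {service._inference_service_}. Relying on cache.",
                UserWarning,
            )
            service_models = STATIC_CACHE.get(service._inference_service_, [])
            service._models_list_cache = service_models  # stash so later calls skip the network
        return service_models

Routing create_model_factory() through the same helper keeps model lookup consistent with model listing even when a provider's API is unreachable.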
--- a/edsl/inference_services/OpenAIService.py
+++ b/edsl/inference_services/OpenAIService.py
@@ -4,6 +4,7 @@ from openai import AsyncOpenAI
 
 from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
 from edsl.language_models import LanguageModel
+from edsl.inference_services.rate_limits_cache import rate_limits
 
 
 class OpenAIService(InferenceServiceABC):
@@ -43,15 +44,16 @@ class OpenAIService(InferenceServiceABC):
                 if m.id not in cls.model_exclude_list
             ]
         except Exception as e:
-            print(
-                f"""Error retrieving models: {e}.
-                See instructions about storing your API keys: https://docs.expectedparrot.com/en/latest/api_keys.html"""
-            )
-            cls._models_list_cache = [
-                "gpt-3.5-turbo",
-                "gpt-4-1106-preview",
-                "gpt-4",
-            ]  # Fallback list
+            raise
+            # print(
+            #     f"""Error retrieving models: {e}.
+            #     See instructions about storing your API keys: https://docs.expectedparrot.com/en/latest/api_keys.html"""
+            # )
+            # cls._models_list_cache = [
+            #     "gpt-3.5-turbo",
+            #     "gpt-4-1106-preview",
+            #     "gpt-4",
+            # ] # Fallback list
         return cls._models_list_cache
 
     @classmethod
@@ -98,7 +100,12 @@ class OpenAIService(InferenceServiceABC):
 
     def get_rate_limits(self) -> dict[str, Any]:
         try:
-            headers = self.get_headers()
+            if "openai" in rate_limits:
+                headers = rate_limits["openai"]
+
+            else:
+                headers = self.get_headers()
+
         except Exception as e:
             return {
                 "rpm": 10_000,
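get_rate_limits() now prefers the snapshot captured in rate_limits_cache over a live header fetch. The informative fields are the x-ratelimit-* headers; a sketch of turning them into the rpm/tpm dict shape used by the fallback branch (the exact keys edsl reads are not visible in this hunk, so the mapping is an assumption):

    # Header values copied from rate_limits_cache.py below.
    headers = {
        "x-ratelimit-limit-requests": "5000",
        "x-ratelimit-limit-tokens": "600000",
    }
    limits = {
        "rpm": int(headers["x-ratelimit-limit-requests"]),
        "tpm": int(headers["x-ratelimit-limit-tokens"]),
    }
    print(limits)  # {'rpm': 5000, 'tpm': 600000}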
--- /dev/null
+++ b/edsl/inference_services/models_available_cache.py
@@ -0,0 +1,69 @@
+models_available = {
+    "openai": [
+        "gpt-3.5-turbo-1106",
+        "gpt-4-0125-preview",
+        "gpt-4-turbo-preview",
+        "gpt-3.5-turbo-16k",
+        "gpt-4-1106-preview",
+        "gpt-4-turbo-2024-04-09",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-4o-2024-05-13",
+        "gpt-4-turbo",
+        "gpt-3.5-turbo-0613",
+        "gpt-4",
+        "gpt-4-0613",
+        "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo",
+        "gpt-3.5-turbo-instruct",
+        "gpt-3.5-turbo-instruct-0914",
+        "gpt-3.5-turbo-0301",
+        "gpt-4-vision-preview",
+        "gpt-4-1106-vision-preview",
+        "gpt-4o",
+    ],
+    "anthropic": [
+        "claude-3-5-sonnet-20240620",
+        "claude-3-opus-20240229",
+        "claude-3-sonnet-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    "deep_infra": [
+        "meta-llama/Llama-2-13b-chat-hf",
+        "mistralai/Mixtral-8x22B-Instruct-v0.1",
+        "Gryphe/MythoMax-L2-13b-turbo",
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        "Austism/chronos-hermes-13b-v2",
+        "meta-llama/Llama-2-70b-chat-hf",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "meta-llama/Llama-2-7b-chat-hf",
+        "Qwen/Qwen2-72B-Instruct",
+        "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+        "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
+        "bigcode/starcoder2-15b",
+        "microsoft/WizardLM-2-8x22B",
+        "codellama/CodeLlama-70b-Instruct-hf",
+        "Gryphe/MythoMax-L2-13b",
+        "microsoft/WizardLM-2-7B",
+        "01-ai/Yi-34B-Chat",
+        "bigcode/starcoder2-15b-instruct-v0.1",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "openchat/openchat-3.6-8b",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "microsoft/Phi-3-medium-4k-instruct",
+        "Phind/Phind-CodeLlama-34B-v2",
+        "google/codegemma-7b-it",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "deepinfra/airoboros-70b",
+        "mistralai/Mixtral-8x22B-v0.1",
+        "llava-hf/llava-1.5-7b-hf",
+        "codellama/CodeLlama-34b-Instruct-hf",
+        "google/gemma-1.1-7b-it",
+        "lizpreciatior/lzlv_70b_fp16_hf",
+        "databricks/dbrx-instruct",
+        "nvidia/Nemotron-4-340B-Instruct",
+        "Qwen/Qwen2-7B-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "openchat/openchat_3.5",
+    ],
+    "google": ["gemini-pro"],
+}
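This dict is the static snapshot that InferenceServicesCollection._get_service_available falls back to, keyed by each service's _inference_service_ name:

    from edsl.inference_services.models_available_cache import models_available

    # A known provider returns its cached list; an unknown one yields [],
    # matching the fallback's models_available.get(name, []) lookup.
    print(models_available.get("google", []))   # ['gemini-pro']
    print(models_available.get("mistral", []))  # []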
--- /dev/null
+++ b/edsl/inference_services/rate_limits_cache.py
@@ -0,0 +1,25 @@
+rate_limits = {
+    "openai": {
+        "date": "Tue, 02 Jul 2024 15:25:28 GMT",
+        "content-type": "application/json",
+        "transfer-encoding": "chunked",
+        "connection": "keep-alive",
+        "openai-organization": "user-wmu32omw8ulzzutk6mjhtqgk",
+        "openai-processing-ms": "760",
+        "openai-version": "2020-10-01",
+        "strict-transport-security": "max-age=31536000; includeSubDomains",
+        "x-ratelimit-limit-requests": "5000",
+        "x-ratelimit-limit-tokens": "600000",
+        "x-ratelimit-remaining-requests": "4999",
+        "x-ratelimit-remaining-tokens": "599978",
+        "x-ratelimit-reset-requests": "12ms",
+        "x-ratelimit-reset-tokens": "2ms",
+        "x-request-id": "req_971608f3647f660a0cd6537fbe21f69c",
+        "cf-cache-status": "DYNAMIC",
+        "set-cookie": "__cf_bm=MJfUk.0TXdjtiNkUUqlUO2gaN3wzm0iHsRQRWExy52o-1719933928-1.0.1.1-0xk9gFxy_mD1KzAsKQ_HpL2pdQJ90D4B5frt65xU.c9k9QwD0oTBILqXB0rykXNh04Pm1UB1.H_W9sFJVOcSaw; path=/; expires=Tue, 02-Jul-24 15:55:28 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=GbheRct_iw9_I8iLWmt5ZRcLYZ_QVnroCrAt8QMVsUg-1719933928399-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
+        "server": "cloudflare",
+        "cf-ray": "89cfa6059bb9b68f-OTP",
+        "content-encoding": "gzip",
+        "alt-svc": 'h3=":443"; ma=86400',
+    }
+}
--- /dev/null
+++ b/edsl/inference_services/write_available.py
@@ -0,0 +1,10 @@
+from edsl.inference_services.registry import default
+
+
+def write_available():
+    d = {}
+    for service in default.services:
+        d[service._inference_service_] = service.available()
+
+    with open("models_available_cache.py", "w") as f:
+        f.write(f"models_available = {d}")