edsl 0.1.39.dev1__py3-none-any.whl → 0.1.39.dev3__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- edsl/Base.py +332 -332
- edsl/BaseDiff.py +260 -260
- edsl/TemplateLoader.py +24 -24
- edsl/__init__.py +49 -49
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +867 -867
- edsl/agents/AgentList.py +413 -413
- edsl/agents/Invigilator.py +233 -233
- edsl/agents/InvigilatorBase.py +270 -265
- edsl/agents/PromptConstructor.py +354 -354
- edsl/agents/__init__.py +3 -3
- edsl/agents/descriptors.py +99 -99
- edsl/agents/prompt_helpers.py +129 -129
- edsl/auto/AutoStudy.py +117 -117
- edsl/auto/StageBase.py +230 -230
- edsl/auto/StageGenerateSurvey.py +178 -178
- edsl/auto/StageLabelQuestions.py +125 -125
- edsl/auto/StagePersona.py +61 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
- edsl/auto/StagePersonaDimensionValues.py +74 -74
- edsl/auto/StagePersonaDimensions.py +69 -69
- edsl/auto/StageQuestions.py +73 -73
- edsl/auto/SurveyCreatorPipeline.py +21 -21
- edsl/auto/utilities.py +224 -224
- edsl/base/Base.py +279 -279
- edsl/config.py +157 -157
- edsl/conversation/Conversation.py +290 -290
- edsl/conversation/car_buying.py +58 -58
- edsl/conversation/chips.py +95 -95
- edsl/conversation/mug_negotiation.py +81 -81
- edsl/conversation/next_speaker_utilities.py +93 -93
- edsl/coop/PriceFetcher.py +54 -54
- edsl/coop/__init__.py +2 -2
- edsl/coop/coop.py +1028 -1028
- edsl/coop/utils.py +131 -131
- edsl/data/Cache.py +555 -555
- edsl/data/CacheEntry.py +233 -233
- edsl/data/CacheHandler.py +149 -149
- edsl/data/RemoteCacheSync.py +78 -78
- edsl/data/SQLiteDict.py +292 -292
- edsl/data/__init__.py +4 -4
- edsl/data/orm.py +10 -10
- edsl/data_transfer_models.py +73 -73
- edsl/enums.py +175 -175
- edsl/exceptions/BaseException.py +21 -21
- edsl/exceptions/__init__.py +54 -54
- edsl/exceptions/agents.py +42 -42
- edsl/exceptions/cache.py +5 -5
- edsl/exceptions/configuration.py +16 -16
- edsl/exceptions/coop.py +10 -10
- edsl/exceptions/data.py +14 -14
- edsl/exceptions/general.py +34 -34
- edsl/exceptions/jobs.py +33 -33
- edsl/exceptions/language_models.py +63 -63
- edsl/exceptions/prompts.py +15 -15
- edsl/exceptions/questions.py +91 -91
- edsl/exceptions/results.py +29 -29
- edsl/exceptions/scenarios.py +22 -22
- edsl/exceptions/surveys.py +37 -37
- edsl/inference_services/AnthropicService.py +87 -87
- edsl/inference_services/AwsBedrock.py +120 -120
- edsl/inference_services/AzureAI.py +217 -217
- edsl/inference_services/DeepInfraService.py +18 -18
- edsl/inference_services/GoogleService.py +148 -148
- edsl/inference_services/GroqService.py +20 -20
- edsl/inference_services/InferenceServiceABC.py +147 -147
- edsl/inference_services/InferenceServicesCollection.py +97 -97
- edsl/inference_services/MistralAIService.py +123 -123
- edsl/inference_services/OllamaService.py +18 -18
- edsl/inference_services/OpenAIService.py +224 -224
- edsl/inference_services/PerplexityService.py +163 -163
- edsl/inference_services/TestService.py +89 -89
- edsl/inference_services/TogetherAIService.py +170 -170
- edsl/inference_services/models_available_cache.py +118 -118
- edsl/inference_services/rate_limits_cache.py +25 -25
- edsl/inference_services/registry.py +41 -41
- edsl/inference_services/write_available.py +10 -10
- edsl/jobs/Answers.py +56 -56
- edsl/jobs/Jobs.py +898 -898
- edsl/jobs/JobsChecks.py +147 -147
- edsl/jobs/JobsPrompts.py +268 -268
- edsl/jobs/JobsRemoteInferenceHandler.py +239 -239
- edsl/jobs/__init__.py +1 -1
- edsl/jobs/buckets/BucketCollection.py +63 -63
- edsl/jobs/buckets/ModelBuckets.py +65 -65
- edsl/jobs/buckets/TokenBucket.py +251 -251
- edsl/jobs/interviews/Interview.py +661 -661
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
- edsl/jobs/interviews/InterviewStatistic.py +63 -63
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
- edsl/jobs/interviews/InterviewStatusLog.py +92 -92
- edsl/jobs/interviews/ReportErrors.py +66 -66
- edsl/jobs/interviews/interview_status_enum.py +9 -9
- edsl/jobs/runners/JobsRunnerAsyncio.py +466 -466
- edsl/jobs/runners/JobsRunnerStatus.py +330 -330
- edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
- edsl/jobs/tasks/TaskCreators.py +64 -64
- edsl/jobs/tasks/TaskHistory.py +450 -450
- edsl/jobs/tasks/TaskStatusLog.py +23 -23
- edsl/jobs/tasks/task_status_enum.py +163 -163
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
- edsl/jobs/tokens/TokenUsage.py +34 -34
- edsl/language_models/KeyLookup.py +30 -30
- edsl/language_models/LanguageModel.py +668 -668
- edsl/language_models/ModelList.py +155 -155
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
- edsl/language_models/__init__.py +3 -3
- edsl/language_models/fake_openai_call.py +15 -15
- edsl/language_models/fake_openai_service.py +61 -61
- edsl/language_models/registry.py +190 -190
- edsl/language_models/repair.py +156 -156
- edsl/language_models/unused/ReplicateBase.py +83 -83
- edsl/language_models/utilities.py +64 -64
- edsl/notebooks/Notebook.py +258 -258
- edsl/notebooks/__init__.py +1 -1
- edsl/prompts/Prompt.py +362 -362
- edsl/prompts/__init__.py +2 -2
- edsl/questions/AnswerValidatorMixin.py +289 -289
- edsl/questions/QuestionBase.py +664 -664
- edsl/questions/QuestionBaseGenMixin.py +161 -161
- edsl/questions/QuestionBasePromptsMixin.py +217 -217
- edsl/questions/QuestionBudget.py +227 -227
- edsl/questions/QuestionCheckBox.py +359 -359
- edsl/questions/QuestionExtract.py +182 -182
- edsl/questions/QuestionFreeText.py +114 -114
- edsl/questions/QuestionFunctional.py +166 -166
- edsl/questions/QuestionList.py +231 -231
- edsl/questions/QuestionMultipleChoice.py +286 -286
- edsl/questions/QuestionNumerical.py +153 -153
- edsl/questions/QuestionRank.py +324 -324
- edsl/questions/Quick.py +41 -41
- edsl/questions/RegisterQuestionsMeta.py +71 -71
- edsl/questions/ResponseValidatorABC.py +174 -174
- edsl/questions/SimpleAskMixin.py +73 -73
- edsl/questions/__init__.py +26 -26
- edsl/questions/compose_questions.py +98 -98
- edsl/questions/decorators.py +21 -21
- edsl/questions/derived/QuestionLikertFive.py +76 -76
- edsl/questions/derived/QuestionLinearScale.py +87 -87
- edsl/questions/derived/QuestionTopK.py +93 -93
- edsl/questions/derived/QuestionYesNo.py +82 -82
- edsl/questions/descriptors.py +413 -413
- edsl/questions/prompt_templates/question_budget.jinja +13 -13
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
- edsl/questions/prompt_templates/question_extract.jinja +11 -11
- edsl/questions/prompt_templates/question_free_text.jinja +3 -3
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
- edsl/questions/prompt_templates/question_list.jinja +17 -17
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
- edsl/questions/prompt_templates/question_numerical.jinja +36 -36
- edsl/questions/question_registry.py +177 -177
- edsl/questions/settings.py +12 -12
- edsl/questions/templates/budget/answering_instructions.jinja +7 -7
- edsl/questions/templates/budget/question_presentation.jinja +7 -7
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
- edsl/questions/templates/extract/answering_instructions.jinja +7 -7
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
- edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
- edsl/questions/templates/list/answering_instructions.jinja +3 -3
- edsl/questions/templates/list/question_presentation.jinja +5 -5
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
- edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
- edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
- edsl/questions/templates/numerical/question_presentation.jinja +6 -6
- edsl/questions/templates/rank/answering_instructions.jinja +11 -11
- edsl/questions/templates/rank/question_presentation.jinja +15 -15
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
- edsl/questions/templates/top_k/question_presentation.jinja +22 -22
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
- edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
- edsl/results/CSSParameterizer.py +108 -108
- edsl/results/Dataset.py +424 -424
- edsl/results/DatasetExportMixin.py +731 -731
- edsl/results/DatasetTree.py +275 -275
- edsl/results/Result.py +465 -465
- edsl/results/Results.py +1165 -1165
- edsl/results/ResultsDBMixin.py +238 -238
- edsl/results/ResultsExportMixin.py +43 -43
- edsl/results/ResultsFetchMixin.py +33 -33
- edsl/results/ResultsGGMixin.py +121 -121
- edsl/results/ResultsToolsMixin.py +98 -98
- edsl/results/Selector.py +135 -135
- edsl/results/TableDisplay.py +198 -198
- edsl/results/__init__.py +2 -2
- edsl/results/table_display.css +77 -77
- edsl/results/tree_explore.py +115 -115
- edsl/scenarios/FileStore.py +632 -632
- edsl/scenarios/Scenario.py +601 -601
- edsl/scenarios/ScenarioHtmlMixin.py +64 -64
- edsl/scenarios/ScenarioJoin.py +127 -127
- edsl/scenarios/ScenarioList.py +1287 -1287
- edsl/scenarios/ScenarioListExportMixin.py +52 -52
- edsl/scenarios/ScenarioListPdfMixin.py +261 -261
- edsl/scenarios/__init__.py +4 -4
- edsl/shared.py +1 -1
- edsl/study/ObjectEntry.py +173 -173
- edsl/study/ProofOfWork.py +113 -113
- edsl/study/SnapShot.py +80 -80
- edsl/study/Study.py +528 -528
- edsl/study/__init__.py +4 -4
- edsl/surveys/DAG.py +148 -148
- edsl/surveys/Memory.py +31 -31
- edsl/surveys/MemoryPlan.py +244 -244
- edsl/surveys/Rule.py +326 -326
- edsl/surveys/RuleCollection.py +387 -387
- edsl/surveys/Survey.py +1801 -1801
- edsl/surveys/SurveyCSS.py +261 -261
- edsl/surveys/SurveyExportMixin.py +259 -259
- edsl/surveys/SurveyFlowVisualizationMixin.py +179 -179
- edsl/surveys/SurveyQualtricsImport.py +284 -284
- edsl/surveys/__init__.py +3 -3
- edsl/surveys/base.py +53 -53
- edsl/surveys/descriptors.py +56 -56
- edsl/surveys/instructions/ChangeInstruction.py +49 -49
- edsl/surveys/instructions/Instruction.py +65 -65
- edsl/surveys/instructions/InstructionCollection.py +77 -77
- edsl/templates/error_reporting/base.html +23 -23
- edsl/templates/error_reporting/exceptions_by_model.html +34 -34
- edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
- edsl/templates/error_reporting/exceptions_by_type.html +16 -16
- edsl/templates/error_reporting/interview_details.html +115 -115
- edsl/templates/error_reporting/interviews.html +19 -19
- edsl/templates/error_reporting/overview.html +4 -4
- edsl/templates/error_reporting/performance_plot.html +1 -1
- edsl/templates/error_reporting/report.css +73 -73
- edsl/templates/error_reporting/report.html +117 -117
- edsl/templates/error_reporting/report.js +25 -25
- edsl/tools/__init__.py +1 -1
- edsl/tools/clusters.py +192 -192
- edsl/tools/embeddings.py +27 -27
- edsl/tools/embeddings_plotting.py +118 -118
- edsl/tools/plotting.py +112 -112
- edsl/tools/summarize.py +18 -18
- edsl/utilities/SystemInfo.py +28 -28
- edsl/utilities/__init__.py +22 -22
- edsl/utilities/ast_utilities.py +25 -25
- edsl/utilities/data/Registry.py +6 -6
- edsl/utilities/data/__init__.py +1 -1
- edsl/utilities/data/scooter_results.json +1 -1
- edsl/utilities/decorators.py +77 -77
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
- edsl/utilities/interface.py +627 -627
- edsl/utilities/naming_utilities.py +263 -263
- edsl/utilities/repair_functions.py +28 -28
- edsl/utilities/restricted_python.py +70 -70
- edsl/utilities/utilities.py +424 -424
- {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/LICENSE +21 -21
- {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/METADATA +1 -1
- edsl-0.1.39.dev3.dist-info/RECORD +277 -0
- edsl-0.1.39.dev1.dist-info/RECORD +0 -277
- {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/WHEEL +0 -0
edsl/inference_services/PerplexityService.py
@@ -1,163 +1,163 @@
-import aiohttp
-import json
-import requests
-from typing import Any, List, Optional
-from edsl.inference_services.rate_limits_cache import rate_limits
-
-# from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
-from edsl.language_models import LanguageModel
-
-from edsl.inference_services.OpenAIService import OpenAIService
-
-
-class PerplexityService(OpenAIService):
-    """Perplexity service class."""
-
-    _inference_service_ = "perplexity"
-    _env_key_name_ = "PERPLEXITY_API_KEY"
-    _base_url_ = "https://api.perplexity.ai"
-    _models_list_cache: List[str] = []
-    # default perplexity parameters
-    _parameters_ = {
-        "temperature": 0.5,
-        "max_tokens": 1000,
-        "top_p": 1,
-        "logprobs": False,
-        "top_logprobs": 3,
-    }
-
-    @classmethod
-    def available(cls) -> List[str]:
-        return [
-            "llama-3.1-sonar-huge-128k-online",
-            "llama-3.1-sonar-large-128k-online",
-            "llama-3.1-sonar-small-128k-online",
-        ]
-
-    @classmethod
-    def create_model(
-        cls, model_name="llama-3.1-sonar-large-128k-online", model_class_name=None
-    ) -> LanguageModel:
-        if model_class_name is None:
-            model_class_name = cls.to_class_name(model_name)
-
-        class LLM(LanguageModel):
-            """
-            Child class of LanguageModel for interacting with Perplexity models
-            """
-
-            key_sequence = cls.key_sequence
-            usage_sequence = cls.usage_sequence
-            input_token_name = cls.input_token_name
-            output_token_name = cls.output_token_name
-
-            _rpm = cls.get_rpm(cls)
-            _tpm = cls.get_tpm(cls)
-
-            _inference_service_ = cls._inference_service_
-            _model_ = model_name
-
-            _parameters_ = {
-                "temperature": 0.5,
-                "max_tokens": 1000,
-                "top_p": 1,
-                "frequency_penalty": 1,
-                "presence_penalty": 0,
-                # "logprobs": False, # Enable this returns 'Neither or both of logprobs and top_logprobs must be set.
-                # "top_logprobs": 3,
-            }
-
-            def sync_client(self):
-                return cls.sync_client()
-
-            def async_client(self):
-                return cls.async_client()
-
-            @classmethod
-            def available(cls) -> list[str]:
-                return cls.sync_client().models.list()
-
-            def get_headers(self) -> dict[str, Any]:
-                client = self.sync_client()
-                response = client.chat.completions.with_raw_response.create(
-                    messages=[
-                        {
-                            "role": "user",
-                            "content": "Say this is a test",
-                        }
-                    ],
-                    model=self.model,
-                )
-                return dict(response.headers)
-
-            def get_rate_limits(self) -> dict[str, Any]:
-                try:
-                    if "openai" in rate_limits:
-                        headers = rate_limits["openai"]
-
-                    else:
-                        headers = self.get_headers()
-
-                except Exception as e:
-                    return {
-                        "rpm": 10_000,
-                        "tpm": 2_000_000,
-                    }
-                else:
-                    return {
-                        "rpm": int(headers["x-ratelimit-limit-requests"]),
-                        "tpm": int(headers["x-ratelimit-limit-tokens"]),
-                    }
-
-            async def async_execute_model_call(
-                self,
-                user_prompt: str,
-                system_prompt: str = "",
-                files_list: Optional[List["Files"]] = None,
-                invigilator: Optional[
-                    "InvigilatorAI"
-                ] = None,  # TBD - can eventually be used for function-calling
-            ) -> dict[str, Any]:
-                """Calls the OpenAI API and returns the API response."""
-                if files_list:
-                    encoded_image = files_list[0].base64_string
-                    content = [{"type": "text", "text": user_prompt}]
-                    content.append(
-                        {
-                            "type": "image_url",
-                            "image_url": {
-                                "url": f"data:image/jpeg;base64,{encoded_image}"
-                            },
-                        }
-                    )
-                else:
-                    content = user_prompt
-                client = self.async_client()
-
-                messages = [
-                    {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": content},
-                ]
-                if system_prompt == "" and self.omit_system_prompt_if_empty:
-                    messages = messages[1:]
-
-                params = {
-                    "model": self.model,
-                    "messages": messages,
-                    "temperature": self.temperature,
-                    "max_tokens": self.max_tokens,
-                    "top_p": self.top_p,
-                    "frequency_penalty": self.frequency_penalty,
-                    "presence_penalty": self.presence_penalty,
-                    # "logprobs": self.logprobs,
-                    # "top_logprobs": self.top_logprobs if self.logprobs else None,
-                }
-                try:
-                    response = await client.chat.completions.create(**params)
-                except Exception as e:
-                    print(e, flush=True)
-                return response.model_dump()
-
-        LLM.__name__ = "LanguageModel"
-
-        return LLM
+import aiohttp
+import json
+import requests
+from typing import Any, List, Optional
+from edsl.inference_services.rate_limits_cache import rate_limits
+
+# from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+from edsl.language_models import LanguageModel
+
+from edsl.inference_services.OpenAIService import OpenAIService
+
+
+class PerplexityService(OpenAIService):
+    """Perplexity service class."""
+
+    _inference_service_ = "perplexity"
+    _env_key_name_ = "PERPLEXITY_API_KEY"
+    _base_url_ = "https://api.perplexity.ai"
+    _models_list_cache: List[str] = []
+    # default perplexity parameters
+    _parameters_ = {
+        "temperature": 0.5,
+        "max_tokens": 1000,
+        "top_p": 1,
+        "logprobs": False,
+        "top_logprobs": 3,
+    }
+
+    @classmethod
+    def available(cls) -> List[str]:
+        return [
+            "llama-3.1-sonar-huge-128k-online",
+            "llama-3.1-sonar-large-128k-online",
+            "llama-3.1-sonar-small-128k-online",
+        ]
+
+    @classmethod
+    def create_model(
+        cls, model_name="llama-3.1-sonar-large-128k-online", model_class_name=None
+    ) -> LanguageModel:
+        if model_class_name is None:
+            model_class_name = cls.to_class_name(model_name)
+
+        class LLM(LanguageModel):
+            """
+            Child class of LanguageModel for interacting with Perplexity models
+            """
+
+            key_sequence = cls.key_sequence
+            usage_sequence = cls.usage_sequence
+            input_token_name = cls.input_token_name
+            output_token_name = cls.output_token_name
+
+            _rpm = cls.get_rpm(cls)
+            _tpm = cls.get_tpm(cls)
+
+            _inference_service_ = cls._inference_service_
+            _model_ = model_name
+
+            _parameters_ = {
+                "temperature": 0.5,
+                "max_tokens": 1000,
+                "top_p": 1,
+                "frequency_penalty": 1,
+                "presence_penalty": 0,
+                # "logprobs": False, # Enable this returns 'Neither or both of logprobs and top_logprobs must be set.
+                # "top_logprobs": 3,
+            }
+
+            def sync_client(self):
+                return cls.sync_client()
+
+            def async_client(self):
+                return cls.async_client()
+
+            @classmethod
+            def available(cls) -> list[str]:
+                return cls.sync_client().models.list()
+
+            def get_headers(self) -> dict[str, Any]:
+                client = self.sync_client()
+                response = client.chat.completions.with_raw_response.create(
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": "Say this is a test",
+                        }
+                    ],
+                    model=self.model,
+                )
+                return dict(response.headers)
+
+            def get_rate_limits(self) -> dict[str, Any]:
+                try:
+                    if "openai" in rate_limits:
+                        headers = rate_limits["openai"]
+
+                    else:
+                        headers = self.get_headers()
+
+                except Exception as e:
+                    return {
+                        "rpm": 10_000,
+                        "tpm": 2_000_000,
+                    }
+                else:
+                    return {
+                        "rpm": int(headers["x-ratelimit-limit-requests"]),
+                        "tpm": int(headers["x-ratelimit-limit-tokens"]),
+                    }
+
+            async def async_execute_model_call(
+                self,
+                user_prompt: str,
+                system_prompt: str = "",
+                files_list: Optional[List["Files"]] = None,
+                invigilator: Optional[
+                    "InvigilatorAI"
+                ] = None,  # TBD - can eventually be used for function-calling
+            ) -> dict[str, Any]:
+                """Calls the OpenAI API and returns the API response."""
+                if files_list:
+                    encoded_image = files_list[0].base64_string
+                    content = [{"type": "text", "text": user_prompt}]
+                    content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/jpeg;base64,{encoded_image}"
+                            },
+                        }
+                    )
+                else:
+                    content = user_prompt
+                client = self.async_client()
+
+                messages = [
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": content},
+                ]
+                if system_prompt == "" and self.omit_system_prompt_if_empty:
+                    messages = messages[1:]
+
+                params = {
+                    "model": self.model,
+                    "messages": messages,
+                    "temperature": self.temperature,
+                    "max_tokens": self.max_tokens,
+                    "top_p": self.top_p,
+                    "frequency_penalty": self.frequency_penalty,
+                    "presence_penalty": self.presence_penalty,
+                    # "logprobs": self.logprobs,
+                    # "top_logprobs": self.top_logprobs if self.logprobs else None,
+                }
+                try:
+                    response = await client.chat.completions.create(**params)
+                except Exception as e:
+                    print(e, flush=True)
+                return response.model_dump()
+
+        LLM.__name__ = "LanguageModel"
+
+        return LLM
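For orientation, the hunk above shows the factory pattern edsl uses for all inference services: create_model builds and returns a LanguageModel subclass bound to one Perplexity model name. A minimal sketch of exercising it, assuming PERPLEXITY_API_KEY is set in the environment; the zero-argument instantiation below is an assumption about the LanguageModel API, not something this diff confirms:

    import asyncio
    from edsl.inference_services.PerplexityService import PerplexityService

    # create_model() appears verbatim in the diff above; it returns a class, not an instance.
    ModelClass = PerplexityService.create_model("llama-3.1-sonar-small-128k-online")
    model = ModelClass()  # assumed: LanguageModel subclasses construct with default parameters

    # async_execute_model_call is a coroutine, so drive it with asyncio.run().
    raw = asyncio.run(model.async_execute_model_call(user_prompt="Say this is a test"))
    print(raw)  # the OpenAI-style response dict produced by response.model_dump()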
edsl/inference_services/TestService.py
@@ -1,89 +1,89 @@
-from typing import Any, List, Optional
-import os
-import asyncio
-from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
-from edsl.language_models import LanguageModel
-from edsl.inference_services.rate_limits_cache import rate_limits
-from edsl.utilities.utilities import fix_partial_correct_response
-
-from edsl.enums import InferenceServiceType
-import random
-
-
-class TestService(InferenceServiceABC):
-    """OpenAI service class."""
-
-    _inference_service_ = "test"
-    _env_key_name_ = None
-    _base_url_ = None
-
-    _sync_client_ = None
-    _async_client_ = None
-
-    _sync_client_instance = None
-    _async_client_instance = None
-
-    key_sequence = None
-    usage_sequence = None
-    model_exclude_list = []
-    input_token_name = "prompt_tokens"
-    output_token_name = "completion_tokens"
-
-    @classmethod
-    def available(cls) -> list[str]:
-        return ["test"]
-
-    @classmethod
-    def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
-        throw_exception = False
-
-        class TestServiceLanguageModel(LanguageModel):
-            _model_ = "test"
-            _parameters_ = {"temperature": 0.5}
-            _inference_service_ = InferenceServiceType.TEST.value
-            usage_sequence = ["usage"]
-            key_sequence = ["message", 0, "text"]
-            input_token_name = cls.input_token_name
-            output_token_name = cls.output_token_name
-            _rpm = 1000
-            _tpm = 100000
-
-            @property
-            def _canned_response(self):
-                if hasattr(self, "canned_response"):
-                    return self.canned_response
-                else:
-                    return "Hello, world"
-
-            async def async_execute_model_call(
-                self,
-                user_prompt: str,
-                system_prompt: str,
-                # func: Optional[callable] = None,
-                files_list: Optional[List["File"]] = None,
-            ) -> dict[str, Any]:
-                await asyncio.sleep(0.1)
-                # return {"message": """{"answer": "Hello, world"}"""}
-
-                if hasattr(self, "func"):
-                    return {
-                        "message": [
-                            {"text": self.func(user_prompt, system_prompt, files_list)}
-                        ],
-                        "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                    }
-
-                if hasattr(self, "throw_exception") and self.throw_exception:
-                    if hasattr(self, "exception_probability"):
-                        p = self.exception_probability
-                    else:
-                        p = 1
-
-                    if random.random() < p:
-                        raise Exception("This is a test error")
-                return {
-                    "message": [{"text": f"{self._canned_response}"}],
-                    "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                }
-
-        return TestServiceLanguageModel
+from typing import Any, List, Optional
+import os
+import asyncio
+from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+from edsl.language_models import LanguageModel
+from edsl.inference_services.rate_limits_cache import rate_limits
+from edsl.utilities.utilities import fix_partial_correct_response
+
+from edsl.enums import InferenceServiceType
+import random
+
+
+class TestService(InferenceServiceABC):
+    """OpenAI service class."""
+
+    _inference_service_ = "test"
+    _env_key_name_ = None
+    _base_url_ = None
+
+    _sync_client_ = None
+    _async_client_ = None
+
+    _sync_client_instance = None
+    _async_client_instance = None
+
+    key_sequence = None
+    usage_sequence = None
+    model_exclude_list = []
+    input_token_name = "prompt_tokens"
+    output_token_name = "completion_tokens"
+
+    @classmethod
+    def available(cls) -> list[str]:
+        return ["test"]
+
+    @classmethod
+    def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
+        throw_exception = False
+
+        class TestServiceLanguageModel(LanguageModel):
+            _model_ = "test"
+            _parameters_ = {"temperature": 0.5}
+            _inference_service_ = InferenceServiceType.TEST.value
+            usage_sequence = ["usage"]
+            key_sequence = ["message", 0, "text"]
+            input_token_name = cls.input_token_name
+            output_token_name = cls.output_token_name
+            _rpm = 1000
+            _tpm = 100000
+
+            @property
+            def _canned_response(self):
+                if hasattr(self, "canned_response"):
+                    return self.canned_response
+                else:
+                    return "Hello, world"
+
+            async def async_execute_model_call(
+                self,
+                user_prompt: str,
+                system_prompt: str,
+                # func: Optional[callable] = None,
+                files_list: Optional[List["File"]] = None,
+            ) -> dict[str, Any]:
+                await asyncio.sleep(0.1)
+                # return {"message": """{"answer": "Hello, world"}"""}
+
+                if hasattr(self, "func"):
+                    return {
+                        "message": [
+                            {"text": self.func(user_prompt, system_prompt, files_list)}
+                        ],
+                        "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+                    }
+
+                if hasattr(self, "throw_exception") and self.throw_exception:
+                    if hasattr(self, "exception_probability"):
+                        p = self.exception_probability
+                    else:
+                        p = 1
+
+                    if random.random() < p:
+                        raise Exception("This is a test error")
+                return {
+                    "message": [{"text": f"{self._canned_response}"}],
+                    "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+                }
+
+        return TestServiceLanguageModel
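The test service follows the same factory shape but needs no API key; its behavior is steered by optional instance attributes (canned_response, throw_exception, exception_probability) that the code above probes with hasattr. A minimal sketch, again assuming the returned class constructs with defaults:

    import asyncio
    from edsl.inference_services.TestService import TestService

    TestModel = TestService.create_model("test")
    model = TestModel()                      # assumed default construction
    model.canned_response = "forty-two"      # read back by the _canned_response property
    model.throw_exception = False            # keep the error-injection branch disabled

    raw = asyncio.run(
        model.async_execute_model_call(user_prompt="ping", system_prompt="")
    )
    print(raw)  # {'message': [{'text': 'forty-two'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}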