edsl 0.1.38.dev2__py3-none-any.whl → 0.1.38.dev4__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (53)
  1. edsl/Base.py +60 -31
  2. edsl/__version__.py +1 -1
  3. edsl/agents/Agent.py +18 -9
  4. edsl/agents/AgentList.py +59 -8
  5. edsl/agents/Invigilator.py +18 -7
  6. edsl/agents/InvigilatorBase.py +0 -19
  7. edsl/agents/PromptConstructor.py +5 -4
  8. edsl/config.py +8 -0
  9. edsl/coop/coop.py +74 -7
  10. edsl/data/Cache.py +27 -2
  11. edsl/data/CacheEntry.py +8 -3
  12. edsl/data/RemoteCacheSync.py +0 -19
  13. edsl/enums.py +2 -0
  14. edsl/inference_services/GoogleService.py +7 -15
  15. edsl/inference_services/PerplexityService.py +163 -0
  16. edsl/inference_services/registry.py +2 -0
  17. edsl/jobs/Jobs.py +88 -548
  18. edsl/jobs/JobsChecks.py +147 -0
  19. edsl/jobs/JobsPrompts.py +268 -0
  20. edsl/jobs/JobsRemoteInferenceHandler.py +239 -0
  21. edsl/jobs/interviews/Interview.py +11 -11
  22. edsl/jobs/runners/JobsRunnerAsyncio.py +140 -35
  23. edsl/jobs/runners/JobsRunnerStatus.py +0 -2
  24. edsl/jobs/tasks/TaskHistory.py +15 -16
  25. edsl/language_models/LanguageModel.py +44 -84
  26. edsl/language_models/ModelList.py +47 -1
  27. edsl/language_models/registry.py +57 -4
  28. edsl/prompts/Prompt.py +8 -3
  29. edsl/questions/QuestionBase.py +20 -16
  30. edsl/questions/QuestionExtract.py +3 -4
  31. edsl/questions/question_registry.py +36 -6
  32. edsl/results/CSSParameterizer.py +108 -0
  33. edsl/results/Dataset.py +146 -15
  34. edsl/results/DatasetExportMixin.py +231 -217
  35. edsl/results/DatasetTree.py +134 -4
  36. edsl/results/Result.py +18 -9
  37. edsl/results/Results.py +145 -51
  38. edsl/results/TableDisplay.py +198 -0
  39. edsl/results/table_display.css +78 -0
  40. edsl/scenarios/FileStore.py +187 -13
  41. edsl/scenarios/Scenario.py +61 -4
  42. edsl/scenarios/ScenarioJoin.py +127 -0
  43. edsl/scenarios/ScenarioList.py +237 -62
  44. edsl/surveys/Survey.py +16 -2
  45. edsl/surveys/SurveyFlowVisualizationMixin.py +67 -9
  46. edsl/surveys/instructions/Instruction.py +12 -0
  47. edsl/templates/error_reporting/interview_details.html +3 -3
  48. edsl/templates/error_reporting/interviews.html +18 -9
  49. edsl/utilities/utilities.py +15 -0
  50. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev4.dist-info}/METADATA +2 -1
  51. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev4.dist-info}/RECORD +53 -45
  52. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev4.dist-info}/LICENSE +0 -0
  53. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev4.dist-info}/WHEEL +0 -0
edsl/inference_services/GoogleService.py

@@ -8,6 +8,7 @@ from google.api_core.exceptions import InvalidArgument
 from edsl.exceptions import MissingAPIKeyError
 from edsl.language_models.LanguageModel import LanguageModel
 from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+from edsl.coop import Coop
 
 safety_settings = [
     {
@@ -79,22 +80,8 @@ class GoogleService(InferenceServiceABC):
     api_token = None
     model = None
 
-    @classmethod
-    def initialize(cls):
-        if cls.api_token is None:
-            cls.api_token = os.getenv("GOOGLE_API_KEY")
-            if not cls.api_token:
-                raise MissingAPIKeyError(
-                    "GOOGLE_API_KEY environment variable is not set"
-                )
-            genai.configure(api_key=cls.api_token)
-            cls.generative_model = genai.GenerativeModel(
-                cls._model_, safety_settings=safety_settings
-            )
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.initialize()
 
     def get_generation_config(self) -> GenerationConfig:
         return GenerationConfig(
@@ -116,6 +103,7 @@ class GoogleService(InferenceServiceABC):
         if files_list is None:
             files_list = []
 
+        genai.configure(api_key=self.api_token)
         if (
             system_prompt is not None
             and system_prompt != ""
@@ -133,7 +121,11 @@ class GoogleService(InferenceServiceABC):
             )
             print("Will add system_prompt to user_prompt")
             user_prompt = f"{system_prompt}\n{user_prompt}"
-
+        else:
+            self.generative_model = genai.GenerativeModel(
+                self._model_,
+                safety_settings=safety_settings,
+            )
         combined_prompt = [user_prompt]
         for file in files_list:
             if "google" not in file.external_locations:
edsl/inference_services/PerplexityService.py (new file)

@@ -0,0 +1,163 @@
+import aiohttp
+import json
+import requests
+from typing import Any, List, Optional
+from edsl.inference_services.rate_limits_cache import rate_limits
+
+# from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+from edsl.language_models import LanguageModel
+
+from edsl.inference_services.OpenAIService import OpenAIService
+
+
+class PerplexityService(OpenAIService):
+    """Perplexity service class."""
+
+    _inference_service_ = "perplexity"
+    _env_key_name_ = "PERPLEXITY_API_KEY"
+    _base_url_ = "https://api.perplexity.ai"
+    _models_list_cache: List[str] = []
+    # default perplexity parameters
+    _parameters_ = {
+        "temperature": 0.5,
+        "max_tokens": 1000,
+        "top_p": 1,
+        "logprobs": False,
+        "top_logprobs": 3,
+    }
+
+    @classmethod
+    def available(cls) -> List[str]:
+        return [
+            "llama-3.1-sonar-huge-128k-online",
+            "llama-3.1-sonar-large-128k-online",
+            "llama-3.1-sonar-small-128k-online",
+        ]
+
+    @classmethod
+    def create_model(
+        cls, model_name="llama-3.1-sonar-large-128k-online", model_class_name=None
+    ) -> LanguageModel:
+        if model_class_name is None:
+            model_class_name = cls.to_class_name(model_name)
+
+        class LLM(LanguageModel):
+            """
+            Child class of LanguageModel for interacting with Perplexity models
+            """
+
+            key_sequence = cls.key_sequence
+            usage_sequence = cls.usage_sequence
+            input_token_name = cls.input_token_name
+            output_token_name = cls.output_token_name
+
+            _rpm = cls.get_rpm(cls)
+            _tpm = cls.get_tpm(cls)
+
+            _inference_service_ = cls._inference_service_
+            _model_ = model_name
+
+            _parameters_ = {
+                "temperature": 0.5,
+                "max_tokens": 1000,
+                "top_p": 1,
+                "frequency_penalty": 1,
+                "presence_penalty": 0,
+                # "logprobs": False,  # Enable this returns 'Neither or both of logprobs and top_logprobs must be set.
+                # "top_logprobs": 3,
+            }
+
+            def sync_client(self):
+                return cls.sync_client()
+
+            def async_client(self):
+                return cls.async_client()
+
+            @classmethod
+            def available(cls) -> list[str]:
+                return cls.sync_client().models.list()
+
+            def get_headers(self) -> dict[str, Any]:
+                client = self.sync_client()
+                response = client.chat.completions.with_raw_response.create(
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": "Say this is a test",
+                        }
+                    ],
+                    model=self.model,
+                )
+                return dict(response.headers)
+
+            def get_rate_limits(self) -> dict[str, Any]:
+                try:
+                    if "openai" in rate_limits:
+                        headers = rate_limits["openai"]
+
+                    else:
+                        headers = self.get_headers()
+
+                except Exception as e:
+                    return {
+                        "rpm": 10_000,
+                        "tpm": 2_000_000,
+                    }
+                else:
+                    return {
+                        "rpm": int(headers["x-ratelimit-limit-requests"]),
+                        "tpm": int(headers["x-ratelimit-limit-tokens"]),
+                    }
+
+            async def async_execute_model_call(
+                self,
+                user_prompt: str,
+                system_prompt: str = "",
+                files_list: Optional[List["Files"]] = None,
+                invigilator: Optional[
+                    "InvigilatorAI"
+                ] = None,  # TBD - can eventually be used for function-calling
+            ) -> dict[str, Any]:
+                """Calls the OpenAI API and returns the API response."""
+                if files_list:
+                    encoded_image = files_list[0].base64_string
+                    content = [{"type": "text", "text": user_prompt}]
+                    content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/jpeg;base64,{encoded_image}"
+                            },
+                        }
+                    )
+                else:
+                    content = user_prompt
+                client = self.async_client()
+
+                messages = [
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": content},
+                ]
+                if system_prompt == "" and self.omit_system_prompt_if_empty:
+                    messages = messages[1:]
+
+                params = {
+                    "model": self.model,
+                    "messages": messages,
+                    "temperature": self.temperature,
+                    "max_tokens": self.max_tokens,
+                    "top_p": self.top_p,
+                    "frequency_penalty": self.frequency_penalty,
+                    "presence_penalty": self.presence_penalty,
+                    # "logprobs": self.logprobs,
+                    # "top_logprobs": self.top_logprobs if self.logprobs else None,
+                }
+                try:
+                    response = await client.chat.completions.create(**params)
+                except Exception as e:
+                    print(e, flush=True)
+                return response.model_dump()
+
+        LLM.__name__ = "LanguageModel"
+
+        return LLM
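
Because PerplexityService subclasses OpenAIService, it inherits the OpenAI-compatible client plumbing and only overrides the base URL, key name, model list, and defaults. A hedged usage sketch, assuming PERPLEXITY_API_KEY is set and that the Model registry resolves this model name to the new service (exact call shapes may differ in this edsl version):

    from edsl import Model
    from edsl.questions import QuestionFreeText

    # Resolved via the registry change below; served by PerplexityService.
    model = Model("llama-3.1-sonar-large-128k-online")
    question = QuestionFreeText(
        question_name="capital",
        question_text="What is the capital of France?",
    )
    results = question.by(model).run()
    print(results.select("answer.capital"))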
edsl/inference_services/registry.py

@@ -12,6 +12,7 @@ from edsl.inference_services.AzureAI import AzureAIService
 from edsl.inference_services.OllamaService import OllamaService
 from edsl.inference_services.TestService import TestService
 from edsl.inference_services.TogetherAIService import TogetherAIService
+from edsl.inference_services.PerplexityService import PerplexityService
 
 try:
     from edsl.inference_services.MistralAIService import MistralAIService
@@ -31,6 +32,7 @@ services = [
     OllamaService,
     TestService,
     TogetherAIService,
+    PerplexityService,
 ]
 
 if mistral_available:
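
Appending PerplexityService to this services list is the only wiring step: the registry builds its collection from the list, which is what makes the llama-3.1-sonar-* models resolvable by name. A hedged verification sketch, where the default collection and its services attribute are assumed from edsl's registry module and may be named differently in this version:

    from edsl.inference_services.registry import default

    # Hypothetical check that the new service was picked up by the registry.
    service_names = [service._inference_service_ for service in default.services]
    assert "perplexity" in service_names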