edsl 0.1.33__py3-none-any.whl → 0.1.33.dev1__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (180)
  1. edsl/Base.py +3 -9
  2. edsl/__init__.py +3 -8
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +8 -40
  5. edsl/agents/AgentList.py +0 -43
  6. edsl/agents/Invigilator.py +219 -135
  7. edsl/agents/InvigilatorBase.py +59 -148
  8. edsl/agents/{PromptConstructor.py → PromptConstructionMixin.py} +89 -138
  9. edsl/agents/__init__.py +0 -1
  10. edsl/config.py +56 -47
  11. edsl/coop/coop.py +7 -50
  12. edsl/data/Cache.py +1 -35
  13. edsl/data_transfer_models.py +38 -73
  14. edsl/enums.py +0 -4
  15. edsl/exceptions/language_models.py +1 -25
  16. edsl/exceptions/questions.py +5 -62
  17. edsl/exceptions/results.py +0 -4
  18. edsl/inference_services/AnthropicService.py +11 -13
  19. edsl/inference_services/AwsBedrock.py +17 -19
  20. edsl/inference_services/AzureAI.py +20 -37
  21. edsl/inference_services/GoogleService.py +12 -16
  22. edsl/inference_services/GroqService.py +0 -2
  23. edsl/inference_services/InferenceServiceABC.py +3 -58
  24. edsl/inference_services/OpenAIService.py +54 -48
  25. edsl/inference_services/models_available_cache.py +6 -0
  26. edsl/inference_services/registry.py +0 -6
  27. edsl/jobs/Answers.py +12 -10
  28. edsl/jobs/Jobs.py +21 -36
  29. edsl/jobs/buckets/BucketCollection.py +15 -24
  30. edsl/jobs/buckets/TokenBucket.py +14 -93
  31. edsl/jobs/interviews/Interview.py +78 -366
  32. edsl/jobs/interviews/InterviewExceptionEntry.py +19 -85
  33. edsl/jobs/interviews/InterviewTaskBuildingMixin.py +286 -0
  34. edsl/jobs/interviews/{InterviewExceptionCollection.py → interview_exception_tracking.py} +68 -14
  35. edsl/jobs/interviews/retry_management.py +37 -0
  36. edsl/jobs/runners/JobsRunnerAsyncio.py +175 -146
  37. edsl/jobs/runners/JobsRunnerStatusMixin.py +333 -0
  38. edsl/jobs/tasks/QuestionTaskCreator.py +23 -30
  39. edsl/jobs/tasks/TaskHistory.py +213 -148
  40. edsl/language_models/LanguageModel.py +156 -261
  41. edsl/language_models/ModelList.py +2 -2
  42. edsl/language_models/RegisterLanguageModelsMeta.py +29 -14
  43. edsl/language_models/registry.py +6 -23
  44. edsl/language_models/repair.py +19 -0
  45. edsl/prompts/Prompt.py +2 -52
  46. edsl/questions/AnswerValidatorMixin.py +26 -23
  47. edsl/questions/QuestionBase.py +249 -329
  48. edsl/questions/QuestionBudget.py +41 -99
  49. edsl/questions/QuestionCheckBox.py +35 -227
  50. edsl/questions/QuestionExtract.py +27 -98
  51. edsl/questions/QuestionFreeText.py +29 -52
  52. edsl/questions/QuestionFunctional.py +0 -7
  53. edsl/questions/QuestionList.py +22 -141
  54. edsl/questions/QuestionMultipleChoice.py +65 -159
  55. edsl/questions/QuestionNumerical.py +46 -88
  56. edsl/questions/QuestionRank.py +24 -182
  57. edsl/questions/RegisterQuestionsMeta.py +12 -31
  58. edsl/questions/__init__.py +4 -3
  59. edsl/questions/derived/QuestionLikertFive.py +5 -10
  60. edsl/questions/derived/QuestionLinearScale.py +2 -15
  61. edsl/questions/derived/QuestionTopK.py +1 -10
  62. edsl/questions/derived/QuestionYesNo.py +3 -24
  63. edsl/questions/descriptors.py +7 -43
  64. edsl/questions/question_registry.py +2 -6
  65. edsl/results/Dataset.py +0 -20
  66. edsl/results/DatasetExportMixin.py +48 -46
  67. edsl/results/Result.py +5 -32
  68. edsl/results/Results.py +46 -135
  69. edsl/results/ResultsDBMixin.py +3 -3
  70. edsl/scenarios/FileStore.py +10 -71
  71. edsl/scenarios/Scenario.py +25 -96
  72. edsl/scenarios/ScenarioImageMixin.py +2 -2
  73. edsl/scenarios/ScenarioList.py +39 -361
  74. edsl/scenarios/ScenarioListExportMixin.py +0 -9
  75. edsl/scenarios/ScenarioListPdfMixin.py +4 -150
  76. edsl/study/SnapShot.py +1 -8
  77. edsl/study/Study.py +0 -32
  78. edsl/surveys/Rule.py +1 -10
  79. edsl/surveys/RuleCollection.py +5 -21
  80. edsl/surveys/Survey.py +310 -636
  81. edsl/surveys/SurveyExportMixin.py +9 -71
  82. edsl/surveys/SurveyFlowVisualizationMixin.py +1 -2
  83. edsl/surveys/SurveyQualtricsImport.py +4 -75
  84. edsl/utilities/gcp_bucket/simple_example.py +9 -0
  85. edsl/utilities/utilities.py +1 -9
  86. {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/METADATA +2 -5
  87. edsl-0.1.33.dev1.dist-info/RECORD +209 -0
  88. edsl/TemplateLoader.py +0 -24
  89. edsl/auto/AutoStudy.py +0 -117
  90. edsl/auto/StageBase.py +0 -230
  91. edsl/auto/StageGenerateSurvey.py +0 -178
  92. edsl/auto/StageLabelQuestions.py +0 -125
  93. edsl/auto/StagePersona.py +0 -61
  94. edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
  95. edsl/auto/StagePersonaDimensionValues.py +0 -74
  96. edsl/auto/StagePersonaDimensions.py +0 -69
  97. edsl/auto/StageQuestions.py +0 -73
  98. edsl/auto/SurveyCreatorPipeline.py +0 -21
  99. edsl/auto/utilities.py +0 -224
  100. edsl/coop/PriceFetcher.py +0 -58
  101. edsl/inference_services/MistralAIService.py +0 -120
  102. edsl/inference_services/TestService.py +0 -80
  103. edsl/inference_services/TogetherAIService.py +0 -170
  104. edsl/jobs/FailedQuestion.py +0 -78
  105. edsl/jobs/runners/JobsRunnerStatus.py +0 -331
  106. edsl/language_models/fake_openai_call.py +0 -15
  107. edsl/language_models/fake_openai_service.py +0 -61
  108. edsl/language_models/utilities.py +0 -61
  109. edsl/questions/QuestionBaseGenMixin.py +0 -133
  110. edsl/questions/QuestionBasePromptsMixin.py +0 -266
  111. edsl/questions/Quick.py +0 -41
  112. edsl/questions/ResponseValidatorABC.py +0 -170
  113. edsl/questions/decorators.py +0 -21
  114. edsl/questions/prompt_templates/question_budget.jinja +0 -13
  115. edsl/questions/prompt_templates/question_checkbox.jinja +0 -32
  116. edsl/questions/prompt_templates/question_extract.jinja +0 -11
  117. edsl/questions/prompt_templates/question_free_text.jinja +0 -3
  118. edsl/questions/prompt_templates/question_linear_scale.jinja +0 -11
  119. edsl/questions/prompt_templates/question_list.jinja +0 -17
  120. edsl/questions/prompt_templates/question_multiple_choice.jinja +0 -33
  121. edsl/questions/prompt_templates/question_numerical.jinja +0 -37
  122. edsl/questions/templates/__init__.py +0 -0
  123. edsl/questions/templates/budget/__init__.py +0 -0
  124. edsl/questions/templates/budget/answering_instructions.jinja +0 -7
  125. edsl/questions/templates/budget/question_presentation.jinja +0 -7
  126. edsl/questions/templates/checkbox/__init__.py +0 -0
  127. edsl/questions/templates/checkbox/answering_instructions.jinja +0 -10
  128. edsl/questions/templates/checkbox/question_presentation.jinja +0 -22
  129. edsl/questions/templates/extract/__init__.py +0 -0
  130. edsl/questions/templates/extract/answering_instructions.jinja +0 -7
  131. edsl/questions/templates/extract/question_presentation.jinja +0 -1
  132. edsl/questions/templates/free_text/__init__.py +0 -0
  133. edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
  134. edsl/questions/templates/free_text/question_presentation.jinja +0 -1
  135. edsl/questions/templates/likert_five/__init__.py +0 -0
  136. edsl/questions/templates/likert_five/answering_instructions.jinja +0 -10
  137. edsl/questions/templates/likert_five/question_presentation.jinja +0 -12
  138. edsl/questions/templates/linear_scale/__init__.py +0 -0
  139. edsl/questions/templates/linear_scale/answering_instructions.jinja +0 -5
  140. edsl/questions/templates/linear_scale/question_presentation.jinja +0 -5
  141. edsl/questions/templates/list/__init__.py +0 -0
  142. edsl/questions/templates/list/answering_instructions.jinja +0 -4
  143. edsl/questions/templates/list/question_presentation.jinja +0 -5
  144. edsl/questions/templates/multiple_choice/__init__.py +0 -0
  145. edsl/questions/templates/multiple_choice/answering_instructions.jinja +0 -9
  146. edsl/questions/templates/multiple_choice/html.jinja +0 -0
  147. edsl/questions/templates/multiple_choice/question_presentation.jinja +0 -12
  148. edsl/questions/templates/numerical/__init__.py +0 -0
  149. edsl/questions/templates/numerical/answering_instructions.jinja +0 -8
  150. edsl/questions/templates/numerical/question_presentation.jinja +0 -7
  151. edsl/questions/templates/rank/__init__.py +0 -0
  152. edsl/questions/templates/rank/answering_instructions.jinja +0 -11
  153. edsl/questions/templates/rank/question_presentation.jinja +0 -15
  154. edsl/questions/templates/top_k/__init__.py +0 -0
  155. edsl/questions/templates/top_k/answering_instructions.jinja +0 -8
  156. edsl/questions/templates/top_k/question_presentation.jinja +0 -22
  157. edsl/questions/templates/yes_no/__init__.py +0 -0
  158. edsl/questions/templates/yes_no/answering_instructions.jinja +0 -6
  159. edsl/questions/templates/yes_no/question_presentation.jinja +0 -12
  160. edsl/results/DatasetTree.py +0 -145
  161. edsl/results/Selector.py +0 -118
  162. edsl/results/tree_explore.py +0 -115
  163. edsl/surveys/instructions/ChangeInstruction.py +0 -47
  164. edsl/surveys/instructions/Instruction.py +0 -34
  165. edsl/surveys/instructions/InstructionCollection.py +0 -77
  166. edsl/surveys/instructions/__init__.py +0 -0
  167. edsl/templates/error_reporting/base.html +0 -24
  168. edsl/templates/error_reporting/exceptions_by_model.html +0 -35
  169. edsl/templates/error_reporting/exceptions_by_question_name.html +0 -17
  170. edsl/templates/error_reporting/exceptions_by_type.html +0 -17
  171. edsl/templates/error_reporting/interview_details.html +0 -116
  172. edsl/templates/error_reporting/interviews.html +0 -10
  173. edsl/templates/error_reporting/overview.html +0 -5
  174. edsl/templates/error_reporting/performance_plot.html +0 -2
  175. edsl/templates/error_reporting/report.css +0 -74
  176. edsl/templates/error_reporting/report.html +0 -118
  177. edsl/templates/error_reporting/report.js +0 -25
  178. edsl-0.1.33.dist-info/RECORD +0 -295
  179. {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/LICENSE +0 -0
  180. {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/WHEEL +0 -0

edsl/inference_services/TestService.py
@@ -1,80 +0,0 @@
- from typing import Any, List
- import os
- import asyncio
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
- from edsl.inference_services.rate_limits_cache import rate_limits
- from edsl.utilities.utilities import fix_partial_correct_response
-
- from edsl.enums import InferenceServiceType
- import random
-
-
- class TestService(InferenceServiceABC):
-     """OpenAI service class."""
-
-     _inference_service_ = "test"
-     _env_key_name_ = None
-     _base_url_ = None
-
-     _sync_client_ = None
-     _async_client_ = None
-
-     _sync_client_instance = None
-     _async_client_instance = None
-
-     key_sequence = None
-     usage_sequence = None
-     model_exclude_list = []
-     input_token_name = "prompt_tokens"
-     output_token_name = "completion_tokens"
-
-     @classmethod
-     def available(cls) -> list[str]:
-         return ["test"]
-
-     @classmethod
-     def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
-         throw_exception = False
-
-         class TestServiceLanguageModel(LanguageModel):
-             _model_ = "test"
-             _parameters_ = {"temperature": 0.5}
-             _inference_service_ = InferenceServiceType.TEST.value
-             usage_sequence = ["usage"]
-             key_sequence = ["message", 0, "text"]
-             input_token_name = cls.input_token_name
-             output_token_name = cls.output_token_name
-             _rpm = 1000
-             _tpm = 100000
-
-             @property
-             def _canned_response(self):
-                 if hasattr(self, "canned_response"):
-                     return self.canned_response
-                 else:
-                     return "Hello, world"
-
-             async def async_execute_model_call(
-                 self,
-                 user_prompt: str,
-                 system_prompt: str,
-                 encoded_image=None,
-             ) -> dict[str, Any]:
-                 await asyncio.sleep(0.1)
-                 # return {"message": """{"answer": "Hello, world"}"""}
-
-                 if hasattr(self, "throw_exception") and self.throw_exception:
-                     if hasattr(self, "exception_probability"):
-                         p = self.exception_probability
-                     else:
-                         p = 1
-
-                     if random.random() < p:
-                         raise Exception("This is a test error")
-                 return {
-                     "message": [{"text": f"{self._canned_response}"}],
-                     "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                 }
-
-         return TestServiceLanguageModel
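
The removed test model's raw response is navigated with key_sequence = ["message", 0, "text"] and usage_sequence = ["usage"]. A self-contained sketch of that key-sequence extraction pattern follows; it mirrors the shape of the code above but is not EDSL's actual API:

    from functools import reduce

    # Walk a nested response dict with a key sequence, as the removed
    # TestService's key_sequence/usage_sequence imply. Illustrative only.
    def follow(response, key_sequence):
        return reduce(lambda node, key: node[key], key_sequence, response)

    raw = {
        "message": [{"text": "Hello, world"}],
        "usage": {"prompt_tokens": 1, "completion_tokens": 1},
    }
    assert follow(raw, ["message", 0, "text"]) == "Hello, world"
    assert follow(raw, ["usage"]) == {"prompt_tokens": 1, "completion_tokens": 1}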

edsl/inference_services/TogetherAIService.py
@@ -1,170 +0,0 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List
-
- # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
-
- from edsl.inference_services.OpenAIService import OpenAIService
- import openai
-
-
- class TogetherAIService(OpenAIService):
-     """DeepInfra service class."""
-
-     _inference_service_ = "together"
-     _env_key_name_ = "TOGETHER_API_KEY"
-     _base_url_ = "https://api.together.xyz/v1"
-     _models_list_cache: List[str] = []
-
-     # These are non-serverless models. There was no api param to filter them
-     model_exclude_list = [
-         "EleutherAI/llemma_7b",
-         "HuggingFaceH4/zephyr-7b-beta",
-         "Nexusflow/NexusRaven-V2-13B",
-         "NousResearch/Hermes-2-Theta-Llama-3-70B",
-         "NousResearch/Nous-Capybara-7B-V1p9",
-         "NousResearch/Nous-Hermes-13b",
-         "NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
-         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
-         "NousResearch/Nous-Hermes-Llama2-13b",
-         "NousResearch/Nous-Hermes-Llama2-70b",
-         "NousResearch/Nous-Hermes-llama-2-7b",
-         "NumbersStation/nsql-llama-2-7B",
-         "Open-Orca/Mistral-7B-OpenOrca",
-         "Phind/Phind-CodeLlama-34B-Python-v1",
-         "Phind/Phind-CodeLlama-34B-v2",
-         "Qwen/Qwen1.5-0.5B",
-         "Qwen/Qwen1.5-0.5B-Chat",
-         "Qwen/Qwen1.5-1.8B",
-         "Qwen/Qwen1.5-1.8B-Chat",
-         "Qwen/Qwen1.5-14B",
-         "Qwen/Qwen1.5-14B-Chat",
-         "Qwen/Qwen1.5-32B",
-         "Qwen/Qwen1.5-32B-Chat",
-         "Qwen/Qwen1.5-4B",
-         "Qwen/Qwen1.5-4B-Chat",
-         "Qwen/Qwen1.5-72B",
-         "Qwen/Qwen1.5-7B",
-         "Qwen/Qwen1.5-7B-Chat",
-         "Qwen/Qwen2-1.5B",
-         "Qwen/Qwen2-1.5B-Instruct",
-         "Qwen/Qwen2-72B",
-         "Qwen/Qwen2-7B",
-         "Qwen/Qwen2-7B-Instruct",
-         "SG161222/Realistic_Vision_V3.0_VAE",
-         "Snowflake/snowflake-arctic-instruct",
-         "Undi95/ReMM-SLERP-L2-13B",
-         "Undi95/Toppy-M-7B",
-         "WizardLM/WizardCoder-Python-34B-V1.0",
-         "WizardLM/WizardLM-13B-V1.2",
-         "WizardLM/WizardLM-70B-V1.0",
-         "allenai/OLMo-7B",
-         "allenai/OLMo-7B-Instruct",
-         "bert-base-uncased",
-         "codellama/CodeLlama-13b-Instruct-hf",
-         "codellama/CodeLlama-13b-Python-hf",
-         "codellama/CodeLlama-13b-hf",
-         "codellama/CodeLlama-34b-Python-hf",
-         "codellama/CodeLlama-34b-hf",
-         "codellama/CodeLlama-70b-Instruct-hf",
-         "codellama/CodeLlama-70b-Python-hf",
-         "codellama/CodeLlama-70b-hf",
-         "codellama/CodeLlama-7b-Instruct-hf",
-         "codellama/CodeLlama-7b-Python-hf",
-         "codellama/CodeLlama-7b-hf",
-         "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
-         "deepseek-ai/deepseek-coder-33b-instruct",
-         "garage-bAInd/Platypus2-70B-instruct",
-         "google/gemma-2b",
-         "google/gemma-7b",
-         "google/gemma-7b-it",
-         "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
-         "hazyresearch/M2-BERT-2k-Retrieval-Encoder-V1",
-         "huggyllama/llama-13b",
-         "huggyllama/llama-30b",
-         "huggyllama/llama-65b",
-         "huggyllama/llama-7b",
-         "lmsys/vicuna-13b-v1.3",
-         "lmsys/vicuna-13b-v1.5",
-         "lmsys/vicuna-13b-v1.5-16k",
-         "lmsys/vicuna-7b-v1.3",
-         "lmsys/vicuna-7b-v1.5",
-         "meta-llama/Llama-2-13b-hf",
-         "meta-llama/Llama-2-70b-chat-hf",
-         "meta-llama/Llama-2-7b-hf",
-         "meta-llama/Llama-3-70b-hf",
-         "meta-llama/Llama-3-8b-hf",
-         "meta-llama/Meta-Llama-3-70B",
-         "meta-llama/Meta-Llama-3-70B-Instruct",
-         "meta-llama/Meta-Llama-3-8B-Instruct",
-         "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
-         "meta-llama/Meta-Llama-3.1-70B-Reference",
-         "meta-llama/Meta-Llama-3.1-8B-Reference",
-         "microsoft/phi-2",
-         "mistralai/Mixtral-8x22B",
-         "openchat/openchat-3.5-1210",
-         "prompthero/openjourney",
-         "runwayml/stable-diffusion-v1-5",
-         "sentence-transformers/msmarco-bert-base-dot-v5",
-         "snorkelai/Snorkel-Mistral-PairRM-DPO",
-         "stabilityai/stable-diffusion-2-1",
-         "teknium/OpenHermes-2-Mistral-7B",
-         "teknium/OpenHermes-2p5-Mistral-7B",
-         "togethercomputer/CodeLlama-13b-Instruct",
-         "togethercomputer/CodeLlama-13b-Python",
-         "togethercomputer/CodeLlama-34b",
-         "togethercomputer/CodeLlama-34b-Python",
-         "togethercomputer/CodeLlama-7b-Instruct",
-         "togethercomputer/CodeLlama-7b-Python",
-         "togethercomputer/Koala-13B",
-         "togethercomputer/Koala-7B",
-         "togethercomputer/LLaMA-2-7B-32K",
-         "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
-         "togethercomputer/StripedHyena-Hessian-7B",
-         "togethercomputer/alpaca-7b",
-         "togethercomputer/evo-1-131k-base",
-         "togethercomputer/evo-1-8k-base",
-         "togethercomputer/guanaco-13b",
-         "togethercomputer/guanaco-33b",
-         "togethercomputer/guanaco-65b",
-         "togethercomputer/guanaco-7b",
-         "togethercomputer/llama-2-13b",
-         "togethercomputer/llama-2-70b-chat",
-         "togethercomputer/llama-2-7b",
-         "wavymulder/Analog-Diffusion",
-         "zero-one-ai/Yi-34B",
-         "zero-one-ai/Yi-34B-Chat",
-         "zero-one-ai/Yi-6B",
-     ]
-
-     _sync_client_ = openai.OpenAI
-     _async_client_ = openai.AsyncOpenAI
-
-     @classmethod
-     def get_model_list(cls):
-         # Togheter.ai has a different response in model list then openai
-         # and the OpenAI class returns an error when calling .models.list()
-         import requests
-         import os
-
-         url = "https://api.together.xyz/v1/models?filter=serverless"
-         token = os.getenv(cls._env_key_name_)
-         headers = {"accept": "application/json", "authorization": f"Bearer {token}"}
-
-         response = requests.get(url, headers=headers)
-         return response.json()
-
-     @classmethod
-     def available(cls) -> List[str]:
-         if not cls._models_list_cache:
-             try:
-                 cls._models_list_cache = [
-                     m["id"]
-                     for m in cls.get_model_list()
-                     if m["id"] not in cls.model_exclude_list
-                 ]
-             except Exception as e:
-                 raise
-         return cls._models_list_cache
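
In the removed class, get_model_list bypasses the OpenAI client because Together's /models payload differs from OpenAI's, and available() filters out non-serverless models and memoizes the result. A standalone sketch of that fetch-filter-cache shape (abbreviated exclude set; not wired into EDSL's service registry):

    import os
    import requests

    EXCLUDE = {"bert-base-uncased", "microsoft/phi-2"}  # abbreviated for the sketch
    _models_cache: list = []

    def available() -> list:
        # Fetch the serverless model list once, drop excluded ids, memoize.
        global _models_cache
        if not _models_cache:
            url = "https://api.together.xyz/v1/models?filter=serverless"
            headers = {
                "accept": "application/json",
                "authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}",
            }
            models = requests.get(url, headers=headers).json()
            _models_cache = [m["id"] for m in models if m["id"] not in EXCLUDE]
        return _models_cache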

edsl/jobs/FailedQuestion.py
@@ -1,78 +0,0 @@
- from edsl.questions import QuestionBase
- from edsl import Question, Scenario, Model, Agent
-
- from edsl.language_models.LanguageModel import LanguageModel
-
-
- class FailedQuestion:
-     # tests/jobs/test_Interview.py::test_handle_model_exceptions
-
-     # (Pdb) dir(self.exception.__traceback__)
-     # ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next']
-
-     def __init__(
-         self, question, scenario, model, agent, raw_model_response, exception, prompts
-     ):
-         self.question = question
-         self.scenario = scenario
-         self.model = model
-         self.agent = agent
-         self.raw_model_response = raw_model_response # JSON
-         self.exception = exception
-         self.prompts = prompts
-
-     def to_dict(self):
-         return {
-             "question": self.question._to_dict(),
-             "scenario": self.scenario._to_dict(),
-             "model": self.model._to_dict(),
-             "agent": self.agent._to_dict(),
-             "raw_model_response": self.raw_model_response,
-             "exception": self.exception.__class__.__name__, # self.exception,
-             "prompts": self.prompts,
-         }
-
-     @classmethod
-     def from_dict(cls, data):
-         question = QuestionBase.from_dict(data["question"])
-         scenario = Scenario.from_dict(data["scenario"])
-         model = LanguageModel.from_dict(data["model"])
-         agent = Agent.from_dict(data["agent"])
-         raw_model_response = data["raw_model_response"]
-         exception = data["exception"]
-         prompts = data["prompts"]
-         return cls(
-             question, scenario, model, agent, raw_model_response, exception, prompts
-         )
-
-     def __repr__(self):
-         return f"{self.__class__.__name__}(question={repr(self.question)}, scenario={repr(self.scenario)}, model={repr(self.model)}, agent={repr(self.agent)}, raw_model_response={repr(self.raw_model_response)}, exception={repr(self.exception)})"
-
-     @property
-     def jobs(self):
-         return self.question.by(self.scenario).by(self.agent).by(self.model)
-
-     def rerun(self):
-         results = self.jobs.run()
-         return results
-
-     def help(self):
-         pass
-
-     @classmethod
-     def example(cls):
-         from edsl.language_models.utilities import create_language_model
-         from edsl.language_models.utilities import create_survey
-
-         survey = create_survey(2, chained=False, take_scenario=False)
-         fail_at_number = 1
-         model = create_language_model(ValueError, fail_at_number)()
-         from edsl import Survey
-
-         results = survey.by(model).run()
-         return results.failed_questions[0][0]
-
-
- if __name__ == "__main__":
-     fq = FailedQuestion.example()
-     new_fq = FailedQuestion.from_dict(fq.to_dict())
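
Note the serialization asymmetry in the class above: to_dict() stores only exception.__class__.__name__, so a round trip yields a string where the original held an exception object. A hypothetical round-trip sketch, assuming an environment where the removed module is still importable:

    # FailedQuestion.example() runs a small survey with a model rigged to
    # raise ValueError (per the code above); whether it runs depends on the
    # installed EDSL version.
    from edsl.jobs.FailedQuestion import FailedQuestion

    fq = FailedQuestion.example()
    restored = FailedQuestion.from_dict(fq.to_dict())
    print(type(fq.exception).__name__)   # ValueError (an exception object)
    print(restored.exception)            # "ValueError" (just the class name)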

edsl/jobs/runners/JobsRunnerStatus.py
@@ -1,331 +0,0 @@
- from __future__ import annotations
-
- import time
- from dataclasses import dataclass, asdict
-
- from typing import List, DefaultDict, Optional, Type, Literal
- from collections import UserDict, defaultdict
-
- from rich.text import Text
- from rich.box import SIMPLE
- from rich.table import Table
- from rich.live import Live
- from rich.panel import Panel
- from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn
- from rich.layout import Layout
- from rich.console import Group
- from rich import box
-
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
- from edsl.jobs.tokens.TokenUsage import TokenUsage
- from edsl.enums import get_token_pricing
- from edsl.jobs.tasks.task_status_enum import TaskStatus
-
- InterviewTokenUsageMapping = DefaultDict[str, InterviewTokenUsage]
-
- from edsl.jobs.interviews.InterviewStatistic import InterviewStatistic
- from edsl.jobs.interviews.InterviewStatisticsCollection import (
-     InterviewStatisticsCollection,
- )
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
-
-
- @dataclass
- class ModelInfo:
-     model_name: str
-     TPM_limit_k: float
-     RPM_limit_k: float
-     num_tasks_waiting: int
-     token_usage_info: dict
-
-
- @dataclass
- class ModelTokenUsageStats:
-     token_usage_type: str
-     details: List[dict]
-     cost: str
-
-
- class Stats:
-     def elapsed_time(self):
-         InterviewStatistic("elapsed_time", value=elapsed_time, digits=1, units="sec.")
-
-
- class JobsRunnerStatus:
-     def __init__(
-         self, jobs_runner: "JobsRunnerAsyncio", n: int, refresh_rate: float = 0.25
-     ):
-         self.jobs_runner = jobs_runner
-         self.start_time = time.time()
-         self.completed_interviews = []
-         self.refresh_rate = refresh_rate
-         self.statistics = [
-             "elapsed_time",
-             "total_interviews_requested",
-             "completed_interviews",
-             # "percent_complete",
-             "average_time_per_interview",
-             # "task_remaining",
-             "estimated_time_remaining",
-             "exceptions",
-             "unfixed_exceptions",
-             "throughput",
-         ]
-         self.num_total_interviews = n * len(self.jobs_runner.interviews)
-
-         self.distinct_models = list(
-             set(i.model.model for i in self.jobs_runner.interviews)
-         )
-
-         self.completed_interview_by_model = defaultdict(list)
-
-     def add_completed_interview(self, result):
-         self.completed_interviews.append(result.interview_hash)
-
-         relevant_model = result.model.model
-         self.completed_interview_by_model[relevant_model].append(result.interview_hash)
-
-     def _compute_statistic(self, stat_name: str):
-         completed_tasks = self.completed_interviews
-         elapsed_time = time.time() - self.start_time
-         interviews = self.jobs_runner.total_interviews
-
-         stat_definitions = {
-             "elapsed_time": lambda: InterviewStatistic(
-                 "elapsed_time", value=elapsed_time, digits=1, units="sec."
-             ),
-             "total_interviews_requested": lambda: InterviewStatistic(
-                 "total_interviews_requested", value=len(interviews), units=""
-             ),
-             "completed_interviews": lambda: InterviewStatistic(
-                 "completed_interviews", value=len(completed_tasks), units=""
-             ),
-             "percent_complete": lambda: InterviewStatistic(
-                 "percent_complete",
-                 value=(
-                     len(completed_tasks) / len(interviews) * 100
-                     if len(interviews) > 0
-                     else 0
-                 ),
-                 digits=1,
-                 units="%",
-             ),
-             "average_time_per_interview": lambda: InterviewStatistic(
-                 "average_time_per_interview",
-                 value=elapsed_time / len(completed_tasks) if completed_tasks else 0,
-                 digits=2,
-                 units="sec.",
-             ),
-             "task_remaining": lambda: InterviewStatistic(
-                 "task_remaining", value=len(interviews) - len(completed_tasks), units=""
-             ),
-             "estimated_time_remaining": lambda: InterviewStatistic(
-                 "estimated_time_remaining",
-                 value=(
-                     (len(interviews) - len(completed_tasks))
-                     * (elapsed_time / len(completed_tasks))
-                     if len(completed_tasks) > 0
-                     else 0
-                 ),
-                 digits=1,
-                 units="sec.",
-             ),
-             "exceptions": lambda: InterviewStatistic(
-                 "exceptions",
-                 value=sum(len(i.exceptions) for i in interviews),
-                 units="",
-             ),
-             "unfixed_exceptions": lambda: InterviewStatistic(
-                 "unfixed_exceptions",
-                 value=sum(i.exceptions.num_unfixed() for i in interviews),
-                 units="",
-             ),
-             "throughput": lambda: InterviewStatistic(
-                 "throughput",
-                 value=len(completed_tasks) / elapsed_time if elapsed_time > 0 else 0,
-                 digits=2,
-                 units="interviews/sec.",
-             ),
-         }
-         return stat_definitions[stat_name]()
-
-     def create_progress_bar(self):
-         return Progress(
-             TextColumn("[progress.description]{task.description}"),
-             BarColumn(),
-             TaskProgressColumn(),
-             TextColumn("{task.completed}/{task.total}"),
-         )
-
-     def generate_model_queues_table(self):
-         table = Table(show_header=False, box=box.SIMPLE)
-         table.add_column("Info", style="cyan")
-         table.add_column("Value", style="magenta")
-         # table.add_row("Bucket collection", str(self.jobs_runner.bucket_collection))
-         for model, bucket in self.jobs_runner.bucket_collection.items():
-             table.add_row(Text(model.model, style="bold blue"), "")
-             bucket_types = ["requests_bucket", "tokens_bucket"]
-             for bucket_type in bucket_types:
-                 table.add_row(Text(" " + bucket_type, style="green"), "")
-                 # table.add_row(
-                 #     f" Current level (capacity = {round(getattr(bucket, bucket_type).capacity, 3)})",
-                 #     str(round(getattr(bucket, bucket_type).tokens, 3)),
-                 # )
-                 num_requests = getattr(bucket, bucket_type).num_requests
-                 num_released = getattr(bucket, bucket_type).num_released
-                 tokens_returned = getattr(bucket, bucket_type).tokens_returned
-                 # table.add_row(
-                 #     f" Requested",
-                 #     str(num_requests),
-                 # )
-                 # table.add_row(
-                 #     f" Completed",
-                 #     str(num_released),
-                 # )
-                 table.add_row(
-                     " Completed vs. Requested", f"{num_released} vs. {num_requests}"
-                 )
-                 table.add_row(
-                     " Added tokens (from cache)",
-                     str(tokens_returned),
-                 )
-                 if bucket_type == "tokens_bucket":
-                     rate_name = "TPM"
-                 else:
-                     rate_name = "RPM"
-                 target_rate = round(getattr(bucket, bucket_type).target_rate, 1)
-                 table.add_row(
-                     f" Empirical {rate_name} (target = {target_rate})",
-                     str(round(getattr(bucket, bucket_type).get_throughput(), 0)),
-                 )
-
-         return table
-
-     def generate_layout(self):
-         progress = self.create_progress_bar()
-         task_ids = []
-         for model in self.distinct_models:
-             task_id = progress.add_task(
-                 f"[cyan]{model}...",
-                 total=int(self.num_total_interviews / len(self.distinct_models)),
-             )
-             task_ids.append((model, task_id))
-
-         progress_height = min(5, 2 + len(self.distinct_models))
-         layout = Layout()
-
-         # Create the top row with only the progress panel
-         layout.split_column(
-             Layout(
-                 Panel(
-                     progress,
-                     title="Interview Progress",
-                     border_style="cyan",
-                     box=box.ROUNDED,
-                 ),
-                 name="progress",
-                 size=progress_height, # Adjusted size
-             ),
-             Layout(name="bottom_row"), # Adjusted size
-         )
-
-         # Split the bottom row into two columns for metrics and model queues
-         layout["bottom_row"].split_row(
-             Layout(
-                 Panel(
-                     self.generate_metrics_table(),
-                     title="Metrics",
-                     border_style="magenta",
-                     box=box.ROUNDED,
-                 ),
-                 name="metrics",
-             ),
-             Layout(
-                 Panel(
-                     self.generate_model_queues_table(),
-                     title="Model Queues",
-                     border_style="yellow",
-                     box=box.ROUNDED,
-                 ),
-                 name="model_queues",
-             ),
-         )
-
-         return layout, progress, task_ids
-
-     def generate_metrics_table(self):
-         table = Table(show_header=True, header_style="bold magenta", box=box.SIMPLE)
-         table.add_column("Metric", style="cyan", no_wrap=True)
-         table.add_column("Value", justify="right")
-
-         for stat_name in self.statistics:
-             pretty_name, value = list(self._compute_statistic(stat_name).items())[0]
-             # breakpoint()
-             table.add_row(pretty_name, value)
-         return table
-
-     def update_progress(self):
-         layout, progress, task_ids = self.generate_layout()
-
-         with Live(
-             layout, refresh_per_second=int(1 / self.refresh_rate), transient=True
-         ) as live:
-             while len(self.completed_interviews) < len(
-                 self.jobs_runner.total_interviews
-             ):
-                 completed_tasks = len(self.completed_interviews)
-                 total_tasks = len(self.jobs_runner.total_interviews)
-
-                 for model, task_id in task_ids:
-                     completed_tasks = len(self.completed_interview_by_model[model])
-                     progress.update(
-                         task_id,
-                         completed=completed_tasks,
-                         description=f"[cyan]Conducting interviews for {model}...",
-                     )
-
-                 layout["metrics"].update(
-                     Panel(
-                         self.generate_metrics_table(),
-                         title="Metrics",
-                         border_style="magenta",
-                         box=box.ROUNDED,
-                     )
-                 )
-                 layout["model_queues"].update(
-                     Panel(
-                         self.generate_model_queues_table(),
-                         title="Final Model Queues",
-                         border_style="yellow",
-                         box=box.ROUNDED,
-                     )
-                 )
-
-                 time.sleep(self.refresh_rate)
-
-             # Final update
-             for model, task_id in task_ids:
-                 completed_tasks = len(self.completed_interview_by_model[model])
-                 progress.update(
-                     task_id,
-                     completed=completed_tasks,
-                     description=f"[cyan]Conducting interviews for {model}...",
-                 )
-
-             layout["metrics"].update(
-                 Panel(
-                     self.generate_metrics_table(),
-                     title="Final Metrics",
-                     border_style="magenta",
-                     box=box.ROUNDED,
-                 )
-             )
-             live.update(layout)
-             time.sleep(1) # Show final state for 1 second
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
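
Stripped of the Rich layout and EDSL types, the progress arithmetic in _compute_statistic above reduces to a few ratios. A minimal standalone sketch using the same formulas:

    import time

    # Same formulas as the removed "throughput" and
    # "estimated_time_remaining" entries, detached from EDSL.
    def progress_stats(completed: int, total: int, start_time: float) -> dict:
        elapsed = time.time() - start_time
        return {
            "elapsed_sec": elapsed,
            "throughput": completed / elapsed if elapsed > 0 else 0,
            "estimated_time_remaining": (
                (total - completed) * (elapsed / completed) if completed > 0 else 0
            ),
        }

    print(progress_stats(completed=4, total=10, start_time=time.time() - 10))
    # roughly: elapsed 10.0 sec., throughput 0.4 interviews/sec., ~15.0 sec. remaining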

edsl/language_models/fake_openai_call.py
@@ -1,15 +0,0 @@
- from openai import AsyncOpenAI
- import asyncio
-
- client = AsyncOpenAI(base_url="http://127.0.0.1:8000/v1", api_key="fake_key")
-
-
- async def main():
-     response = await client.chat.completions.create(
-         model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Question XX42"}]
-     )
-     print(response)
-
-
- if __name__ == "__main__":
-     asyncio.run(main())
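
This script expects an OpenAI-compatible server on 127.0.0.1:8000, the role played by the removed fake_openai_service.py (item 107 above). A minimal sketch of such a stub endpoint, with field names following the standard OpenAI chat-completions response shape (this is an illustrative stand-in, not the removed module's code):

    from fastapi import FastAPI

    app = FastAPI()

    @app.post("/v1/chat/completions")
    async def chat_completions(body: dict):
        # Return a canned, OpenAI-shaped chat completion for any request.
        return {
            "id": "chatcmpl-fake",
            "object": "chat.completion",
            "created": 0,
            "model": body.get("model", "gpt-3.5-turbo"),
            "choices": [{
                "index": 0,
                "message": {"role": "assistant", "content": "canned reply"},
                "finish_reason": "stop",
            }],
            "usage": {"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
        }

    # Run with: uvicorn this_module:app --host 127.0.0.1 --port 8000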