edsl 0.1.49__py3-none-any.whl → 0.1.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239)
  1. edsl/__init__.py +124 -53
  2. edsl/__version__.py +1 -1
  3. edsl/agents/agent.py +21 -21
  4. edsl/agents/agent_list.py +2 -5
  5. edsl/agents/exceptions.py +119 -5
  6. edsl/base/__init__.py +10 -35
  7. edsl/base/base_class.py +71 -36
  8. edsl/base/base_exception.py +204 -0
  9. edsl/base/data_transfer_models.py +1 -1
  10. edsl/base/exceptions.py +94 -0
  11. edsl/buckets/__init__.py +15 -1
  12. edsl/buckets/bucket_collection.py +3 -4
  13. edsl/buckets/exceptions.py +75 -0
  14. edsl/buckets/model_buckets.py +1 -2
  15. edsl/buckets/token_bucket.py +11 -6
  16. edsl/buckets/token_bucket_api.py +1 -2
  17. edsl/buckets/token_bucket_client.py +9 -7
  18. edsl/caching/cache.py +7 -2
  19. edsl/caching/cache_entry.py +10 -9
  20. edsl/caching/exceptions.py +113 -7
  21. edsl/caching/remote_cache_sync.py +1 -2
  22. edsl/caching/sql_dict.py +17 -12
  23. edsl/cli.py +43 -0
  24. edsl/config/config_class.py +30 -6
  25. edsl/conversation/Conversation.py +3 -2
  26. edsl/conversation/exceptions.py +58 -0
  27. edsl/conversation/mug_negotiation.py +0 -2
  28. edsl/coop/__init__.py +20 -1
  29. edsl/coop/coop.py +120 -29
  30. edsl/coop/exceptions.py +188 -9
  31. edsl/coop/price_fetcher.py +3 -6
  32. edsl/coop/utils.py +4 -6
  33. edsl/dataset/__init__.py +5 -4
  34. edsl/dataset/dataset.py +53 -43
  35. edsl/dataset/dataset_operations_mixin.py +86 -72
  36. edsl/dataset/dataset_tree.py +9 -5
  37. edsl/dataset/display/table_display.py +0 -2
  38. edsl/dataset/display/table_renderers.py +0 -1
  39. edsl/dataset/exceptions.py +125 -0
  40. edsl/dataset/file_exports.py +18 -11
  41. edsl/dataset/r/ggplot.py +13 -6
  42. edsl/display/__init__.py +27 -0
  43. edsl/display/core.py +147 -0
  44. edsl/display/plugin.py +189 -0
  45. edsl/display/utils.py +52 -0
  46. edsl/inference_services/__init__.py +9 -1
  47. edsl/inference_services/available_model_cache_handler.py +1 -1
  48. edsl/inference_services/available_model_fetcher.py +4 -5
  49. edsl/inference_services/data_structures.py +9 -6
  50. edsl/inference_services/exceptions.py +132 -1
  51. edsl/inference_services/inference_service_abc.py +2 -2
  52. edsl/inference_services/inference_services_collection.py +2 -6
  53. edsl/inference_services/registry.py +4 -3
  54. edsl/inference_services/service_availability.py +2 -1
  55. edsl/inference_services/services/anthropic_service.py +4 -1
  56. edsl/inference_services/services/aws_bedrock.py +13 -12
  57. edsl/inference_services/services/azure_ai.py +12 -10
  58. edsl/inference_services/services/deep_infra_service.py +1 -4
  59. edsl/inference_services/services/deep_seek_service.py +1 -5
  60. edsl/inference_services/services/google_service.py +6 -2
  61. edsl/inference_services/services/groq_service.py +1 -1
  62. edsl/inference_services/services/mistral_ai_service.py +4 -2
  63. edsl/inference_services/services/ollama_service.py +1 -1
  64. edsl/inference_services/services/open_ai_service.py +7 -5
  65. edsl/inference_services/services/perplexity_service.py +6 -2
  66. edsl/inference_services/services/test_service.py +8 -7
  67. edsl/inference_services/services/together_ai_service.py +2 -3
  68. edsl/inference_services/services/xai_service.py +1 -1
  69. edsl/instructions/__init__.py +1 -1
  70. edsl/instructions/change_instruction.py +3 -2
  71. edsl/instructions/exceptions.py +61 -0
  72. edsl/instructions/instruction.py +5 -2
  73. edsl/instructions/instruction_collection.py +2 -1
  74. edsl/instructions/instruction_handler.py +4 -9
  75. edsl/interviews/ReportErrors.py +0 -3
  76. edsl/interviews/__init__.py +9 -2
  77. edsl/interviews/answering_function.py +11 -13
  78. edsl/interviews/exception_tracking.py +14 -7
  79. edsl/interviews/exceptions.py +79 -0
  80. edsl/interviews/interview.py +32 -29
  81. edsl/interviews/interview_status_dictionary.py +4 -2
  82. edsl/interviews/interview_status_log.py +2 -1
  83. edsl/interviews/interview_task_manager.py +3 -3
  84. edsl/interviews/request_token_estimator.py +3 -1
  85. edsl/interviews/statistics.py +2 -3
  86. edsl/invigilators/__init__.py +7 -1
  87. edsl/invigilators/exceptions.py +79 -0
  88. edsl/invigilators/invigilator_base.py +0 -1
  89. edsl/invigilators/invigilators.py +8 -12
  90. edsl/invigilators/prompt_constructor.py +1 -5
  91. edsl/invigilators/prompt_helpers.py +8 -4
  92. edsl/invigilators/question_instructions_prompt_builder.py +1 -1
  93. edsl/invigilators/question_option_processor.py +9 -5
  94. edsl/invigilators/question_template_replacements_builder.py +3 -2
  95. edsl/jobs/__init__.py +3 -3
  96. edsl/jobs/async_interview_runner.py +24 -22
  97. edsl/jobs/check_survey_scenario_compatibility.py +7 -6
  98. edsl/jobs/data_structures.py +7 -4
  99. edsl/jobs/exceptions.py +177 -8
  100. edsl/jobs/fetch_invigilator.py +1 -1
  101. edsl/jobs/jobs.py +72 -67
  102. edsl/jobs/jobs_checks.py +2 -3
  103. edsl/jobs/jobs_component_constructor.py +2 -2
  104. edsl/jobs/jobs_pricing_estimation.py +3 -2
  105. edsl/jobs/jobs_remote_inference_logger.py +5 -4
  106. edsl/jobs/jobs_runner_asyncio.py +1 -2
  107. edsl/jobs/jobs_runner_status.py +8 -9
  108. edsl/jobs/remote_inference.py +26 -23
  109. edsl/jobs/results_exceptions_handler.py +8 -5
  110. edsl/key_management/__init__.py +3 -1
  111. edsl/key_management/exceptions.py +62 -0
  112. edsl/key_management/key_lookup.py +1 -1
  113. edsl/key_management/key_lookup_builder.py +37 -14
  114. edsl/key_management/key_lookup_collection.py +2 -0
  115. edsl/language_models/__init__.py +1 -1
  116. edsl/language_models/exceptions.py +302 -14
  117. edsl/language_models/language_model.py +4 -7
  118. edsl/language_models/model.py +4 -4
  119. edsl/language_models/model_list.py +1 -1
  120. edsl/language_models/price_manager.py +1 -1
  121. edsl/language_models/raw_response_handler.py +14 -9
  122. edsl/language_models/registry.py +17 -21
  123. edsl/language_models/repair.py +0 -6
  124. edsl/language_models/unused/fake_openai_service.py +0 -1
  125. edsl/load_plugins.py +69 -0
  126. edsl/logger.py +146 -0
  127. edsl/notebooks/notebook.py +1 -1
  128. edsl/notebooks/notebook_to_latex.py +0 -1
  129. edsl/plugins/__init__.py +63 -0
  130. edsl/plugins/built_in/export_example.py +50 -0
  131. edsl/plugins/built_in/pig_latin.py +67 -0
  132. edsl/plugins/cli.py +372 -0
  133. edsl/plugins/cli_typer.py +283 -0
  134. edsl/plugins/exceptions.py +31 -0
  135. edsl/plugins/hookspec.py +51 -0
  136. edsl/plugins/plugin_host.py +128 -0
  137. edsl/plugins/plugin_manager.py +633 -0
  138. edsl/plugins/plugins_registry.py +168 -0
  139. edsl/prompts/__init__.py +2 -0
  140. edsl/prompts/exceptions.py +107 -5
  141. edsl/prompts/prompt.py +14 -6
  142. edsl/questions/HTMLQuestion.py +5 -11
  143. edsl/questions/Quick.py +0 -1
  144. edsl/questions/__init__.py +2 -0
  145. edsl/questions/answer_validator_mixin.py +318 -318
  146. edsl/questions/compose_questions.py +2 -2
  147. edsl/questions/descriptors.py +10 -49
  148. edsl/questions/exceptions.py +278 -22
  149. edsl/questions/loop_processor.py +7 -5
  150. edsl/questions/prompt_templates/question_list.jinja +3 -0
  151. edsl/questions/question_base.py +14 -16
  152. edsl/questions/question_base_gen_mixin.py +2 -2
  153. edsl/questions/question_base_prompts_mixin.py +9 -3
  154. edsl/questions/question_budget.py +9 -5
  155. edsl/questions/question_check_box.py +3 -5
  156. edsl/questions/question_dict.py +171 -194
  157. edsl/questions/question_extract.py +1 -1
  158. edsl/questions/question_free_text.py +4 -6
  159. edsl/questions/question_functional.py +4 -3
  160. edsl/questions/question_list.py +36 -9
  161. edsl/questions/question_matrix.py +95 -61
  162. edsl/questions/question_multiple_choice.py +6 -4
  163. edsl/questions/question_numerical.py +2 -4
  164. edsl/questions/question_registry.py +4 -2
  165. edsl/questions/register_questions_meta.py +0 -1
  166. edsl/questions/response_validator_abc.py +7 -13
  167. edsl/questions/templates/dict/answering_instructions.jinja +1 -0
  168. edsl/questions/templates/rank/question_presentation.jinja +1 -1
  169. edsl/results/__init__.py +1 -1
  170. edsl/results/exceptions.py +141 -7
  171. edsl/results/report.py +0 -1
  172. edsl/results/result.py +4 -5
  173. edsl/results/results.py +10 -51
  174. edsl/results/results_selector.py +8 -4
  175. edsl/scenarios/PdfExtractor.py +2 -2
  176. edsl/scenarios/construct_download_link.py +69 -35
  177. edsl/scenarios/directory_scanner.py +33 -14
  178. edsl/scenarios/document_chunker.py +1 -1
  179. edsl/scenarios/exceptions.py +238 -14
  180. edsl/scenarios/file_methods.py +1 -1
  181. edsl/scenarios/file_store.py +7 -3
  182. edsl/scenarios/handlers/__init__.py +17 -0
  183. edsl/scenarios/handlers/docx_file_store.py +0 -5
  184. edsl/scenarios/handlers/pdf_file_store.py +0 -1
  185. edsl/scenarios/handlers/pptx_file_store.py +0 -5
  186. edsl/scenarios/handlers/py_file_store.py +0 -1
  187. edsl/scenarios/handlers/sql_file_store.py +1 -4
  188. edsl/scenarios/handlers/sqlite_file_store.py +0 -1
  189. edsl/scenarios/handlers/txt_file_store.py +1 -1
  190. edsl/scenarios/scenario.py +0 -1
  191. edsl/scenarios/scenario_list.py +152 -18
  192. edsl/scenarios/scenario_list_pdf_tools.py +1 -0
  193. edsl/scenarios/scenario_selector.py +0 -1
  194. edsl/surveys/__init__.py +3 -4
  195. edsl/surveys/dag/__init__.py +4 -2
  196. edsl/surveys/descriptors.py +1 -1
  197. edsl/surveys/edit_survey.py +1 -0
  198. edsl/surveys/exceptions.py +165 -9
  199. edsl/surveys/memory/__init__.py +5 -3
  200. edsl/surveys/memory/memory_management.py +1 -0
  201. edsl/surveys/memory/memory_plan.py +6 -15
  202. edsl/surveys/rules/__init__.py +5 -3
  203. edsl/surveys/rules/rule.py +1 -2
  204. edsl/surveys/rules/rule_collection.py +1 -1
  205. edsl/surveys/survey.py +12 -24
  206. edsl/surveys/survey_export.py +6 -3
  207. edsl/surveys/survey_flow_visualization.py +10 -1
  208. edsl/tasks/__init__.py +2 -0
  209. edsl/tasks/question_task_creator.py +3 -3
  210. edsl/tasks/task_creators.py +1 -3
  211. edsl/tasks/task_history.py +5 -7
  212. edsl/tasks/task_status_log.py +1 -2
  213. edsl/tokens/__init__.py +3 -1
  214. edsl/tokens/token_usage.py +1 -1
  215. edsl/utilities/__init__.py +21 -1
  216. edsl/utilities/decorators.py +1 -2
  217. edsl/utilities/markdown_to_docx.py +2 -2
  218. edsl/utilities/markdown_to_pdf.py +1 -1
  219. edsl/utilities/repair_functions.py +0 -1
  220. edsl/utilities/restricted_python.py +0 -1
  221. edsl/utilities/template_loader.py +2 -3
  222. edsl/utilities/utilities.py +8 -29
  223. {edsl-0.1.49.dist-info → edsl-0.1.50.dist-info}/METADATA +32 -2
  224. edsl-0.1.50.dist-info/RECORD +363 -0
  225. edsl-0.1.50.dist-info/entry_points.txt +3 -0
  226. edsl/dataset/smart_objects.py +0 -96
  227. edsl/exceptions/BaseException.py +0 -21
  228. edsl/exceptions/__init__.py +0 -54
  229. edsl/exceptions/configuration.py +0 -16
  230. edsl/exceptions/general.py +0 -34
  231. edsl/study/ObjectEntry.py +0 -173
  232. edsl/study/ProofOfWork.py +0 -113
  233. edsl/study/SnapShot.py +0 -80
  234. edsl/study/Study.py +0 -520
  235. edsl/study/__init__.py +0 -6
  236. edsl/utilities/interface.py +0 -135
  237. edsl-0.1.49.dist-info/RECORD +0 -347
  238. {edsl-0.1.49.dist-info → edsl-0.1.50.dist-info}/LICENSE +0 -0
  239. {edsl-0.1.49.dist-info → edsl-0.1.50.dist-info}/WHEEL +0 -0
edsl/inference_services/exceptions.py

@@ -2,4 +2,135 @@ from ..base import BaseException
 
 
 class InferenceServiceError(BaseException):
- relevant_doc = "https://docs.expectedparrot.com/"
+ """
+ Exception raised when an error occurs with an inference service.
+
+ This exception is raised in the following scenarios:
+ - When a service connection fails or times out
+ - When the API returns an error response (e.g., rate limit exceeded)
+ - When model parameters are invalid for the specified service
+ - When the service doesn't support requested functionality
+ - When there's an issue with authentication or API keys
+
+ To fix this error:
+ 1. Check your API key is valid and has appropriate permissions
+ 2. Verify your network connection is stable
+ 3. Ensure the requested model is available on the service
+ 4. Check if provided parameters are valid for the model
+ 5. Verify you're not exceeding service rate limits
+
+ If the issue persists, you may need to:
+ - Switch to a different service provider
+ - Use a different model with similar capabilities
+ - Implement retry logic with exponential backoff
+
+ Examples:
+ ```python
+ # Authentication error
+ model = Model("gpt-4", api_key="invalid-key") # Raises InferenceServiceError
+
+ # Invalid model parameters
+ model = Model("claude-3-opus", temperature=2.5) # Raises InferenceServiceError
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/remote_inference.html"
+
+
+ class InferenceServiceValueError(InferenceServiceError):
+ """
+ Exception raised when invalid values are provided to inference services.
+
+ This exception occurs when parameters, configurations, or inputs for an inference
+ service have invalid values, such as:
+ - Invalid model names or identifiers
+ - Out-of-range parameter values (e.g., temperature > 1.0)
+ - Incorrect service names
+ - Invalid regular expressions for pattern matching
+
+ Examples:
+ ```python
+ # Invalid model name
+ model = Model("non-existent-model") # Raises InferenceServiceValueError
+
+ # Out-of-range temperature
+ model = Model("gpt-4", temperature=2.5) # Raises InferenceServiceValueError
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/language_models.html#model-parameters"
+
+
+ class InferenceServiceIndexError(InferenceServiceError):
+ """
+ Exception raised when attempting to access an invalid index in a collection.
+
+ This exception occurs when trying to access elements outside the valid range
+ of an inference service's internal collections, such as:
+ - Accessing results that don't exist
+ - Using an invalid index in a list of models or services
+
+ Examples:
+ ```python
+ service_models = get_available_models_for_service("openai")
+ model = service_models[999] # Raises InferenceServiceIndexError if there aren't 1000 models
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/language_models.html#available-models"
+
+
+ class InferenceServiceNotImplementedError(InferenceServiceError):
+ """
+ Exception raised when a requested feature or method is not implemented.
+
+ This exception occurs when trying to use functionality that is defined in
+ the interface but not yet implemented in a particular service, such as:
+ - Using a feature only available in certain services
+ - Calling methods that are not implemented in the concrete service class
+ - Using functionality that is planned but not yet available
+
+ Examples:
+ ```python
+ # Using a feature only available in certain services
+ model = Model("test-model")
+ model.streaming_generate() # Raises InferenceServiceNotImplementedError if streaming is not supported
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/language_models.html#available-models"
+
+
+ class InferenceServiceRuntimeError(InferenceServiceError):
+ """
+ Exception raised when a runtime error occurs during inference.
+
+ This exception occurs when there is a problem during the execution
+ of an inference service operation, such as:
+ - Service availability changes during operation
+ - Network interruptions during API calls
+ - Resource limits reached during execution
+
+ Examples:
+ ```python
+ # Service becomes unavailable during operation
+ model = Model("gpt-4")
+ model.generate() # Might raise InferenceServiceRuntimeError if service goes down mid-request
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/remote_inference.html"
+
+
+ class InferenceServiceEnvironmentError(InferenceServiceError):
+ """
+ Exception raised when there's an issue with the environment configuration.
+
+ This exception occurs when the environment is not properly set up for
+ using a particular inference service, such as:
+ - Missing required environment variables (API keys, endpoints)
+ - Improperly formatted environment variables
+ - Insufficient permissions in the environment
+
+ Examples:
+ ```python
+ # Missing required environment variables
+ model = Model("azure-gpt4") # Raises InferenceServiceEnvironmentError if AZURE_API_KEY is not set
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/api_keys.html"
edsl/inference_services/inference_service_abc.py

@@ -1,7 +1,6 @@
 from abc import abstractmethod, ABC
 import re
 from datetime import datetime, timedelta
- from ..config import CONFIG


 class InferenceServiceABC(ABC):
@@ -27,7 +26,8 @@ class InferenceServiceABC(ABC):
 ]
 for attr in must_have_attributes:
 if not hasattr(cls, attr):
- raise NotImplementedError(
+ from edsl.inference_services.exceptions import InferenceServiceNotImplementedError
+ raise InferenceServiceNotImplementedError(
 f"Class {cls.__name__} must have a '{attr}' attribute."
 )

edsl/inference_services/inference_services_collection.py

@@ -1,8 +1,7 @@
- from functools import lru_cache
 from collections import defaultdict
- from typing import Optional, Protocol, Dict, List, Tuple, TYPE_CHECKING, Literal, Type, TypeVar, Union
- import os
+ from typing import Optional, Protocol, Dict, List, Tuple, TYPE_CHECKING

+ from edsl.enums import InferenceServiceLiteral
 from .inference_service_abc import InferenceServiceABC
 from .available_model_fetcher import AvailableModelFetcher
 from .exceptions import InferenceServiceError
@@ -17,9 +16,6 @@ class ModelCreator(Protocol):
 ...


- from edsl.enums import InferenceServiceLiteral
-
-
 class ModelResolver:
 def __init__(
 self,
edsl/inference_services/registry.py

@@ -1,8 +1,9 @@
 from .inference_services_collection import InferenceServicesCollection

- from .services import *
- from .services import __all__
+ # Import services module
+ from . import services

- services = [globals()[service_name] for service_name in __all__ if service_name in globals()]
+ # Use __all__ from services to get service class names
+ services = [getattr(services, service_name) for service_name in services.__all__]

 default = InferenceServicesCollection(services)
edsl/inference_services/service_availability.py

@@ -74,7 +74,8 @@ class ServiceAvailability:
 continue

 # If we get here, all sources failed
- raise RuntimeError(
+ from edsl.inference_services.exceptions import InferenceServiceRuntimeError
+ raise InferenceServiceRuntimeError(
 f"All sources failed to fetch models. Last error: {last_error}"
 )

edsl/inference_services/services/anthropic_service.py

@@ -1,10 +1,13 @@
 import os
- from typing import Any, Optional, List
+ from typing import Any, Optional, List, TYPE_CHECKING
 from anthropic import AsyncAnthropic

 from ..inference_service_abc import InferenceServiceABC
 from ...language_models import LanguageModel

+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore as Files
+

 class AnthropicService(InferenceServiceABC):
 """Anthropic service class."""
edsl/inference_services/services/aws_bedrock.py

@@ -1,12 +1,12 @@
 import os
- from typing import Any, List, Optional
- import re
+ from typing import Any, List, Optional, TYPE_CHECKING
 import boto3
 from botocore.exceptions import ClientError
 from ..inference_service_abc import InferenceServiceABC
 from ...language_models import LanguageModel
- import json
- from edsl.utilities.utilities import fix_partial_correct_response
+
+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore


 class AwsBedrockService(InferenceServiceABC):
@@ -78,9 +78,8 @@ class AwsBedrockService(InferenceServiceABC):
 ) -> dict[str, Any]:
 """Calls the AWS Bedrock API and returns the API response."""

- api_token = (
- self.api_token
- ) # call to check the if env variables are set.
+ # Ensure credentials are available
+ _ = self.api_token # call to check if env variables are set.

 region = os.getenv("AWS_REGION", "us-east-1")
 client = boto3.client("bedrock-runtime", region_name=region)
@@ -91,11 +90,13 @@ class AwsBedrockService(InferenceServiceABC):
 "content": [{"text": user_prompt}],
 }
 ]
- system = [
- {
- "text": system_prompt,
- }
- ]
+ # We'll need to handle system prompt in the future
+ # Commented out to avoid unused variable warning
+ # system_content = [
+ # {
+ # "text": system_prompt,
+ # }
+ # ]
 try:
 response = client.converse(
 modelId=self._model_,
edsl/inference_services/services/azure_ai.py

@@ -1,16 +1,15 @@
 import os
- from typing import Any, Optional, List
- import re
+ from typing import Any, Optional, List, TYPE_CHECKING
 from openai import AsyncAzureOpenAI
 from ..inference_service_abc import InferenceServiceABC
 from ...language_models import LanguageModel

+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore
+
 from azure.ai.inference.aio import ChatCompletionsClient
 from azure.core.credentials import AzureKeyCredential
 from azure.ai.inference.models import SystemMessage, UserMessage
- import asyncio
- import json
- from edsl.utilities.utilities import fix_partial_correct_response


 def json_handle_none(value: Any) -> Any:
@@ -47,7 +46,8 @@ class AzureAIService(InferenceServiceABC):
 out = []
 azure_endpoints = os.getenv("AZURE_ENDPOINT_URL_AND_KEY", None)
 if not azure_endpoints:
- raise EnvironmentError(f"AZURE_ENDPOINT_URL_AND_KEY is not defined")
+ from edsl.inference_services.exceptions import InferenceServiceEnvironmentError
+ raise InferenceServiceEnvironmentError("AZURE_ENDPOINT_URL_AND_KEY is not defined")
 azure_endpoints = azure_endpoints.split(",")
 for data in azure_endpoints:
 try:
@@ -131,21 +131,23 @@ class AzureAIService(InferenceServiceABC):
 api_key = cls._model_id_to_endpoint_and_key[model_name][
 "azure_endpoint_key"
 ]
- except:
+ except (KeyError, TypeError):
 api_key = None

 if not api_key:
- raise EnvironmentError(
+ from edsl.inference_services.exceptions import InferenceServiceEnvironmentError
+ raise InferenceServiceEnvironmentError(
 f"AZURE_ENDPOINT_URL_AND_KEY doesn't have the endpoint:key pair for your model: {model_name}"
 )

 try:
 endpoint = cls._model_id_to_endpoint_and_key[model_name]["endpoint"]
- except:
+ except (KeyError, TypeError):
 endpoint = None

 if not endpoint:
- raise EnvironmentError(
+ from edsl.inference_services.exceptions import InferenceServiceEnvironmentError
+ raise InferenceServiceEnvironmentError(
 f"AZURE_ENDPOINT_URL_AND_KEY doesn't have the endpoint:key pair for your model: {model_name}"
 )

edsl/inference_services/services/deep_infra_service.py

@@ -1,7 +1,4 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List
+ from typing import List


 from .open_ai_service import OpenAIService
edsl/inference_services/services/deep_seek_service.py

@@ -1,9 +1,5 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List
+ from typing import List

- from ...language_models import LanguageModel
 from .open_ai_service import OpenAIService


edsl/inference_services/services/google_service.py

@@ -1,5 +1,5 @@
 # import os
- from typing import Any, Dict, List, Optional
+ from typing import Any, Dict, List, Optional, TYPE_CHECKING
 import google
 import google.generativeai as genai
 from google.generativeai.types import GenerationConfig
@@ -7,6 +7,10 @@ from google.api_core.exceptions import InvalidArgument

 # from edsl.exceptions.general import MissingAPIKeyError
 from ..inference_service_abc import InferenceServiceABC
+ from ...language_models import LanguageModel
+
+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore as Files
 #from ...coop import Coop

 safety_settings = [
@@ -115,7 +119,7 @@ class GoogleService(InferenceServiceABC):
 safety_settings=safety_settings,
 system_instruction=system_prompt,
 )
- except InvalidArgument as e:
+ except InvalidArgument:
 print(
 f"This model, {self._model_}, does not support system_instruction"
 )
edsl/inference_services/services/groq_service.py

@@ -1,4 +1,4 @@
- from typing import Any, List
+ from typing import List

 import groq

edsl/inference_services/services/mistral_ai_service.py

@@ -1,12 +1,14 @@
 import os
- from typing import Any, List, Optional
- import asyncio
+ from typing import Any, List, Optional, TYPE_CHECKING
 from mistralai import Mistral


 from ..inference_service_abc import InferenceServiceABC
 from ...language_models import LanguageModel

+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore
+

 class MistralAIService(InferenceServiceABC):
 """Mistral AI service class."""
edsl/inference_services/services/ollama_service.py

@@ -1,4 +1,4 @@
- from typing import Any, List
+ from typing import List

 from .open_ai_service import OpenAIService

edsl/inference_services/services/open_ai_service.py

@@ -1,5 +1,5 @@
 from __future__ import annotations
- from typing import Any, List, Optional, Dict, NewType
+ from typing import Any, List, Optional, Dict, NewType, TYPE_CHECKING
 import os

 import openai
@@ -7,9 +7,11 @@ import openai
 from ..inference_service_abc import InferenceServiceABC
 from ...language_models import LanguageModel
 from ..rate_limits_cache import rate_limits
- from ...utilities.utilities import fix_partial_correct_response

- from ...config import CONFIG
+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore as Files
+ from ....invigilators.invigilator_base import InvigilatorBase as InvigilatorAI
+

 APIToken = NewType("APIToken", str)

@@ -103,7 +105,7 @@ class OpenAIService(InferenceServiceABC):
 for m in cls.get_model_list(api_key=api_token)
 if m.id not in cls.model_exclude_list
 ]
- except Exception as e:
+ except Exception:
 raise
 return cls._models_list_cache

@@ -165,7 +167,7 @@ class OpenAIService(InferenceServiceABC):
 else:
 headers = self.get_headers()

- except Exception as e:
+ except Exception:
 return {
 "rpm": 10_000,
 "tpm": 2_000_000,
edsl/inference_services/services/perplexity_service.py

@@ -1,10 +1,14 @@
- from typing import Any, List, Optional
+ from typing import Any, List, Optional, TYPE_CHECKING
 from ..rate_limits_cache import rate_limits

 from ...language_models import LanguageModel

 from .open_ai_service import OpenAIService

+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore as Files
+ from ....invigilators.invigilator_base import InvigilatorBase as InvigilatorAI
+

 class PerplexityService(OpenAIService):
 """Perplexity service class."""
@@ -94,7 +98,7 @@ class PerplexityService(OpenAIService):
 else:
 headers = self.get_headers()

- except Exception as e:
+ except Exception:
 return {
 "rpm": 10_000,
 "tpm": 2_000_000,
edsl/inference_services/services/test_service.py

@@ -1,15 +1,15 @@
- from typing import Any, List, Optional
- import os
+ from typing import Any, List, Optional, TYPE_CHECKING
 import asyncio
 import random

 from ..inference_service_abc import InferenceServiceABC
- from ..rate_limits_cache import rate_limits

 from ...language_models import LanguageModel
- from ...utilities.utilities import fix_partial_correct_response
 from ...enums import InferenceServiceType

+ if TYPE_CHECKING:
+ from ....scenarios.file_store import FileStore as File
+

 class TestService(InferenceServiceABC):
 """OpenAI service class."""
@@ -37,7 +37,7 @@ class TestService(InferenceServiceABC):

 @classmethod
 def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
- throw_exception = False
+ # Removed unused variable

 class TestServiceLanguageModel(LanguageModel):
 _model_ = "test"
@@ -56,7 +56,7 @@ class TestService(InferenceServiceABC):

 return self.canned_response
 else:
- return "Hello, world"
+ return "Hello, world X"

 async def async_execute_model_call(
 self,
@@ -74,7 +74,8 @@ class TestService(InferenceServiceABC):
 p = 1

 if random.random() < p:
- raise Exception("This is a test error")
+ from edsl.inference_services.exceptions import InferenceServiceError
+ raise InferenceServiceError("This is a test error")

 if hasattr(self, "func"):
 return {
edsl/inference_services/services/together_ai_service.py

@@ -1,6 +1,5 @@
- from typing import Any, List, Optional
+ from typing import List

- from ...language_models import LanguageModel
 from .open_ai_service import OpenAIService

 import openai
@@ -163,6 +162,6 @@ class TogetherAIService(OpenAIService):
 for m in cls.get_model_list()
 if m["id"] not in cls.model_exclude_list
 ]
- except Exception as e:
+ except Exception:
 raise
 return cls._models_list_cache
edsl/inference_services/services/xai_service.py

@@ -1,4 +1,4 @@
- from typing import Any, List
+ from typing import List

 from .open_ai_service import OpenAIService

edsl/instructions/__init__.py

@@ -3,4 +3,4 @@ from .instruction_collection import InstructionCollection
 from .change_instruction import ChangeInstruction
 from .instruction_handler import InstructionHandler

- __all__ = ["Instruction", "ChangeInstruction"]
+ __all__ = ["Instruction", "ChangeInstruction", "InstructionCollection", "InstructionHandler"]
edsl/instructions/change_instruction.py

@@ -9,13 +9,14 @@ class ChangeInstruction:
 drop: Optional[List[str]] = None,
 ):
 if keep is None and drop is None:
- raise ValueError("Keep and drop cannot both be None")
+ from edsl.instructions.exceptions import InstructionValueError
+ raise InstructionValueError("Keep and drop cannot both be None")

 self.keep = keep or []
 self.drop = drop or []

 def include_instruction(self, instruction_name) -> bool:
- return (instruction_name in self.keep) or (not instruction_name in self.drop)
+ return (instruction_name in self.keep) or (instruction_name not in self.drop)

 def add_name(self, index) -> None:
 self.name = "change_instruction_{}".format(index)
edsl/instructions/exceptions.py

@@ -0,0 +1,61 @@
+ """
+ Exceptions specific to the instructions module.
+
+ This module defines custom exception classes for all instruction-related errors
+ in the EDSL framework, ensuring consistent error handling and user feedback.
+ """
+
+ from ..base import BaseException
+
+
+ class InstructionError(BaseException):
+ """
+ Base exception class for all instruction-related errors.
+
+ This is the parent class for all exceptions related to instruction creation,
+ modification, and application.
+
+ Examples:
+ ```python
+ # Usually not raised directly, but through subclasses
+ # For example, when creating invalid instructions:
+ instruction = Instruction(keep=None, drop=None) # Would raise InstructionValueError
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/instructions.html"
+
+
+ class InstructionValueError(InstructionError):
+ """
+ Exception raised when invalid values are provided to instruction methods.
+
+ This exception occurs when:
+ - Both keep and drop parameters are None in an Instruction
+ - Invalid instruction options are provided
+ - The instruction content is improperly formatted
+
+ Examples:
+ ```python
+ # Creating an instruction with invalid parameters
+ instruction = Instruction(keep=None, drop=None) # Raises InstructionValueError
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/instructions.html"
+
+
+ class InstructionCollectionError(InstructionError):
+ """
+ Exception raised when there's an issue with an instruction collection.
+
+ This exception occurs when:
+ - Instructions in a collection are invalid or incompatible
+ - There's an attempt to add a duplicate instruction
+ - The collection is used in an invalid context
+
+ Examples:
+ ```python
+ # Adding an incompatible instruction to a collection
+ collection.add(invalid_instruction) # Raises InstructionCollectionError
+ ```
+ """
+ relevant_doc = "https://docs.expectedparrot.com/en/latest/instructions.html"
edsl/instructions/instruction.py

@@ -1,7 +1,10 @@
- from typing import Union, Optional, List, Generator, Dict
+ from __future__ import annotations
 from ..utilities.remove_edsl_version import remove_edsl_version
 from ..base import RepresentationMixin
- #from ..surveys import Survey
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from ..surveys import Survey


 class Instruction(RepresentationMixin):
edsl/instructions/instruction_collection.py

@@ -42,7 +42,8 @@ class InstructionCollection(UserDict):
 self, question_name
 ) -> tuple[List[Instruction], List[ChangeInstruction]]:
 if question_name not in self.question_names:
- raise ValueError(
+ from edsl.instructions.exceptions import InstructionCollectionError
+ raise InstructionCollectionError(
 f"Question name not found in the list of questions: got '{question_name}'; list is {self.question_names}"
 )
 instructions, changes = [], []