edsl 0.1.37.dev4__py3-none-any.whl → 0.1.37.dev6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (45)
  1. edsl/__version__.py +1 -1
  2. edsl/agents/Agent.py +86 -35
  3. edsl/agents/AgentList.py +5 -0
  4. edsl/agents/InvigilatorBase.py +2 -23
  5. edsl/agents/PromptConstructor.py +147 -106
  6. edsl/agents/descriptors.py +17 -4
  7. edsl/config.py +1 -1
  8. edsl/conjure/AgentConstructionMixin.py +11 -3
  9. edsl/conversation/Conversation.py +66 -14
  10. edsl/conversation/chips.py +95 -0
  11. edsl/coop/coop.py +134 -3
  12. edsl/data/Cache.py +1 -1
  13. edsl/exceptions/BaseException.py +21 -0
  14. edsl/exceptions/__init__.py +7 -3
  15. edsl/exceptions/agents.py +17 -19
  16. edsl/exceptions/results.py +11 -8
  17. edsl/exceptions/scenarios.py +22 -0
  18. edsl/exceptions/surveys.py +13 -10
  19. edsl/inference_services/InferenceServicesCollection.py +32 -9
  20. edsl/jobs/Jobs.py +265 -53
  21. edsl/jobs/interviews/InterviewExceptionEntry.py +5 -1
  22. edsl/jobs/tasks/TaskHistory.py +1 -0
  23. edsl/language_models/KeyLookup.py +30 -0
  24. edsl/language_models/LanguageModel.py +47 -59
  25. edsl/language_models/__init__.py +1 -0
  26. edsl/prompts/Prompt.py +8 -4
  27. edsl/questions/QuestionBase.py +53 -13
  28. edsl/questions/QuestionBasePromptsMixin.py +1 -33
  29. edsl/questions/QuestionFunctional.py +2 -2
  30. edsl/questions/descriptors.py +23 -28
  31. edsl/results/DatasetExportMixin.py +25 -1
  32. edsl/results/Result.py +16 -1
  33. edsl/results/Results.py +31 -120
  34. edsl/results/ResultsDBMixin.py +1 -1
  35. edsl/results/Selector.py +18 -1
  36. edsl/scenarios/Scenario.py +48 -12
  37. edsl/scenarios/ScenarioHtmlMixin.py +7 -2
  38. edsl/scenarios/ScenarioList.py +12 -1
  39. edsl/surveys/Rule.py +10 -4
  40. edsl/surveys/Survey.py +100 -77
  41. edsl/utilities/utilities.py +18 -0
  42. {edsl-0.1.37.dev4.dist-info → edsl-0.1.37.dev6.dist-info}/METADATA +1 -1
  43. {edsl-0.1.37.dev4.dist-info → edsl-0.1.37.dev6.dist-info}/RECORD +45 -41
  44. {edsl-0.1.37.dev4.dist-info → edsl-0.1.37.dev6.dist-info}/LICENSE +0 -0
  45. {edsl-0.1.37.dev4.dist-info → edsl-0.1.37.dev6.dist-info}/WHEEL +0 -0
edsl/inference_services/InferenceServicesCollection.py CHANGED
@@ -16,25 +16,48 @@ class InferenceServicesCollection:
 
     @staticmethod
     def _get_service_available(service, warn: bool = False) -> list[str]:
-        from_api = True
         try:
             service_models = service.available()
-        except Exception as e:
+        except Exception:
            if warn:
                warnings.warn(
                    f"""Error getting models for {service._inference_service_}.
                    Check that you have properly stored your Expected Parrot API key and activated remote inference, or stored your own API keys for the language models that you want to use.
                    See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
-                    Relying on cache.""",
+                    Relying on Coop.""",
                    UserWarning,
                )
-            from edsl.inference_services.models_available_cache import models_available
 
-            service_models = models_available.get(service._inference_service_, [])
-            # cache results
-            service._models_list_cache = service_models
-            from_api = False
-        return service_models  # , from_api
+            # Use the list of models on Coop as a fallback
+            try:
+                from edsl import Coop
+
+                c = Coop()
+                models_from_coop = c.fetch_models()
+                service_models = models_from_coop.get(service._inference_service_, [])
+
+                # cache results
+                service._models_list_cache = service_models
+
+            # Finally, use the available models cache from the Python file
+            except Exception:
+                if warn:
+                    warnings.warn(
+                        f"""Error getting models for {service._inference_service_}.
+                        Relying on EDSL cache.""",
+                        UserWarning,
+                    )
+
+                from edsl.inference_services.models_available_cache import (
+                    models_available,
+                )
+
+                service_models = models_available.get(service._inference_service_, [])
+
+                # cache results
+                service._models_list_cache = service_models
+
+        return service_models
 
     def available(self):
         total_models = []
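The change above replaces the single static-cache fallback with a three-tier lookup: ask the service directly, then fall back to the model list published on Coop, then to the `models_available` cache bundled with the package. A minimal sketch of the same pattern, with `fetch_live` and `fetch_coop` as hypothetical stand-ins for `service.available()` and `Coop().fetch_models()`:

import warnings

STATIC_CACHE = {"openai": ["gpt-4o"]}  # illustrative stand-in for the bundled cache

def get_models(service: str, fetch_live, fetch_coop) -> list[str]:
    try:
        return fetch_live(service)  # 1. ask the service directly
    except Exception:
        warnings.warn(f"Error getting models for {service}. Relying on Coop.", UserWarning)
    try:
        return fetch_coop().get(service, [])  # 2. fall back to Coop's published list
    except Exception:
        warnings.warn(f"Error getting models for {service}. Relying on EDSL cache.", UserWarning)
    return STATIC_CACHE.get(service, [])  # 3. last resort: the static cache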
edsl/jobs/Jobs.py CHANGED
@@ -3,9 +3,10 @@ from __future__ import annotations
 import warnings
 import requests
 from itertools import product
-from typing import Optional, Union, Sequence, Generator
+from typing import Literal, Optional, Union, Sequence, Generator
 
 from edsl.Base import Base
+
 from edsl.exceptions import MissingAPIKeyError
 from edsl.jobs.buckets.BucketCollection import BucketCollection
 from edsl.jobs.interviews.Interview import Interview
@@ -209,14 +210,14 @@ class Jobs(Base):
         )
         return d
 
-    def show_prompts(self, all=False) -> None:
+    def show_prompts(self, all=False, max_rows: Optional[int] = None) -> None:
         """Print the prompts."""
         if all:
-            self.prompts().to_scenario_list().print(format="rich")
+            self.prompts().to_scenario_list().print(format="rich", max_rows=max_rows)
         else:
             self.prompts().select(
                 "user_prompt", "system_prompt"
-            ).to_scenario_list().print(format="rich")
+            ).to_scenario_list().print(format="rich", max_rows=max_rows)
 
     @staticmethod
     def estimate_prompt_cost(
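A usage sketch for the new `max_rows` argument, assuming `Jobs.example()` is available as on other EDSL Base objects:

from edsl.jobs import Jobs

job = Jobs.example()
job.show_prompts(all=True, max_rows=10)  # all prompt columns, display capped at 10 rows
job.show_prompts(max_rows=10)            # only user_prompt / system_prompt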
@@ -722,7 +723,11 @@ class Jobs(Base):
         return self._raise_validation_errors
 
     def create_remote_inference_job(
-        self, iterations: int = 1, remote_inference_description: Optional[str] = None
+        self,
+        iterations: int = 1,
+        remote_inference_description: Optional[str] = None,
+        remote_inference_results_visibility: Optional[VisibilityType] = "unlisted",
+        verbose=False,
     ):
         """ """
         from edsl.coop.coop import Coop
@@ -734,9 +739,11 @@ class Jobs(Base):
             description=remote_inference_description,
             status="queued",
             iterations=iterations,
+            initial_results_visibility=remote_inference_results_visibility,
         )
         job_uuid = remote_job_creation_data.get("uuid")
-        print(f"Job sent to server. (Job uuid={job_uuid}).")
+        if self.verbose:
+            print(f"Job sent to server. (Job uuid={job_uuid}).")
         return remote_job_creation_data
 
     @staticmethod
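Note that the body reads `self.verbose` (normally set by `run()`) rather than the new `verbose` parameter, so a direct call needs the attribute set first. A hedged usage sketch, assuming a valid Expected Parrot API key is configured:

from edsl.jobs import Jobs

job = Jobs.example()
job.verbose = True  # the method body checks self.verbose, not the verbose argument
creation_data = job.create_remote_inference_job(
    iterations=2,
    remote_inference_description="Demo run",
    remote_inference_results_visibility="private",
)
print(creation_data.get("uuid"))  # server-assigned job uuid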
@@ -747,7 +754,7 @@ class Jobs(Base):
         return coop.remote_inference_get(job_uuid)
 
     def poll_remote_inference_job(
-        self, remote_job_creation_data: dict
+        self, remote_job_creation_data: dict, verbose=False, poll_interval=5
     ) -> Union[Results, None]:
         from edsl.coop.coop import Coop
         import time
@@ -764,42 +771,46 @@ class Jobs(Base):
             remote_job_data = coop.remote_inference_get(job_uuid)
             status = remote_job_data.get("status")
             if status == "cancelled":
-                print("\r" + " " * 80 + "\r", end="")
-                print("Job cancelled by the user.")
-                print(
-                    f"See {expected_parrot_url}/home/remote-inference for more details."
-                )
+                if self.verbose:
+                    print("\r" + " " * 80 + "\r", end="")
+                    print("Job cancelled by the user.")
+                    print(
+                        f"See {expected_parrot_url}/home/remote-inference for more details."
+                    )
                 return None
             elif status == "failed":
-                print("\r" + " " * 80 + "\r", end="")
-                print("Job failed.")
-                print(
-                    f"See {expected_parrot_url}/home/remote-inference for more details."
-                )
+                if self.verbose:
+                    print("\r" + " " * 80 + "\r", end="")
+                    print("Job failed.")
+                    print(
+                        f"See {expected_parrot_url}/home/remote-inference for more details."
+                    )
                 return None
             elif status == "completed":
                 results_uuid = remote_job_data.get("results_uuid")
                 results = coop.get(results_uuid, expected_object_type="results")
-                print("\r" + " " * 80 + "\r", end="")
-                url = f"{expected_parrot_url}/content/{results_uuid}"
-                print(f"Job completed and Results stored on Coop: {url}.")
+                if self.verbose:
+                    print("\r" + " " * 80 + "\r", end="")
+                    url = f"{expected_parrot_url}/content/{results_uuid}"
+                    print(f"Job completed and Results stored on Coop: {url}.")
                 return results
             else:
-                duration = 5
+                duration = poll_interval
                 time_checked = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
                 frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
                 start_time = time.time()
                 i = 0
                 while time.time() - start_time < duration:
-                    print(
-                        f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
-                        end="",
-                        flush=True,
-                    )
+                    if self.verbose:
+                        print(
+                            f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
+                            end="",
+                            flush=True,
+                        )
                     time.sleep(0.1)
                     i += 1
 
-    def use_remote_inference(self, disable_remote_inference: bool):
+    def use_remote_inference(self, disable_remote_inference: bool) -> bool:
         if disable_remote_inference:
             return False
         if not disable_remote_inference:
@@ -815,20 +826,23 @@ class Jobs(Base):
 
         return False
 
-    def use_remote_cache(self):
-        try:
-            from edsl import Coop
+    def use_remote_cache(self, disable_remote_cache: bool) -> bool:
+        if disable_remote_cache:
+            return False
+        if not disable_remote_cache:
+            try:
+                from edsl import Coop
 
-            user_edsl_settings = Coop().edsl_settings
-            return user_edsl_settings.get("remote_caching", False)
-        except requests.ConnectionError:
-            pass
-        except CoopServerResponseError as e:
-            pass
+                user_edsl_settings = Coop().edsl_settings
+                return user_edsl_settings.get("remote_caching", False)
+            except requests.ConnectionError:
+                pass
+            except CoopServerResponseError as e:
+                pass
 
         return False
 
-    def check_api_keys(self):
+    def check_api_keys(self) -> None:
         from edsl import Model
 
         for model in self.models + [Model()]:
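The new flag short-circuits before any network call, which is what lets the doctests later in this diff pass `disable_remote_cache=True` safely; with the flag off, the decision still comes from `Coop().edsl_settings`, and connection errors fall through to False. A sketch:

from edsl.jobs import Jobs

job = Jobs.example()
assert job.use_remote_cache(disable_remote_cache=True) is False  # no network touched
# job.use_remote_cache(disable_remote_cache=False) consults Coop().edsl_settings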
@@ -838,6 +852,86 @@ class Jobs(Base):
                 inference_service=model._inference_service_,
             )
 
+    def get_missing_api_keys(self) -> set:
+        """
+        Returns a list of the api keys that a user needs to run this job, but does not currently have in their .env file.
+        """
+
+        missing_api_keys = set()
+
+        from edsl import Model
+        from edsl.enums import service_to_api_keyname
+
+        for model in self.models + [Model()]:
+            if not model.has_valid_api_key():
+                key_name = service_to_api_keyname.get(
+                    model._inference_service_, "NOT FOUND"
+                )
+                missing_api_keys.add(key_name)
+
+        return missing_api_keys
+
+    def user_has_all_model_keys(self):
+        """
+        Returns True if the user has all model keys required to run their job.
+
+        Otherwise, returns False.
+        """
+
+        try:
+            self.check_api_keys()
+            return True
+        except MissingAPIKeyError:
+            return False
+        except Exception:
+            raise
+
+    def user_has_ep_api_key(self) -> bool:
+        """
+        Returns True if the user has an EXPECTED_PARROT_API_KEY in their env.
+
+        Otherwise, returns False.
+        """
+
+        import os
+
+        coop_api_key = os.getenv("EXPECTED_PARROT_API_KEY")
+
+        if coop_api_key is not None:
+            return True
+        else:
+            return False
+
+    def needs_external_llms(self) -> bool:
+        """
+        Returns True if the job needs external LLMs to run.
+
+        Otherwise, returns False.
+        """
+        # These cases are necessary to skip the API key check during doctests
+
+        # Accounts for Results.example()
+        all_agents_answer_questions_directly = len(self.agents) > 0 and all(
+            [hasattr(a, "answer_question_directly") for a in self.agents]
+        )
+
+        # Accounts for InterviewExceptionEntry.example()
+        only_model_is_test = set([m.model for m in self.models]) == set(["test"])
+
+        # Accounts for Survey.__call__
+        all_questions_are_functional = set(
+            [q.question_type for q in self.survey.questions]
+        ) == set(["functional"])
+
+        if (
+            all_agents_answer_questions_directly
+            or only_model_is_test
+            or all_questions_are_functional
+        ):
+            return False
+        else:
+            return True
+
     def run(
         self,
         n: int = 1,
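Together these predicates reproduce the guard that `run()` applies before prompting for a login (shown in the hunks below). A sketch of that check in isolation:

from edsl.jobs import Jobs

job = Jobs.example()
needs_login = (
    not job.user_has_all_model_keys()
    and not job.user_has_ep_api_key()
    and job.needs_external_llms()
)
if needs_login:
    for key_name in job.get_missing_api_keys():
        print(f"missing: {key_name}")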
@@ -850,22 +944,28 @@ class Jobs(Base):
         print_exceptions=True,
         remote_cache_description: Optional[str] = None,
         remote_inference_description: Optional[str] = None,
+        remote_inference_results_visibility: Optional[
+            Literal["private", "public", "unlisted"]
+        ] = "unlisted",
         skip_retry: bool = False,
         raise_validation_errors: bool = False,
+        disable_remote_cache: bool = False,
         disable_remote_inference: bool = False,
     ) -> Results:
         """
         Runs the Job: conducts Interviews and returns their results.
 
-        :param n: how many times to run each interview
-        :param progress_bar: shows a progress bar
-        :param stop_on_exception: stops the job if an exception is raised
-        :param cache: a cache object to store results
-        :param check_api_keys: check if the API keys are valid
-        :param batch_mode: run the job in batch mode i.e., no expecation of interaction with the user
-        :param verbose: prints messages
-        :param remote_cache_description: specifies a description for this group of entries in the remote cache
-        :param remote_inference_description: specifies a description for the remote inference job
+        :param n: How many times to run each interview
+        :param progress_bar: Whether to show a progress bar
+        :param stop_on_exception: Stops the job if an exception is raised
+        :param cache: A Cache object to store results
+        :param check_api_keys: Raises an error if API keys are invalid
+        :param verbose: Prints extra messages
+        :param remote_cache_description: Specifies a description for this group of entries in the remote cache
+        :param remote_inference_description: Specifies a description for the remote inference job
+        :param remote_inference_results_visibility: The initial visibility of the Results object on Coop. This will only be used for remote jobs!
+        :param disable_remote_cache: If True, the job will not use remote cache. This only works for local jobs!
+        :param disable_remote_inference: If True, the job will not use remote inference
         """
         from edsl.coop.coop import Coop
 
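A usage sketch of the reworked signature, mirroring the doctest style used elsewhere in this release:

from edsl.jobs import Jobs

# Fully local run: opt out of both remote services.
results = Jobs.example().run(
    disable_remote_cache=True,
    disable_remote_inference=True,
)

# Remote run: control the visibility of the stored Results on Coop.
# results = Jobs.example().run(remote_inference_results_visibility="private")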
@@ -875,9 +975,54 @@ class Jobs(Base):
 
         self.verbose = verbose
 
+        if (
+            not self.user_has_all_model_keys()
+            and not self.user_has_ep_api_key()
+            and self.needs_external_llms()
+        ):
+            import secrets
+            from dotenv import load_dotenv
+            from edsl import CONFIG
+            from edsl.coop.coop import Coop
+            from edsl.utilities.utilities import write_api_key_to_env
+
+            missing_api_keys = self.get_missing_api_keys()
+
+            edsl_auth_token = secrets.token_urlsafe(16)
+
+            print("You're missing some of the API keys needed to run this job:")
+            for api_key in missing_api_keys:
+                print(f" 🔑 {api_key}")
+            print(
+                "\nYou can either add the missing keys to your .env file, or use remote inference."
+            )
+            print("Remote inference allows you to run jobs on our server.")
+            print("\n🚀 To use remote inference, sign up at the following link:")
+
+            coop = Coop()
+            coop._display_login_url(edsl_auth_token=edsl_auth_token)
+
+            print(
+                "\nOnce you log in, we will automatically retrieve your Expected Parrot API key and continue your job remotely."
+            )
+
+            api_key = coop._poll_for_api_key(edsl_auth_token)
+
+            if api_key is None:
+                print("\nTimed out waiting for login. Please try again.")
+                return
+
+            write_api_key_to_env(api_key)
+            print("✨ API key retrieved and written to .env file.\n")
+
+            # Retrieve API key so we can continue running the job
+            load_dotenv()
+
         if remote_inference := self.use_remote_inference(disable_remote_inference):
             remote_job_creation_data = self.create_remote_inference_job(
-                iterations=n, remote_inference_description=remote_inference_description
+                iterations=n,
+                remote_inference_description=remote_inference_description,
+                remote_inference_results_visibility=remote_inference_results_visibility,
             )
             results = self.poll_remote_inference_job(remote_job_creation_data)
             if results is None:
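Distilled, the handshake added above is: mint a one-time token, show a login URL bound to that token, poll Coop until the key appears, persist it, and reload the environment. A sketch using only the calls shown in the hunk (`_display_login_url` and `_poll_for_api_key` are private Coop helpers):

import secrets
from dotenv import load_dotenv
from edsl.coop.coop import Coop
from edsl.utilities.utilities import write_api_key_to_env

edsl_auth_token = secrets.token_urlsafe(16)
coop = Coop()
coop._display_login_url(edsl_auth_token=edsl_auth_token)  # prints the signup link
api_key = coop._poll_for_api_key(edsl_auth_token)         # blocks until login or timeout
if api_key is not None:
    write_api_key_to_env(api_key)
    load_dotenv()  # pick up EXPECTED_PARROT_API_KEY for the rest of the run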
@@ -897,7 +1042,7 @@ class Jobs(Base):
 
             cache = Cache()
 
-        remote_cache = self.use_remote_cache()
+        remote_cache = self.use_remote_cache(disable_remote_cache)
         with RemoteCacheSync(
             coop=Coop(),
             cache=cache,
@@ -918,17 +1063,84 @@ class Jobs(Base):
             results.cache = cache.new_entries_cache()
         return results
 
+    async def create_and_poll_remote_job(
+        self,
+        iterations: int = 1,
+        remote_inference_description: Optional[str] = None,
+        remote_inference_results_visibility: Optional[
+            Literal["private", "public", "unlisted"]
+        ] = "unlisted",
+    ) -> Union[Results, None]:
+        """
+        Creates and polls a remote inference job asynchronously.
+        Reuses existing synchronous methods but runs them in an async context.
+
+        :param iterations: Number of times to run each interview
+        :param remote_inference_description: Optional description for the remote job
+        :param remote_inference_results_visibility: Visibility setting for results
+        :return: Results object if successful, None if job fails or is cancelled
+        """
+        import asyncio
+        from functools import partial
+
+        # Create job using existing method
+        loop = asyncio.get_event_loop()
+        remote_job_creation_data = await loop.run_in_executor(
+            None,
+            partial(
+                self.create_remote_inference_job,
+                iterations=iterations,
+                remote_inference_description=remote_inference_description,
+                remote_inference_results_visibility=remote_inference_results_visibility,
+            ),
+        )
+
+        # Poll using existing method but with async sleep
+        return await loop.run_in_executor(
+            None, partial(self.poll_remote_inference_job, remote_job_creation_data)
+        )
+
+    async def run_async(
+        self,
+        cache=None,
+        n=1,
+        disable_remote_inference: bool = False,
+        remote_inference_description: Optional[str] = None,
+        remote_inference_results_visibility: Optional[
+            Literal["private", "public", "unlisted"]
+        ] = "unlisted",
+        **kwargs,
+    ):
+        """Run the job asynchronously, either locally or remotely.
+
+        :param cache: Cache object or boolean
+        :param n: Number of iterations
+        :param disable_remote_inference: If True, forces local execution
+        :param remote_inference_description: Description for remote jobs
+        :param remote_inference_results_visibility: Visibility setting for remote results
+        :param kwargs: Additional arguments passed to local execution
+        :return: Results object
+        """
+        # Check if we should use remote inference
+        if remote_inference := self.use_remote_inference(disable_remote_inference):
+            results = await self.create_and_poll_remote_job(
+                iterations=n,
+                remote_inference_description=remote_inference_description,
+                remote_inference_results_visibility=remote_inference_results_visibility,
+            )
+            if results is None:
+                self._output("Job failed.")
+            return results
+
+        # If not using remote inference, run locally with async
+        return await JobsRunnerAsyncio(self).run_async(cache=cache, n=n, **kwargs)
+
     def _run_local(self, *args, **kwargs):
         """Run the job locally."""
 
         results = JobsRunnerAsyncio(self).run(*args, **kwargs)
         return results
 
-    async def run_async(self, cache=None, n=1, **kwargs):
-        """Run asynchronously."""
-        results = await JobsRunnerAsyncio(self).run_async(cache=cache, n=n, **kwargs)
-        return results
-
     def all_question_parameters(self):
         """Return all the fields in the questions in the survey.
         >>> from edsl.jobs import Jobs
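A usage sketch for the relocated async entry point; with `disable_remote_inference=True` it takes the local `JobsRunnerAsyncio` path (cache=False mirrors the doctest usage elsewhere in this diff):

import asyncio
from edsl.jobs import Jobs

async def main():
    return await Jobs.example().run_async(
        cache=False,
        n=1,
        disable_remote_inference=True,  # force local execution
    )

results = asyncio.run(main())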
edsl/jobs/interviews/InterviewExceptionEntry.py CHANGED
@@ -67,7 +67,11 @@ class InterviewExceptionEntry:
         m = LanguageModel.example(test_model=True)
         q = QuestionFreeText.example(exception_to_throw=ValueError)
         results = q.by(m).run(
-            skip_retry=True, print_exceptions=False, raise_validation_errors=True
+            skip_retry=True,
+            print_exceptions=False,
+            raise_validation_errors=True,
+            disable_remote_cache=True,
+            disable_remote_inference=True,
         )
         return results.task_history.exceptions[0]["how_are_you"][0]
 
edsl/jobs/tasks/TaskHistory.py CHANGED
@@ -39,6 +39,7 @@ class TaskHistory:
             skip_retry=True,
             cache=False,
             raise_validation_errors=True,
+            disable_remote_cache=True,
             disable_remote_inference=True,
         )
 
edsl/language_models/KeyLookup.py ADDED
@@ -0,0 +1,30 @@
+import os
+from collections import UserDict
+
+from edsl.enums import service_to_api_keyname
+from edsl.exceptions import MissingAPIKeyError
+
+
+class KeyLookup(UserDict):
+    @classmethod
+    def from_os_environ(cls):
+        """Create an instance of KeyLookupAPI with keys from os.environ"""
+        return cls({key: value for key, value in os.environ.items()})
+
+    def get_api_token(self, service: str, remote: bool = False):
+        key_name = service_to_api_keyname.get(service, "NOT FOUND")
+
+        if service == "bedrock":
+            api_token = [self.get(key_name[0]), self.get(key_name[1])]
+            missing_token = any(token is None for token in api_token)
+        else:
+            api_token = self.get(key_name)
+            missing_token = api_token is None
+
+        if missing_token and service != "test" and not remote:
+            raise MissingAPIKeyError(
+                f"""The key for service: `{service}` is not set.
+                Need a key with name {key_name} in your .env file."""
+            )
+
+        return api_token
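A usage sketch for the new class, assuming the accompanying `edsl/language_models/__init__.py` change exports it:

from edsl.language_models import KeyLookup

lookup = KeyLookup.from_os_environ()
token = lookup.get_api_token("openai")  # raises MissingAPIKeyError if the key is unset
# For "bedrock", key_name maps to two env vars, so a two-element list comes back;
# remote=True suppresses the MissingAPIKeyError for remote-inference runs.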