edsl 0.1.31.dev1__py3-none-any.whl → 0.1.31.dev2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
edsl/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.31.dev1"
+ __version__ = "0.1.31.dev2"
edsl/agents/PromptConstructionMixin.py CHANGED
@@ -275,8 +275,20 @@ class PromptConstructorMixin:
          if (new_question := question.split("_comment")[0]) in d:
              d[new_question].comment = answer

+         question_data = self.question.data.copy()
+
+         # check to see if the question_options is actually a string
+         if "question_options" in question_data:
+             if isinstance(self.question.data["question_options"], str):
+                 from jinja2 import Environment, meta
+                 env = Environment()
+                 parsed_content = env.parse(self.question.data['question_options'])
+                 question_option_key = list(meta.find_undeclared_variables(parsed_content))[0]
+                 question_data["question_options"] = self.scenario.get(question_option_key)
+
+         #breakpoint()
          rendered_instructions = question_prompt.render(
-             self.question.data | self.scenario | d | {"agent": self.agent}
+             question_data | self.scenario | d | {"agent": self.agent}
          )

          undefined_template_variables = (
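A standalone sketch of what this new branch does, using plain dicts in place of edsl's question and scenario objects: when `question_options` arrives as a Jinja2 template string, the variable name is pulled out with `jinja2.meta` and resolved from the scenario.

```python
# Sketch of the question_options resolution above; the dicts are stand-ins
# for edsl's question data and Scenario, not the real objects.
from jinja2 import Environment, meta

question_data = {"question_options": "{{ colors }}"}  # hypothetical payload
scenario = {"colors": ["red", "green", "blue"]}       # hypothetical scenario

if isinstance(question_data["question_options"], str):
    env = Environment()
    parsed = env.parse(question_data["question_options"])
    # find_undeclared_variables returns a set; the diff takes the first element
    option_key = list(meta.find_undeclared_variables(parsed))[0]  # "colors"
    question_data["question_options"] = scenario.get(option_key)

print(question_data["question_options"])  # ['red', 'green', 'blue']
```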
edsl/inference_services/InferenceServicesCollection.py CHANGED
@@ -21,7 +21,10 @@ class InferenceServicesCollection:
              service_models = service.available()
          except Exception as e:
              warnings.warn(
-                 f"Error getting models for {service._inference_service_}. Relying on cache.",
+                 f"""Error getting models for {service._inference_service_}.
+                 Check that you have properly stored your Expected Parrot API key and activated remote inference, or stored your own API keys for the language models that you want to use.
+                 See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
+                 Relying on cache.""",
                  UserWarning,
              )
          from edsl.inference_services.models_available_cache import models_available
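The surrounding pattern, as a self-contained sketch: try the live listing, warn with the expanded message on failure, then fall back to the cache. Here `fetch_models` and the dict stand in for `service.available()` and the cached `models_available`.

```python
# Warn-then-fall-back-to-cache sketch; fetch_models and models_available
# are stand-ins for service.available() and the cached listing.
import warnings

models_available = {"openai": ["gpt-4"]}  # hypothetical cached listing

def fetch_models(service_name: str) -> list:
    raise ConnectionError("no API key configured")  # simulate a failed call

try:
    service_models = fetch_models("openai")
except Exception:
    warnings.warn(
        "Error getting models for openai. Relying on cache.",
        UserWarning,
    )
    service_models = models_available.get("openai", [])
```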
edsl/jobs/Jobs.py CHANGED
@@ -3,9 +3,7 @@ from __future__ import annotations
  import warnings
  from itertools import product
  from typing import Optional, Union, Sequence, Generator
-
  from edsl.Base import Base
-
  from edsl.exceptions import MissingAPIKeyError
  from edsl.jobs.buckets.BucketCollection import BucketCollection
  from edsl.jobs.interviews.Interview import Interview
@@ -461,12 +459,11 @@ class Jobs(Base):
              remote_inference = False

          if remote_inference:
-             from edsl.agents.Agent import Agent
-             from edsl.language_models.registry import Model
-             from edsl.results.Result import Result
-             from edsl.results.Results import Results
-             from edsl.scenarios.Scenario import Scenario
-             from edsl.surveys.Survey import Survey
+             import time
+             from datetime import datetime
+             from edsl.config import CONFIG
+
+             expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")

              self._output("Remote inference activated. Sending job to server...")
              if remote_cache:
@@ -474,33 +471,59 @@
                  "Remote caching activated. The remote cache will be used for this job."
              )

-             remote_job_data = coop.remote_inference_create(
+             remote_job_creation_data = coop.remote_inference_create(
                  self,
                  description=remote_inference_description,
                  status="queued",
              )
-             self._output("Job sent!")
-             # Create mock results object to store job data
-             results = Results(
-                 survey=Survey(),
-                 data=[
-                     Result(
-                         agent=Agent.example(),
-                         scenario=Scenario.example(),
-                         model=Model(),
-                         iteration=1,
-                         answer={"info": "Remote job details"},
+             time_queued = datetime.now().strftime("%m/%d/%Y %I:%M:%S %p")
+             job_uuid = remote_job_creation_data.get("uuid")
+             print(f"Remote inference started (Job uuid={job_uuid}).")
+             # print(f"Job queued at {time_queued}.")
+             job_in_queue = True
+             while job_in_queue:
+                 remote_job_data = coop.remote_inference_get(job_uuid)
+                 status = remote_job_data.get("status")
+                 if status == "cancelled":
+                     print("\r" + " " * 80 + "\r", end="")
+                     print("Job cancelled by the user.")
+                     print(
+                         f"See {expected_parrot_url}/home/remote-inference for more details."
                      )
-                 ],
-             )
-             results.add_columns_from_dict([remote_job_data])
-             if self.verbose:
-                 results.select(["info", "uuid", "status", "version"]).print(
-                     format="rich"
-                 )
-             return results
+                     return None
+                 elif status == "failed":
+                     print("\r" + " " * 80 + "\r", end="")
+                     print("Job failed.")
+                     print(
+                         f"See {expected_parrot_url}/home/remote-inference for more details."
+                     )
+                     return None
+                 elif status == "completed":
+                     results_uuid = remote_job_data.get("results_uuid")
+                     results = coop.get(results_uuid, expected_object_type="results")
+                     print("\r" + " " * 80 + "\r", end="")
+                     print(
+                         f"Job completed and Results stored on Coop (Results uuid={results_uuid})."
+                     )
+                     return results
+                 else:
+                     duration = 10 if len(self) < 10 else 60
+                     time_checked = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
+                     frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+                     start_time = time.time()
+                     i = 0
+                     while time.time() - start_time < duration:
+                         print(
+                             f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
+                             end="",
+                             flush=True,
+                         )
+                         time.sleep(0.1)
+                         i += 1
          else:
              if check_api_keys:
+                 from edsl import Model
+
                  for model in self.models + [Model()]:
                      if not model.has_valid_api_key():
                          raise MissingAPIKeyError(
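The new control flow polls the server and animates a terminal spinner between polls. A self-contained sketch of the same loop shape, with a fake `get_status` standing in for `coop.remote_inference_get` (here it completes on the third poll):

```python
# Poll-with-spinner sketch; get_status is a fake standing in for
# coop.remote_inference_get, and the poll interval is shortened to 1s.
import itertools
import time

_ticks = itertools.count()

def get_status() -> str:
    return "completed" if next(_ticks) >= 2 else "queued"

frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
while True:
    status = get_status()
    if status in ("cancelled", "failed", "completed"):
        print("\r" + " " * 80 + "\r", end="")  # wipe the spinner line
        print(f"Job {status}.")
        break
    start, i = time.time(), 0
    while time.time() - start < 1.0:  # spin before polling again
        print(f"\r{frames[i % len(frames)]} Job status: {status}", end="", flush=True)
        time.sleep(0.1)
        i += 1
```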
edsl/language_models/LanguageModel.py CHANGED
@@ -7,9 +7,35 @@ import asyncio
  import json
  import time
  import os
+ import hashlib
  from typing import Coroutine, Any, Callable, Type, List, get_type_hints
  from abc import ABC, abstractmethod

+ class IntendedModelCallOutcome:
+     "This is a tuple-like class that holds the response, cache_used, and cache_key."
+
+     def __init__(self, response: dict, cache_used: bool, cache_key: str):
+         self.response = response
+         self.cache_used = cache_used
+         self.cache_key = cache_key
+
+     def __iter__(self):
+         """Iterate over the class attributes.
+
+         >>> a, b, c = IntendedModelCallOutcome({'answer': "yes"}, True, 'x1289')
+         >>> a
+         {'answer': 'yes'}
+         """
+         yield self.response
+         yield self.cache_used
+         yield self.cache_key
+
+     def __len__(self):
+         return 3
+
+     def __repr__(self):
+         return f"IntendedModelCallOutcome(response = {self.response}, cache_used = {self.cache_used}, cache_key = '{self.cache_key}')"
+
  from edsl.config import CONFIG

  from edsl.utilities.decorators import sync_wrapper, jupyter_nb_handler
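Because `__iter__` yields the three fields in a fixed order, the outcome object unpacks like a 3-tuple, which is how call sites written as `response, cache_used, cache_key = ...` keep working. A standalone check, abbreviated to the relevant methods:

```python
# Tuple-like unpacking via __iter__, reduced to the essentials.
class Outcome:
    def __init__(self, response: dict, cache_used: bool, cache_key: str):
        self.response, self.cache_used, self.cache_key = response, cache_used, cache_key

    def __iter__(self):  # yields fields in a fixed order
        yield self.response
        yield self.cache_used
        yield self.cache_key

response, cache_used, cache_key = Outcome({"answer": "yes"}, True, "x1289")
assert (cache_used, cache_key) == (True, "x1289")
```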
@@ -149,7 +175,7 @@ class LanguageModel(
          key_value = os.getenv(key_name)
          return key_value is not None

-     def __hash__(self):
+     def __hash__(self) -> int:
          """Allow the model to be used as a key in a dictionary."""
          from edsl.utilities.utilities import dict_hash

@@ -176,6 +202,8 @@ class LanguageModel(
          """
          self._set_rate_limits(rpm=rpm, tpm=tpm)

+
+
      def _set_rate_limits(self, rpm=None, tpm=None) -> None:
          """Set the rate limits for the model.

@@ -216,19 +244,23 @@ class LanguageModel(
          >>> LanguageModel._overide_default_parameters(passed_parameter_dict={"temperature": 0.5}, default_parameter_dict={"temperature":0.9, "max_tokens": 1000})
          {'temperature': 0.5, 'max_tokens': 1000}
          """
-         parameters = dict({})
-         for parameter, default_value in default_parameter_dict.items():
-             if parameter in passed_parameter_dict:
-                 parameters[parameter] = passed_parameter_dict[parameter]
-             else:
-                 parameters[parameter] = default_value
-         return parameters
-
+         #parameters = dict({})
+
+         return {parameter_name: passed_parameter_dict.get(parameter_name, default_value)
+                 for parameter_name, default_value in default_parameter_dict.items()}
+
+     def __call__(self, user_prompt: str, system_prompt: str):
+         return self.execute_model_call(user_prompt, system_prompt)
+
      @abstractmethod
      async def async_execute_model_call(user_prompt: str, system_prompt: str):
-         """Execute the model call and returns the result as a coroutine.
+         """Execute the model call and return a coroutine.

          >>> m = LanguageModel.example(test_model = True)
+         >>> async def test(): return await m.async_execute_model_call("Hello, model!", "You are a helpful agent.")
+         >>> asyncio.run(test())
+         {'message': '{"answer": "Hello world"}'}
+
          >>> m.execute_model_call("Hello, model!", "You are a helpful agent.")
          {'message': '{"answer": "Hello world"}'}

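The new `.get()`-based comprehension keeps only keys that appear in the defaults; a plain `{**defaults, **passed}` merge would also admit unknown keys. A quick standalone comparison (key names are illustrative):

```python
# Override semantics: .get() per default key vs. a plain dict merge.
defaults = {"temperature": 0.9, "max_tokens": 1000}
passed = {"temperature": 0.5, "unknown_key": 1}

overridden = {name: passed.get(name, default) for name, default in defaults.items()}
assert overridden == {"temperature": 0.5, "max_tokens": 1000}  # unknown_key ignored

merged = {**defaults, **passed}
assert "unknown_key" in merged  # a plain merge would let it through
```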
@@ -274,10 +306,37 @@ class LanguageModel(

          What is returned by the API is model-specific and often includes meta-data that we do not need.
          For example, here is the results from a call to GPT-4:
-         To actually tract the response, we need to grab
+         To actually track the response, we need to grab
          data["choices[0]"]["message"]["content"].
          """
          raise NotImplementedError
+
+     async def _async_prepare_response(self, model_call_outcome: IntendedModelCallOutcome, cache: "Cache") -> dict:
+         """Prepare the response for return."""
+
+         model_response = {
+             "cache_used": model_call_outcome.cache_used,
+             "cache_key": model_call_outcome.cache_key,
+             "usage": model_call_outcome.response.get("usage", {}),
+             "raw_model_response": model_call_outcome.response,
+         }
+
+         answer_portion = self.parse_response(model_call_outcome.response)
+         try:
+             answer_dict = json.loads(answer_portion)
+         except json.JSONDecodeError as e:
+             # TODO: Turn into logs to generate issues
+             answer_dict, success = await repair(
+                 bad_json=answer_portion,
+                 error_message=str(e),
+                 cache=cache
+             )
+             if not success:
+                 raise Exception(
+                     f"""Even the repair failed. The error was: {e}. The response was: {answer_portion}."""
+                 )
+
+         return {**model_response, **answer_dict}

      async def async_get_raw_response(
          self,
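The parse-then-repair flow in `_async_prepare_response`, sketched standalone. The toy `repair` coroutine here stands in for edsl's async helper (the real one also takes a `cache` argument):

```python
# Parse-then-repair sketch; repair() is a toy stand-in for edsl's helper.
import asyncio
import json

async def repair(bad_json: str, error_message: str) -> tuple:
    try:  # toy heuristic: swap single quotes for double quotes and retry
        return json.loads(bad_json.replace("'", '"')), True
    except json.JSONDecodeError:
        return {}, False

async def prepare(answer_portion: str) -> dict:
    try:
        return json.loads(answer_portion)
    except json.JSONDecodeError as e:
        answer_dict, success = await repair(answer_portion, str(e))
        if not success:
            raise Exception(f"Even the repair failed. The error was: {e}.")
        return answer_dict

print(asyncio.run(prepare("{'answer': 'yes'}")))  # {'answer': 'yes'}
```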
@@ -286,7 +345,26 @@ class LanguageModel(
          cache: "Cache",
          iteration: int = 0,
          encoded_image=None,
-     ) -> tuple[dict, bool, str]:
+     ) -> IntendedModelCallOutcome:
+         import warnings
+         warnings.warn("This method is deprecated. Use async_get_intended_model_call_outcome.")
+         return await self._async_get_intended_model_call_outcome(
+             user_prompt=user_prompt,
+             system_prompt=system_prompt,
+             cache=cache,
+             iteration=iteration,
+             encoded_image=encoded_image
+         )
+
+
+     async def _async_get_intended_model_call_outcome(
+         self,
+         user_prompt: str,
+         system_prompt: str,
+         cache: "Cache",
+         iteration: int = 0,
+         encoded_image=None,
+     ) -> IntendedModelCallOutcome:
          """Handle caching of responses.

          :param user_prompt: The user's prompt.
@@ -304,52 +382,41 @@ class LanguageModel(

          >>> from edsl import Cache
          >>> m = LanguageModel.example(test_model = True)
-         >>> m.get_raw_response(user_prompt = "Hello", system_prompt = "hello", cache = Cache())
-         ({'message': '{"answer": "Hello world"}'}, False, '24ff6ac2bc2f1729f817f261e0792577')
+         >>> m._get_intended_model_call_outcome(user_prompt = "Hello", system_prompt = "hello", cache = Cache())
+         IntendedModelCallOutcome(response = {'message': '{"answer": "Hello world"}'}, cache_used = False, cache_key = '24ff6ac2bc2f1729f817f261e0792577')
          """
-         start_time = time.time()
+
+         if encoded_image:
+             # the image hash is appended to the user_prompt for hash-lookup purposes
+             image_hash = hashlib.md5(encoded_image.encode()).hexdigest()

          cache_call_params = {
              "model": str(self.model),
              "parameters": self.parameters,
              "system_prompt": system_prompt,
-             "user_prompt": user_prompt,
+             "user_prompt": user_prompt + ("" if not encoded_image else f" {image_hash}"),
              "iteration": iteration,
          }
-
-         if encoded_image:
-             import hashlib
-
-             image_hash = hashlib.md5(encoded_image.encode()).hexdigest()
-             cache_call_params["user_prompt"] = f"{user_prompt} {image_hash}"
-
          cached_response, cache_key = cache.fetch(**cache_call_params)
-         if cached_response:
+
+         if (cache_used := cached_response is not None):
              response = json.loads(cached_response)
-             cache_used = True
          else:
-             remote_call = hasattr(self, "remote") and self.remote
              f = (
                  self.remote_async_execute_model_call
-                 if remote_call
+                 if hasattr(self, "remote") and self.remote
                  else self.async_execute_model_call
              )
-             params = {"user_prompt": user_prompt, "system_prompt": system_prompt}
-             if encoded_image:
-                 params["encoded_image"] = encoded_image
+             params = {"user_prompt": user_prompt, "system_prompt": system_prompt,
+                       **({"encoded_image": encoded_image} if encoded_image else {})
+                       }
              response = await f(**params)
-             new_cache_key = cache.store(
-                 user_prompt=user_prompt,
-                 model=str(self.model),
-                 parameters=self.parameters,
-                 system_prompt=system_prompt,
-                 response=response,
-                 iteration=iteration,
-             )
-             assert new_cache_key == cache_key
-             cache_used = False
+             new_cache_key = cache.store(**cache_call_params, response=response)  # store the response in the cache
+             assert new_cache_key == cache_key  # should be the same
+
+         return IntendedModelCallOutcome(response = response, cache_used = cache_used, cache_key = cache_key)

-         return response, cache_used, cache_key
+     _get_intended_model_call_outcome = sync_wrapper(_async_get_intended_model_call_outcome)

      get_raw_response = sync_wrapper(async_get_raw_response)

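The cache flow above is fetch, then call-and-store on a miss, with the walrus assignment recording whether the cache was hit. A minimal sketch with a plain dict standing in for edsl's `Cache` and a simplified key derivation:

```python
# Fetch-or-call-then-store sketch; the dict and md5 key derivation are
# simplified stand-ins for edsl's Cache.fetch/Cache.store.
import hashlib
import json

cache: dict = {}

def make_key(**params) -> str:
    return hashlib.md5(json.dumps(params, sort_keys=True).encode()).hexdigest()

def call_model(user_prompt: str, system_prompt: str) -> dict:
    return {"message": '{"answer": "Hello world"}'}  # stand-in model call

def get_outcome(user_prompt: str, system_prompt: str):
    params = {"user_prompt": user_prompt, "system_prompt": system_prompt}
    cache_key = make_key(**params)
    cached = cache.get(cache_key)
    if (cache_used := cached is not None):
        response = json.loads(cached)
    else:
        response = call_model(**params)
        cache[cache_key] = json.dumps(response)  # store for the next call
    return response, cache_used, cache_key

get_outcome("Hello", "hello")                 # first call: cache miss
_, cache_used, _ = get_outcome("Hello", "hello")
assert cache_used                             # second call: served from cache
```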
@@ -370,7 +437,7 @@ class LanguageModel(
          self,
          user_prompt: str,
          system_prompt: str,
-         cache: Cache,
+         cache: 'Cache',
          iteration: int = 1,
          encoded_image=None,
      ) -> dict:
@@ -388,36 +455,10 @@ class LanguageModel(
              "system_prompt": system_prompt,
              "iteration": iteration,
              "cache": cache,
-         }
-         if encoded_image:
-             params["encoded_image"] = encoded_image
-
-         raw_response, cache_used, cache_key = await self.async_get_raw_response(
-             **params
-         )
-         response = self.parse_response(raw_response)
-
-         try:
-             dict_response = json.loads(response)
-         except json.JSONDecodeError as e:
-             # TODO: Turn into logs to generate issues
-             dict_response, success = await repair(
-                 bad_json=response, error_message=str(e), cache=cache
-             )
-             if not success:
-                 raise Exception(
-                     f"""Even the repair failed. The error was: {e}. The response was: {response}."""
-                 )
-
-         dict_response.update(
-             {
-                 "cache_used": cache_used,
-                 "cache_key": cache_key,
-                 "usage": raw_response.get("usage", {}),
-                 "raw_model_response": raw_response,
-             }
-         )
-         return dict_response
+             **({"encoded_image": encoded_image} if encoded_image else {})
+         }
+         model_call_outcome = await self._async_get_intended_model_call_outcome(**params)
+         return await self._async_prepare_response(model_call_outcome, cache=cache)

      get_response = sync_wrapper(async_get_response)

@@ -543,8 +584,3 @@ if __name__ == "__main__":
      import doctest

      doctest.testmod(optionflags=doctest.ELLIPSIS)
-
-     # from edsl.language_models import LanguageModel
-
-     # from edsl.language_models import LanguageModel
-     # print(LanguageModel.example())
edsl/questions/QuestionMultipleChoice.py CHANGED
@@ -96,6 +96,7 @@ class QuestionMultipleChoice(QuestionBase):
              question_option_key = list(meta.find_undeclared_variables(parsed_content))[
                  0
              ]
+             #breakpoint()
              translated_options = scenario.get(question_option_key)
          else:
              translated_options = [
edsl-0.1.31.dev1.dist-info/METADATA → edsl-0.1.31.dev2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: edsl
- Version: 0.1.31.dev1
+ Version: 0.1.31.dev2
  Summary: Create and analyze LLM-based surveys
  Home-page: https://www.expectedparrot.com/
  License: MIT
@@ -22,7 +22,7 @@ Requires-Dist: black[jupyter] (>=24.4.2,<25.0.0)
  Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
  Requires-Dist: jupyter (>=1.0.0,<2.0.0)
  Requires-Dist: markdown2 (>=2.4.11,<3.0.0)
- Requires-Dist: matplotlib (>=3.8.4,<4.0.0)
+ Requires-Dist: matplotlib (>=3.8,<3.9)
  Requires-Dist: nest-asyncio (>=1.5.9,<2.0.0)
  Requires-Dist: numpy (>=1.22,<2.0)
  Requires-Dist: openai (>=1.4.0,<2.0.0)
edsl-0.1.31.dev1.dist-info/RECORD → edsl-0.1.31.dev2.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
  edsl/Base.py,sha256=ttNxUotSd9LSEJl2w6LdMtT78d0nMQvYDJ0q4JkqBfg,8945
  edsl/BaseDiff.py,sha256=RoVEh52UJs22yMa7k7jv8se01G62jJNWnBzaZngo-Ug,8260
  edsl/__init__.py,sha256=E6PkWI_owu8AUc4uJs2XWDVozqSbcRWzsIqf8_Kskho,1631
- edsl/__version__.py,sha256=muI5jd-fDNbXxztFFWI5wdZkX6SVhOtKL4K3XxztZsg,28
+ edsl/__version__.py,sha256=BN1dLwi4HgyXIf5fxZomtYU3o3qkECZEfi25Xh6EtA4,28
  edsl/agents/Agent.py,sha256=qNJsQkN6HuTKqJrQbuUEgRX3Wo7Dwukle0oNWPi0UIE,27191
  edsl/agents/AgentList.py,sha256=_MsdeOEgaANAceLIXwuLC22mwlBn0ruGX4GEqz8_SSY,9467
  edsl/agents/Invigilator.py,sha256=WNgGT9VRKpHbk__h-vd4LASgjnlJnzepf-2FxQ3K98I,10798
  edsl/agents/InvigilatorBase.py,sha256=ncha1HF2V1Dz4f50Gekg6AzUXCD2Af82ztfSJZbgOHY,7469
- edsl/agents/PromptConstructionMixin.py,sha256=MP2Frmm9iP3J50ijXKrr6YYIjB_38UuA2J7Mysfs0ZQ,15913
+ edsl/agents/PromptConstructionMixin.py,sha256=uqk5SfGbvI191eda44bUtYHL7N0xEAxx64ga59SB610,16576
  edsl/agents/__init__.py,sha256=a3H1lxDwu9HR8fwh79C5DgxPSFv_bE2rzQ6y1D8Ba5c,80
  edsl/agents/descriptors.py,sha256=m8ND3-2-JbgNX1HGakBNLIeemwsgYa1mQxYO9GW33hw,2934
  edsl/base/Base.py,sha256=DShsfI6A2ojg42muPFpVtUgTX33pnqT5vtN0SRlr-9Q,8866
@@ -58,7 +58,7 @@ edsl/inference_services/AnthropicService.py,sha256=tjYRJRIvQ7Z6uCYdqxm5ZlVjZdVZC
  edsl/inference_services/DeepInfraService.py,sha256=fpjLJl7gyUFWVdR6U7XwIspD_pH5x55f4LamKT01Et4,3853
  edsl/inference_services/GoogleService.py,sha256=IwSwXr7khvHjpDzgiW5PRVKMhI2wQ9D1_H1MRnHjefU,2732
  edsl/inference_services/InferenceServiceABC.py,sha256=H6jW2gDKTLC3xgmqiSBdX4pGY1oauEO8VqGZoB3qvnQ,1790
- edsl/inference_services/InferenceServicesCollection.py,sha256=1ulvUYs2gaJsb7t8Mkua-6_3gff6rJmH72_BbItkaCE,2274
+ edsl/inference_services/InferenceServicesCollection.py,sha256=7Q04aiJUWJx3zzjSY1uH7MkhrJi41RshU3Zy-6rM6kI,2596
  edsl/inference_services/OpenAIService.py,sha256=OvczbY2wlEehZdaaMq69yeqzkjdrbbXlP8uC9WjgHq0,6021
  edsl/inference_services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  edsl/inference_services/models_available_cache.py,sha256=ZT2pBGxJqTgwynthu-SqBjv8zl7ql44q3gA6xy7kqSU,2338
@@ -66,7 +66,7 @@ edsl/inference_services/rate_limits_cache.py,sha256=HYslviz7mxF9U4CUTPAkoyBsiXjS
  edsl/inference_services/registry.py,sha256=-Yz86do-KZraunIrziVs9b95EbY-__JUnQb5Ulni7KI,483
  edsl/inference_services/write_available.py,sha256=NNwhATlaMp8IYY635MSx-oYxt5X15acjAfaqYCo_I1Y,285
  edsl/jobs/Answers.py,sha256=z4TADN-iHIrbMtI1jVyiaetv0OkTv768dFBpREIQC6c,1799
- edsl/jobs/Jobs.py,sha256=JrJcpTjR3wejMVvuFXZr7PHqCyj6zaRwqNu9eatvy9Y,29339
+ edsl/jobs/Jobs.py,sha256=rd9wlx7f7Q0aF-toV6A4ps6hd73MEmB5UlQleRBzPoE,30737
  edsl/jobs/__init__.py,sha256=aKuAyd_GoalGj-k7djOoVwEbFUE2XLPlikXaA1_8yAg,32
  edsl/jobs/buckets/BucketCollection.py,sha256=LA8DBVwMdeTFCbSDI0S2cDzfi_Qo6kRizwrG64tE8S4,1844
  edsl/jobs/buckets/ModelBuckets.py,sha256=hxw_tzc0V42CiB7mh5jIxlgwDVJ-zFZhlLtKrHEg8ho,2419
@@ -93,7 +93,7 @@ edsl/jobs/tasks/task_management.py,sha256=KMToZuXzMlnHRHUF_VHL0-lHMTGhklf2GHVuwE
  edsl/jobs/tasks/task_status_enum.py,sha256=DOyrz61YlIS8R1W7izJNphcLrJ7I_ReUlfdRmk23h0Q,5333
  edsl/jobs/tokens/InterviewTokenUsage.py,sha256=u_6-IHpGFwZ6qMEXr24-jyLVUSSp4dSs_4iAZsBv7O4,1100
  edsl/jobs/tokens/TokenUsage.py,sha256=odj2-wDNEbHl9noyFAQ0DSKV0D9cv3aDOpmXufKZ8O4,1323
- edsl/language_models/LanguageModel.py,sha256=cou6R-oNYk-_C8vfCQMkaeUbbAFEtBcG4d5lTZKN0YI,19082
+ edsl/language_models/LanguageModel.py,sha256=2TYZl1LM8kSrkf7wRvGy-OF8WIJHIOTlswdOM_ESkC4,20940
  edsl/language_models/ModelList.py,sha256=G8tzHqzz4exc28BGvGgVRk1Xwu8EDCiVWxMC5l8VnvI,2862
  edsl/language_models/RegisterLanguageModelsMeta.py,sha256=2bvWrVau2BRo-Bb1aO-QATH8xxuW_tF7NmqBMGDOfSg,8191
  edsl/language_models/__init__.py,sha256=bvY7Gy6VkX1gSbNkRbGPS-M1kUnb0EohL0FSagaEaTs,109
@@ -126,7 +126,7 @@ edsl/questions/QuestionExtract.py,sha256=fjnsNLS2fNW6dfFuRyc2EgKEHx8ujjONmg2nSRy
  edsl/questions/QuestionFreeText.py,sha256=ASj1s0EQYcZerJp476fscu_xEME8mKzVK3sPL6egiuU,3289
  edsl/questions/QuestionFunctional.py,sha256=jlC1eNE-kpp9o5CXKo-c3Re4PIq1_WmdJ66u9nD-W7w,4967
  edsl/questions/QuestionList.py,sha256=Wf7xDXJsQBsAD_yOrzZ_GstKGT7aZjimTkU6qyqOhhM,4051
- edsl/questions/QuestionMultipleChoice.py,sha256=C6FjGZxLESvpq76PjxbSR_yrrExJpEvF31JSbY1luuQ,6605
+ edsl/questions/QuestionMultipleChoice.py,sha256=P1GR5Ml7ppk-xeJnlS7ZWctj5poR9NHZ3MPKrlKSync,6631
  edsl/questions/QuestionNumerical.py,sha256=QArFDhP9Adb4l6y-udnUqPNk2Q6vT4pGsY13TkHsLGs,3631
  edsl/questions/QuestionRank.py,sha256=NEAwDt1at0zEM2S-E7jXMjglnlB0WhUlxSVJkzH4xSs,5876
  edsl/questions/RegisterQuestionsMeta.py,sha256=unON0CKpW-mveyhg9V3_BF_GYYwytMYP9h2ZZPetVNM,1994
@@ -197,7 +197,7 @@ edsl/utilities/interface.py,sha256=AaKpWiwWBwP2swNXmnFlIf3ZFsjfsR5bjXQAW47tD-8,1
  edsl/utilities/repair_functions.py,sha256=tftmklAqam6LOQQu_-9U44N-llycffhW8LfO63vBmNw,929
  edsl/utilities/restricted_python.py,sha256=5-_zUhrNbos7pLhDl9nr8d24auRlquR6w-vKkmNjPiA,2060
  edsl/utilities/utilities.py,sha256=oU5Gg6szTGqsJ2yBOS0aC3XooezLE8By3SdrQLLpqvA,10107
- edsl-0.1.31.dev1.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
- edsl-0.1.31.dev1.dist-info/METADATA,sha256=tsrurkFgZhvJzc-CO2ZiYEAoBmVHYyYgULThTH8kcWQ,4137
- edsl-0.1.31.dev1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- edsl-0.1.31.dev1.dist-info/RECORD,,
+ edsl-0.1.31.dev2.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
+ edsl-0.1.31.dev2.dist-info/METADATA,sha256=gQlFRVjdYP2rKQ4GQvub6WC5FdqTorn0IwT_l3_WSrk,4133
+ edsl-0.1.31.dev2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ edsl-0.1.31.dev2.dist-info/RECORD,,