edsl 0.1.59__tar.gz → 0.1.60__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (391)
  1. {edsl-0.1.59 → edsl-0.1.60}/PKG-INFO +2 -2
  2. edsl-0.1.60/edsl/__version__.py +1 -0
  3. {edsl-0.1.59 → edsl-0.1.60}/edsl/base/data_transfer_models.py +5 -0
  4. {edsl-0.1.59 → edsl-0.1.60}/edsl/base/enums.py +7 -2
  5. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/dataset_operations_mixin.py +2 -2
  6. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/__init__.py +3 -1
  7. edsl-0.1.60/edsl/inference_services/services/open_ai_service_v2.py +243 -0
  8. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/data_structures.py +3 -0
  9. {edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/key_lookup_builder.py +25 -3
  10. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/language_model.py +2 -1
  11. edsl-0.1.60/edsl/language_models/raw_response_handler.py +230 -0
  12. {edsl-0.1.59 → edsl-0.1.60}/edsl/results/result.py +37 -0
  13. {edsl-0.1.59 → edsl-0.1.60}/edsl/results/results.py +1 -0
  14. {edsl-0.1.59 → edsl-0.1.60}/pyproject.toml +2 -2
  15. edsl-0.1.59/edsl/__version__.py +0 -1
  16. edsl-0.1.59/edsl/language_models/raw_response_handler.py +0 -111
  17. {edsl-0.1.59 → edsl-0.1.60}/LICENSE +0 -0
  18. {edsl-0.1.59 → edsl-0.1.60}/README.md +0 -0
  19. {edsl-0.1.59 → edsl-0.1.60}/edsl/__init__.py +0 -0
  20. {edsl-0.1.59 → edsl-0.1.60}/edsl/__init__original.py +0 -0
  21. {edsl-0.1.59 → edsl-0.1.60}/edsl/agents/__init__.py +0 -0
  22. {edsl-0.1.59 → edsl-0.1.60}/edsl/agents/agent.py +0 -0
  23. {edsl-0.1.59 → edsl-0.1.60}/edsl/agents/agent_list.py +0 -0
  24. {edsl-0.1.59 → edsl-0.1.60}/edsl/agents/descriptors.py +0 -0
  25. {edsl-0.1.59 → edsl-0.1.60}/edsl/agents/exceptions.py +0 -0
  26. {edsl-0.1.59 → edsl-0.1.60}/edsl/base/__init__.py +0 -0
  27. {edsl-0.1.59 → edsl-0.1.60}/edsl/base/base_class.py +0 -0
  28. {edsl-0.1.59 → edsl-0.1.60}/edsl/base/base_exception.py +0 -0
  29. {edsl-0.1.59 → edsl-0.1.60}/edsl/base/exceptions.py +0 -0
  30. {edsl-0.1.59 → edsl-0.1.60}/edsl/base.py +0 -0
  31. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/__init__.py +0 -0
  32. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/bucket_collection.py +0 -0
  33. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/exceptions.py +0 -0
  34. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/model_buckets.py +0 -0
  35. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/token_bucket.py +0 -0
  36. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/token_bucket_api.py +0 -0
  37. {edsl-0.1.59 → edsl-0.1.60}/edsl/buckets/token_bucket_client.py +0 -0
  38. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/__init__.py +0 -0
  39. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/cache.py +0 -0
  40. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/cache_entry.py +0 -0
  41. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/cache_handler.py +0 -0
  42. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/exceptions.py +0 -0
  43. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/orm.py +0 -0
  44. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/remote_cache_sync.py +0 -0
  45. {edsl-0.1.59 → edsl-0.1.60}/edsl/caching/sql_dict.py +0 -0
  46. {edsl-0.1.59 → edsl-0.1.60}/edsl/cli.py +0 -0
  47. {edsl-0.1.59 → edsl-0.1.60}/edsl/config/__init__.py +0 -0
  48. {edsl-0.1.59 → edsl-0.1.60}/edsl/config/config_class.py +0 -0
  49. {edsl-0.1.59 → edsl-0.1.60}/edsl/config.py +0 -0
  50. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/Conversation.py +0 -0
  51. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/__init__.py +0 -0
  52. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/car_buying.py +0 -0
  53. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/chips.py +0 -0
  54. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/exceptions.py +0 -0
  55. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/mug_negotiation.py +0 -0
  56. {edsl-0.1.59 → edsl-0.1.60}/edsl/conversation/next_speaker_utilities.py +0 -0
  57. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/__init__.py +0 -0
  58. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/coop.py +0 -0
  59. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/coop_functions.py +0 -0
  60. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/coop_jobs_objects.py +0 -0
  61. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/coop_objects.py +0 -0
  62. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/coop_regular_objects.py +0 -0
  63. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/ep_key_handling.py +0 -0
  64. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/exceptions.py +0 -0
  65. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/price_fetcher.py +0 -0
  66. {edsl-0.1.59 → edsl-0.1.60}/edsl/coop/utils.py +0 -0
  67. {edsl-0.1.59 → edsl-0.1.60}/edsl/data_transfer_models.py +0 -0
  68. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/__init__.py +0 -0
  69. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/dataset.py +0 -0
  70. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/dataset_tree.py +0 -0
  71. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/display/CSSParameterizer.py +0 -0
  72. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/display/__init__.py +0 -0
  73. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/display/table_data_class.py +0 -0
  74. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/display/table_display.css +0 -0
  75. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/display/table_display.py +0 -0
  76. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/display/table_renderers.py +0 -0
  77. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/exceptions.py +0 -0
  78. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/file_exports.py +0 -0
  79. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/r/ggplot.py +0 -0
  80. {edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/tree_explore.py +0 -0
  81. {edsl-0.1.59 → edsl-0.1.60}/edsl/db_list/sqlite_list.py +0 -0
  82. {edsl-0.1.59 → edsl-0.1.60}/edsl/display/__init__.py +0 -0
  83. {edsl-0.1.59 → edsl-0.1.60}/edsl/display/core.py +0 -0
  84. {edsl-0.1.59 → edsl-0.1.60}/edsl/display/plugin.py +0 -0
  85. {edsl-0.1.59 → edsl-0.1.60}/edsl/display/utils.py +0 -0
  86. {edsl-0.1.59 → edsl-0.1.60}/edsl/enums.py +0 -0
  87. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/__init__.py +0 -0
  88. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/available_model_cache_handler.py +0 -0
  89. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/available_model_fetcher.py +0 -0
  90. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/data_structures.py +0 -0
  91. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/exceptions.py +0 -0
  92. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/inference_service_abc.py +0 -0
  93. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/inference_services_collection.py +0 -0
  94. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/models_available_cache.py +0 -0
  95. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/rate_limits_cache.py +0 -0
  96. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/registry.py +0 -0
  97. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/service_availability.py +0 -0
  98. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/anthropic_service.py +0 -0
  99. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/aws_bedrock.py +0 -0
  100. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/azure_ai.py +0 -0
  101. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/deep_infra_service.py +0 -0
  102. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/deep_seek_service.py +0 -0
  103. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/google_service.py +0 -0
  104. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/groq_service.py +0 -0
  105. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/mistral_ai_service.py +0 -0
  106. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/ollama_service.py +0 -0
  107. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/open_ai_service.py +0 -0
  108. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/perplexity_service.py +0 -0
  109. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/test_service.py +0 -0
  110. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/together_ai_service.py +0 -0
  111. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/xai_service.py +0 -0
  112. {edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/write_available.py +0 -0
  113. {edsl-0.1.59 → edsl-0.1.60}/edsl/instructions/__init__.py +0 -0
  114. {edsl-0.1.59 → edsl-0.1.60}/edsl/instructions/change_instruction.py +0 -0
  115. {edsl-0.1.59 → edsl-0.1.60}/edsl/instructions/exceptions.py +0 -0
  116. {edsl-0.1.59 → edsl-0.1.60}/edsl/instructions/instruction.py +0 -0
  117. {edsl-0.1.59 → edsl-0.1.60}/edsl/instructions/instruction_collection.py +0 -0
  118. {edsl-0.1.59 → edsl-0.1.60}/edsl/instructions/instruction_handler.py +0 -0
  119. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/ReportErrors.py +0 -0
  120. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/__init__.py +0 -0
  121. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/answering_function.py +0 -0
  122. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/exception_tracking.py +0 -0
  123. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/exceptions.py +0 -0
  124. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/interview.py +0 -0
  125. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/interview_status_dictionary.py +0 -0
  126. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/interview_status_enum.py +0 -0
  127. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/interview_status_log.py +0 -0
  128. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/interview_task_manager.py +0 -0
  129. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/request_token_estimator.py +0 -0
  130. {edsl-0.1.59 → edsl-0.1.60}/edsl/interviews/statistics.py +0 -0
  131. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/__init__.py +0 -0
  132. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/exceptions.py +0 -0
  133. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/invigilator_base.py +0 -0
  134. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/invigilators.py +0 -0
  135. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/prompt_constructor.py +0 -0
  136. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/prompt_helpers.py +0 -0
  137. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/question_instructions_prompt_builder.py +0 -0
  138. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/question_option_processor.py +0 -0
  139. {edsl-0.1.59 → edsl-0.1.60}/edsl/invigilators/question_template_replacements_builder.py +0 -0
  140. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/__init__.py +0 -0
  141. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/async_interview_runner.py +0 -0
  142. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/check_survey_scenario_compatibility.py +0 -0
  143. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/decorators.py +0 -0
  144. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/exceptions.py +0 -0
  145. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/fetch_invigilator.py +0 -0
  146. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/html_table_job_logger.py +0 -0
  147. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs.py +0 -0
  148. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_checks.py +0 -0
  149. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_component_constructor.py +0 -0
  150. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_interview_constructor.py +0 -0
  151. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_pricing_estimation.py +0 -0
  152. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_remote_inference_logger.py +0 -0
  153. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_runner_status.py +0 -0
  154. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/jobs_status_enums.py +0 -0
  155. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/progress_bar_manager.py +0 -0
  156. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/remote_inference.py +0 -0
  157. {edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/results_exceptions_handler.py +0 -0
  158. {edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/__init__.py +0 -0
  159. {edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/exceptions.py +0 -0
  160. {edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/key_lookup.py +0 -0
  161. {edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/key_lookup_collection.py +0 -0
  162. {edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/models.py +0 -0
  163. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/__init__.py +0 -0
  164. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/exceptions.py +0 -0
  165. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/model.py +0 -0
  166. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/model_list.py +0 -0
  167. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/price_manager.py +0 -0
  168. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/registry.py +0 -0
  169. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/repair.py +0 -0
  170. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/unused/fake_openai_call.py +0 -0
  171. {edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/utilities.py +0 -0
  172. {edsl-0.1.59 → edsl-0.1.60}/edsl/load_plugins.py +0 -0
  173. {edsl-0.1.59 → edsl-0.1.60}/edsl/logger.py +0 -0
  174. {edsl-0.1.59 → edsl-0.1.60}/edsl/notebooks/__init__.py +0 -0
  175. {edsl-0.1.59 → edsl-0.1.60}/edsl/notebooks/exceptions.py +0 -0
  176. {edsl-0.1.59 → edsl-0.1.60}/edsl/notebooks/notebook.py +0 -0
  177. {edsl-0.1.59 → edsl-0.1.60}/edsl/notebooks/notebook_to_latex.py +0 -0
  178. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/__init__.py +0 -0
  179. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/built_in/export_example.py +0 -0
  180. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/built_in/pig_latin.py +0 -0
  181. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/cli.py +0 -0
  182. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/cli_typer.py +0 -0
  183. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/exceptions.py +0 -0
  184. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/hookspec.py +0 -0
  185. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/plugin_host.py +0 -0
  186. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/plugin_manager.py +0 -0
  187. {edsl-0.1.59 → edsl-0.1.60}/edsl/plugins/plugins_registry.py +0 -0
  188. {edsl-0.1.59 → edsl-0.1.60}/edsl/prompts/__init__.py +0 -0
  189. {edsl-0.1.59 → edsl-0.1.60}/edsl/prompts/exceptions.py +0 -0
  190. {edsl-0.1.59 → edsl-0.1.60}/edsl/prompts/prompt.py +0 -0
  191. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/ExceptionExplainer.py +0 -0
  192. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/HTMLQuestion.py +0 -0
  193. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/Quick.py +0 -0
  194. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/VALIDATION_README.md +0 -0
  195. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/__init__.py +0 -0
  196. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/answer_validator_mixin.py +0 -0
  197. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/compose_questions.py +0 -0
  198. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/data_structures.py +0 -0
  199. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/decorators.py +0 -0
  200. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/descriptors.py +0 -0
  201. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/exceptions.py +0 -0
  202. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/loop_processor.py +0 -0
  203. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_budget.jinja +0 -0
  204. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_checkbox.jinja +0 -0
  205. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_extract.jinja +0 -0
  206. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_free_text.jinja +0 -0
  207. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_linear_scale.jinja +0 -0
  208. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_list.jinja +0 -0
  209. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_multiple_choice.jinja +0 -0
  210. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/prompt_templates/question_numerical.jinja +0 -0
  211. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_base.py +0 -0
  212. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_base_gen_mixin.py +0 -0
  213. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_base_prompts_mixin.py +0 -0
  214. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_budget.py +0 -0
  215. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_check_box.py +0 -0
  216. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_dict.py +0 -0
  217. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_extract.py +0 -0
  218. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_free_text.py +0 -0
  219. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_functional.py +0 -0
  220. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_likert_five.py +0 -0
  221. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_linear_scale.py +0 -0
  222. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_list.py +0 -0
  223. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_matrix.py +0 -0
  224. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_multiple_choice.py +0 -0
  225. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_multiple_choice_with_other.py +0 -0
  226. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_numerical.py +0 -0
  227. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_rank.py +0 -0
  228. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_registry.py +0 -0
  229. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_top_k.py +0 -0
  230. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/question_yes_no.py +0 -0
  231. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/register_questions_meta.py +0 -0
  232. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/response_validator_abc.py +0 -0
  233. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/response_validator_factory.py +0 -0
  234. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/settings.py +0 -0
  235. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/simple_ask_mixin.py +0 -0
  236. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/__init__.py +0 -0
  237. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/budget/__init__.py +0 -0
  238. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/budget/answering_instructions.jinja +0 -0
  239. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/budget/question_presentation.jinja +0 -0
  240. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/checkbox/__init__.py +0 -0
  241. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/checkbox/answering_instructions.jinja +0 -0
  242. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/checkbox/question_presentation.jinja +0 -0
  243. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/dict/__init__.py +0 -0
  244. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/dict/answering_instructions.jinja +0 -0
  245. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/dict/question_presentation.jinja +0 -0
  246. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/extract/__init__.py +0 -0
  247. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/extract/answering_instructions.jinja +0 -0
  248. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/extract/question_presentation.jinja +0 -0
  249. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/free_text/__init__.py +0 -0
  250. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
  251. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/free_text/question_presentation.jinja +0 -0
  252. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/likert_five/__init__.py +0 -0
  253. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/likert_five/answering_instructions.jinja +0 -0
  254. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/likert_five/question_presentation.jinja +0 -0
  255. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/linear_scale/__init__.py +0 -0
  256. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/linear_scale/answering_instructions.jinja +0 -0
  257. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/linear_scale/question_presentation.jinja +0 -0
  258. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/list/__init__.py +0 -0
  259. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/list/answering_instructions.jinja +0 -0
  260. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/list/question_presentation.jinja +0 -0
  261. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/matrix/__init__.py +0 -0
  262. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/matrix/answering_instructions.jinja +0 -0
  263. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/matrix/question_presentation.jinja +0 -0
  264. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice/__init__.py +0 -0
  265. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice/answering_instructions.jinja +0 -0
  266. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice/html.jinja +0 -0
  267. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice/question_presentation.jinja +0 -0
  268. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice_with_other/__init__.py +0 -0
  269. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice_with_other/answering_instructions.jinja +0 -0
  270. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/multiple_choice_with_other/question_presentation.jinja +0 -0
  271. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/numerical/__init__.py +0 -0
  272. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/numerical/answering_instructions.jinja +0 -0
  273. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/numerical/question_presentation.jinja +0 -0
  274. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/rank/__init__.py +0 -0
  275. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/rank/answering_instructions.jinja +0 -0
  276. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/rank/question_presentation.jinja +0 -0
  277. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/top_k/__init__.py +0 -0
  278. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/top_k/answering_instructions.jinja +0 -0
  279. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/top_k/question_presentation.jinja +0 -0
  280. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/yes_no/__init__.py +0 -0
  281. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/yes_no/answering_instructions.jinja +0 -0
  282. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/templates/yes_no/question_presentation.jinja +0 -0
  283. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/validation_analysis.py +0 -0
  284. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/validation_cli.py +0 -0
  285. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/validation_html_report.py +0 -0
  286. {edsl-0.1.59 → edsl-0.1.60}/edsl/questions/validation_logger.py +0 -0
  287. {edsl-0.1.59 → edsl-0.1.60}/edsl/results/__init__.py +0 -0
  288. {edsl-0.1.59 → edsl-0.1.60}/edsl/results/exceptions.py +0 -0
  289. {edsl-0.1.59 → edsl-0.1.60}/edsl/results/report.py +0 -0
  290. {edsl-0.1.59 → edsl-0.1.60}/edsl/results/results_selector.py +0 -0
  291. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/DocxScenario.py +0 -0
  292. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/PdfExtractor.py +0 -0
  293. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/__init__.py +0 -0
  294. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/construct_download_link.py +0 -0
  295. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/directory_scanner.py +0 -0
  296. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/document_chunker.py +0 -0
  297. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/exceptions.py +0 -0
  298. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/file_methods.py +0 -0
  299. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/file_store.py +0 -0
  300. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/__init__.py +0 -0
  301. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/csv_file_store.py +0 -0
  302. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/docx_file_store.py +0 -0
  303. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/html_file_store.py +0 -0
  304. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/jpeg_file_store.py +0 -0
  305. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/json_file_store.py +0 -0
  306. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/latex_file_store.py +0 -0
  307. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/md_file_store.py +0 -0
  308. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/mp4_file_store.py +0 -0
  309. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/pdf_file_store.py +0 -0
  310. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/png_file_store.py +0 -0
  311. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/pptx_file_store.py +0 -0
  312. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/py_file_store.py +0 -0
  313. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/sql_file_store.py +0 -0
  314. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/sqlite_file_store.py +0 -0
  315. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/txt_file_store.py +0 -0
  316. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/handlers/webm_file_store.py +0 -0
  317. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario.py +0 -0
  318. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_join.py +0 -0
  319. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_list.py +0 -0
  320. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_list_gc_test.py +0 -0
  321. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_list_memory_test.py +0 -0
  322. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_list_pdf_tools.py +0 -0
  323. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_list_source_refactor.md +0 -0
  324. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_selector.py +0 -0
  325. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/scenario_source.py +0 -0
  326. {edsl-0.1.59 → edsl-0.1.60}/edsl/scenarios/tests/test_scenario_list_sources.py +0 -0
  327. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/__init__.py +0 -0
  328. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/base.py +0 -0
  329. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/dag/__init__.py +0 -0
  330. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/dag/construct_dag.py +0 -0
  331. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/dag/dag.py +0 -0
  332. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/descriptors.py +0 -0
  333. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/edit_survey.py +0 -0
  334. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/exceptions.py +0 -0
  335. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/memory/__init__.py +0 -0
  336. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/memory/memory.py +0 -0
  337. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/memory/memory_management.py +0 -0
  338. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/memory/memory_plan.py +0 -0
  339. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/rules/__init__.py +0 -0
  340. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/rules/rule.py +0 -0
  341. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/rules/rule_collection.py +0 -0
  342. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/rules/rule_manager.py +0 -0
  343. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/survey.py +0 -0
  344. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/survey_css.py +0 -0
  345. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/survey_export.py +0 -0
  346. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/survey_flow_visualization.py +0 -0
  347. {edsl-0.1.59 → edsl-0.1.60}/edsl/surveys/survey_simulator.py +0 -0
  348. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/__init__.py +0 -0
  349. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/exceptions.py +0 -0
  350. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/question_task_creator.py +0 -0
  351. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/task_creators.py +0 -0
  352. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/task_history.py +0 -0
  353. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/task_status_enum.py +0 -0
  354. {edsl-0.1.59 → edsl-0.1.60}/edsl/tasks/task_status_log.py +0 -0
  355. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/base.html +0 -0
  356. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/exceptions_by_model.html +0 -0
  357. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/exceptions_by_question_name.html +0 -0
  358. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/exceptions_by_type.html +0 -0
  359. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/exceptions_table.html +0 -0
  360. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/interview_details.html +0 -0
  361. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/interviews.html +0 -0
  362. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/overview.html +0 -0
  363. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/performance_plot.html +0 -0
  364. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/report.css +0 -0
  365. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/report.html +0 -0
  366. {edsl-0.1.59 → edsl-0.1.60}/edsl/templates/error_reporting/report.js +0 -0
  367. {edsl-0.1.59 → edsl-0.1.60}/edsl/tests/scenarios/test_ScenarioSource.py +0 -0
  368. {edsl-0.1.59 → edsl-0.1.60}/edsl/tests/scenarios/test_scenario_list_sources.py +0 -0
  369. {edsl-0.1.59 → edsl-0.1.60}/edsl/tokens/__init__.py +0 -0
  370. {edsl-0.1.59 → edsl-0.1.60}/edsl/tokens/exceptions.py +0 -0
  371. {edsl-0.1.59 → edsl-0.1.60}/edsl/tokens/interview_token_usage.py +0 -0
  372. {edsl-0.1.59 → edsl-0.1.60}/edsl/tokens/token_usage.py +0 -0
  373. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/PrettyList.py +0 -0
  374. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/SystemInfo.py +0 -0
  375. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/__init__.py +0 -0
  376. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/ast_utilities.py +0 -0
  377. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/decorators.py +0 -0
  378. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/gcp_bucket/__init__.py +0 -0
  379. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/gcp_bucket/cloud_storage.py +0 -0
  380. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/is_notebook.py +0 -0
  381. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/is_valid_variable_name.py +0 -0
  382. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/markdown_to_docx.py +0 -0
  383. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/markdown_to_pdf.py +0 -0
  384. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/memory_debugger.py +0 -0
  385. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/naming_utilities.py +0 -0
  386. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/remove_edsl_version.py +0 -0
  387. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/repair_functions.py +0 -0
  388. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/restricted_python.py +0 -0
  389. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/template_loader.py +0 -0
  390. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/utilities.py +0 -0
  391. {edsl-0.1.59 → edsl-0.1.60}/edsl/utilities/wikipedia.py +0 -0
{edsl-0.1.59 → edsl-0.1.60}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: edsl
-Version: 0.1.59
+Version: 0.1.60
 Summary: Create and analyze LLM-based surveys
 Home-page: https://www.expectedparrot.com/
 License: MIT
@@ -43,7 +43,7 @@ Requires-Dist: pydot (>=2.0.0,<3.0.0)
 Requires-Dist: pygments (>=2.17.2,<3.0.0)
 Requires-Dist: pymupdf (>=1.25.5,<2.0.0)
 Requires-Dist: pypdf2 (>=3.0.1,<4.0.0)
-Requires-Dist: pyreadstat (>=1.2.7,<2.0.0)
+Requires-Dist: pyreadstat (==1.2.8)
 Requires-Dist: python-docx (>=1.1.0,<2.0.0)
 Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
 Requires-Dist: python-pptx (>=1.0.2,<2.0.0)
edsl-0.1.60/edsl/__version__.py

@@ -0,0 +1 @@
+__version__ = "0.1.60"
{edsl-0.1.59 → edsl-0.1.60}/edsl/base/data_transfer_models.py

@@ -17,6 +17,7 @@ class EDSLOutput(NamedTuple):
     answer: Any
     generated_tokens: str
     comment: Optional[str] = None
+    reasoning_summary: Optional[Any] = None


 class ModelResponse(NamedTuple):
@@ -49,6 +50,7 @@ class EDSLResultObjectInput(NamedTuple):
     cache_key: str
     answer: Any
     comment: str
+    reasoning_summary: Optional[Any] = None
     validated: bool = False
     exception_occurred: Exception = None
     input_tokens: Optional[int] = None
@@ -96,12 +98,15 @@ class Answers(UserDict):
         answer = response.answer
         comment = response.comment
         generated_tokens = response.generated_tokens
+        reasoning_summary = response.reasoning_summary
         # record the answer
         if generated_tokens:
             self[question.question_name + "_generated_tokens"] = generated_tokens
         self[question.question_name] = answer
         if comment:
             self[question.question_name + "_comment"] = comment
+        if reasoning_summary:
+            self[question.question_name + "_reasoning_summary"] = reasoning_summary

     def replace_missing_answers_with_none(self, survey: "Survey") -> None:
         """Replace missing answers with None. Answers can be missing if the agent skips a question."""
{edsl-0.1.59 → edsl-0.1.60}/edsl/base/enums.py

@@ -57,6 +57,7 @@ class InferenceServiceType(EnumWithChecks):
     DEEP_INFRA = "deep_infra"
     REPLICATE = "replicate"
     OPENAI = "openai"
+    OPENAI_V2 = "openai_v2"
     GOOGLE = "google"
     TEST = "test"
     ANTHROPIC = "anthropic"
@@ -77,6 +78,7 @@ InferenceServiceLiteral = Literal[
     "deep_infra",
     "replicate",
     "openai",
+    "openai_v2",
     "google",
     "test",
     "anthropic",
@@ -93,6 +95,7 @@ InferenceServiceLiteral = Literal[
 available_models_urls = {
     "anthropic": "https://docs.anthropic.com/en/docs/about-claude/models",
     "openai": "https://platform.openai.com/docs/models/gp",
+    "openai_v2": "https://platform.openai.com/docs/models/gp",
     "groq": "https://console.groq.com/docs/models",
     "google": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models",
 }
@@ -102,6 +105,7 @@ service_to_api_keyname = {
     InferenceServiceType.DEEP_INFRA.value: "DEEP_INFRA_API_KEY",
     InferenceServiceType.REPLICATE.value: "TBD",
     InferenceServiceType.OPENAI.value: "OPENAI_API_KEY",
+    InferenceServiceType.OPENAI_V2.value: "OPENAI_API_KEY",
     InferenceServiceType.GOOGLE.value: "GOOGLE_API_KEY",
     InferenceServiceType.TEST.value: "TBD",
     InferenceServiceType.ANTHROPIC.value: "ANTHROPIC_API_KEY",
@@ -135,7 +139,7 @@ class TokenPricing:
             and self.prompt_token_price == other.prompt_token_price
             and self.completion_token_price == other.completion_token_price
         )
-
+
     @classmethod
     def example(cls) -> "TokenPricing":
         """Return an example TokenPricing object."""
@@ -145,6 +149,7 @@ class TokenPricing:
             completion_token_price_per_k=0.03,
         )

+
 pricing = {
     "dbrx-instruct": TokenPricing(
         model_name="dbrx-instruct",
@@ -212,4 +217,4 @@ def get_token_pricing(model_name):
         model_name=model_name,
         prompt_token_price_per_k=0.0,
         completion_token_price_per_k=0.0,
-    )
+    )
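A small sketch of what the enum additions mean in practice, assuming the names above are importable from `edsl.base.enums` as the file list indicates: `openai_v2` becomes a first-class `InferenceServiceType` and resolves to the same `OPENAI_API_KEY` environment variable as the existing `openai` entry.

```python
from edsl.base.enums import InferenceServiceType, service_to_api_keyname

# The new Responses-API service sits alongside "openai" and reuses its key name.
assert InferenceServiceType.OPENAI_V2.value == "openai_v2"
assert service_to_api_keyname["openai_v2"] == "OPENAI_API_KEY"
```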
{edsl-0.1.59 → edsl-0.1.60}/edsl/dataset/dataset_operations_mixin.py

@@ -357,7 +357,7 @@ class DataOperationsBase:
         4
         >>> engine = Results.example()._db(shape = "long")
         >>> len(engine.execute(text("SELECT * FROM self")).fetchall())
-        204
+        212
         """
         # Import needed for database connection
         from sqlalchemy import create_engine
@@ -442,7 +442,7 @@ class DataOperationsBase:

         # Using long format
         >>> len(r.sql("SELECT * FROM self", shape="long"))
-        204
+        212
         """
         import pandas as pd

{edsl-0.1.59 → edsl-0.1.60}/edsl/inference_services/services/__init__.py

@@ -8,6 +8,7 @@ from .groq_service import GroqService
 from .mistral_ai_service import MistralAIService
 from .ollama_service import OllamaService
 from .open_ai_service import OpenAIService
+from .open_ai_service_v2 import OpenAIServiceV2
 from .perplexity_service import PerplexityService
 from .test_service import TestService
 from .together_ai_service import TogetherAIService
@@ -24,8 +25,9 @@ __all__ = [
     "MistralAIService",
     "OllamaService",
     "OpenAIService",
+    "OpenAIServiceV2",
     "PerplexityService",
     "TestService",
     "TogetherAIService",
     "XAIService",
-]
+]
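With the export in place, the new service can be exercised directly. A hedged sketch based only on the `create_model` classmethod in the new file below: it builds the `LanguageModel` subclass without making an API call. The model name is purely illustrative, and `to_class_name` is assumed to be provided by `InferenceServiceABC`.

```python
from edsl.inference_services.services import OpenAIServiceV2

# No API call is made here; create_model only constructs the class.
ResponsesModel = OpenAIServiceV2.create_model("o4-mini")  # illustrative model name
print(ResponsesModel._inference_service_)   # openai_v2
print(ResponsesModel.reasoning_sequence)    # ['output', 0, 'summary']
```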
edsl-0.1.60/edsl/inference_services/services/open_ai_service_v2.py

@@ -0,0 +1,243 @@
+from __future__ import annotations
+from typing import Any, List, Optional, Dict, NewType, TYPE_CHECKING
+import os
+
+import openai
+
+from ..inference_service_abc import InferenceServiceABC
+
+# Use TYPE_CHECKING to avoid circular imports at runtime
+if TYPE_CHECKING:
+    from ...language_models import LanguageModel
+    from ..rate_limits_cache import rate_limits
+
+# Default to completions API but can use responses API with parameter
+
+if TYPE_CHECKING:
+    from ....scenarios.file_store import FileStore as Files
+    from ....invigilators.invigilator_base import InvigilatorBase as InvigilatorAI
+
+
+APIToken = NewType("APIToken", str)
+
+
+class OpenAIServiceV2(InferenceServiceABC):
+    """OpenAI service class using the Responses API."""
+
+    _inference_service_ = "openai_v2"
+    _env_key_name_ = "OPENAI_API_KEY"
+    _base_url_ = None
+
+    _sync_client_ = openai.OpenAI
+    _async_client_ = openai.AsyncOpenAI
+
+    _sync_client_instances: Dict[APIToken, openai.OpenAI] = {}
+    _async_client_instances: Dict[APIToken, openai.AsyncOpenAI] = {}
+
+    # sequence to extract text from response.output
+    key_sequence = ["output", 1, "content", 0, "text"]
+    usage_sequence = ["usage"]
+    # sequence to extract reasoning summary from response.output
+    reasoning_sequence = ["output", 0, "summary"]
+    input_token_name = "prompt_tokens"
+    output_token_name = "completion_tokens"
+
+    available_models_url = "https://platform.openai.com/docs/models/gp"
+
+    def __init_subclass__(cls, **kwargs):
+        super().__init_subclass__(**kwargs)
+        cls._sync_client_instances = {}
+        cls._async_client_instances = {}
+
+    @classmethod
+    def sync_client(cls, api_key: str) -> openai.OpenAI:
+        if api_key not in cls._sync_client_instances:
+            client = cls._sync_client_(
+                api_key=api_key,
+                base_url=cls._base_url_,
+            )
+            cls._sync_client_instances[api_key] = client
+        return cls._sync_client_instances[api_key]
+
+    @classmethod
+    def async_client(cls, api_key: str) -> openai.AsyncOpenAI:
+        if api_key not in cls._async_client_instances:
+            client = cls._async_client_(
+                api_key=api_key,
+                base_url=cls._base_url_,
+            )
+            cls._async_client_instances[api_key] = client
+        return cls._async_client_instances[api_key]
+
+    model_exclude_list = [
+        "whisper-1",
+        "davinci-002",
+        "dall-e-2",
+        "tts-1-hd-1106",
+        "tts-1-hd",
+        "dall-e-3",
+        "tts-1",
+        "babbage-002",
+        "tts-1-1106",
+        "text-embedding-3-large",
+        "text-embedding-3-small",
+        "text-embedding-ada-002",
+        "ft:davinci-002:mit-horton-lab::8OfuHgoo",
+        "gpt-3.5-turbo-instruct-0914",
+        "gpt-3.5-turbo-instruct",
+    ]
+    _models_list_cache: List[str] = []
+
+    @classmethod
+    def get_model_list(cls, api_key: str | None = None) -> List[str]:
+        if api_key is None:
+            api_key = os.getenv(cls._env_key_name_)
+        raw = cls.sync_client(api_key).models.list()
+        return raw.data if hasattr(raw, "data") else raw
+
+    @classmethod
+    def available(cls, api_token: str | None = None) -> List[str]:
+        if api_token is None:
+            api_token = os.getenv(cls._env_key_name_)
+        if not cls._models_list_cache:
+            data = cls.get_model_list(api_key=api_token)
+            cls._models_list_cache = [
+                m.id for m in data if m.id not in cls.model_exclude_list
+            ]
+        return cls._models_list_cache
+
+    @classmethod
+    def create_model(
+        cls,
+        model_name: str,
+        model_class_name: str | None = None,
+    ) -> LanguageModel:
+        if model_class_name is None:
+            model_class_name = cls.to_class_name(model_name)
+
+        from ...language_models import LanguageModel
+
+        class LLM(LanguageModel):
+            """Child class for OpenAI Responses API"""
+
+            key_sequence = cls.key_sequence
+            usage_sequence = cls.usage_sequence
+            reasoning_sequence = cls.reasoning_sequence
+            input_token_name = cls.input_token_name
+            output_token_name = cls.output_token_name
+            _inference_service_ = cls._inference_service_
+            _model_ = model_name
+            _parameters_ = {
+                "temperature": 0.5,
+                "max_tokens": 2000,
+                "top_p": 1,
+                "frequency_penalty": 0,
+                "presence_penalty": 0,
+                "logprobs": False,
+                "top_logprobs": 3,
+            }
+
+            def sync_client(self) -> openai.OpenAI:
+                return cls.sync_client(api_key=self.api_token)
+
+            def async_client(self) -> openai.AsyncOpenAI:
+                return cls.async_client(api_key=self.api_token)
+
+            @classmethod
+            def available(cls) -> list[str]:
+                return cls.sync_client().models.list().data
+
+            def get_headers(self) -> dict[str, Any]:
+                client = self.sync_client()
+                response = client.responses.with_raw_response.create(
+                    model=self.model,
+                    input=[{"role": "user", "content": "Say this is a test"}],
+                    store=False,
+                )
+                return dict(response.headers)
+
+            def get_rate_limits(self) -> dict[str, Any]:
+                try:
+                    headers = rate_limits.get("openai", self.get_headers())
+                except Exception:
+                    return {"rpm": 10000, "tpm": 2000000}
+                return {
+                    "rpm": int(headers["x-ratelimit-limit-requests"]),
+                    "tpm": int(headers["x-ratelimit-limit-tokens"]),
+                }
+
+            async def async_execute_model_call(
+                self,
+                user_prompt: str,
+                system_prompt: str = "",
+                files_list: Optional[List[Files]] = None,
+                invigilator: Optional[InvigilatorAI] = None,
+            ) -> dict[str, Any]:
+                content = user_prompt
+                if files_list:
+                    # embed files as separate inputs
+                    content = [{"type": "text", "text": user_prompt}]
+                    for f in files_list:
+                        content.append(
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": f"data:{f.mime_type};base64,{f.base64_string}"
+                                },
+                            }
+                        )
+                # build input sequence
+                messages: Any
+                if system_prompt and not self.omit_system_prompt_if_empty:
+                    messages = [
+                        {"role": "system", "content": system_prompt},
+                        {"role": "user", "content": content},
+                    ]
+                else:
+                    messages = [{"role": "user", "content": content}]
+
+                # All OpenAI models with the responses API use these base parameters
+                params = {
+                    "model": self.model,
+                    "input": messages,
+                    "temperature": self.temperature,
+                    "top_p": self.top_p,
+                    "store": False,
+                }
+
+                # Check if this is a reasoning model (o-series models)
+                is_reasoning_model = any(tag in self.model for tag in ["o1", "o1-mini", "o3", "o3-mini", "o1-pro", "o4-mini"])
+
+                # Only add reasoning parameter for reasoning models
+                if is_reasoning_model:
+                    params["reasoning"] = {"summary": "auto"}
+
+                # For all models using the responses API, use max_output_tokens
+                # instead of max_tokens (which is for the completions API)
+                params["max_output_tokens"] = self.max_tokens
+
+                # Specifically for o-series, we also set temperature to 1
+                if is_reasoning_model:
+                    params["temperature"] = 1
+
+                client = self.async_client()
+                try:
+                    response = await client.responses.create(**params)
+
+                except Exception as e:
+                    return {"message": str(e)}
+
+                # convert to dict
+                response_dict = response.model_dump()
+                return response_dict
+
+        LLM.__name__ = model_class_name
+        return LLM
+
+    @staticmethod
+    def _create_reasoning_sequence():
+        """Create the reasoning sequence for extracting reasoning summaries from model responses."""
+        # For OpenAI responses, the reasoning summary is typically found at:
+        # ["output", 0, "summary"]
+        # This is the path to the 'summary' field in the first item of the 'output' array
+        return ["output", 0, "summary"]
{edsl-0.1.59 → edsl-0.1.60}/edsl/jobs/data_structures.py

@@ -213,6 +213,9 @@ class Answers(UserDict):
         if comment:
             self[question.question_name + "_comment"] = comment

+        if getattr(response, "reasoning_summary", None):
+            self[question.question_name + "_reasoning_summary"] = response.reasoning_summary
+
     def replace_missing_answers_with_none(self, survey: "Survey") -> None:
         """
         Replace missing answers with None for all questions in the survey.
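A toy stand-in for the logic in this hunk, showing the key layout an `Answers` mapping ends up with once a response carries a reasoning summary. The question name, values, and the `SimpleNamespace` response are illustrative.

```python
from types import SimpleNamespace

answers = {}
response = SimpleNamespace(
    answer="blue",
    comment="It is calming.",
    generated_tokens="blue\nIt is calming.",
    reasoning_summary="Considered common favorites.",
)
q = "favorite_color"

answers[q] = response.answer
answers[q + "_comment"] = response.comment
answers[q + "_generated_tokens"] = response.generated_tokens
if getattr(response, "reasoning_summary", None):
    answers[q + "_reasoning_summary"] = response.reasoning_summary

print(sorted(answers))
# ['favorite_color', 'favorite_color_comment',
#  'favorite_color_generated_tokens', 'favorite_color_reasoning_summary']
```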
{edsl-0.1.59 → edsl-0.1.60}/edsl/key_management/key_lookup_builder.py

@@ -363,13 +363,35 @@ class KeyLookupBuilder:
         >>> builder._add_api_key("OPENAI_API_KEY", "sk-1234", "env")
         >>> 'sk-1234' == builder.key_data["openai"][-1].value
         True
+        >>> 'sk-1234' == builder.key_data["openai_v2"][-1].value
+        True
         """
         service = api_keyname_to_service[key]
         new_entry = APIKeyEntry(service=service, name=key, value=value, source=source)
-        if service not in self.key_data:
-            self.key_data[service] = [new_entry]
+
+        # Special case for OPENAI_API_KEY - add to both openai and openai_v2
+        if key == "OPENAI_API_KEY":
+            # Add to openai service
+            openai_service = "openai"
+            openai_entry = APIKeyEntry(service=openai_service, name=key, value=value, source=source)
+            if openai_service not in self.key_data:
+                self.key_data[openai_service] = [openai_entry]
+            else:
+                self.key_data[openai_service].append(openai_entry)
+
+            # Add to openai_v2 service
+            openai_v2_service = "openai_v2"
+            openai_v2_entry = APIKeyEntry(service=openai_v2_service, name=key, value=value, source=source)
+            if openai_v2_service not in self.key_data:
+                self.key_data[openai_v2_service] = [openai_v2_entry]
+            else:
+                self.key_data[openai_v2_service].append(openai_v2_entry)
         else:
-            self.key_data[service].append(new_entry)
+            # Normal case for all other API keys
+            if service not in self.key_data:
+                self.key_data[service] = [new_entry]
+            else:
+                self.key_data[service].append(new_entry)

     def update_from_dict(self, d: dict) -> None:
         """
{edsl-0.1.59 → edsl-0.1.60}/edsl/language_models/language_model.py

@@ -174,7 +174,8 @@ class LanguageModel(
         """
         key_sequence = cls.key_sequence
         usage_sequence = cls.usage_sequence if hasattr(cls, "usage_sequence") else None
-        return RawResponseHandler(key_sequence, usage_sequence)
+        reasoning_sequence = cls.reasoning_sequence if hasattr(cls, "reasoning_sequence") else None
+        return RawResponseHandler(key_sequence, usage_sequence, reasoning_sequence)

     def __init__(
         self,
edsl-0.1.60/edsl/language_models/raw_response_handler.py

@@ -0,0 +1,230 @@
+import json
+from typing import Optional, Any, List
+from .exceptions import (
+    LanguageModelBadResponseError,
+    LanguageModelTypeError,
+    LanguageModelIndexError,
+    LanguageModelKeyError
+)
+
+from json_repair import repair_json
+
+
+def _extract_item_from_raw_response(data, sequence):
+    if isinstance(data, str):
+        try:
+            data = json.loads(data)
+        except json.JSONDecodeError:
+            return data
+    current_data = data
+    for i, key in enumerate(sequence):
+        try:
+            if isinstance(current_data, (list, tuple)):
+                if not isinstance(key, int):
+                    raise LanguageModelTypeError(
+                        f"Expected integer index for sequence at position {i}, got {type(key).__name__}"
+                    )
+                if key < 0 or key >= len(current_data):
+                    raise LanguageModelIndexError(
+                        f"Index {key} out of range for sequence of length {len(current_data)} at position {i}"
+                    )
+            elif isinstance(current_data, dict):
+                if key not in current_data:
+                    raise LanguageModelKeyError(
+                        f"Key '{key}' not found in dictionary at position {i}"
+                    )
+            else:
+                raise LanguageModelTypeError(
+                    f"Cannot index into {type(current_data).__name__} at position {i}. Full response is: {data} of type {type(data)}. Key sequence is: {sequence}"
+                )
+
+            current_data = current_data[key]
+        except Exception as e:
+            path = " -> ".join(map(str, sequence[: i + 1]))
+
+            # Create a safe error message that won't be None
+            if "error" in data and data["error"] is not None:
+                msg = str(data["error"])
+            else:
+                msg = f"Error accessing path: {path}. {str(e)}. Full response is: '{data}'"
+
+            raise LanguageModelBadResponseError(message=msg, response_json=data)
+    if isinstance(current_data, str):
+        return current_data.strip()
+    else:
+        return current_data
+
+
+class RawResponseHandler:
+    """Class to handle raw responses from language models."""
+
+    def __init__(self, key_sequence: list, usage_sequence: Optional[list] = None, reasoning_sequence: Optional[list] = None):
+        self.key_sequence = key_sequence
+        self.usage_sequence = usage_sequence
+        self.reasoning_sequence = reasoning_sequence
+
+    def get_generated_token_string(self, raw_response):
+        try:
+            return _extract_item_from_raw_response(raw_response, self.key_sequence)
+        except (LanguageModelKeyError, LanguageModelIndexError, LanguageModelTypeError, LanguageModelBadResponseError) as e:
+            # For non-reasoning models or reasoning models with different response formats,
+            # try to extract text directly from common response formats
+            if isinstance(raw_response, dict):
+                # Responses API format for non-reasoning models
+                if 'output' in raw_response and isinstance(raw_response['output'], list):
+                    # Try to get first message content
+                    if len(raw_response['output']) > 0:
+                        item = raw_response['output'][0]
+                        if isinstance(item, dict) and 'content' in item:
+                            if isinstance(item['content'], list) and len(item['content']) > 0:
+                                first_content = item['content'][0]
+                                if isinstance(first_content, dict) and 'text' in first_content:
+                                    return first_content['text']
+                            elif isinstance(item['content'], str):
+                                return item['content']
+
+                # OpenAI completions format
+                if 'choices' in raw_response and isinstance(raw_response['choices'], list) and len(raw_response['choices']) > 0:
+                    choice = raw_response['choices'][0]
+                    if isinstance(choice, dict):
+                        if 'text' in choice:
+                            return choice['text']
+                        elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                            return choice['message']['content']
+
+                # Text directly in response
+                if 'text' in raw_response:
+                    return raw_response['text']
+                elif 'content' in raw_response:
+                    return raw_response['content']
+
+                # Error message - try to return a coherent error for debugging
+                if 'message' in raw_response:
+                    return f"[ERROR: {raw_response['message']}]"
+
+            # If we get a string directly, return it
+            if isinstance(raw_response, str):
+                return raw_response
+
+            # As a last resort, convert the whole response to string
+            try:
+                return f"[ERROR: Could not extract text. Raw response: {str(raw_response)}]"
+            except:
+                return "[ERROR: Could not extract text from response]"
+
+    def get_usage_dict(self, raw_response):
+        if self.usage_sequence is None:
+            return {}
+        try:
+            return _extract_item_from_raw_response(raw_response, self.usage_sequence)
+        except (LanguageModelKeyError, LanguageModelIndexError, LanguageModelTypeError, LanguageModelBadResponseError):
+            # For non-reasoning models, try to extract usage from common response formats
+            if isinstance(raw_response, dict):
+                # Standard OpenAI usage format
+                if 'usage' in raw_response:
+                    return raw_response['usage']
+
+                # Look for nested usage info
+                if 'choices' in raw_response and len(raw_response['choices']) > 0:
+                    choice = raw_response['choices'][0]
+                    if isinstance(choice, dict) and 'usage' in choice:
+                        return choice['usage']
+
+            # If no usage info found, return empty dict
+            return {}
+
+    def get_reasoning_summary(self, raw_response):
+        """
+        Extract reasoning summary from the model response.
+
+        Handles various response structures:
+        1. Standard path extraction using self.reasoning_sequence
+        2. Direct access to output[0]['summary'] for OpenAI responses
+        3. List responses where the first item contains the output structure
+        """
+        if self.reasoning_sequence is None:
+            return None
+
+        try:
+            # First try the standard extraction path
+            summary_data = _extract_item_from_raw_response(raw_response, self.reasoning_sequence)
+
+            # If summary_data is a list of dictionaries with 'text' and 'type' fields
+            # (as in OpenAI's response format), combine them into a single string
+            if isinstance(summary_data, list) and all(isinstance(item, dict) and 'text' in item for item in summary_data):
+                return '\n\n'.join(item['text'] for item in summary_data)
+
+            return summary_data
+        except Exception:
+            # Fallback approaches for different response structures
+            try:
+                # Case 1: Direct dict with 'output' field (common OpenAI format)
+                if isinstance(raw_response, dict) and 'output' in raw_response:
+                    output = raw_response['output']
+                    if isinstance(output, list) and len(output) > 0 and 'summary' in output[0]:
+                        summary_data = output[0]['summary']
+                        if isinstance(summary_data, list) and all(isinstance(item, dict) and 'text' in item for item in summary_data):
+                            return '\n\n'.join(item['text'] for item in summary_data)
+
+                # Case 2: List where the first item is a dict with 'output' field
+                if isinstance(raw_response, list) and len(raw_response) > 0:
+                    first_item = raw_response[0]
+                    if isinstance(first_item, dict) and 'output' in first_item:
+                        output = first_item['output']
+                        if isinstance(output, list) and len(output) > 0 and 'summary' in output[0]:
+                            summary_data = output[0]['summary']
+                            if isinstance(summary_data, list) and all(isinstance(item, dict) and 'text' in item for item in summary_data):
+                                return '\n\n'.join(item['text'] for item in summary_data)
+            except Exception:
+                pass
+
+            return None
+
+    def parse_response(self, raw_response: dict[str, Any]) -> Any:
+        """Parses the API response and returns the response text."""
+
+        from edsl.data_transfer_models import EDSLOutput
+
+        generated_token_string = self.get_generated_token_string(raw_response)
+        # Ensure generated_token_string is a string before using string methods
+        if not isinstance(generated_token_string, str):
+            generated_token_string = str(generated_token_string)
+        last_newline = generated_token_string.rfind("\n")
+        reasoning_summary = self.get_reasoning_summary(raw_response)
+
+        if last_newline == -1:
+            # There is no comment
+            edsl_dict = {
+                "answer": self.convert_answer(generated_token_string),
+                "generated_tokens": generated_token_string,
+                "comment": None,
+                "reasoning_summary": reasoning_summary,
+            }
+        else:
+            edsl_dict = {
+                "answer": self.convert_answer(generated_token_string[:last_newline]),
+                "comment": generated_token_string[last_newline + 1:].strip(),
+                "generated_tokens": generated_token_string,
+                "reasoning_summary": reasoning_summary,
+            }
+        return EDSLOutput(**edsl_dict)
+
+    @staticmethod
+    def convert_answer(response_part):
+        import json
+
+        response_part = response_part.strip()
+
+        if response_part == "None":
+            return None
+
+        repaired = repair_json(response_part)
+        if repaired == '""':
+            # it was a literal string
+            return response_part
+
+        try:
+            return json.loads(repaired)
+        except json.JSONDecodeError:
+            # last resort
+            return response_part
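Putting the pieces together, a minimal end-to-end sketch: a handler configured with the `OpenAIServiceV2` sequences turns an illustrative Responses-API payload into an `EDSLOutput` whose `reasoning_summary` is populated. The payload is made up; only the sequences and the parsing behavior come from the code above.

```python
from edsl.language_models.raw_response_handler import RawResponseHandler

handler = RawResponseHandler(
    key_sequence=["output", 1, "content", 0, "text"],  # OpenAIServiceV2.key_sequence
    usage_sequence=["usage"],                          # OpenAIServiceV2.usage_sequence
    reasoning_sequence=["output", 0, "summary"],       # OpenAIServiceV2.reasoning_sequence
)

raw_response = {
    "output": [
        {"summary": [{"type": "summary_text", "text": "Compared both options first."}]},
        {"content": [{"text": "B\nThe second option scores higher."}]},
    ],
    "usage": {"prompt_tokens": 40, "completion_tokens": 12},
}

result = handler.parse_response(raw_response)
print(result.answer)             # B
print(result.comment)            # The second option scores higher.
print(result.reasoning_summary)  # Compared both options first.
```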