edsl 0.1.39.dev3__py3-none-any.whl → 0.1.39.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (344) hide show
  1. edsl/Base.py +413 -332
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +57 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +1071 -867
  7. edsl/agents/AgentList.py +551 -413
  8. edsl/agents/Invigilator.py +284 -233
  9. edsl/agents/InvigilatorBase.py +257 -270
  10. edsl/agents/PromptConstructor.py +272 -354
  11. edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
  12. edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
  13. edsl/agents/__init__.py +2 -3
  14. edsl/agents/descriptors.py +99 -99
  15. edsl/agents/prompt_helpers.py +129 -129
  16. edsl/agents/question_option_processor.py +172 -0
  17. edsl/auto/AutoStudy.py +130 -117
  18. edsl/auto/StageBase.py +243 -230
  19. edsl/auto/StageGenerateSurvey.py +178 -178
  20. edsl/auto/StageLabelQuestions.py +125 -125
  21. edsl/auto/StagePersona.py +61 -61
  22. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  23. edsl/auto/StagePersonaDimensionValues.py +74 -74
  24. edsl/auto/StagePersonaDimensions.py +69 -69
  25. edsl/auto/StageQuestions.py +74 -73
  26. edsl/auto/SurveyCreatorPipeline.py +21 -21
  27. edsl/auto/utilities.py +218 -224
  28. edsl/base/Base.py +279 -279
  29. edsl/config.py +177 -157
  30. edsl/conversation/Conversation.py +290 -290
  31. edsl/conversation/car_buying.py +59 -58
  32. edsl/conversation/chips.py +95 -95
  33. edsl/conversation/mug_negotiation.py +81 -81
  34. edsl/conversation/next_speaker_utilities.py +93 -93
  35. edsl/coop/CoopFunctionsMixin.py +15 -0
  36. edsl/coop/ExpectedParrotKeyHandler.py +125 -0
  37. edsl/coop/PriceFetcher.py +54 -54
  38. edsl/coop/__init__.py +2 -2
  39. edsl/coop/coop.py +1106 -1028
  40. edsl/coop/utils.py +131 -131
  41. edsl/data/Cache.py +573 -555
  42. edsl/data/CacheEntry.py +230 -233
  43. edsl/data/CacheHandler.py +168 -149
  44. edsl/data/RemoteCacheSync.py +186 -78
  45. edsl/data/SQLiteDict.py +292 -292
  46. edsl/data/__init__.py +5 -4
  47. edsl/data/hack.py +10 -0
  48. edsl/data/orm.py +10 -10
  49. edsl/data_transfer_models.py +74 -73
  50. edsl/enums.py +202 -175
  51. edsl/exceptions/BaseException.py +21 -21
  52. edsl/exceptions/__init__.py +54 -54
  53. edsl/exceptions/agents.py +54 -42
  54. edsl/exceptions/cache.py +5 -5
  55. edsl/exceptions/configuration.py +16 -16
  56. edsl/exceptions/coop.py +10 -10
  57. edsl/exceptions/data.py +14 -14
  58. edsl/exceptions/general.py +34 -34
  59. edsl/exceptions/inference_services.py +5 -0
  60. edsl/exceptions/jobs.py +33 -33
  61. edsl/exceptions/language_models.py +63 -63
  62. edsl/exceptions/prompts.py +15 -15
  63. edsl/exceptions/questions.py +109 -91
  64. edsl/exceptions/results.py +29 -29
  65. edsl/exceptions/scenarios.py +29 -22
  66. edsl/exceptions/surveys.py +37 -37
  67. edsl/inference_services/AnthropicService.py +106 -87
  68. edsl/inference_services/AvailableModelCacheHandler.py +184 -0
  69. edsl/inference_services/AvailableModelFetcher.py +215 -0
  70. edsl/inference_services/AwsBedrock.py +118 -120
  71. edsl/inference_services/AzureAI.py +215 -217
  72. edsl/inference_services/DeepInfraService.py +18 -18
  73. edsl/inference_services/GoogleService.py +143 -148
  74. edsl/inference_services/GroqService.py +20 -20
  75. edsl/inference_services/InferenceServiceABC.py +80 -147
  76. edsl/inference_services/InferenceServicesCollection.py +138 -97
  77. edsl/inference_services/MistralAIService.py +120 -123
  78. edsl/inference_services/OllamaService.py +18 -18
  79. edsl/inference_services/OpenAIService.py +236 -224
  80. edsl/inference_services/PerplexityService.py +160 -163
  81. edsl/inference_services/ServiceAvailability.py +135 -0
  82. edsl/inference_services/TestService.py +90 -89
  83. edsl/inference_services/TogetherAIService.py +172 -170
  84. edsl/inference_services/data_structures.py +134 -0
  85. edsl/inference_services/models_available_cache.py +118 -118
  86. edsl/inference_services/rate_limits_cache.py +25 -25
  87. edsl/inference_services/registry.py +41 -41
  88. edsl/inference_services/write_available.py +10 -10
  89. edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
  90. edsl/jobs/Answers.py +43 -56
  91. edsl/jobs/FetchInvigilator.py +47 -0
  92. edsl/jobs/InterviewTaskManager.py +98 -0
  93. edsl/jobs/InterviewsConstructor.py +50 -0
  94. edsl/jobs/Jobs.py +823 -898
  95. edsl/jobs/JobsChecks.py +172 -147
  96. edsl/jobs/JobsComponentConstructor.py +189 -0
  97. edsl/jobs/JobsPrompts.py +270 -268
  98. edsl/jobs/JobsRemoteInferenceHandler.py +311 -239
  99. edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
  100. edsl/jobs/RequestTokenEstimator.py +30 -0
  101. edsl/jobs/__init__.py +1 -1
  102. edsl/jobs/async_interview_runner.py +138 -0
  103. edsl/jobs/buckets/BucketCollection.py +104 -63
  104. edsl/jobs/buckets/ModelBuckets.py +65 -65
  105. edsl/jobs/buckets/TokenBucket.py +283 -251
  106. edsl/jobs/buckets/TokenBucketAPI.py +211 -0
  107. edsl/jobs/buckets/TokenBucketClient.py +191 -0
  108. edsl/jobs/check_survey_scenario_compatibility.py +85 -0
  109. edsl/jobs/data_structures.py +120 -0
  110. edsl/jobs/decorators.py +35 -0
  111. edsl/jobs/interviews/Interview.py +396 -661
  112. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  113. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  114. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  115. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  116. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  117. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  118. edsl/jobs/interviews/ReportErrors.py +66 -66
  119. edsl/jobs/interviews/interview_status_enum.py +9 -9
  120. edsl/jobs/jobs_status_enums.py +9 -0
  121. edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
  122. edsl/jobs/results_exceptions_handler.py +98 -0
  123. edsl/jobs/runners/JobsRunnerAsyncio.py +151 -466
  124. edsl/jobs/runners/JobsRunnerStatus.py +297 -330
  125. edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
  126. edsl/jobs/tasks/TaskCreators.py +64 -64
  127. edsl/jobs/tasks/TaskHistory.py +470 -450
  128. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  129. edsl/jobs/tasks/task_status_enum.py +161 -163
  130. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  131. edsl/jobs/tokens/TokenUsage.py +34 -34
  132. edsl/language_models/ComputeCost.py +63 -0
  133. edsl/language_models/LanguageModel.py +626 -668
  134. edsl/language_models/ModelList.py +164 -155
  135. edsl/language_models/PriceManager.py +127 -0
  136. edsl/language_models/RawResponseHandler.py +106 -0
  137. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  138. edsl/language_models/ServiceDataSources.py +0 -0
  139. edsl/language_models/__init__.py +2 -3
  140. edsl/language_models/fake_openai_call.py +15 -15
  141. edsl/language_models/fake_openai_service.py +61 -61
  142. edsl/language_models/key_management/KeyLookup.py +63 -0
  143. edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
  144. edsl/language_models/key_management/KeyLookupCollection.py +38 -0
  145. edsl/language_models/key_management/__init__.py +0 -0
  146. edsl/language_models/key_management/models.py +131 -0
  147. edsl/language_models/model.py +256 -0
  148. edsl/language_models/repair.py +156 -156
  149. edsl/language_models/utilities.py +65 -64
  150. edsl/notebooks/Notebook.py +263 -258
  151. edsl/notebooks/NotebookToLaTeX.py +142 -0
  152. edsl/notebooks/__init__.py +1 -1
  153. edsl/prompts/Prompt.py +352 -362
  154. edsl/prompts/__init__.py +2 -2
  155. edsl/questions/ExceptionExplainer.py +77 -0
  156. edsl/questions/HTMLQuestion.py +103 -0
  157. edsl/questions/QuestionBase.py +518 -664
  158. edsl/questions/QuestionBasePromptsMixin.py +221 -217
  159. edsl/questions/QuestionBudget.py +227 -227
  160. edsl/questions/QuestionCheckBox.py +359 -359
  161. edsl/questions/QuestionExtract.py +180 -182
  162. edsl/questions/QuestionFreeText.py +113 -114
  163. edsl/questions/QuestionFunctional.py +166 -166
  164. edsl/questions/QuestionList.py +223 -231
  165. edsl/questions/QuestionMatrix.py +265 -0
  166. edsl/questions/QuestionMultipleChoice.py +330 -286
  167. edsl/questions/QuestionNumerical.py +151 -153
  168. edsl/questions/QuestionRank.py +314 -324
  169. edsl/questions/Quick.py +41 -41
  170. edsl/questions/SimpleAskMixin.py +74 -73
  171. edsl/questions/__init__.py +27 -26
  172. edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +334 -289
  173. edsl/questions/compose_questions.py +98 -98
  174. edsl/questions/data_structures.py +20 -0
  175. edsl/questions/decorators.py +21 -21
  176. edsl/questions/derived/QuestionLikertFive.py +76 -76
  177. edsl/questions/derived/QuestionLinearScale.py +90 -87
  178. edsl/questions/derived/QuestionTopK.py +93 -93
  179. edsl/questions/derived/QuestionYesNo.py +82 -82
  180. edsl/questions/descriptors.py +427 -413
  181. edsl/questions/loop_processor.py +149 -0
  182. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  183. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  184. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  185. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  186. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  187. edsl/questions/prompt_templates/question_list.jinja +17 -17
  188. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  189. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  190. edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +168 -161
  191. edsl/questions/question_registry.py +177 -177
  192. edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +71 -71
  193. edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +188 -174
  194. edsl/questions/response_validator_factory.py +34 -0
  195. edsl/questions/settings.py +12 -12
  196. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  197. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  198. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  199. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  200. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  201. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  202. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  203. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  204. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  205. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  206. edsl/questions/templates/list/question_presentation.jinja +5 -5
  207. edsl/questions/templates/matrix/__init__.py +1 -0
  208. edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
  209. edsl/questions/templates/matrix/question_presentation.jinja +20 -0
  210. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  211. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  212. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  213. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  214. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  215. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  216. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  217. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  218. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  219. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  220. edsl/results/CSSParameterizer.py +108 -108
  221. edsl/results/Dataset.py +587 -424
  222. edsl/results/DatasetExportMixin.py +594 -731
  223. edsl/results/DatasetTree.py +295 -275
  224. edsl/results/MarkdownToDocx.py +122 -0
  225. edsl/results/MarkdownToPDF.py +111 -0
  226. edsl/results/Result.py +557 -465
  227. edsl/results/Results.py +1183 -1165
  228. edsl/results/ResultsExportMixin.py +45 -43
  229. edsl/results/ResultsGGMixin.py +121 -121
  230. edsl/results/TableDisplay.py +125 -198
  231. edsl/results/TextEditor.py +50 -0
  232. edsl/results/__init__.py +2 -2
  233. edsl/results/file_exports.py +252 -0
  234. edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +33 -33
  235. edsl/results/{Selector.py → results_selector.py} +145 -135
  236. edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +98 -98
  237. edsl/results/smart_objects.py +96 -0
  238. edsl/results/table_data_class.py +12 -0
  239. edsl/results/table_display.css +77 -77
  240. edsl/results/table_renderers.py +118 -0
  241. edsl/results/tree_explore.py +115 -115
  242. edsl/scenarios/ConstructDownloadLink.py +109 -0
  243. edsl/scenarios/DocumentChunker.py +102 -0
  244. edsl/scenarios/DocxScenario.py +16 -0
  245. edsl/scenarios/FileStore.py +511 -632
  246. edsl/scenarios/PdfExtractor.py +40 -0
  247. edsl/scenarios/Scenario.py +498 -601
  248. edsl/scenarios/ScenarioHtmlMixin.py +65 -64
  249. edsl/scenarios/ScenarioList.py +1458 -1287
  250. edsl/scenarios/ScenarioListExportMixin.py +45 -52
  251. edsl/scenarios/ScenarioListPdfMixin.py +239 -261
  252. edsl/scenarios/__init__.py +3 -4
  253. edsl/scenarios/directory_scanner.py +96 -0
  254. edsl/scenarios/file_methods.py +85 -0
  255. edsl/scenarios/handlers/__init__.py +13 -0
  256. edsl/scenarios/handlers/csv.py +38 -0
  257. edsl/scenarios/handlers/docx.py +76 -0
  258. edsl/scenarios/handlers/html.py +37 -0
  259. edsl/scenarios/handlers/json.py +111 -0
  260. edsl/scenarios/handlers/latex.py +5 -0
  261. edsl/scenarios/handlers/md.py +51 -0
  262. edsl/scenarios/handlers/pdf.py +68 -0
  263. edsl/scenarios/handlers/png.py +39 -0
  264. edsl/scenarios/handlers/pptx.py +105 -0
  265. edsl/scenarios/handlers/py.py +294 -0
  266. edsl/scenarios/handlers/sql.py +313 -0
  267. edsl/scenarios/handlers/sqlite.py +149 -0
  268. edsl/scenarios/handlers/txt.py +33 -0
  269. edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +131 -127
  270. edsl/scenarios/scenario_selector.py +156 -0
  271. edsl/shared.py +1 -1
  272. edsl/study/ObjectEntry.py +173 -173
  273. edsl/study/ProofOfWork.py +113 -113
  274. edsl/study/SnapShot.py +80 -80
  275. edsl/study/Study.py +521 -528
  276. edsl/study/__init__.py +4 -4
  277. edsl/surveys/ConstructDAG.py +92 -0
  278. edsl/surveys/DAG.py +148 -148
  279. edsl/surveys/EditSurvey.py +221 -0
  280. edsl/surveys/InstructionHandler.py +100 -0
  281. edsl/surveys/Memory.py +31 -31
  282. edsl/surveys/MemoryManagement.py +72 -0
  283. edsl/surveys/MemoryPlan.py +244 -244
  284. edsl/surveys/Rule.py +327 -326
  285. edsl/surveys/RuleCollection.py +385 -387
  286. edsl/surveys/RuleManager.py +172 -0
  287. edsl/surveys/Simulator.py +75 -0
  288. edsl/surveys/Survey.py +1280 -1801
  289. edsl/surveys/SurveyCSS.py +273 -261
  290. edsl/surveys/SurveyExportMixin.py +259 -259
  291. edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +181 -179
  292. edsl/surveys/SurveyQualtricsImport.py +284 -284
  293. edsl/surveys/SurveyToApp.py +141 -0
  294. edsl/surveys/__init__.py +5 -3
  295. edsl/surveys/base.py +53 -53
  296. edsl/surveys/descriptors.py +60 -56
  297. edsl/surveys/instructions/ChangeInstruction.py +48 -49
  298. edsl/surveys/instructions/Instruction.py +56 -65
  299. edsl/surveys/instructions/InstructionCollection.py +82 -77
  300. edsl/templates/error_reporting/base.html +23 -23
  301. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  302. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  303. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  304. edsl/templates/error_reporting/interview_details.html +115 -115
  305. edsl/templates/error_reporting/interviews.html +19 -19
  306. edsl/templates/error_reporting/overview.html +4 -4
  307. edsl/templates/error_reporting/performance_plot.html +1 -1
  308. edsl/templates/error_reporting/report.css +73 -73
  309. edsl/templates/error_reporting/report.html +117 -117
  310. edsl/templates/error_reporting/report.js +25 -25
  311. edsl/test_h +1 -0
  312. edsl/tools/__init__.py +1 -1
  313. edsl/tools/clusters.py +192 -192
  314. edsl/tools/embeddings.py +27 -27
  315. edsl/tools/embeddings_plotting.py +118 -118
  316. edsl/tools/plotting.py +112 -112
  317. edsl/tools/summarize.py +18 -18
  318. edsl/utilities/PrettyList.py +56 -0
  319. edsl/utilities/SystemInfo.py +28 -28
  320. edsl/utilities/__init__.py +22 -22
  321. edsl/utilities/ast_utilities.py +25 -25
  322. edsl/utilities/data/Registry.py +6 -6
  323. edsl/utilities/data/__init__.py +1 -1
  324. edsl/utilities/data/scooter_results.json +1 -1
  325. edsl/utilities/decorators.py +77 -77
  326. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  327. edsl/utilities/gcp_bucket/example.py +50 -0
  328. edsl/utilities/interface.py +627 -627
  329. edsl/utilities/is_notebook.py +18 -0
  330. edsl/utilities/is_valid_variable_name.py +11 -0
  331. edsl/utilities/naming_utilities.py +263 -263
  332. edsl/utilities/remove_edsl_version.py +24 -0
  333. edsl/utilities/repair_functions.py +28 -28
  334. edsl/utilities/restricted_python.py +70 -70
  335. edsl/utilities/utilities.py +436 -424
  336. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/LICENSE +21 -21
  337. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/METADATA +13 -11
  338. edsl-0.1.39.dev4.dist-info/RECORD +361 -0
  339. edsl/language_models/KeyLookup.py +0 -30
  340. edsl/language_models/registry.py +0 -190
  341. edsl/language_models/unused/ReplicateBase.py +0 -83
  342. edsl/results/ResultsDBMixin.py +0 -238
  343. edsl-0.1.39.dev3.dist-info/RECORD +0 -277
  344. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/WHEEL +0 -0
edsl/results/Dataset.py CHANGED
@@ -1,424 +1,587 @@
1
- """A module to represent a dataset of observations."""
2
-
3
- from __future__ import annotations
4
- import random
5
- import json
6
- from collections import UserList
7
- from typing import Any, Union, Optional
8
- import sys
9
- import numpy as np
10
-
11
- from edsl.results.ResultsExportMixin import ResultsExportMixin
12
- from edsl.results.DatasetTree import Tree
13
- from edsl.results.TableDisplay import TableDisplay
14
-
15
-
16
- class Dataset(UserList, ResultsExportMixin):
17
- """A class to represent a dataset of observations."""
18
-
19
- def __init__(
20
- self, data: list[dict[str, Any]] = None, print_parameters: Optional[dict] = None
21
- ):
22
- """Initialize the dataset with the given data."""
23
- super().__init__(data)
24
- self.print_parameters = print_parameters
25
-
26
- def __len__(self) -> int:
27
- """Return the number of observations in the dataset.
28
-
29
- Need to override the __len__ method to return the number of observations in the dataset because
30
- otherwise, the UserList class would return the number of dictionaries in the dataset.
31
-
32
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
33
- >>> len(d)
34
- 4
35
- """
36
- _, values = list(self.data[0].items())[0]
37
- return len(values)
38
-
39
- def keys(self) -> list[str]:
40
- """Return the keys of the first observation in the dataset.
41
-
42
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
43
- >>> d.keys()
44
- ['a.b']
45
- """
46
- return [list(o.keys())[0] for o in self]
47
-
48
- def filter(self, expression):
49
- return self.to_scenario_list().filter(expression).to_dataset()
50
-
51
- def __repr__(self) -> str:
52
- """Return a string representation of the dataset."""
53
- return f"Dataset({self.data})"
54
-
55
- def write(self, filename: str, tablefmt: Optional[str] = None) -> None:
56
- return self.table(tablefmt=tablefmt).write(filename)
57
-
58
- def _repr_html_(self):
59
- # headers, data = self._tabular()
60
- return self.table(print_parameters=self.print_parameters)._repr_html_()
61
- # return TableDisplay(headers=headers, data=data, raw_data_set=self)
62
-
63
- def _tabular(self) -> tuple[list[str], list[list[Any]]]:
64
- # Extract headers
65
- headers = []
66
- for entry in self.data:
67
- headers.extend(entry.keys())
68
- headers = list(dict.fromkeys(headers)) # Ensure unique headers
69
-
70
- # Extract data
71
- max_len = max(len(values) for entry in self.data for values in entry.values())
72
- rows = []
73
- for i in range(max_len):
74
- row = []
75
- for header in headers:
76
- for entry in self.data:
77
- if header in entry:
78
- values = entry[header]
79
- row.append(values[i] if i < len(values) else None)
80
- break
81
- else:
82
- row.append(None) # Default to None if header is missing
83
- rows.append(row)
84
-
85
- return headers, rows
86
-
87
- def _key_to_value(self, key: str) -> Any:
88
- """Retrieve the value associated with the given key from the dataset.
89
-
90
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
91
- >>> d._key_to_value('a.b')
92
- [1, 2, 3, 4]
93
-
94
- >>> d._key_to_value('a')
95
- Traceback (most recent call last):
96
- ...
97
- KeyError: "Key 'a' not found in any of the dictionaries."
98
-
99
- """
100
- potential_matches = []
101
- for data_dict in self.data:
102
- data_key, data_values = list(data_dict.items())[0]
103
- if key == data_key:
104
- return data_values
105
- if key == data_key.split(".")[-1]:
106
- potential_matches.append((data_key, data_values))
107
-
108
- if len(potential_matches) == 1:
109
- return potential_matches[0][1]
110
- elif len(potential_matches) > 1:
111
- raise KeyError(
112
- f"Key '{key}' found in more than one location: {[m[0] for m in potential_matches]}"
113
- )
114
-
115
- raise KeyError(f"Key '{key}' not found in any of the dictionaries.")
116
-
117
- def first(self) -> dict[str, Any]:
118
- """Get the first value of the first key in the first dictionary.
119
-
120
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
121
- >>> d.first()
122
- 1
123
- """
124
-
125
- def get_values(d):
126
- """Get the values of the first key in the dictionary."""
127
- return list(d.values())[0]
128
-
129
- return get_values(self.data[0])[0]
130
-
131
- def print(self, pretty_labels=None, **kwargs):
132
- if "format" in kwargs:
133
- if kwargs["format"] not in ["html", "markdown", "rich", "latex"]:
134
- raise ValueError(f"Format '{kwargs['format']}' not supported.")
135
- if pretty_labels is None:
136
- pretty_labels = {}
137
- else:
138
- return self.rename(pretty_labels).print(**kwargs)
139
- return self.table()
140
-
141
- def rename(self, rename_dic) -> Dataset:
142
- new_data = []
143
- for observation in self.data:
144
- key, values = list(observation.items())[0]
145
- new_key = rename_dic.get(key, key)
146
- new_data.append({new_key: values})
147
- return Dataset(new_data)
148
-
149
- def select(self, *keys) -> Dataset:
150
- """Return a new dataset with only the selected keys.
151
-
152
- :param keys: The keys to select.
153
-
154
- >>> d = Dataset([{'a.b':[1,2,3,4]}, {'c.d':[5,6,7,8]}])
155
- >>> d.select('a.b')
156
- Dataset([{'a.b': [1, 2, 3, 4]}])
157
-
158
- >>> d.select('a.b', 'c.d')
159
- Dataset([{'a.b': [1, 2, 3, 4]}, {'c.d': [5, 6, 7, 8]}])
160
- """
161
- if isinstance(keys, str):
162
- keys = [keys]
163
-
164
- new_data = []
165
- for observation in self.data:
166
- observation_key = list(observation.keys())[0]
167
- if observation_key in keys:
168
- new_data.append(observation)
169
- return Dataset(new_data)
170
-
171
- def to_json(self):
172
- """Return a JSON representation of the dataset.
173
-
174
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
175
- >>> d.to_json()
176
- [{'a.b': [1, 2, 3, 4]}]
177
- """
178
- return json.loads(
179
- json.dumps(self.data)
180
- ) # janky but I want to make sure it's serializable & deserializable
181
-
182
- def shuffle(self, seed=None) -> Dataset:
183
- """Return a new dataset with the observations shuffled.
184
-
185
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
186
- >>> d.shuffle(seed=0)
187
- Dataset([{'a.b': [3, 1, 2, 4]}])
188
- """
189
- if seed is not None:
190
- random.seed(seed)
191
-
192
- indices = None
193
-
194
- for entry in self:
195
- key, values = list(entry.items())[0]
196
- if indices is None:
197
- indices = list(range(len(values)))
198
- random.shuffle(indices)
199
- entry[key] = [values[i] for i in indices]
200
-
201
- return self
202
-
203
- def expand(self, field):
204
- return self.to_scenario_list().expand(field).to_dataset()
205
-
206
- def sample(
207
- self,
208
- n: int = None,
209
- frac: float = None,
210
- with_replacement: bool = True,
211
- seed: Union[str, int, float] = None,
212
- ) -> Dataset:
213
- """Return a new dataset with a sample of the observations.
214
-
215
- :param n: The number of samples to take.
216
- :param frac: The fraction of samples to take.
217
- :param with_replacement: Whether to sample with replacement.
218
- :param seed: The seed for the random number generator.
219
-
220
- >>> d = Dataset([{'a.b':[1,2,3,4]}])
221
- >>> d.sample(n=2, seed=0, with_replacement=True)
222
- Dataset([{'a.b': [4, 4]}])
223
-
224
- >>> d.sample(n = 10, seed=0, with_replacement=False)
225
- Traceback (most recent call last):
226
- ...
227
- ValueError: Sample size cannot be greater than the number of available elements when sampling without replacement.
228
- """
229
- if seed is not None:
230
- random.seed(seed)
231
-
232
- # Validate the input for sampling parameters
233
- if n is None and frac is None:
234
- raise ValueError("Either 'n' or 'frac' must be provided for sampling.")
235
-
236
- if n is not None and frac is not None:
237
- raise ValueError("Only one of 'n' or 'frac' should be specified.")
238
-
239
- # Get the length of the lists from the first entry
240
- first_key, first_values = list(self[0].items())[0]
241
- total_length = len(first_values)
242
-
243
- # Determine the number of samples based on 'n' or 'frac'
244
- if n is None:
245
- n = int(total_length * frac)
246
-
247
- if not with_replacement and n > total_length:
248
- raise ValueError(
249
- "Sample size cannot be greater than the number of available elements when sampling without replacement."
250
- )
251
-
252
- # Sample indices based on the method chosen
253
- if with_replacement:
254
- indices = [random.randint(0, total_length - 1) for _ in range(n)]
255
- else:
256
- indices = random.sample(range(total_length), k=n)
257
-
258
- # Apply the same indices to all entries
259
- for entry in self:
260
- key, values = list(entry.items())[0]
261
- entry[key] = [values[i] for i in indices]
262
-
263
- return self
264
-
265
- def order_by(self, sort_key: str, reverse: bool = False) -> Dataset:
266
- """Return a new dataset with the observations sorted by the given key.
267
-
268
- :param sort_key: The key to sort the observations by.
269
- :param reverse: Whether to sort in reverse order.
270
-
271
- >>> d = Dataset([{'a':[1,2,3,4]}, {'b':[4,3,2,1]}])
272
- >>> d.order_by('a')
273
- Dataset([{'a': [1, 2, 3, 4]}, {'b': [4, 3, 2, 1]}])
274
-
275
- >>> d.order_by('a', reverse=True)
276
- Dataset([{'a': [4, 3, 2, 1]}, {'b': [1, 2, 3, 4]}])
277
-
278
- >>> d = Dataset([{'X.a':[1,2,3,4]}, {'X.b':[4,3,2,1]}])
279
- >>> d.order_by('a')
280
- Dataset([{'X.a': [1, 2, 3, 4]}, {'X.b': [4, 3, 2, 1]}])
281
-
282
-
283
- """
284
-
285
- def sort_indices(lst: list[Any]) -> list[int]:
286
- """
287
- Return the indices that would sort the list.
288
-
289
- :param lst: The list to be sorted.
290
- :return: A list of indices that would sort the list.
291
- """
292
- indices = np.argsort(lst).tolist()
293
- if reverse:
294
- indices.reverse()
295
- return indices
296
-
297
- number_found = 0
298
- for obs in self.data:
299
- key, values = list(obs.items())[0]
300
- # an obseration is {'a':[1,2,3,4]}
301
- # key = list(obs.keys())[0]
302
- if (
303
- sort_key == key or sort_key == key.split(".")[-1]
304
- ): # e.g., "age" in "scenario.age"
305
- relevant_values = values
306
- number_found += 1
307
-
308
- if number_found == 0:
309
- raise ValueError(f"Key '{sort_key}' not found in any of the dictionaries.")
310
- elif number_found > 1:
311
- raise ValueError(f"Key '{sort_key}' found in more than one dictionary.")
312
-
313
- # relevant_values = self._key_to_value(sort_key)
314
- sort_indices_list = sort_indices(relevant_values)
315
- new_data = []
316
- for observation in self.data:
317
- # print(observation)
318
- key, values = list(observation.items())[0]
319
- new_values = [values[i] for i in sort_indices_list]
320
- new_data.append({key: new_values})
321
-
322
- return Dataset(new_data)
323
-
324
- def tree(self, node_order: Optional[list[str]] = None) -> Tree:
325
- """Return a tree representation of the dataset.
326
-
327
- >>> d = Dataset([{'a':[1,2,3,4]}, {'b':[4,3,2,1]}])
328
- >>> d.tree()
329
- Tree(Dataset({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]}))
330
- """
331
- return Tree(self, node_order=node_order)
332
-
333
- def table(
334
- self,
335
- *fields,
336
- tablefmt: Optional[str] = None,
337
- max_rows: Optional[int] = None,
338
- pretty_labels=None,
339
- print_parameters: Optional[dict] = None,
340
- ):
341
- if pretty_labels is not None:
342
- new_fields = []
343
- for field in fields:
344
- new_fields.append(pretty_labels.get(field, field))
345
- return self.rename(pretty_labels).table(
346
- *new_fields, tablefmt=tablefmt, max_rows=max_rows
347
- )
348
-
349
- self.print_parameters = print_parameters
350
-
351
- headers, data = self._tabular()
352
-
353
- if tablefmt is not None:
354
- from tabulate import tabulate_formats
355
-
356
- if tablefmt not in tabulate_formats:
357
- print(
358
- f"Error: The following table format is not supported: {tablefmt}",
359
- file=sys.stderr,
360
- )
361
- print(f"\nAvailable formats are: {tabulate_formats}", file=sys.stderr)
362
- return None
363
-
364
- if max_rows:
365
- if len(data) < max_rows:
366
- max_rows = None
367
-
368
- if fields:
369
- full_data = data
370
- data = []
371
- indices = []
372
- for field in fields:
373
- if field not in headers:
374
- print(
375
- f"Error: The following field was not found: {field}",
376
- file=sys.stderr,
377
- )
378
- print(f"\nAvailable fields are: {headers}", file=sys.stderr)
379
-
380
- # Optional: Suggest similar fields using difflib
381
- import difflib
382
-
383
- matches = difflib.get_close_matches(field, headers)
384
- if matches:
385
- print(f"\nDid you mean: {matches[0]} ?", file=sys.stderr)
386
- return None
387
- indices.append(headers.index(field))
388
- headers = fields
389
- for row in full_data:
390
- data.append([row[i] for i in indices])
391
-
392
- if max_rows is not None:
393
- if max_rows > len(data):
394
- raise ValueError(
395
- "max_rows cannot be greater than the number of rows in the dataset."
396
- )
397
- last_line = data[-1]
398
- spaces = len(data[max_rows])
399
- filler_line = ["." for i in range(spaces)]
400
- data = data[:max_rows]
401
- data.append(filler_line)
402
- data.append(last_line)
403
-
404
- return TableDisplay(
405
- data=data, headers=headers, tablefmt=tablefmt, raw_data_set=self
406
- )
407
-
408
- def summary(self):
409
- return Dataset([{"num_observations": [len(self)], "keys": [self.keys()]}])
410
-
411
- @classmethod
412
- def example(self):
413
- """Return an example dataset.
414
-
415
- >>> Dataset.example()
416
- Dataset([{'a': [1, 2, 3, 4]}, {'b': [4, 3, 2, 1]}])
417
- """
418
- return Dataset([{"a": [1, 2, 3, 4]}, {"b": [4, 3, 2, 1]}])
419
-
420
-
421
- if __name__ == "__main__":
422
- import doctest
423
-
424
- doctest.testmod(optionflags=doctest.ELLIPSIS)
1
+ """A module to represent a dataset of observations."""
2
+
3
+ from __future__ import annotations
4
+ import sys
5
+ import json
6
+ import random
7
+ from collections import UserList
8
+ from typing import Any, Union, Optional
9
+
10
+ from edsl.results.ResultsExportMixin import ResultsExportMixin
11
+ from edsl.results.DatasetTree import Tree
12
+ from edsl.results.TableDisplay import TableDisplay
13
+ from edsl.Base import PersistenceMixin, HashingMixin
14
+
15
+
16
+ from edsl.results.smart_objects import FirstObject
17
+
18
+
19
+ class Dataset(UserList, ResultsExportMixin, PersistenceMixin, HashingMixin):
20
+ """A class to represent a dataset of observations."""
21
+
22
    def __init__(
        self,
        data: Optional[list[dict[str, Any]]] = None,
        print_parameters: Optional[dict] = None,
    ):
        """Initialize the dataset with the given data.

        :param data: a list of single-key dicts, each mapping a column name to
            its list of values; ``None`` yields an empty dataset.
        :param print_parameters: optional display options later consumed by
            ``table()`` / ``_repr_html_()``.
        """
        super().__init__(data)
        self.print_parameters = print_parameters
28
+
29
+ def __len__(self) -> int:
30
+ """Return the number of observations in the dataset.
31
+
32
+ Need to override the __len__ method to return the number of observations in the dataset because
33
+ otherwise, the UserList class would return the number of dictionaries in the dataset.
34
+
35
+ >>> d = Dataset([{'a.b':[1,2,3,4]}])
36
+ >>> len(d)
37
+ 4
38
+ """
39
+ _, values = list(self.data[0].items())[0]
40
+ return len(values)
41
+
42
+ def tail(self, n: int = 5) -> Dataset:
43
+ """Return the last n observations in the dataset.
44
+
45
+ >>> d = Dataset([{'a.b':[1,2,3,4]}])
46
+ >>> d.tail(2)
47
+ Dataset([{'a.b': [3, 4]}])
48
+ """
49
+ new_data = []
50
+ for observation in self.data:
51
+ key, values = list(observation.items())[0]
52
+ new_data.append({key: values[-n:]})
53
+ return Dataset(new_data)
54
+
55
+ def head(self, n: int = 5) -> Dataset:
56
+ """Return the first n observations in the dataset.
57
+
58
+ >>> d = Dataset([{'a.b':[1,2,3,4]}])
59
+ >>> d.head(2)
60
+ Dataset([{'a.b': [1, 2]}])
61
+ """
62
+ new_data = []
63
+ for observation in self.data:
64
+ key, values = list(observation.items())[0]
65
+ new_data.append({key: values[:n]})
66
+ return Dataset(new_data)
67
+
68
    def expand(self, field):
        # NOTE(review): this definition is shadowed by a later duplicate
        # `expand` in this class (which additionally calls .to_dataset()), so
        # this body never runs. One of the two definitions should be removed.
        return self.to_scenario_list().expand(field)
70
+
71
    def view(self):
        """Display the dataset in an interactive Perspective datagrid widget.

        Requires the third-party `perspective` package (imported lazily so the
        dependency stays optional).
        """
        from perspective.widget import PerspectiveWidget

        w = PerspectiveWidget(
            self.to_pandas(),
            plugin="Datagrid",
            aggregates={"datetime": "any"},
            # NOTE(review): sorts on a "date" column that may not exist in
            # every dataset — confirm against typical callers.
            sort=[["date", "desc"]],
        )
        return w
81
+
82
+ def keys(self) -> list[str]:
83
+ """Return the keys of the first observation in the dataset.
84
+
85
+ >>> d = Dataset([{'a.b':[1,2,3,4]}])
86
+ >>> d.keys()
87
+ ['a.b']
88
+ """
89
+ return [list(o.keys())[0] for o in self]
90
+
91
+ def filter(self, expression):
92
+ return self.to_scenario_list().filter(expression).to_dataset()
93
+
94
    def long(self, exclude_fields: list[str] = None) -> Dataset:
        """Melt the dataset into long format with 'row', 'key' and 'value' columns.

        :param exclude_fields: column names kept as id columns (repeated on
            every melted row) instead of being folded into key/value pairs.
        :return: a new Dataset; an empty input yields ``Dataset([])``.
        """
        headers, data = self._tabular()
        exclude_fields = exclude_fields or []

        # Initialize result dictionaries for each column
        result_dict = {}

        for index, row in enumerate(data):
            row_values = dict(zip(headers, row))
            excluded_values = {field: row_values[field] for field in exclude_fields}

            # Transform non-excluded fields to long format
            for header, value in row_values.items():
                if header not in exclude_fields:
                    # Initialize lists in result_dict if needed; done lazily on
                    # the first melted cell so an empty dataset produces no columns.
                    if not result_dict:
                        result_dict = {
                            "row": [],
                            "key": [],
                            "value": [],
                            **{field: [] for field in exclude_fields},
                        }

                    # Add values to each column
                    result_dict["row"].append(index)
                    result_dict["key"].append(header)
                    result_dict["value"].append(value)
                    for field in exclude_fields:
                        result_dict[field].append(excluded_values[field])

        return Dataset([{k: v} for k, v in result_dict.items()])
125
+
126
+ def wide(self) -> "Dataset":
127
+ """
128
+ Convert a long-format dataset (with row, key, value columns) to wide format.
129
+
130
+ Expected input format:
131
+ - A dataset with three columns containing dictionaries:
132
+ - row: list of row indices
133
+ - key: list of column names
134
+ - value: list of values
135
+
136
+ Returns:
137
+ - Dataset: A new dataset with columns corresponding to unique keys
138
+ """
139
+ # Extract the component arrays
140
+ row_dict = next(col for col in self if "row" in col)
141
+ key_dict = next(col for col in self if "key" in col)
142
+ value_dict = next(col for col in self if "value" in col)
143
+
144
+ rows = row_dict["row"]
145
+ keys = key_dict["key"]
146
+ values = value_dict["value"]
147
+
148
+ if not (len(rows) == len(keys) == len(values)):
149
+ raise ValueError("All input arrays must have the same length")
150
+
151
+ # Get unique keys and row indices
152
+ unique_keys = sorted(set(keys))
153
+ unique_rows = sorted(set(rows))
154
+
155
+ # Create a dictionary to store the result
156
+ result = {key: [None] * len(unique_rows) for key in unique_keys}
157
+
158
+ # Populate the result dictionary
159
+ for row_idx, key, value in zip(rows, keys, values):
160
+ # Find the position in the output array for this row
161
+ output_row_idx = unique_rows.index(row_idx)
162
+ result[key][output_row_idx] = value
163
+
164
+ # Convert to list of column dictionaries format
165
+ return Dataset([{key: values} for key, values in result.items()])
166
+
167
+ def __repr__(self) -> str:
168
+ """Return a string representation of the dataset."""
169
+ return f"Dataset({self.data})"
170
+
171
    def write(self, filename: str, tablefmt: Optional[str] = None) -> None:
        """Render the dataset as a table and write it to ``filename``.

        :param filename: destination path.
        :param tablefmt: optional tabulate format passed through to ``table()``.

        NOTE(review): this returns ``self.table(...).write(filename)``, so the
        ``-> None`` annotation is only accurate if TableDisplay.write returns
        None — confirm.
        """
        return self.table(tablefmt=tablefmt).write(filename)
173
+
174
    def _repr_html_(self):
        """Jupyter rich-display hook: delegate HTML rendering to TableDisplay."""
        # headers, data = self._tabular()
        return self.table(print_parameters=self.print_parameters)._repr_html_()
        # return TableDisplay(headers=headers, data=data, raw_data_set=self)
178
+
179
    def _tabular(self) -> tuple[list[str], list[list[Any]]]:
        """Flatten the column-oriented data into ``(headers, rows)``.

        Rows are aligned by index across columns; a column shorter than the
        longest one is padded with None.
        """
        # Extract headers, preserving first-seen order
        headers = []
        for entry in self.data:
            headers.extend(entry.keys())
        headers = list(dict.fromkeys(headers))  # Ensure unique headers

        # Extract data row by row up to the longest column
        max_len = max(len(values) for entry in self.data for values in entry.values())
        rows = []
        for i in range(max_len):
            row = []
            for header in headers:
                # Use the first entry that defines this header
                for entry in self.data:
                    if header in entry:
                        values = entry[header]
                        row.append(values[i] if i < len(values) else None)
                        break
                else:
                    row.append(None)  # Default to None if header is missing
            rows.append(row)

        return headers, rows
202
+
203
    def _key_to_value(self, key: str) -> Any:
        """Retrieve the value associated with the given key from the dataset.

        An exact key match wins immediately; otherwise a match on the suffix
        after the last '.' (e.g. 'age' for 'scenario.age') is accepted only if
        it is unique across columns.

        >>> d = Dataset([{'a.b':[1,2,3,4]}])
        >>> d._key_to_value('a.b')
        [1, 2, 3, 4]

        >>> d._key_to_value('a')
        Traceback (most recent call last):
        ...
        KeyError: "Key 'a' not found in any of the dictionaries."

        """
        potential_matches = []
        for data_dict in self.data:
            data_key, data_values = list(data_dict.items())[0]
            if key == data_key:
                return data_values
            # Collect suffix matches; ambiguity is resolved after the scan.
            if key == data_key.split(".")[-1]:
                potential_matches.append((data_key, data_values))

        if len(potential_matches) == 1:
            return potential_matches[0][1]
        elif len(potential_matches) > 1:
            raise KeyError(
                f"Key '{key}' found in more than one location: {[m[0] for m in potential_matches]}"
            )

        raise KeyError(f"Key '{key}' not found in any of the dictionaries.")
232
+
233
+ def first(self) -> dict[str, Any]:
234
+ """Get the first value of the first key in the first dictionary.
235
+
236
+ >>> d = Dataset([{'a.b':[1,2,3,4]}])
237
+ >>> d.first()
238
+ 1
239
+ """
240
+
241
+ def get_values(d):
242
+ """Get the values of the first key in the dictionary."""
243
+ return list(d.values())[0]
244
+
245
+ return FirstObject(get_values(self.data[0])[0])
246
+
247
    def latex(self, **kwargs):
        """Render the dataset as LaTeX via TableDisplay.

        NOTE(review): **kwargs is accepted but silently ignored — confirm
        whether it should be forwarded to table() or latex().
        """
        return self.table().latex()
249
+
250
+ def remove_prefix(self) -> Dataset:
251
+ new_data = []
252
+ for observation in self.data:
253
+ key, values = list(observation.items())[0]
254
+ if "." in key:
255
+ new_key = key.split(".")[1]
256
+ new_data.append({new_key: values})
257
+ else:
258
+ new_data.append({key: values})
259
+ return Dataset(new_data)
260
+
261
    def print(self, pretty_labels=None, **kwargs):
        """Print the dataset as a table.

        :param pretty_labels: optional mapping of column names to display names;
            when provided, the dataset is renamed first and print() re-entered.
        :raises ValueError: if a 'format' kwarg is given and is not one of
            html/markdown/rich/latex.

        NOTE(review): the 'format' kwarg is only validated, never forwarded to
        table() — confirm whether the chosen format should actually be applied.
        """
        if "format" in kwargs:
            if kwargs["format"] not in ["html", "markdown", "rich", "latex"]:
                raise ValueError(f"Format '{kwargs['format']}' not supported.")
        if pretty_labels is None:
            pretty_labels = {}
        else:
            # Rename first, then re-enter without pretty_labels to avoid recursion.
            return self.rename(pretty_labels).print(**kwargs)
        return self.table()
270
+
271
+ def rename(self, rename_dic) -> Dataset:
272
+ new_data = []
273
+ for observation in self.data:
274
+ key, values = list(observation.items())[0]
275
+ new_key = rename_dic.get(key, key)
276
+ new_data.append({new_key: values})
277
+ return Dataset(new_data)
278
+
279
    def merge(self, other: Dataset, by_x, by_y) -> Dataset:
        """Left-merge this dataset with ``other`` on the given keys.

        :param other: the dataset to merge into this one.
        :param by_x: join column name(s) in this dataset.
        :param by_y: join column name(s) in ``other``.
        :return: a new Dataset built from the merged pandas DataFrame.
        """
        df1 = self.to_pandas()
        df2 = other.to_pandas()
        merged_df = df1.merge(df2, how="left", left_on=by_x, right_on=by_y)
        return Dataset.from_pandas_dataframe(merged_df)
288
+
289
+ def to(self, survey_or_question: Union["Survey", "QuestionBase"]) -> "Jobs":
290
+ from edsl.surveys.Survey import Survey
291
+ from edsl.questions.QuestionBase import QuestionBase
292
+
293
+ if isinstance(survey_or_question, Survey):
294
+ return survey_or_question.by(self.to_scenario_list())
295
+ elif isinstance(survey_or_question, QuestionBase):
296
+ return Survey([survey_or_question]).by(self.to_scenario_list())
297
+
298
+ def select(self, *keys) -> Dataset:
299
+ """Return a new dataset with only the selected keys.
300
+
301
+ :param keys: The keys to select.
302
+
303
+ >>> d = Dataset([{'a.b':[1,2,3,4]}, {'c.d':[5,6,7,8]}])
304
+ >>> d.select('a.b')
305
+ Dataset([{'a.b': [1, 2, 3, 4]}])
306
+
307
+ >>> d.select('a.b', 'c.d')
308
+ Dataset([{'a.b': [1, 2, 3, 4]}, {'c.d': [5, 6, 7, 8]}])
309
+ """
310
+ if isinstance(keys, str):
311
+ keys = [keys]
312
+
313
+ new_data = []
314
+ for observation in self.data:
315
+ observation_key = list(observation.keys())[0]
316
+ if observation_key in keys:
317
+ new_data.append(observation)
318
+ return Dataset(new_data)
319
+
320
+ def to_json(self):
321
+ """Return a JSON representation of the dataset.
322
+
323
+ >>> d = Dataset([{'a.b':[1,2,3,4]}])
324
+ >>> d.to_json()
325
+ [{'a.b': [1, 2, 3, 4]}]
326
+ """
327
+ return json.loads(
328
+ json.dumps(self.data)
329
+ ) # janky but I want to make sure it's serializable & deserializable
330
+
331
    def shuffle(self, seed=None) -> Dataset:
        """Shuffle the observations in place and return the dataset.

        The same shuffled index order is applied to every column so rows stay
        aligned. NOTE: this mutates ``self`` and returns it; it does not build
        a new dataset.

        :param seed: optional seed for the global ``random`` module.

        >>> d = Dataset([{'a.b':[1,2,3,4]}])
        >>> d.shuffle(seed=0)
        Dataset([{'a.b': [3, 1, 2, 4]}])
        """
        if seed is not None:
            random.seed(seed)

        indices = None

        for entry in self:
            key, values = list(entry.items())[0]
            if indices is None:
                # Build one permutation from the first column and reuse it for
                # all columns so rows remain aligned.
                indices = list(range(len(values)))
                random.shuffle(indices)
            entry[key] = [values[i] for i in indices]

        return self
351
+
352
    def expand(self, field):
        """Expand a column of list values into multiple rows (via ScenarioList).

        NOTE(review): this duplicates and shadows an earlier `expand`
        definition in this class; this is the one that actually runs.
        """
        return self.to_scenario_list().expand(field).to_dataset()
354
+
355
    def sample(
        self,
        n: int = None,
        frac: float = None,
        with_replacement: bool = True,
        seed: Union[str, int, float] = None,
    ) -> Dataset:
        """Sample observations, applying the same row selection to every column.

        NOTE: this mutates ``self`` in place and returns it; it does not build
        a new dataset.

        :param n: The number of samples to take.
        :param frac: The fraction of samples to take (mutually exclusive with n).
        :param with_replacement: Whether to sample with replacement.
        :param seed: The seed for the random number generator.

        >>> d = Dataset([{'a.b':[1,2,3,4]}])
        >>> d.sample(n=2, seed=0, with_replacement=True)
        Dataset([{'a.b': [4, 4]}])

        >>> d.sample(n = 10, seed=0, with_replacement=False)
        Traceback (most recent call last):
        ...
        ValueError: Sample size cannot be greater than the number of available elements when sampling without replacement.
        """
        if seed is not None:
            random.seed(seed)

        # Validate the input for sampling parameters
        if n is None and frac is None:
            raise ValueError("Either 'n' or 'frac' must be provided for sampling.")

        if n is not None and frac is not None:
            raise ValueError("Only one of 'n' or 'frac' should be specified.")

        # Get the length of the lists from the first entry
        first_key, first_values = list(self[0].items())[0]
        total_length = len(first_values)

        # Determine the number of samples based on 'n' or 'frac'
        if n is None:
            n = int(total_length * frac)

        if not with_replacement and n > total_length:
            raise ValueError(
                "Sample size cannot be greater than the number of available elements when sampling without replacement."
            )

        # Sample indices based on the method chosen
        if with_replacement:
            indices = [random.randint(0, total_length - 1) for _ in range(n)]
        else:
            indices = random.sample(range(total_length), k=n)

        # Apply the same indices to all entries so rows stay aligned
        for entry in self:
            key, values = list(entry.items())[0]
            entry[key] = [values[i] for i in indices]

        return self
413
+
414
    def order_by(self, sort_key: str, reverse: bool = False) -> Dataset:
        """Return a new dataset with the observations sorted by the given key.

        :param sort_key: The key to sort the observations by. May be the full
            key or its suffix after the last '.' (e.g. "age" for "scenario.age"),
            as long as the suffix match is unique.
        :param reverse: Whether to sort in reverse order.
        :raises ValueError: if the key matches no column, or more than one.

        >>> d = Dataset([{'a':[1,2,3,4]}, {'b':[4,3,2,1]}])
        >>> d.order_by('a')
        Dataset([{'a': [1, 2, 3, 4]}, {'b': [4, 3, 2, 1]}])

        >>> d.order_by('a', reverse=True)
        Dataset([{'a': [4, 3, 2, 1]}, {'b': [1, 2, 3, 4]}])

        >>> d = Dataset([{'X.a':[1,2,3,4]}, {'X.b':[4,3,2,1]}])
        >>> d.order_by('a')
        Dataset([{'X.a': [1, 2, 3, 4]}, {'X.b': [4, 3, 2, 1]}])


        """
        import numpy as np

        def sort_indices(lst: list[Any]) -> list[int]:
            """
            Return the indices that would sort the list.

            :param lst: The list to be sorted.
            :return: A list of indices that would sort the list.
            """
            # np.argsort gives the permutation; reversing it afterwards yields
            # descending order.
            indices = np.argsort(lst).tolist()
            if reverse:
                indices.reverse()
            return indices

        number_found = 0
        for obs in self.data:
            key, values = list(obs.items())[0]
            # an observation is {'a':[1,2,3,4]}
            # key = list(obs.keys())[0]
            if (
                sort_key == key or sort_key == key.split(".")[-1]
            ):  # e.g., "age" in "scenario.age"
                relevant_values = values
                number_found += 1

        if number_found == 0:
            raise ValueError(f"Key '{sort_key}' not found in any of the dictionaries.")
        elif number_found > 1:
            raise ValueError(f"Key '{sort_key}' found in more than one dictionary.")

        # relevant_values = self._key_to_value(sort_key)
        # Apply the same permutation to every column so rows stay aligned.
        sort_indices_list = sort_indices(relevant_values)
        new_data = []
        for observation in self.data:
            # print(observation)
            key, values = list(observation.items())[0]
            new_values = [values[i] for i in sort_indices_list]
            new_data.append({key: new_values})

        return Dataset(new_data)
473
+
474
    def tree(self, node_order: Optional[list[str]] = None) -> Tree:
        """Return a tree representation of the dataset.

        :param node_order: optional list of keys controlling nesting order.

        >>> d = Dataset([{'a':[1,2,3,4]}, {'b':[4,3,2,1]}])
        >>> d.tree()
        Tree(Dataset({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]}))
        """
        return Tree(self, node_order=node_order)
482
+
483
+ def table(
484
+ self,
485
+ *fields,
486
+ tablefmt: Optional[str] = None,
487
+ max_rows: Optional[int] = None,
488
+ pretty_labels=None,
489
+ print_parameters: Optional[dict] = None,
490
+ ):
491
+ if pretty_labels is not None:
492
+ new_fields = []
493
+ for field in fields:
494
+ new_fields.append(pretty_labels.get(field, field))
495
+ return self.rename(pretty_labels).table(
496
+ *new_fields, tablefmt=tablefmt, max_rows=max_rows
497
+ )
498
+
499
+ self.print_parameters = print_parameters
500
+
501
+ headers, data = self._tabular()
502
+
503
+ if tablefmt is not None:
504
+ from tabulate import tabulate_formats
505
+
506
+ if tablefmt not in tabulate_formats:
507
+ print(
508
+ f"Error: The following table format is not supported: {tablefmt}",
509
+ file=sys.stderr,
510
+ )
511
+ print(f"\nAvailable formats are: {tabulate_formats}", file=sys.stderr)
512
+ return None
513
+
514
+ if max_rows:
515
+ if len(data) < max_rows:
516
+ max_rows = None
517
+
518
+ if fields:
519
+ full_data = data
520
+ data = []
521
+ indices = []
522
+ for field in fields:
523
+ if field not in headers:
524
+ print(
525
+ f"Error: The following field was not found: {field}",
526
+ file=sys.stderr,
527
+ )
528
+ print(f"\nAvailable fields are: {headers}", file=sys.stderr)
529
+
530
+ # Optional: Suggest similar fields using difflib
531
+ import difflib
532
+
533
+ matches = difflib.get_close_matches(field, headers)
534
+ if matches:
535
+ print(f"\nDid you mean: {matches[0]} ?", file=sys.stderr)
536
+ return None
537
+ indices.append(headers.index(field))
538
+ headers = fields
539
+ for row in full_data:
540
+ data.append([row[i] for i in indices])
541
+
542
+ if max_rows is not None:
543
+ if max_rows > len(data):
544
+ raise ValueError(
545
+ "max_rows cannot be greater than the number of rows in the dataset."
546
+ )
547
+ last_line = data[-1]
548
+ spaces = len(data[max_rows])
549
+ filler_line = ["." for i in range(spaces)]
550
+ data = data[:max_rows]
551
+ data.append(filler_line)
552
+ data.append(last_line)
553
+
554
+ return TableDisplay(
555
+ data=data, headers=headers, tablefmt=tablefmt, raw_data_set=self
556
+ )
557
+
558
    def summary(self):
        """Return a one-row dataset with the observation count and the keys.

        NOTE(review): unlike the one-key-per-dict convention used elsewhere in
        this class, this packs two columns into a single dict entry — confirm
        downstream consumers expect that shape.
        """
        return Dataset([{"num_observations": [len(self)], "keys": [self.keys()]}])
560
+
561
+ @classmethod
562
+ def example(self, n: int = None):
563
+ """Return an example dataset.
564
+
565
+ >>> Dataset.example()
566
+ Dataset([{'a': [1, 2, 3, 4]}, {'b': [4, 3, 2, 1]}])
567
+ """
568
+ if n is None:
569
+ return Dataset([{"a": [1, 2, 3, 4]}, {"b": [4, 3, 2, 1]}])
570
+ else:
571
+ return Dataset([{"a": [1] * n}, {"b": [2] * n}])
572
+
573
+ @classmethod
574
+ def from_edsl_object(cls, object):
575
+ d = object.to_dict(add_edsl_version=False)
576
+ return cls([{"key": list(d.keys())}, {"value": list(d.values())}])
577
+
578
+ @classmethod
579
+ def from_pandas_dataframe(cls, df):
580
+ result = cls([{col: df[col].tolist()} for col in df.columns])
581
+ return result
582
+
583
+
584
if __name__ == "__main__":
    import doctest

    # Run this module's doctests; ELLIPSIS lets examples elide long output.
    doctest.testmod(optionflags=doctest.ELLIPSIS)