lionagi 0.5.4__py3-none-any.whl → 0.6.0__py3-none-any.whl

Files changed (428)
  1. lionagi/__init__.py +16 -24
  2. lionagi/{core/_class_registry.py → _class_registry.py} +51 -10
  3. lionagi/_errors.py +35 -0
  4. lionagi/libs/__init__.py +3 -0
  5. lionagi/libs/compress/__init__.py +3 -0
  6. lionagi/libs/compress/models.py +6 -2
  7. lionagi/libs/compress/utils.py +4 -16
  8. lionagi/libs/file/__init__.py +3 -0
  9. lionagi/libs/file/chunk.py +4 -0
  10. lionagi/libs/file/file_ops.py +4 -0
  11. lionagi/libs/file/params.py +4 -41
  12. lionagi/libs/file/process.py +4 -0
  13. lionagi/libs/file/save.py +5 -1
  14. lionagi/libs/{parse/flatten → nested}/flatten.py +4 -0
  15. lionagi/libs/{parse/nested → nested}/nfilter.py +4 -0
  16. lionagi/libs/{parse/nested → nested}/nget.py +6 -1
  17. lionagi/libs/{parse/nested → nested}/ninsert.py +5 -1
  18. lionagi/libs/{parse/nested → nested}/nmerge.py +4 -0
  19. lionagi/libs/{parse/nested → nested}/npop.py +5 -2
  20. lionagi/libs/{parse/nested → nested}/nset.py +6 -1
  21. lionagi/libs/{parse/flatten → nested}/unflatten.py +4 -0
  22. lionagi/libs/{parse/nested → nested}/utils.py +5 -1
  23. lionagi/libs/package/__init__.py +3 -0
  24. lionagi/libs/package/imports.py +6 -2
  25. lionagi/libs/package/management.py +7 -3
  26. lionagi/libs/package/params.py +4 -0
  27. lionagi/libs/package/system.py +4 -0
  28. lionagi/libs/parse.py +30 -0
  29. lionagi/libs/{parse/json → schema}/as_readable.py +10 -4
  30. lionagi/libs/{parse/string_parse/code_block.py → schema/extract_code_block.py} +4 -0
  31. lionagi/libs/{parse/string_parse/docstring.py → schema/extract_docstring.py} +4 -0
  32. lionagi/libs/{parse/string_parse/function_.py → schema/function_to_schema.py} +21 -9
  33. lionagi/libs/{parse/json/schema.py → schema/json_schema.py} +5 -1
  34. lionagi/libs/validate/common_field_validators.py +170 -0
  35. lionagi/libs/{parse/validate/keys.py → validate/fuzzy_match_keys.py} +42 -8
  36. lionagi/libs/{parse/validate/mapping.py → validate/fuzzy_validate_mapping.py} +41 -6
  37. lionagi/libs/{string_similarity/algorithms.py → validate/string_similarity.py} +115 -1
  38. lionagi/libs/{parse/validate/boolean.py → validate/validate_boolean.py} +42 -3
  39. lionagi/operations/__init__.py +13 -3
  40. lionagi/operations/brainstorm/__init__.py +3 -3
  41. lionagi/operations/brainstorm/brainstorm.py +33 -19
  42. lionagi/operations/brainstorm/prompt.py +4 -0
  43. lionagi/operations/plan/__init__.py +4 -0
  44. lionagi/operations/plan/plan.py +16 -13
  45. lionagi/operations/plan/prompt.py +4 -0
  46. lionagi/operations/select/__init__.py +4 -0
  47. lionagi/operations/select/prompt.py +4 -0
  48. lionagi/operations/select/select.py +1 -1
  49. lionagi/operations/select/utils.py +4 -4
  50. lionagi/{strategies → operations/strategies}/base.py +6 -2
  51. lionagi/{strategies → operations/strategies}/concurrent.py +8 -5
  52. lionagi/{strategies → operations/strategies}/concurrent_chunk.py +6 -3
  53. lionagi/{strategies → operations/strategies}/concurrent_sequential_chunk.py +8 -4
  54. lionagi/{strategies → operations/strategies}/params.py +10 -6
  55. lionagi/{strategies → operations/strategies}/sequential.py +6 -2
  56. lionagi/{strategies → operations/strategies}/sequential_chunk.py +7 -3
  57. lionagi/{strategies → operations/strategies}/sequential_concurrent_chunk.py +9 -4
  58. lionagi/{strategies → operations/strategies}/utils.py +6 -3
  59. lionagi/{core/models/__init__.py → operations/types.py} +3 -1
  60. lionagi/operations/utils.py +6 -3
  61. lionagi/operatives/action/function_calling.py +136 -0
  62. lionagi/operatives/action/manager.py +239 -0
  63. lionagi/operatives/action/request_response_model.py +90 -0
  64. lionagi/operatives/action/tool.py +141 -0
  65. lionagi/{protocols/operatives/action.py → operatives/action/utils.py} +52 -90
  66. lionagi/{core → operatives}/forms/base.py +9 -4
  67. lionagi/{core → operatives}/forms/form.py +8 -13
  68. lionagi/{core → operatives}/forms/report.py +5 -3
  69. lionagi/operatives/instruct/base.py +79 -0
  70. lionagi/operatives/instruct/instruct.py +105 -0
  71. lionagi/operatives/instruct/instruct_collection.py +52 -0
  72. lionagi/operatives/instruct/node.py +13 -0
  73. lionagi/{protocols/operatives → operatives/instruct}/prompts.py +0 -34
  74. lionagi/{protocols/operatives → operatives/instruct}/reason.py +14 -7
  75. lionagi/{integrations/anthropic_/version.py → operatives/manager.py} +5 -1
  76. lionagi/operatives/models/field_model.py +194 -0
  77. lionagi/operatives/models/model_params.py +307 -0
  78. lionagi/{core → operatives}/models/note.py +20 -28
  79. lionagi/{core → operatives}/models/operable_model.py +153 -71
  80. lionagi/{core → operatives}/models/schema_model.py +4 -3
  81. lionagi/{protocols/operatives → operatives}/operative.py +10 -7
  82. lionagi/{protocols/operatives → operatives}/step.py +67 -26
  83. lionagi/operatives/types.py +69 -0
  84. lionagi/protocols/_adapter.py +224 -0
  85. lionagi/protocols/_concepts.py +94 -0
  86. lionagi/protocols/generic/element.py +460 -0
  87. lionagi/protocols/generic/event.py +177 -0
  88. lionagi/protocols/generic/log.py +237 -0
  89. lionagi/{core → protocols}/generic/pile.py +172 -131
  90. lionagi/protocols/generic/processor.py +316 -0
  91. lionagi/protocols/generic/progression.py +500 -0
  92. lionagi/protocols/graph/edge.py +166 -0
  93. lionagi/protocols/graph/graph.py +290 -0
  94. lionagi/protocols/graph/node.py +109 -0
  95. lionagi/protocols/mail/exchange.py +116 -0
  96. lionagi/protocols/mail/mail.py +25 -0
  97. lionagi/protocols/mail/mailbox.py +47 -0
  98. lionagi/protocols/mail/manager.py +168 -0
  99. lionagi/protocols/mail/package.py +55 -0
  100. lionagi/protocols/messages/action_request.py +165 -0
  101. lionagi/protocols/messages/action_response.py +132 -0
  102. lionagi/{core/communication → protocols/messages}/assistant_response.py +65 -79
  103. lionagi/protocols/messages/base.py +73 -0
  104. lionagi/protocols/messages/instruction.py +582 -0
  105. lionagi/protocols/messages/manager.py +429 -0
  106. lionagi/protocols/messages/message.py +216 -0
  107. lionagi/protocols/messages/system.py +115 -0
  108. lionagi/protocols/messages/templates/assistant_response.jinja2 +6 -0
  109. lionagi/{core/communication → protocols/messages}/templates/instruction_message.jinja2 +2 -2
  110. lionagi/protocols/types.py +96 -0
  111. lionagi/service/__init__.py +10 -12
  112. lionagi/service/endpoints/base.py +517 -0
  113. lionagi/service/endpoints/chat_completion.py +102 -0
  114. lionagi/service/endpoints/match_endpoint.py +60 -0
  115. lionagi/service/endpoints/rate_limited_processor.py +145 -0
  116. lionagi/service/endpoints/token_calculator.py +209 -0
  117. lionagi/service/imodel.py +264 -92
  118. lionagi/service/manager.py +45 -0
  119. lionagi/service/providers/anthropic_/messages.py +64 -0
  120. lionagi/service/providers/groq_/chat_completions.py +56 -0
  121. lionagi/service/providers/openai_/chat_completions.py +62 -0
  122. lionagi/service/providers/openrouter_/chat_completions.py +62 -0
  123. lionagi/service/providers/perplexity_/__init__.py +3 -0
  124. lionagi/service/providers/perplexity_/chat_completions.py +40 -0
  125. lionagi/session/__init__.py +3 -0
  126. lionagi/session/branch.py +1287 -0
  127. lionagi/session/session.py +296 -0
  128. lionagi/settings.py +62 -118
  129. lionagi/utils.py +2386 -0
  130. lionagi/version.py +1 -1
  131. {lionagi-0.5.4.dist-info → lionagi-0.6.0.dist-info}/METADATA +7 -6
  132. lionagi-0.6.0.dist-info/RECORD +160 -0
  133. lionagi/core/action/action_manager.py +0 -287
  134. lionagi/core/action/base.py +0 -109
  135. lionagi/core/action/function_calling.py +0 -153
  136. lionagi/core/action/tool.py +0 -202
  137. lionagi/core/action/types.py +0 -16
  138. lionagi/core/communication/action_request.py +0 -163
  139. lionagi/core/communication/action_response.py +0 -149
  140. lionagi/core/communication/base_mail.py +0 -49
  141. lionagi/core/communication/instruction.py +0 -376
  142. lionagi/core/communication/message.py +0 -286
  143. lionagi/core/communication/message_manager.py +0 -530
  144. lionagi/core/communication/system.py +0 -116
  145. lionagi/core/communication/templates/assistant_response.jinja2 +0 -2
  146. lionagi/core/communication/types.py +0 -27
  147. lionagi/core/communication/utils.py +0 -254
  148. lionagi/core/forms/types.py +0 -13
  149. lionagi/core/generic/component.py +0 -422
  150. lionagi/core/generic/edge.py +0 -163
  151. lionagi/core/generic/element.py +0 -199
  152. lionagi/core/generic/graph.py +0 -377
  153. lionagi/core/generic/log.py +0 -151
  154. lionagi/core/generic/log_manager.py +0 -320
  155. lionagi/core/generic/node.py +0 -11
  156. lionagi/core/generic/progression.py +0 -395
  157. lionagi/core/generic/types.py +0 -23
  158. lionagi/core/generic/utils.py +0 -53
  159. lionagi/core/models/base.py +0 -28
  160. lionagi/core/models/field_model.py +0 -145
  161. lionagi/core/models/model_params.py +0 -194
  162. lionagi/core/models/types.py +0 -19
  163. lionagi/core/session/branch.py +0 -130
  164. lionagi/core/session/branch_mixins.py +0 -544
  165. lionagi/core/session/session.py +0 -163
  166. lionagi/core/session/types.py +0 -8
  167. lionagi/core/typing/__init__.py +0 -9
  168. lionagi/core/typing/_concepts.py +0 -173
  169. lionagi/core/typing/_id.py +0 -104
  170. lionagi/core/typing/_pydantic.py +0 -33
  171. lionagi/core/typing/_typing.py +0 -54
  172. lionagi/integrations/__init__.py +0 -0
  173. lionagi/integrations/_services.py +0 -17
  174. lionagi/integrations/anthropic_/AnthropicModel.py +0 -273
  175. lionagi/integrations/anthropic_/AnthropicService.py +0 -117
  176. lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +0 -7
  177. lionagi/integrations/anthropic_/anthropic_price_data.yaml +0 -14
  178. lionagi/integrations/anthropic_/api_endpoints/api_request.py +0 -277
  179. lionagi/integrations/anthropic_/api_endpoints/data_models.py +0 -40
  180. lionagi/integrations/anthropic_/api_endpoints/match_response.py +0 -119
  181. lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +0 -14
  182. lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +0 -74
  183. lionagi/integrations/anthropic_/api_endpoints/messages/response/__init__.py +0 -0
  184. lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +0 -32
  185. lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +0 -101
  186. lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +0 -25
  187. lionagi/integrations/groq_/GroqModel.py +0 -325
  188. lionagi/integrations/groq_/GroqService.py +0 -151
  189. lionagi/integrations/groq_/api_endpoints/__init__.py +0 -0
  190. lionagi/integrations/groq_/api_endpoints/data_models.py +0 -187
  191. lionagi/integrations/groq_/api_endpoints/groq_request.py +0 -288
  192. lionagi/integrations/groq_/api_endpoints/match_response.py +0 -106
  193. lionagi/integrations/groq_/api_endpoints/response_utils.py +0 -105
  194. lionagi/integrations/groq_/groq_max_output_token_data.yaml +0 -21
  195. lionagi/integrations/groq_/groq_price_data.yaml +0 -58
  196. lionagi/integrations/groq_/groq_rate_limits.yaml +0 -105
  197. lionagi/integrations/groq_/version.py +0 -5
  198. lionagi/integrations/litellm_/imodel.py +0 -71
  199. lionagi/integrations/ollama_/OllamaModel.py +0 -244
  200. lionagi/integrations/ollama_/OllamaService.py +0 -142
  201. lionagi/integrations/ollama_/api_endpoints/api_request.py +0 -179
  202. lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +0 -31
  203. lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +0 -46
  204. lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +0 -67
  205. lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +0 -49
  206. lionagi/integrations/ollama_/api_endpoints/completion/__init__.py +0 -0
  207. lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +0 -72
  208. lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +0 -59
  209. lionagi/integrations/ollama_/api_endpoints/data_models.py +0 -15
  210. lionagi/integrations/ollama_/api_endpoints/embedding/__init__.py +0 -0
  211. lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +0 -33
  212. lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +0 -29
  213. lionagi/integrations/ollama_/api_endpoints/match_data_model.py +0 -62
  214. lionagi/integrations/ollama_/api_endpoints/match_response.py +0 -190
  215. lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +0 -13
  216. lionagi/integrations/ollama_/api_endpoints/model/create_model.py +0 -28
  217. lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +0 -11
  218. lionagi/integrations/ollama_/api_endpoints/model/list_model.py +0 -60
  219. lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +0 -34
  220. lionagi/integrations/ollama_/api_endpoints/model/push_model.py +0 -35
  221. lionagi/integrations/ollama_/api_endpoints/model/show_model.py +0 -36
  222. lionagi/integrations/ollama_/api_endpoints/option_models.py +0 -68
  223. lionagi/integrations/openai_/OpenAIModel.py +0 -423
  224. lionagi/integrations/openai_/OpenAIService.py +0 -426
  225. lionagi/integrations/openai_/__init__.py +0 -0
  226. lionagi/integrations/openai_/api_endpoints/__init__.py +0 -3
  227. lionagi/integrations/openai_/api_endpoints/api_request.py +0 -277
  228. lionagi/integrations/openai_/api_endpoints/audio/__init__.py +0 -9
  229. lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +0 -34
  230. lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +0 -136
  231. lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +0 -41
  232. lionagi/integrations/openai_/api_endpoints/audio/types.py +0 -41
  233. lionagi/integrations/openai_/api_endpoints/batch/__init__.py +0 -17
  234. lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +0 -146
  235. lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +0 -7
  236. lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +0 -26
  237. lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +0 -37
  238. lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +0 -65
  239. lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +0 -7
  240. lionagi/integrations/openai_/api_endpoints/batch/types.py +0 -4
  241. lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +0 -1
  242. lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +0 -39
  243. lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +0 -121
  244. lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +0 -221
  245. lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +0 -71
  246. lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +0 -14
  247. lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +0 -17
  248. lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +0 -54
  249. lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +0 -18
  250. lionagi/integrations/openai_/api_endpoints/chat_completions/response/__init__.py +0 -0
  251. lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +0 -62
  252. lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +0 -16
  253. lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +0 -47
  254. lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +0 -25
  255. lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +0 -99
  256. lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +0 -8
  257. lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +0 -24
  258. lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +0 -46
  259. lionagi/integrations/openai_/api_endpoints/data_models.py +0 -23
  260. lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +0 -3
  261. lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +0 -79
  262. lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +0 -67
  263. lionagi/integrations/openai_/api_endpoints/files/__init__.py +0 -11
  264. lionagi/integrations/openai_/api_endpoints/files/delete_file.py +0 -20
  265. lionagi/integrations/openai_/api_endpoints/files/file_models.py +0 -56
  266. lionagi/integrations/openai_/api_endpoints/files/list_files.py +0 -27
  267. lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +0 -9
  268. lionagi/integrations/openai_/api_endpoints/files/upload_file.py +0 -38
  269. lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +0 -37
  270. lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +0 -9
  271. lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +0 -133
  272. lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +0 -58
  273. lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +0 -31
  274. lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +0 -140
  275. lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +0 -51
  276. lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +0 -42
  277. lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +0 -31
  278. lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +0 -9
  279. lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +0 -30
  280. lionagi/integrations/openai_/api_endpoints/images/__init__.py +0 -9
  281. lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +0 -69
  282. lionagi/integrations/openai_/api_endpoints/images/image_models.py +0 -56
  283. lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +0 -56
  284. lionagi/integrations/openai_/api_endpoints/images/response_body.py +0 -30
  285. lionagi/integrations/openai_/api_endpoints/match_data_model.py +0 -197
  286. lionagi/integrations/openai_/api_endpoints/match_response.py +0 -336
  287. lionagi/integrations/openai_/api_endpoints/models/__init__.py +0 -7
  288. lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +0 -17
  289. lionagi/integrations/openai_/api_endpoints/models/models_models.py +0 -31
  290. lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +0 -9
  291. lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +0 -3
  292. lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +0 -20
  293. lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +0 -139
  294. lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +0 -19
  295. lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +0 -11
  296. lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +0 -7
  297. lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +0 -18
  298. lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +0 -17
  299. lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +0 -52
  300. lionagi/integrations/openai_/image_token_calculator/__init__.py +0 -0
  301. lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +0 -98
  302. lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +0 -15
  303. lionagi/integrations/openai_/openai_max_output_token_data.yaml +0 -12
  304. lionagi/integrations/openai_/openai_price_data.yaml +0 -26
  305. lionagi/integrations/openai_/version.py +0 -1
  306. lionagi/integrations/pandas_/__init__.py +0 -24
  307. lionagi/integrations/pandas_/extend_df.py +0 -61
  308. lionagi/integrations/pandas_/read.py +0 -103
  309. lionagi/integrations/pandas_/remove_rows.py +0 -61
  310. lionagi/integrations/pandas_/replace_keywords.py +0 -65
  311. lionagi/integrations/pandas_/save.py +0 -131
  312. lionagi/integrations/pandas_/search_keywords.py +0 -69
  313. lionagi/integrations/pandas_/to_df.py +0 -196
  314. lionagi/integrations/pandas_/update_cells.py +0 -54
  315. lionagi/integrations/perplexity_/PerplexityModel.py +0 -274
  316. lionagi/integrations/perplexity_/PerplexityService.py +0 -113
  317. lionagi/integrations/perplexity_/api_endpoints/__init__.py +0 -0
  318. lionagi/integrations/perplexity_/api_endpoints/api_request.py +0 -171
  319. lionagi/integrations/perplexity_/api_endpoints/chat_completions/__init__.py +0 -0
  320. lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/__init__.py +0 -0
  321. lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +0 -121
  322. lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/__init__.py +0 -0
  323. lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +0 -146
  324. lionagi/integrations/perplexity_/api_endpoints/data_models.py +0 -63
  325. lionagi/integrations/perplexity_/api_endpoints/match_response.py +0 -26
  326. lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +0 -3
  327. lionagi/integrations/perplexity_/perplexity_price_data.yaml +0 -10
  328. lionagi/integrations/perplexity_/version.py +0 -1
  329. lionagi/integrations/pydantic_/__init__.py +0 -8
  330. lionagi/integrations/pydantic_/break_down_annotation.py +0 -81
  331. lionagi/integrations/pydantic_/new_model.py +0 -208
  332. lionagi/libs/constants.py +0 -98
  333. lionagi/libs/file/path.py +0 -301
  334. lionagi/libs/file/types.py +0 -22
  335. lionagi/libs/func/__init__.py +0 -0
  336. lionagi/libs/func/async_calls/__init__.py +0 -24
  337. lionagi/libs/func/async_calls/alcall.py +0 -210
  338. lionagi/libs/func/async_calls/bcall.py +0 -130
  339. lionagi/libs/func/async_calls/mcall.py +0 -134
  340. lionagi/libs/func/async_calls/pcall.py +0 -149
  341. lionagi/libs/func/async_calls/rcall.py +0 -217
  342. lionagi/libs/func/async_calls/tcall.py +0 -114
  343. lionagi/libs/func/async_calls/ucall.py +0 -85
  344. lionagi/libs/func/decorators.py +0 -277
  345. lionagi/libs/func/lcall.py +0 -57
  346. lionagi/libs/func/params.py +0 -64
  347. lionagi/libs/func/throttle.py +0 -119
  348. lionagi/libs/func/types.py +0 -39
  349. lionagi/libs/func/utils.py +0 -96
  350. lionagi/libs/package/types.py +0 -26
  351. lionagi/libs/parse/__init__.py +0 -1
  352. lionagi/libs/parse/flatten/__init__.py +0 -9
  353. lionagi/libs/parse/flatten/params.py +0 -52
  354. lionagi/libs/parse/json/__init__.py +0 -27
  355. lionagi/libs/parse/json/extract.py +0 -102
  356. lionagi/libs/parse/json/parse.py +0 -179
  357. lionagi/libs/parse/json/to_json.py +0 -71
  358. lionagi/libs/parse/nested/__init__.py +0 -33
  359. lionagi/libs/parse/nested/to_flat_list.py +0 -64
  360. lionagi/libs/parse/params.py +0 -0
  361. lionagi/libs/parse/string_parse/__init__.py +0 -11
  362. lionagi/libs/parse/type_convert/__init__.py +0 -19
  363. lionagi/libs/parse/type_convert/params.py +0 -145
  364. lionagi/libs/parse/type_convert/to_dict.py +0 -333
  365. lionagi/libs/parse/type_convert/to_list.py +0 -186
  366. lionagi/libs/parse/type_convert/to_num.py +0 -358
  367. lionagi/libs/parse/type_convert/to_str.py +0 -195
  368. lionagi/libs/parse/types.py +0 -9
  369. lionagi/libs/parse/validate/__init__.py +0 -14
  370. lionagi/libs/parse/validate/params.py +0 -62
  371. lionagi/libs/parse/xml/__init__.py +0 -10
  372. lionagi/libs/parse/xml/convert.py +0 -56
  373. lionagi/libs/parse/xml/parser.py +0 -93
  374. lionagi/libs/string_similarity/__init__.py +0 -32
  375. lionagi/libs/string_similarity/matcher.py +0 -102
  376. lionagi/libs/string_similarity/utils.py +0 -15
  377. lionagi/libs/utils.py +0 -266
  378. lionagi/protocols/adapters/__init__.py +0 -0
  379. lionagi/protocols/adapters/adapter.py +0 -79
  380. lionagi/protocols/adapters/json_adapter.py +0 -43
  381. lionagi/protocols/adapters/pandas_adapter.py +0 -96
  382. lionagi/protocols/configs/__init__.py +0 -0
  383. lionagi/protocols/configs/branch_config.py +0 -86
  384. lionagi/protocols/configs/id_config.py +0 -15
  385. lionagi/protocols/configs/imodel_config.py +0 -73
  386. lionagi/protocols/configs/log_config.py +0 -93
  387. lionagi/protocols/configs/retry_config.py +0 -29
  388. lionagi/protocols/configs/types.py +0 -15
  389. lionagi/protocols/operatives/instruct.py +0 -194
  390. lionagi/protocols/operatives/types.py +0 -19
  391. lionagi/protocols/registries/_component_registry.py +0 -23
  392. lionagi/protocols/registries/_pile_registry.py +0 -30
  393. lionagi/service/complete_request_info.py +0 -11
  394. lionagi/service/rate_limiter.py +0 -108
  395. lionagi/service/service.py +0 -41
  396. lionagi/service/service_match_util.py +0 -131
  397. lionagi/service/service_util.py +0 -72
  398. lionagi/service/token_calculator.py +0 -51
  399. lionagi/strategies/__init__.py +0 -0
  400. lionagi/strategies/types.py +0 -21
  401. lionagi-0.5.4.dist-info/RECORD +0 -374
  402. /lionagi/{core → libs/nested}/__init__.py +0 -0
  403. /lionagi/{core/action → libs/schema}/__init__.py +0 -0
  404. /lionagi/{core/communication → libs/validate}/__init__.py +0 -0
  405. /lionagi/{core/forms → operations/strategies}/__init__.py +0 -0
  406. /lionagi/{core/generic → operatives}/__init__.py +0 -0
  407. /lionagi/{core/session → operatives/action}/__init__.py +0 -0
  408. /lionagi/{integrations/anthropic_ → operatives/forms}/__init__.py +0 -0
  409. /lionagi/{core → operatives}/forms/utils.py +0 -0
  410. /lionagi/{integrations/anthropic_/api_endpoints → operatives/instruct}/__init__.py +0 -0
  411. /lionagi/{integrations/anthropic_/api_endpoints/messages → operatives/models}/__init__.py +0 -0
  412. /lionagi/{integrations/anthropic_/api_endpoints/messages/request → protocols/generic}/__init__.py +0 -0
  413. /lionagi/{integrations/groq_ → protocols/graph}/__init__.py +0 -0
  414. /lionagi/{integrations/litellm_ → protocols/mail}/__init__.py +0 -0
  415. /lionagi/{integrations/ollama_ → protocols/messages}/__init__.py +0 -0
  416. /lionagi/{core/communication → protocols/messages}/templates/README.md +0 -0
  417. /lionagi/{core/communication → protocols/messages}/templates/action_request.jinja2 +0 -0
  418. /lionagi/{core/communication → protocols/messages}/templates/action_response.jinja2 +0 -0
  419. /lionagi/{core/communication → protocols/messages}/templates/system_message.jinja2 +0 -0
  420. /lionagi/{core/communication → protocols/messages}/templates/tool_schemas.jinja2 +0 -0
  421. /lionagi/{integrations/ollama_/api_endpoints → service/endpoints}/__init__.py +0 -0
  422. /lionagi/{integrations/ollama_/api_endpoints/chat_completion → service/providers}/__init__.py +0 -0
  423. /lionagi/{integrations/ollama_/api_endpoints/model → service/providers/anthropic_}/__init__.py +0 -0
  424. /lionagi/{integrations/perplexity_ → service/providers/groq_}/__init__.py +0 -0
  425. /lionagi/{protocols/operatives → service/providers/openai_}/__init__.py +0 -0
  426. /lionagi/{protocols/registries → service/providers/openrouter_}/__init__.py +0 -0
  427. {lionagi-0.5.4.dist-info → lionagi-0.6.0.dist-info}/WHEEL +0 -0
  428. {lionagi-0.5.4.dist-info → lionagi-0.6.0.dist-info}/licenses/LICENSE +0 -0
lionagi/integrations/perplexity_/PerplexityModel.py
@@ -1,274 +0,0 @@
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- import os
- from pathlib import Path
-
- from dotenv import load_dotenv
- from pydantic import (
-     BaseModel,
-     ConfigDict,
-     Field,
-     field_serializer,
-     model_validator,
- )
-
- from lionagi.integrations.perplexity_.api_endpoints.api_request import (
-     PerplexityRequest,
- )
- from lionagi.integrations.perplexity_.api_endpoints.chat_completions.request.request_body import (
-     PerplexityChatCompletionRequestBody,
- )
- from lionagi.integrations.perplexity_.api_endpoints.match_response import (
-     match_response,
- )
- from lionagi.service.rate_limiter import RateLimiter, RateLimitError
- from lionagi.service.service_util import invoke_retry
- from lionagi.service.token_calculator import TiktokenCalculator
-
- load_dotenv()
- path = Path(__file__).parent
-
- price_config_file_name = path / "perplexity_price_data.yaml"
- max_output_token_file_name = path / "perplexity_max_output_token_data.yaml"
-
-
- class _ModuleImportClass:
-     from lionagi.libs.package.imports import check_import
-
-     yaml = check_import("yaml", pip_name="pyyaml")
-
-
- class PerplexityModel(BaseModel):
-     model: str = Field(description="ID of the model to use.")
-
-     request_model: PerplexityRequest = Field(description="Making requests")
-
-     rate_limiter: RateLimiter = Field(
-         description="Rate Limiter to track usage"
-     )
-
-     text_token_calculator: TiktokenCalculator = Field(
-         default=None, description="Token Calculator"
-     )
-
-     estimated_output_len: int = Field(
-         default=0, description="Expected output len before making request"
-     )
-
-     model_config = ConfigDict(extra="forbid")
-
-     @model_validator(mode="before")
-     @classmethod
-     def parse_input(cls, data: dict):
-         if not isinstance(data, dict):
-             raise ValueError("Invalid init param")
-
-         # parse request model
-         request_model_params = {
-             "api_key": data.pop("api_key", None),
-             "endpoint": data.pop("endpoint", None),
-             "method": data.pop("method", None),
-             "content_type": data.pop("content_type", None),
-         }
-         try:
-             api_key = os.getenv(request_model_params["api_key"], None)
-             if api_key:
-                 request_model_params["api_key"] = api_key
-         except Exception:
-             pass
-
-         data["request_model"] = PerplexityRequest(**request_model_params)
-
-         # parse rate limiter
-         if "rate_limiter" not in data:
-             rate_limiter_params = {}
-             if limit_tokens := data.pop("limit_tokens", None):
-                 rate_limiter_params["limit_tokens"] = limit_tokens
-             if limit_requests := data.pop("limit_requests", None):
-                 rate_limiter_params["limit_requests"] = limit_requests
-
-             data["rate_limiter"] = RateLimiter(**rate_limiter_params)
-
-         # parse token calculator
-         try:
-             # Perplexity uses cl100k_base encoding like OpenAI
-             text_calc = TiktokenCalculator(encoding_name="cl100k_base")
-             data["text_token_calculator"] = text_calc
-         except Exception:
-             pass
-
-         return data
-
-     @field_serializer("request_model")
-     def serialize_request_model(self, value: PerplexityRequest):
-         return value.model_dump(exclude_unset=True)
-
-     @invoke_retry(max_retries=3, base_delay=1, max_delay=60)
-     async def invoke(
-         self,
-         request_body: PerplexityChatCompletionRequestBody,
-         estimated_output_len: int = 0,
-         output_file=None,
-         parse_response=True,
-     ):
-         if request_model := getattr(request_body, "model"):
-             if request_model != self.model:
-                 raise ValueError(
-                     f"Request model does not match. Model is {self.model}, but request is made for {request_model}."
-                 )
-
-         # check remaining rate limit
-         input_token_len = await self.get_input_token_len(request_body)
-
-         if getattr(request_body, "max_tokens", None):
-             estimated_output_len = request_body.max_tokens
-
-         invoke_viability_result = self.verify_invoke_viability(
-             input_tokens_len=input_token_len,
-             estimated_output_len=estimated_output_len,
-         )
-         if not invoke_viability_result:
-             raise RateLimitError(
-                 message="Rate limit reached for requests",
-                 input_token_len=input_token_len,
-                 estimated_output_len=estimated_output_len,
-             )
-
-         try:
-             if getattr(request_body, "stream", None):
-                 return await self.stream(
-                     request_body,
-                     output_file=output_file,
-                     parse_response=parse_response,
-                 )
-
-             response_body, response_headers = await self.request_model.invoke(
-                 json_data=request_body,
-                 output_file=output_file,
-                 with_response_header=True,
-                 parse_response=False,
-             )
-
-             if response_body:
-                 # Update rate limit based on usage
-                 if response_body.get("usage"):
-                     total_token_usage = response_body["usage"]["total_tokens"]
-                     if date_str := response_headers.get("date"):
-                         self.rate_limiter.update_rate_limit(
-                             date_str, total_token_usage
-                         )
-                     else:
-                         self.rate_limiter.update_rate_limit(
-                             None, total_token_usage
-                         )
-                 else:
-                     self.rate_limiter.update_rate_limit(None)
-
-             if parse_response:
-                 return match_response(self.request_model, response_body)
-             else:
-                 return response_body
-
-         except Exception as e:
-             raise e
-
-     async def stream(
-         self,
-         request_body: PerplexityChatCompletionRequestBody,
-         output_file=None,
-         parse_response=True,
-         verbose=True,
-     ):
-         response_chunks = []
-         response_headers = None
-
-         async for chunk in self.request_model.stream(
-             json_data=request_body,
-             output_file=output_file,
-             with_response_header=True,
-             verbose=verbose,
-         ):
-             if isinstance(chunk, dict):
-                 if "headers" in chunk:
-                     response_headers = chunk["headers"]
-                 else:
-                     response_chunks.append(chunk)
-
-         # Update rate limit if we have usage information
-         if response_chunks and response_chunks[-1].get("usage"):
-             total_token_usage = response_chunks[-1]["usage"]["total_tokens"]
-             if date_str := response_headers.get("date"):
-                 self.rate_limiter.update_rate_limit(
-                     date_str, total_token_usage
-                 )
-             else:
-                 self.rate_limiter.update_rate_limit(None, total_token_usage)
-
-         if parse_response:
-             return match_response(self.request_model, response_chunks)
-         else:
-             return response_chunks
-
-     async def get_input_token_len(
-         self, request_body: PerplexityChatCompletionRequestBody
-     ):
-         if request_model := getattr(request_body, "model"):
-             if request_model != self.model:
-                 raise ValueError(
-                     f"Request model does not match. Model is {self.model}, but request is made for {request_model}."
-                 )
-
-         total_tokens = 0
-         for message in request_body.messages:
-             total_tokens += self.text_token_calculator.calculate(
-                 message.content
-             )
-
-         return total_tokens
-
-     def verify_invoke_viability(
-         self, input_tokens_len: int = 0, estimated_output_len: int = 0
-     ):
-         self.rate_limiter.release_tokens()
-
-         estimated_output_len = (
-             estimated_output_len
-             if estimated_output_len != 0
-             else self.estimated_output_len
-         )
-         if estimated_output_len == 0:
-             with open(max_output_token_file_name) as file:
-                 output_token_config = _ModuleImportClass.yaml.safe_load(file)
-                 estimated_output_len = output_token_config.get(self.model, 0)
-                 self.estimated_output_len = estimated_output_len
-
-         if self.rate_limiter.check_availability(
-             input_tokens_len, estimated_output_len
-         ):
-             return True
-         else:
-             return False
-
-     def estimate_text_price(
-         self,
-         input_text: str,
-         estimated_num_of_output_tokens: int = 0,
-     ):
-         if self.text_token_calculator is None:
-             raise ValueError("Token calculator not available")
-
-         num_of_input_tokens = self.text_token_calculator.calculate(input_text)
-
-         with open(price_config_file_name) as file:
-             price_config = _ModuleImportClass.yaml.safe_load(file)
-
-         model_price_info_dict = price_config["model"][self.model]
-         estimated_price = (
-             model_price_info_dict["input_tokens"] * num_of_input_tokens
-             + model_price_info_dict["output_tokens"]
-             * estimated_num_of_output_tokens
-         )
-
-         return estimated_price
lionagi/integrations/perplexity_/PerplexityService.py
@@ -1,113 +0,0 @@
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- import inspect
-
- from dotenv import load_dotenv
-
- from lionagi.integrations.perplexity_.api_endpoints.chat_completions.request.request_body import (
-     PerplexityChatCompletionRequestBody,
- )
- from lionagi.service import Service, register_service
-
- from .PerplexityModel import PerplexityModel
-
- load_dotenv()
-
-
- @register_service
- class PerplexityService(Service):
-     def __init__(
-         self,
-         api_key: str,
-         name: str = None,
-     ):
-         super().__setattr__("_initialized", False)
-         self.api_key = api_key
-         self.name = name
-         self.rate_limiters = {}  # model: RateLimiter
-         super().__setattr__("_initialized", True)
-
-     def __setattr__(self, key, value):
-         if getattr(self, "_initialized", False) and key in [
-             "api_key",
-         ]:
-             raise AttributeError(
-                 f"Cannot modify '{key}' after initialization. "
-                 f"Please set a new service object for new keys."
-             )
-         super().__setattr__(key, value)
-
-     def check_rate_limiter(
-         self,
-         perplexity_model: PerplexityModel,
-         limit_requests: int = None,
-         limit_tokens: int = None,
-     ):
-         # Map model versions to their base models for shared rate limiting
-         shared_models = {
-             "llama-3.1-sonar-small-128k-online": "llama-3.1-sonar-small",
-             "llama-3.1-sonar-medium-128k-online": "llama-3.1-sonar-medium",
-             "llama-3.1-sonar-large-128k-online": "llama-3.1-sonar-large",
-         }
-
-         if perplexity_model.model in shared_models:
-             model = shared_models[perplexity_model.model]
-         else:
-             model = perplexity_model.model
-
-         if model not in self.rate_limiters:
-             self.rate_limiters[model] = perplexity_model.rate_limiter
-         else:
-             perplexity_model.rate_limiter = self.rate_limiters[model]
-             if limit_requests:
-                 perplexity_model.rate_limiter.limit_requests = limit_requests
-             if limit_tokens:
-                 perplexity_model.rate_limiter.limit_tokens = limit_tokens
-
-         return perplexity_model
-
-     @staticmethod
-     def match_data_model(task_name: str) -> dict:
-         """Match task name to appropriate request and response models."""
-         if task_name == "create_chat_completion":
-             return {"request_body": PerplexityChatCompletionRequestBody}
-         raise ValueError(f"No data models found for task: {task_name}")
-
-     @classmethod
-     def list_tasks(cls):
-         methods = []
-         for name, member in inspect.getmembers(
-             cls, predicate=inspect.isfunction
-         ):
-             if name not in [
-                 "__init__",
-                 "__setattr__",
-                 "check_rate_limiter",
-                 "match_data_model",
-             ]:
-                 methods.append(name)
-         return methods
-
-     # Chat Completions
-     def create_chat_completion(
-         self, model: str, limit_tokens: int = None, limit_requests: int = None
-     ):
-         model_obj = PerplexityModel(
-             model=model,
-             api_key=self.api_key,
-             endpoint="chat/completions",
-             method="POST",
-             content_type="application/json",
-             limit_tokens=limit_tokens,
-             limit_requests=limit_requests,
-         )
-
-         return self.check_rate_limiter(
-             model_obj, limit_requests=limit_requests, limit_tokens=limit_tokens
-         )
-
-     @property
-     def allowed_roles(self):
-         return ["user", "assistant", "system"]
lionagi/integrations/perplexity_/api_endpoints/api_request.py
@@ -1,171 +0,0 @@
- import json
- from collections.abc import AsyncGenerator
- from typing import Any, Dict, Optional, Tuple, Union
-
- import aiohttp
- from pydantic import BaseModel, ConfigDict, Field
-
- from lionagi.integrations.perplexity_.api_endpoints.data_models import (
-     PerplexityEndpointRequestBody,
- )
-
-
- class PerplexityRequest(BaseModel):
-     """Handler for making requests to the Perplexity API."""
-
-     api_key: str = Field(
-         description="API key for authentication", exclude=True
-     )
-     endpoint: str = Field(description="Endpoint for request")
-     method: str = Field(description="HTTP method")
-     content_type: str | None = "application/json"
-     base_url: str = "https://api.perplexity.ai"
-
-     model_config = ConfigDict(
-         arbitrary_types_allowed=True,
-         extra="allow",  # Allow extra attributes for mocking in tests
-     )
-
-     async def invoke(
-         self,
-         json_data: None | (
-             dict[str, Any] | PerplexityEndpointRequestBody
-         ) = None,
-         form_data: BaseModel | None = None,
-         output_file: str | None = None,
-         with_response_header: bool = False,
-         parse_response: bool = True,
-     ) -> dict[str, Any] | tuple[dict[str, Any], dict[str, str]] | bytes | None:
-         """Make a request to the Perplexity API."""
-         url = f"{self.base_url}/{self.endpoint}"
-         headers = {
-             "Authorization": f"Bearer {self.api_key}",
-         }
-
-         if self.content_type:
-             headers["Content-Type"] = self.content_type
-
-         if isinstance(json_data, PerplexityEndpointRequestBody):
-             json_data = json_data.model_dump(exclude_unset=True)
-
-         async with aiohttp.ClientSession() as client:
-             response = await client.request(
-                 method=self.method,
-                 url=url,
-                 headers=headers,
-                 json=json_data,
-                 data=form_data.model_dump() if form_data else None,
-             )
-             async with response:
-                 if response.status != 200:
-                     try:
-                         error_body = await response.json()
-                         error_msg = error_body.get("error", {}).get(
-                             "message", str(error_body)
-                         )
-                     except Exception:
-                         error_msg = await response.text()
-                     raise Exception(
-                         f"API request failed with status {response.status}: {error_msg}"
-                     )
-
-                 if output_file:
-                     with open(output_file, "wb") as f:
-                         async for chunk in response.content.iter_chunked(8192):
-                             f.write(chunk)
-                     return None
-
-                 if parse_response:
-                     response_body = await response.json()
-                 else:
-                     response_body = await response.text()
-                     try:
-                         response_body = json.loads(response_body)
-                     except json.JSONDecodeError:
-                         pass
-
-                 if with_response_header:
-                     headers = {
-                         k.lower(): v for k, v in response.headers.items()
-                     }
-                     return response_body, headers
-                 return response_body
-
-     async def stream(
-         self,
-         json_data: None | (
-             dict[str, Any] | PerplexityEndpointRequestBody
-         ) = None,
-         output_file: str | None = None,
-         with_response_header: bool = False,
-         verbose: bool = True,
-     ) -> AsyncGenerator[dict[str, Any], None]:
-         """Stream responses from the Perplexity API."""
-         if isinstance(json_data, PerplexityEndpointRequestBody):
-             json_data = json_data.model_dump(exclude_unset=True)
-
-         if not json_data.get("stream", False):
-             json_data["stream"] = True
-
-         url = f"{self.base_url}/{self.endpoint}"
-         headers = {
-             "Authorization": f"Bearer {self.api_key}",
-             "Content-Type": "application/json",
-             "Accept": "text/event-stream",
-         }
-
-         async with aiohttp.ClientSession() as client:
-             response = await client.post(url, headers=headers, json=json_data)
-             async with response:
-                 if response.status != 200:
-                     try:
-                         error_body = await response.json()
-                         error_msg = error_body.get("error", {}).get(
-                             "message", str(error_body)
-                         )
-                     except Exception:
-                         error_msg = await response.text()
-                     raise Exception(
-                         f"API request failed with status {response.status}: {error_msg}"
-                     )
-
-                 if with_response_header:
-                     headers = {
-                         k.lower(): v for k, v in response.headers.items()
-                     }
-                     yield {"headers": headers}
-
-                 file_handle = None
-                 if output_file:
-                     try:
-                         file_handle = open(output_file, "w")
-                     except Exception as e:
-                         raise ValueError(
-                             f"Failed to open output file {output_file}: {e}"
-                         )
-
-                 try:
-                     async for chunk in response.content:
-                         if chunk:
-                             chunk_str = chunk.decode("utf-8").strip()
-                             if chunk_str.startswith("data: "):
-                                 chunk_str = chunk_str[
-                                     6:
-                                 ]  # Remove "data: " prefix
-                                 try:
-                                     chunk_data = json.loads(chunk_str)
-                                     if file_handle:
-                                         file_handle.write(
-                                             json.dumps(chunk_data) + "\n"
-                                         )
-                                     if verbose and "choices" in chunk_data:
-                                         content = chunk_data["choices"][0][
-                                             "delta"
-                                         ]["content"]
-                                         print(content, end="", flush=True)
-                                     yield chunk_data
-                                 except json.JSONDecodeError:
-                                     continue
-                 finally:
-                     if file_handle:
-                         file_handle.close()
lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py
@@ -1,121 +0,0 @@
- from pydantic import BaseModel, ConfigDict, Field, field_validator
-
- from lionagi.integrations.perplexity_.api_endpoints.data_models import (
-     PerplexityEndpointRequestBody,
- )
-
-
- class Message(BaseModel):
-     """A message in the chat completion request."""
-
-     role: str = Field(description="The role of the message author.")
-     content: str = Field(description="The content of the message.")
-
-     model_config = ConfigDict(extra="forbid")
-
-     @field_validator("role")
-     @classmethod
-     def validate_role(cls, role: str) -> str:
-         valid_roles = ["system", "user", "assistant"]
-         if role not in valid_roles:
-             raise ValueError(f"Role must be one of {valid_roles}")
-         return role
-
-     @field_validator("content")
-     @classmethod
-     def validate_content(cls, content: str) -> str:
-         if not content or not content.strip():
-             raise ValueError("Content cannot be empty")
-         return content
-
-
- class PerplexityChatCompletionRequestBody(PerplexityEndpointRequestBody):
-     """Request body for chat completion requests."""
-
-     model: str = Field(description="ID of the model to use.")
-     messages: list[Message] = Field(
-         description="A list of messages comprising the conversation so far."
-     )
-     max_tokens: int | None = Field(
-         default=None,
-         description="The maximum number of tokens to generate in the completion.",
-         ge=1,
-     )
-     temperature: float | None = Field(
-         default=0.2,
-         description="What sampling temperature to use, between 0 and 2.",
-         ge=0,
-         lt=2,
-     )
-     top_p: float | None = Field(
-         default=0.9,
-         description="An alternative to sampling with temperature, called nucleus sampling.",
-         ge=0,
-         le=1,
-     )
-     search_domain_filter: list[str] | None = Field(
-         default=None,
-         description="List of domains to limit citations to.",
-     )
-     return_images: bool | None = Field(
-         default=False,
-         description="Whether to return images in the response.",
-     )
-     return_related_questions: bool | None = Field(
-         default=False,
-         description="Whether to return related questions in the response.",
-     )
-     search_recency_filter: str | None = Field(
-         default=None,
-         description="Filter for search results recency (month, week, day, hour).",
-     )
-     top_k: int | None = Field(
-         default=0,
-         description="The number of tokens to keep for highest top-k filtering.",
-         ge=0,
-         le=2048,
-     )
-     stream: bool | None = Field(
-         default=False,
-         description="Whether to stream back partial progress.",
-     )
-     presence_penalty: float | None = Field(
-         default=0,
-         description="Penalty for new tokens based on their presence in text so far.",
-         ge=-2,
-         le=2,
-     )
-     frequency_penalty: float | None = Field(
-         default=1,
-         description="Penalty for new tokens based on their frequency in text so far.",
-         gt=0,
-     )
-
-     model_config = ConfigDict(extra="forbid")
-
-     @field_validator("search_recency_filter")
-     @classmethod
-     def validate_search_recency_filter(cls, value: str | None) -> str | None:
-         if value is not None:
-             valid_filters = ["month", "week", "day", "hour"]
-             if value not in valid_filters:
-                 raise ValueError(
-                     f"Search recency filter must be one of {valid_filters}"
-                 )
-         return value
-
-     @field_validator("search_domain_filter")
-     @classmethod
-     def validate_search_domain_filter(
-         cls, value: list[str] | None
-     ) -> list[str] | None:
-         if value is not None and len(value) > 3:
-             raise ValueError("Search domain filter is limited to 3 domains")
-         return value
-
-     @field_validator("messages")
-     @classmethod
-     def validate_messages(cls, messages: list[Message]) -> list[Message]:
-         if not messages:
-             raise ValueError("At least one message is required")
-         return messages