lionagi 0.5.5__py3-none-any.whl → 0.6.1__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (433) hide show
  1. lionagi/__init__.py +18 -24
  2. lionagi/{core/_class_registry.py → _class_registry.py} +51 -10
  3. lionagi/_errors.py +35 -0
  4. lionagi/libs/__init__.py +3 -0
  5. lionagi/libs/compress/__init__.py +3 -0
  6. lionagi/libs/compress/models.py +6 -2
  7. lionagi/libs/compress/utils.py +4 -16
  8. lionagi/libs/file/__init__.py +3 -0
  9. lionagi/libs/file/chunk.py +4 -0
  10. lionagi/libs/file/file_ops.py +4 -0
  11. lionagi/libs/file/params.py +4 -41
  12. lionagi/libs/file/process.py +4 -0
  13. lionagi/libs/file/save.py +5 -1
  14. lionagi/libs/{parse/flatten → nested}/flatten.py +4 -0
  15. lionagi/libs/{parse/nested → nested}/nfilter.py +4 -0
  16. lionagi/libs/{parse/nested → nested}/nget.py +6 -1
  17. lionagi/libs/{parse/nested → nested}/ninsert.py +5 -1
  18. lionagi/libs/{parse/nested → nested}/nmerge.py +4 -0
  19. lionagi/libs/{parse/nested → nested}/npop.py +5 -2
  20. lionagi/libs/{parse/nested → nested}/nset.py +6 -1
  21. lionagi/libs/{parse/flatten → nested}/unflatten.py +4 -0
  22. lionagi/libs/{parse/nested → nested}/utils.py +5 -1
  23. lionagi/libs/package/__init__.py +3 -0
  24. lionagi/libs/package/imports.py +6 -2
  25. lionagi/libs/package/management.py +7 -3
  26. lionagi/libs/package/params.py +4 -0
  27. lionagi/libs/package/system.py +4 -0
  28. lionagi/libs/parse.py +30 -0
  29. lionagi/libs/{parse/json → schema}/as_readable.py +10 -4
  30. lionagi/libs/{parse/string_parse/code_block.py → schema/extract_code_block.py} +4 -0
  31. lionagi/libs/{parse/string_parse/docstring.py → schema/extract_docstring.py} +4 -0
  32. lionagi/libs/{parse/string_parse/function_.py → schema/function_to_schema.py} +21 -9
  33. lionagi/libs/{parse/json/schema.py → schema/json_schema.py} +5 -1
  34. lionagi/libs/validate/common_field_validators.py +170 -0
  35. lionagi/libs/{parse/validate/keys.py → validate/fuzzy_match_keys.py} +42 -8
  36. lionagi/libs/{parse/validate/mapping.py → validate/fuzzy_validate_mapping.py} +41 -6
  37. lionagi/libs/{string_similarity/algorithms.py → validate/string_similarity.py} +115 -1
  38. lionagi/libs/{parse/validate/boolean.py → validate/validate_boolean.py} +42 -3
  39. lionagi/operations/__init__.py +13 -3
  40. lionagi/operations/brainstorm/__init__.py +3 -3
  41. lionagi/operations/brainstorm/brainstorm.py +33 -19
  42. lionagi/operations/brainstorm/prompt.py +4 -0
  43. lionagi/operations/plan/__init__.py +4 -0
  44. lionagi/operations/plan/plan.py +19 -16
  45. lionagi/operations/plan/prompt.py +4 -0
  46. lionagi/operations/select/__init__.py +4 -0
  47. lionagi/operations/select/prompt.py +4 -0
  48. lionagi/operations/select/select.py +2 -2
  49. lionagi/operations/select/utils.py +4 -4
  50. lionagi/{strategies → operations/strategies}/base.py +6 -2
  51. lionagi/{strategies → operations/strategies}/concurrent.py +8 -5
  52. lionagi/{strategies → operations/strategies}/concurrent_chunk.py +6 -3
  53. lionagi/{strategies → operations/strategies}/concurrent_sequential_chunk.py +8 -4
  54. lionagi/{strategies → operations/strategies}/params.py +26 -6
  55. lionagi/{strategies → operations/strategies}/sequential.py +6 -2
  56. lionagi/{strategies → operations/strategies}/sequential_chunk.py +7 -3
  57. lionagi/{strategies → operations/strategies}/sequential_concurrent_chunk.py +9 -4
  58. lionagi/{strategies → operations/strategies}/utils.py +6 -3
  59. lionagi/operations/types.py +13 -0
  60. lionagi/operations/utils.py +6 -3
  61. lionagi/operatives/action/function_calling.py +136 -0
  62. lionagi/operatives/action/manager.py +236 -0
  63. lionagi/operatives/action/request_response_model.py +90 -0
  64. lionagi/operatives/action/tool.py +141 -0
  65. lionagi/{protocols/operatives/action.py → operatives/action/utils.py} +52 -90
  66. lionagi/{core → operatives}/forms/base.py +9 -4
  67. lionagi/{core → operatives}/forms/form.py +8 -13
  68. lionagi/{core → operatives}/forms/report.py +5 -3
  69. lionagi/operatives/instruct/base.py +79 -0
  70. lionagi/operatives/instruct/instruct.py +105 -0
  71. lionagi/operatives/instruct/instruct_collection.py +52 -0
  72. lionagi/operatives/instruct/node.py +13 -0
  73. lionagi/{protocols/operatives → operatives/instruct}/prompts.py +0 -34
  74. lionagi/{protocols/operatives → operatives/instruct}/reason.py +14 -7
  75. lionagi/{core/models/__init__.py → operatives/manager.py} +5 -1
  76. lionagi/operatives/models/field_model.py +194 -0
  77. lionagi/operatives/models/model_params.py +307 -0
  78. lionagi/{core → operatives}/models/note.py +20 -28
  79. lionagi/{core → operatives}/models/operable_model.py +153 -71
  80. lionagi/{core → operatives}/models/schema_model.py +4 -3
  81. lionagi/{protocols/operatives → operatives}/operative.py +10 -7
  82. lionagi/{protocols/operatives → operatives}/step.py +67 -26
  83. lionagi/operatives/types.py +69 -0
  84. lionagi/protocols/_concepts.py +94 -0
  85. lionagi/protocols/adapters/adapter.py +23 -7
  86. lionagi/protocols/adapters/json_adapter.py +72 -14
  87. lionagi/protocols/adapters/pandas_/csv_adapter.py +50 -0
  88. lionagi/protocols/adapters/pandas_/excel_adapter.py +52 -0
  89. lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +31 -0
  90. lionagi/protocols/adapters/pandas_/pd_series_adapter.py +17 -0
  91. lionagi/protocols/adapters/types.py +18 -0
  92. lionagi/protocols/generic/element.py +460 -0
  93. lionagi/protocols/generic/event.py +177 -0
  94. lionagi/protocols/generic/log.py +237 -0
  95. lionagi/{core → protocols}/generic/pile.py +193 -131
  96. lionagi/protocols/generic/processor.py +316 -0
  97. lionagi/protocols/generic/progression.py +500 -0
  98. lionagi/protocols/graph/edge.py +166 -0
  99. lionagi/protocols/graph/graph.py +290 -0
  100. lionagi/protocols/graph/node.py +125 -0
  101. lionagi/protocols/mail/exchange.py +116 -0
  102. lionagi/protocols/mail/mail.py +25 -0
  103. lionagi/protocols/mail/mailbox.py +47 -0
  104. lionagi/protocols/mail/manager.py +168 -0
  105. lionagi/protocols/mail/package.py +55 -0
  106. lionagi/protocols/messages/action_request.py +165 -0
  107. lionagi/protocols/messages/action_response.py +132 -0
  108. lionagi/{core/communication → protocols/messages}/assistant_response.py +55 -79
  109. lionagi/protocols/messages/base.py +73 -0
  110. lionagi/protocols/messages/instruction.py +582 -0
  111. lionagi/protocols/messages/manager.py +429 -0
  112. lionagi/protocols/messages/message.py +216 -0
  113. lionagi/protocols/messages/system.py +115 -0
  114. lionagi/protocols/messages/templates/assistant_response.jinja2 +6 -0
  115. lionagi/{core/communication → protocols/messages}/templates/instruction_message.jinja2 +2 -2
  116. lionagi/protocols/types.py +96 -0
  117. lionagi/service/__init__.py +1 -16
  118. lionagi/service/endpoints/base.py +517 -0
  119. lionagi/service/endpoints/chat_completion.py +102 -0
  120. lionagi/service/endpoints/match_endpoint.py +60 -0
  121. lionagi/service/endpoints/rate_limited_processor.py +146 -0
  122. lionagi/service/endpoints/token_calculator.py +209 -0
  123. lionagi/service/imodel.py +263 -96
  124. lionagi/service/manager.py +45 -0
  125. lionagi/service/providers/anthropic_/messages.py +64 -0
  126. lionagi/service/providers/groq_/chat_completions.py +56 -0
  127. lionagi/service/providers/openai_/chat_completions.py +62 -0
  128. lionagi/service/providers/openrouter_/chat_completions.py +62 -0
  129. lionagi/service/providers/perplexity_/__init__.py +3 -0
  130. lionagi/service/providers/perplexity_/chat_completions.py +40 -0
  131. lionagi/service/types.py +18 -0
  132. lionagi/session/__init__.py +3 -0
  133. lionagi/session/branch.py +1287 -0
  134. lionagi/session/session.py +296 -0
  135. lionagi/settings.py +62 -118
  136. lionagi/utils.py +2386 -0
  137. lionagi/version.py +1 -1
  138. {lionagi-0.5.5.dist-info → lionagi-0.6.1.dist-info}/METADATA +10 -9
  139. lionagi-0.6.1.dist-info/RECORD +169 -0
  140. lionagi/core/action/action_manager.py +0 -289
  141. lionagi/core/action/base.py +0 -109
  142. lionagi/core/action/function_calling.py +0 -153
  143. lionagi/core/action/tool.py +0 -202
  144. lionagi/core/action/types.py +0 -16
  145. lionagi/core/communication/action_request.py +0 -163
  146. lionagi/core/communication/action_response.py +0 -149
  147. lionagi/core/communication/base_mail.py +0 -49
  148. lionagi/core/communication/instruction.py +0 -376
  149. lionagi/core/communication/message.py +0 -286
  150. lionagi/core/communication/message_manager.py +0 -543
  151. lionagi/core/communication/system.py +0 -116
  152. lionagi/core/communication/templates/assistant_response.jinja2 +0 -2
  153. lionagi/core/communication/types.py +0 -27
  154. lionagi/core/communication/utils.py +0 -256
  155. lionagi/core/forms/types.py +0 -13
  156. lionagi/core/generic/component.py +0 -422
  157. lionagi/core/generic/edge.py +0 -163
  158. lionagi/core/generic/element.py +0 -199
  159. lionagi/core/generic/graph.py +0 -377
  160. lionagi/core/generic/log.py +0 -151
  161. lionagi/core/generic/log_manager.py +0 -320
  162. lionagi/core/generic/node.py +0 -11
  163. lionagi/core/generic/progression.py +0 -395
  164. lionagi/core/generic/types.py +0 -23
  165. lionagi/core/generic/utils.py +0 -53
  166. lionagi/core/models/base.py +0 -28
  167. lionagi/core/models/field_model.py +0 -145
  168. lionagi/core/models/model_params.py +0 -194
  169. lionagi/core/models/types.py +0 -19
  170. lionagi/core/session/branch.py +0 -130
  171. lionagi/core/session/branch_mixins.py +0 -581
  172. lionagi/core/session/session.py +0 -163
  173. lionagi/core/session/types.py +0 -8
  174. lionagi/core/typing/__init__.py +0 -9
  175. lionagi/core/typing/_concepts.py +0 -173
  176. lionagi/core/typing/_id.py +0 -104
  177. lionagi/core/typing/_pydantic.py +0 -33
  178. lionagi/core/typing/_typing.py +0 -54
  179. lionagi/integrations/_services.py +0 -17
  180. lionagi/integrations/anthropic_/AnthropicModel.py +0 -268
  181. lionagi/integrations/anthropic_/AnthropicService.py +0 -127
  182. lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +0 -12
  183. lionagi/integrations/anthropic_/anthropic_price_data.yaml +0 -34
  184. lionagi/integrations/anthropic_/api_endpoints/api_request.py +0 -277
  185. lionagi/integrations/anthropic_/api_endpoints/data_models.py +0 -40
  186. lionagi/integrations/anthropic_/api_endpoints/match_response.py +0 -119
  187. lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +0 -14
  188. lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +0 -74
  189. lionagi/integrations/anthropic_/api_endpoints/messages/response/__init__.py +0 -0
  190. lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +0 -32
  191. lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +0 -101
  192. lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +0 -25
  193. lionagi/integrations/anthropic_/version.py +0 -5
  194. lionagi/integrations/groq_/GroqModel.py +0 -325
  195. lionagi/integrations/groq_/GroqService.py +0 -156
  196. lionagi/integrations/groq_/api_endpoints/__init__.py +0 -0
  197. lionagi/integrations/groq_/api_endpoints/data_models.py +0 -187
  198. lionagi/integrations/groq_/api_endpoints/groq_request.py +0 -288
  199. lionagi/integrations/groq_/api_endpoints/match_response.py +0 -106
  200. lionagi/integrations/groq_/api_endpoints/response_utils.py +0 -105
  201. lionagi/integrations/groq_/groq_max_output_token_data.yaml +0 -21
  202. lionagi/integrations/groq_/groq_price_data.yaml +0 -58
  203. lionagi/integrations/groq_/groq_rate_limits.yaml +0 -105
  204. lionagi/integrations/groq_/version.py +0 -5
  205. lionagi/integrations/litellm_/imodel.py +0 -76
  206. lionagi/integrations/ollama_/OllamaModel.py +0 -244
  207. lionagi/integrations/ollama_/OllamaService.py +0 -142
  208. lionagi/integrations/ollama_/api_endpoints/api_request.py +0 -179
  209. lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +0 -31
  210. lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +0 -46
  211. lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +0 -67
  212. lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +0 -49
  213. lionagi/integrations/ollama_/api_endpoints/completion/__init__.py +0 -0
  214. lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +0 -72
  215. lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +0 -59
  216. lionagi/integrations/ollama_/api_endpoints/data_models.py +0 -15
  217. lionagi/integrations/ollama_/api_endpoints/embedding/__init__.py +0 -0
  218. lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +0 -33
  219. lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +0 -29
  220. lionagi/integrations/ollama_/api_endpoints/match_data_model.py +0 -62
  221. lionagi/integrations/ollama_/api_endpoints/match_response.py +0 -190
  222. lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +0 -13
  223. lionagi/integrations/ollama_/api_endpoints/model/create_model.py +0 -28
  224. lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +0 -11
  225. lionagi/integrations/ollama_/api_endpoints/model/list_model.py +0 -60
  226. lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +0 -34
  227. lionagi/integrations/ollama_/api_endpoints/model/push_model.py +0 -35
  228. lionagi/integrations/ollama_/api_endpoints/model/show_model.py +0 -36
  229. lionagi/integrations/ollama_/api_endpoints/option_models.py +0 -68
  230. lionagi/integrations/openai_/OpenAIModel.py +0 -419
  231. lionagi/integrations/openai_/OpenAIService.py +0 -435
  232. lionagi/integrations/openai_/__init__.py +0 -0
  233. lionagi/integrations/openai_/api_endpoints/__init__.py +0 -3
  234. lionagi/integrations/openai_/api_endpoints/api_request.py +0 -277
  235. lionagi/integrations/openai_/api_endpoints/audio/__init__.py +0 -9
  236. lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +0 -34
  237. lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +0 -136
  238. lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +0 -41
  239. lionagi/integrations/openai_/api_endpoints/audio/types.py +0 -41
  240. lionagi/integrations/openai_/api_endpoints/batch/__init__.py +0 -17
  241. lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +0 -146
  242. lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +0 -7
  243. lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +0 -26
  244. lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +0 -37
  245. lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +0 -65
  246. lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +0 -7
  247. lionagi/integrations/openai_/api_endpoints/batch/types.py +0 -4
  248. lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +0 -1
  249. lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +0 -39
  250. lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +0 -121
  251. lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +0 -221
  252. lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +0 -71
  253. lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +0 -14
  254. lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +0 -17
  255. lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +0 -54
  256. lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +0 -18
  257. lionagi/integrations/openai_/api_endpoints/chat_completions/response/__init__.py +0 -0
  258. lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +0 -62
  259. lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +0 -16
  260. lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +0 -47
  261. lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +0 -25
  262. lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +0 -99
  263. lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +0 -8
  264. lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +0 -24
  265. lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +0 -46
  266. lionagi/integrations/openai_/api_endpoints/data_models.py +0 -23
  267. lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +0 -3
  268. lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +0 -79
  269. lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +0 -67
  270. lionagi/integrations/openai_/api_endpoints/files/__init__.py +0 -11
  271. lionagi/integrations/openai_/api_endpoints/files/delete_file.py +0 -20
  272. lionagi/integrations/openai_/api_endpoints/files/file_models.py +0 -56
  273. lionagi/integrations/openai_/api_endpoints/files/list_files.py +0 -27
  274. lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +0 -9
  275. lionagi/integrations/openai_/api_endpoints/files/upload_file.py +0 -38
  276. lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +0 -37
  277. lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +0 -9
  278. lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +0 -133
  279. lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +0 -58
  280. lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +0 -31
  281. lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +0 -140
  282. lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +0 -51
  283. lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +0 -42
  284. lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +0 -31
  285. lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +0 -9
  286. lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +0 -30
  287. lionagi/integrations/openai_/api_endpoints/images/__init__.py +0 -9
  288. lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +0 -69
  289. lionagi/integrations/openai_/api_endpoints/images/image_models.py +0 -56
  290. lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +0 -56
  291. lionagi/integrations/openai_/api_endpoints/images/response_body.py +0 -30
  292. lionagi/integrations/openai_/api_endpoints/match_data_model.py +0 -197
  293. lionagi/integrations/openai_/api_endpoints/match_response.py +0 -336
  294. lionagi/integrations/openai_/api_endpoints/models/__init__.py +0 -7
  295. lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +0 -17
  296. lionagi/integrations/openai_/api_endpoints/models/models_models.py +0 -31
  297. lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +0 -9
  298. lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +0 -3
  299. lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +0 -20
  300. lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +0 -139
  301. lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +0 -19
  302. lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +0 -11
  303. lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +0 -7
  304. lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +0 -18
  305. lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +0 -17
  306. lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +0 -52
  307. lionagi/integrations/openai_/image_token_calculator/__init__.py +0 -0
  308. lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +0 -98
  309. lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +0 -15
  310. lionagi/integrations/openai_/openai_max_output_token_data.yaml +0 -12
  311. lionagi/integrations/openai_/openai_price_data.yaml +0 -26
  312. lionagi/integrations/openai_/version.py +0 -1
  313. lionagi/integrations/pandas_/__init__.py +0 -24
  314. lionagi/integrations/pandas_/extend_df.py +0 -61
  315. lionagi/integrations/pandas_/read.py +0 -103
  316. lionagi/integrations/pandas_/remove_rows.py +0 -61
  317. lionagi/integrations/pandas_/replace_keywords.py +0 -65
  318. lionagi/integrations/pandas_/save.py +0 -131
  319. lionagi/integrations/pandas_/search_keywords.py +0 -69
  320. lionagi/integrations/pandas_/to_df.py +0 -196
  321. lionagi/integrations/pandas_/update_cells.py +0 -54
  322. lionagi/integrations/perplexity_/PerplexityModel.py +0 -274
  323. lionagi/integrations/perplexity_/PerplexityService.py +0 -118
  324. lionagi/integrations/perplexity_/api_endpoints/__init__.py +0 -0
  325. lionagi/integrations/perplexity_/api_endpoints/api_request.py +0 -171
  326. lionagi/integrations/perplexity_/api_endpoints/chat_completions/__init__.py +0 -0
  327. lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/__init__.py +0 -0
  328. lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +0 -121
  329. lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/__init__.py +0 -0
  330. lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +0 -146
  331. lionagi/integrations/perplexity_/api_endpoints/data_models.py +0 -63
  332. lionagi/integrations/perplexity_/api_endpoints/match_response.py +0 -26
  333. lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +0 -3
  334. lionagi/integrations/perplexity_/perplexity_price_data.yaml +0 -10
  335. lionagi/integrations/perplexity_/version.py +0 -1
  336. lionagi/integrations/pydantic_/__init__.py +0 -8
  337. lionagi/integrations/pydantic_/break_down_annotation.py +0 -81
  338. lionagi/integrations/pydantic_/new_model.py +0 -208
  339. lionagi/libs/constants.py +0 -98
  340. lionagi/libs/file/path.py +0 -301
  341. lionagi/libs/file/types.py +0 -22
  342. lionagi/libs/func/__init__.py +0 -0
  343. lionagi/libs/func/async_calls/__init__.py +0 -24
  344. lionagi/libs/func/async_calls/alcall.py +0 -210
  345. lionagi/libs/func/async_calls/bcall.py +0 -130
  346. lionagi/libs/func/async_calls/mcall.py +0 -134
  347. lionagi/libs/func/async_calls/pcall.py +0 -149
  348. lionagi/libs/func/async_calls/rcall.py +0 -217
  349. lionagi/libs/func/async_calls/tcall.py +0 -114
  350. lionagi/libs/func/async_calls/ucall.py +0 -85
  351. lionagi/libs/func/decorators.py +0 -277
  352. lionagi/libs/func/lcall.py +0 -57
  353. lionagi/libs/func/params.py +0 -64
  354. lionagi/libs/func/throttle.py +0 -119
  355. lionagi/libs/func/types.py +0 -39
  356. lionagi/libs/func/utils.py +0 -96
  357. lionagi/libs/package/types.py +0 -26
  358. lionagi/libs/parse/__init__.py +0 -1
  359. lionagi/libs/parse/flatten/__init__.py +0 -9
  360. lionagi/libs/parse/flatten/params.py +0 -52
  361. lionagi/libs/parse/json/__init__.py +0 -27
  362. lionagi/libs/parse/json/extract.py +0 -102
  363. lionagi/libs/parse/json/parse.py +0 -179
  364. lionagi/libs/parse/json/to_json.py +0 -71
  365. lionagi/libs/parse/nested/__init__.py +0 -33
  366. lionagi/libs/parse/nested/to_flat_list.py +0 -64
  367. lionagi/libs/parse/params.py +0 -0
  368. lionagi/libs/parse/string_parse/__init__.py +0 -11
  369. lionagi/libs/parse/type_convert/__init__.py +0 -19
  370. lionagi/libs/parse/type_convert/params.py +0 -145
  371. lionagi/libs/parse/type_convert/to_dict.py +0 -333
  372. lionagi/libs/parse/type_convert/to_list.py +0 -186
  373. lionagi/libs/parse/type_convert/to_num.py +0 -358
  374. lionagi/libs/parse/type_convert/to_str.py +0 -195
  375. lionagi/libs/parse/types.py +0 -9
  376. lionagi/libs/parse/validate/__init__.py +0 -14
  377. lionagi/libs/parse/validate/params.py +0 -62
  378. lionagi/libs/parse/xml/__init__.py +0 -10
  379. lionagi/libs/parse/xml/convert.py +0 -56
  380. lionagi/libs/parse/xml/parser.py +0 -93
  381. lionagi/libs/string_similarity/__init__.py +0 -32
  382. lionagi/libs/string_similarity/matcher.py +0 -102
  383. lionagi/libs/string_similarity/utils.py +0 -15
  384. lionagi/libs/utils.py +0 -266
  385. lionagi/protocols/adapters/pandas_adapter.py +0 -96
  386. lionagi/protocols/configs/__init__.py +0 -0
  387. lionagi/protocols/configs/branch_config.py +0 -86
  388. lionagi/protocols/configs/id_config.py +0 -15
  389. lionagi/protocols/configs/imodel_config.py +0 -73
  390. lionagi/protocols/configs/log_config.py +0 -93
  391. lionagi/protocols/configs/retry_config.py +0 -29
  392. lionagi/protocols/configs/types.py +0 -15
  393. lionagi/protocols/operatives/instruct.py +0 -194
  394. lionagi/protocols/operatives/types.py +0 -19
  395. lionagi/protocols/registries/_component_registry.py +0 -23
  396. lionagi/protocols/registries/_pile_registry.py +0 -30
  397. lionagi/service/complete_request_info.py +0 -11
  398. lionagi/service/rate_limiter.py +0 -108
  399. lionagi/service/service.py +0 -41
  400. lionagi/service/service_match_util.py +0 -131
  401. lionagi/service/service_util.py +0 -72
  402. lionagi/service/token_calculator.py +0 -51
  403. lionagi/strategies/__init__.py +0 -0
  404. lionagi/strategies/types.py +0 -21
  405. lionagi-0.5.5.dist-info/RECORD +0 -374
  406. /lionagi/{core → libs/nested}/__init__.py +0 -0
  407. /lionagi/{core/action → libs/schema}/__init__.py +0 -0
  408. /lionagi/{core/communication → libs/validate}/__init__.py +0 -0
  409. /lionagi/{core/forms → operations/strategies}/__init__.py +0 -0
  410. /lionagi/{core/generic → operatives}/__init__.py +0 -0
  411. /lionagi/{core/session → operatives/action}/__init__.py +0 -0
  412. /lionagi/{integrations/anthropic_ → operatives/forms}/__init__.py +0 -0
  413. /lionagi/{core → operatives}/forms/utils.py +0 -0
  414. /lionagi/{integrations/anthropic_/api_endpoints → operatives/instruct}/__init__.py +0 -0
  415. /lionagi/{integrations/anthropic_/api_endpoints/messages → operatives/models}/__init__.py +0 -0
  416. /lionagi/{integrations → protocols/adapters/pandas_}/__init__.py +0 -0
  417. /lionagi/{integrations/anthropic_/api_endpoints/messages/request → protocols/generic}/__init__.py +0 -0
  418. /lionagi/{integrations/groq_ → protocols/graph}/__init__.py +0 -0
  419. /lionagi/{integrations/litellm_ → protocols/mail}/__init__.py +0 -0
  420. /lionagi/{integrations/ollama_ → protocols/messages}/__init__.py +0 -0
  421. /lionagi/{core/communication → protocols/messages}/templates/README.md +0 -0
  422. /lionagi/{core/communication → protocols/messages}/templates/action_request.jinja2 +0 -0
  423. /lionagi/{core/communication → protocols/messages}/templates/action_response.jinja2 +0 -0
  424. /lionagi/{core/communication → protocols/messages}/templates/system_message.jinja2 +0 -0
  425. /lionagi/{core/communication → protocols/messages}/templates/tool_schemas.jinja2 +0 -0
  426. /lionagi/{integrations/ollama_/api_endpoints → service/endpoints}/__init__.py +0 -0
  427. /lionagi/{integrations/ollama_/api_endpoints/chat_completion → service/providers}/__init__.py +0 -0
  428. /lionagi/{integrations/ollama_/api_endpoints/model → service/providers/anthropic_}/__init__.py +0 -0
  429. /lionagi/{integrations/perplexity_ → service/providers/groq_}/__init__.py +0 -0
  430. /lionagi/{protocols/operatives → service/providers/openai_}/__init__.py +0 -0
  431. /lionagi/{protocols/registries → service/providers/openrouter_}/__init__.py +0 -0
  432. {lionagi-0.5.5.dist-info → lionagi-0.6.1.dist-info}/WHEEL +0 -0
  433. {lionagi-0.5.5.dist-info → lionagi-0.6.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,60 +0,0 @@
1
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
-
6
- from pydantic import BaseModel, Field
7
-
8
- from ..data_models import OllamaEndpointResponseBody
9
-
10
-
11
- class Detail(BaseModel):
12
- parent_model: str = Field(None)
13
-
14
- format: str = Field(None)
15
-
16
- family: str = Field(None)
17
-
18
- families: list[str] | None = Field(None)
19
-
20
- parameter_size: str = Field(None)
21
-
22
- quantization_level: str = Field(None)
23
-
24
-
25
- class LocalModel(BaseModel):
26
- name: str = Field(None)
27
-
28
- model: str = Field(None)
29
-
30
- modified_at: str = Field(None)
31
-
32
- size: int = Field(None)
33
-
34
- digest: str = Field(None)
35
-
36
- details: Detail = Field(None)
37
-
38
-
39
- class OllamaListLocalModelsResponseBody(OllamaEndpointResponseBody):
40
- models: list[LocalModel] = Field(None)
41
-
42
-
43
- class RunningModel(BaseModel):
44
- name: str = Field(None)
45
-
46
- model: str = Field(None)
47
-
48
- size: int = Field(None)
49
-
50
- digest: str = Field(None)
51
-
52
- details: Detail = Field(None)
53
-
54
- expires_at: str = Field(None)
55
-
56
- size_vram: int = Field(None)
57
-
58
-
59
- class OllamaListRunningModelsResponseBody(OllamaEndpointResponseBody):
60
- models: list[RunningModel] = Field(None)
@@ -1,34 +0,0 @@
1
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
-
6
- from pydantic import Field
7
-
8
- from ..data_models import OllamaEndpointRequestBody, OllamaEndpointResponseBody
9
-
10
-
11
- class OllamaPullModelRequestBody(OllamaEndpointRequestBody):
12
- name: str = Field(description="Name of the model to pull")
13
-
14
- insecure: bool = Field(
15
- False,
16
- description="Allow insecure connections to the library. "
17
- "Only use this if you are pulling from your own library during development.",
18
- )
19
-
20
- stream: bool = Field(
21
- True,
22
- description="If 'false' the response will be returned as a single response object, "
23
- "rather than a stream of objects",
24
- )
25
-
26
-
27
- class OllamaPullModelResponseBody(OllamaEndpointResponseBody):
28
- status: str = Field(None)
29
-
30
- digest: str = Field(None)
31
-
32
- total: int = Field(None)
33
-
34
- completed: int = Field(None)
@@ -1,35 +0,0 @@
1
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from typing import Any
6
-
7
- from pydantic import Field
8
-
9
- from ..data_models import OllamaEndpointRequestBody, OllamaEndpointResponseBody
10
-
11
-
12
- class OllamaPushModelRequestBody(OllamaEndpointRequestBody):
13
- name: str = Field(
14
- description="Name of the model to push in the form of '<namespace>/<model>:<tag>'"
15
- )
16
-
17
- insecure: Any = Field(
18
- None,
19
- description="Allow insecure connections to the library. "
20
- "Only use this if you are pushing to your library during development.",
21
- )
22
-
23
- stream: bool = Field(
24
- True,
25
- description="If 'false' the response will be returned as a single response object, "
26
- "rather than a stream of objects",
27
- )
28
-
29
-
30
- class OllamaPushModelResponseBody(OllamaEndpointResponseBody):
31
- status: str = Field(None)
32
-
33
- digest: str = Field(None)
34
-
35
- total: int = Field(None)
@@ -1,36 +0,0 @@
1
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
-
6
- from pydantic import Field
7
-
8
- from ..data_models import OllamaEndpointRequestBody, OllamaEndpointResponseBody
9
- from .list_model import Detail
10
-
11
-
12
class OllamaShowModelRequestBody(OllamaEndpointRequestBody):
    """Request body for the Ollama show-model endpoint."""

    # Model identifier to inspect.
    name: str = Field(description="Name of the model to show")

    # Optional flag; omitted from the request when None.
    verbose: bool | None = Field(
        None,
        description="If set to true, returns full data for verbose response fields",
    )
19
-
20
-
21
class OllamaShowModelResponseBody(OllamaEndpointResponseBody):
    """Response body for the Ollama show-model endpoint.

    Every field defaults to ``None`` (the server may omit any of them),
    so annotations are widened to ``X | None`` to match the defaults
    (previously bare ``str``/``dict``/``Detail`` with ``None`` defaults).
    """

    # Model license text.
    license: str | None = Field(None)

    # Contents of the Modelfile used to build the model.
    modelfile: str | None = Field(None)

    # Model parameters rendered as text.
    parameters: str | None = Field(None)

    # Prompt template string.
    template: str | None = Field(None)

    # Structured model details (family, format, sizes, ...).
    details: Detail | None = Field(None)

    # Raw model metadata mapping.
    model_info: dict | None = Field(None)

    # Projector metadata for multimodal models, when present.
    projector_info: dict | None = Field(None)

    # Last-modified timestamp as returned by the server.
    modified_at: str | None = Field(None)
@@ -1,68 +0,0 @@
1
- # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from pydantic import BaseModel, Field
6
-
7
-
8
class Option(BaseModel):
    """Generation options for Ollama completion requests.

    Defaults mirror the server-side defaults. ``stop`` is widened to
    ``str | None`` so the declared type admits its ``None`` default
    (previously ``str = Field(None)``, which is type-unsafe).
    """

    mirostat: int = Field(
        0, description="Enable Mirostat sampling for controlling perplexity."
    )

    mirostat_eta: float = Field(
        0.1,
        description="Influences how quickly the algorithm responds to feedback from the generated text. ",
    )

    mirostat_tau: float = Field(
        5.0,
        description="Controls the balance between coherence and diversity of the output.",
    )

    num_ctx: int = Field(
        2048,
        description="Sets the size of the context window used to generate the next token.",
    )

    repeat_last_n: int = Field(
        64,
        description="Sets how far back for the model to look back to prevent repetition.",
    )

    repeat_penalty: float = Field(
        1.1, description="Sets how strongly to penalize repetitions."
    )

    temperature: float = Field(
        0.8, description="The temperature of the model."
    )

    seed: int = Field(
        0, description="Sets the random number seed to use for generation."
    )

    # Optional: no stop sequence is sent when left as None.
    stop: str | None = Field(None, description="Sets the stop sequences to use.")

    tfs_z: float = Field(
        1,
        description="Tail free sampling is used to reduce the impact of less probable tokens from the output.",
    )

    num_predict: int = Field(
        128,
        description="Maximum number of tokens to predict when generating text.",
    )

    top_k: int = Field(
        40, description="Reduces the probability of generating nonsense."
    )

    top_p: float = Field(0.9, description="Works together with top-k.")

    min_p: float = Field(
        0.0,
        description="Alternative to the top_p, and aims to ensure a balance of quality and variety. "
        "The parameter p represents the minimum probability for a token to be considered, "
        "relative to the probability of the most likely token. ",
    )
@@ -1,419 +0,0 @@
1
- import warnings
2
- from pathlib import Path
3
-
4
- from dotenv import load_dotenv
5
- from pydantic import (
6
- BaseModel,
7
- ConfigDict,
8
- Field,
9
- field_serializer,
10
- model_validator,
11
- )
12
-
13
- from lionagi.service.rate_limiter import RateLimiter, RateLimitError
14
- from lionagi.service.service_util import invoke_retry
15
- from lionagi.service.token_calculator import TiktokenCalculator
16
-
17
- from .api_endpoints.api_request import OpenAIRequest
18
- from .api_endpoints.chat_completions.request.request_body import (
19
- OpenAIChatCompletionRequestBody,
20
- StreamOptions,
21
- )
22
- from .api_endpoints.chat_completions.util import get_images, get_text_messages
23
- from .api_endpoints.data_models import OpenAIEndpointRequestBody
24
- from .api_endpoints.embeddings.request_body import OpenAIEmbeddingRequestBody
25
- from .api_endpoints.match_response import match_response
26
- from .image_token_calculator.image_token_calculator import (
27
- OpenAIImageTokenCalculator,
28
- )
29
-
30
load_dotenv()
path = Path(__file__).parent

# BUG FIX: the two file-name constants were swapped — price estimation was
# reading the max-output-token YAML and vice versa. Each constant now points
# at the file its name (and its usage below) says it should.
price_config_file_name = path / "openai_price_data.yaml"
max_output_token_file_name = path / "openai_max_output_token_data.yaml"
35
-
36
-
37
class _ModuleImportClass:
    # Guarded-import holder: check_import resolves (and installs, if missing)
    # pyyaml exactly once at class-definition time, caching the module as a
    # class attribute so callers use `_ModuleImportClass.yaml`.
    from lionagi.libs.package.imports import check_import

    yaml = check_import("yaml", pip_name="pyyaml")
41
-
42
-
43
class OpenAIModel(BaseModel):
    """Wrapper around a single OpenAI model.

    Bundles the HTTP request object, a rate limiter, and text/image token
    calculators so requests can be checked against rate limits before they
    are sent, and limiter state can be refreshed from response headers.
    """

    # Model ID, e.g. "gpt-4o"; must match the model named in request bodies.
    model: str = Field(description="ID of the model to use.")

    request_model: OpenAIRequest = Field(description="Making requests")

    rate_limiter: RateLimiter = Field(
        description="Rate Limiter to track usage"
    )

    # NOTE(review): annotated TiktokenCalculator but defaults to None; code
    # below guards with None checks before use.
    text_token_calculator: TiktokenCalculator = Field(
        default=None, description="Token Calculator"
    )

    # NOTE(review): same pattern — may be None when the model lacks vision.
    image_token_calculator: OpenAIImageTokenCalculator = Field(
        default=None, description="Image Token Calculator"
    )

    # Fallback expected output length used when a request gives no estimate.
    estimated_output_len: int = Field(
        default=0, description="Expected output len before making request"
    )

    model_config = ConfigDict(extra="forbid")

    @model_validator(mode="before")
    @classmethod
    def parse_input(cls, data: dict):
        """Build nested field objects from flat init kwargs.

        Pops request- and limit-related keys out of ``data`` to construct
        ``request_model`` and ``rate_limiter``; token calculators are
        created best-effort and silently skipped on failure (e.g. unknown
        encoding for the model).
        """
        if not isinstance(data, dict):
            raise ValueError("Invalid init param")

        # parse request model
        request_model_params = {
            "api_key": data.pop("api_key", None),
            "endpoint": data.pop("endpoint", None),
            "method": data.pop("method", None),
        }
        if org := data.pop("openai_organization", None):
            request_model_params["openai_organization"] = org
        if proj := data.pop("openai_project", None):
            request_model_params["openai_project"] = proj
        if content_type := data.pop("content_type", None):
            request_model_params["content_type"] = content_type

        data["request_model"] = OpenAIRequest(**request_model_params)

        # parse rate limiter (only when caller did not supply one)
        if "rate_limiter" not in data:
            rate_limiter_params = {}
            if limit_tokens := data.pop("limit_tokens", None):
                rate_limiter_params["limit_tokens"] = limit_tokens
            if limit_requests := data.pop("limit_requests", None):
                rate_limiter_params["limit_requests"] = limit_requests

            data["rate_limiter"] = RateLimiter(**rate_limiter_params)

        # parse token calculator (best-effort)
        try:
            text_calc = TiktokenCalculator(encoding_name=data.get("model"))
            data["text_token_calculator"] = text_calc
        except Exception:
            pass

        # set image calculator (best-effort; absent for non-vision models)
        try:
            data["image_token_calculator"] = OpenAIImageTokenCalculator(
                model=data.get("model")
            )
        except Exception:
            pass

        return data

    @field_serializer("request_model")
    def serialize_request_model(self, value: OpenAIRequest):
        """Serialize the request model, omitting unset fields."""
        return value.model_dump(exclude_unset=True)

    @invoke_retry(max_retries=3, base_delay=1, max_delay=60)
    async def invoke(
        self,
        request_body: OpenAIEndpointRequestBody,
        estimated_output_len: int = 0,
        output_file=None,
        parse_response=True,
    ):
        """Send one request, enforcing model match and rate limits.

        Streams chat completions when ``request_body.stream`` is set,
        otherwise performs a single request (form-data when a ``file``
        attribute is present, JSON otherwise), updates the rate limiter
        from the response, and returns the (optionally parsed) body.

        Raises:
            ValueError: model mismatch, or stream requested for a
                non-chat-completion endpoint.
            RateLimitError: not enough remaining quota for the estimated
                request size.
        """
        if request_model := getattr(request_body, "model"):
            if request_model != self.model:
                raise ValueError(
                    f"Request model does not match. Model is {self.model}, but request is made for {request_model}."
                )

        if getattr(request_body, "stream", None) and not isinstance(
            request_body, OpenAIChatCompletionRequestBody
        ):
            raise ValueError("Stream is only supported for chat completions")

        # check remaining rate limit
        input_token_len = await self.get_input_token_len(request_body)

        # chat completion request body attribute
        if getattr(request_body, "max_completion_tokens", None):
            estimated_output_len = request_body.max_completion_tokens

        invoke_viability_result = self.verify_invoke_viability(
            input_tokens_len=input_token_len,
            estimated_output_len=estimated_output_len,
        )
        if not invoke_viability_result:
            raise RateLimitError("Rate limit reached for requests")

        try:
            if getattr(request_body, "stream", None):
                return await self.stream(
                    request_body,
                    output_file=output_file,
                    parse_response=parse_response,
                )

            # File uploads go as form data; everything else as JSON.
            if getattr(request_body, "file", None):
                response_body, response_headers = (
                    await self.request_model.invoke(
                        form_data=request_body,
                        output_file=output_file,
                        with_response_header=True,
                        parse_response=False,
                    )
                )
            else:
                response_body, response_headers = (
                    await self.request_model.invoke(
                        json_data=request_body,
                        output_file=output_file,
                        with_response_header=True,
                        parse_response=False,
                    )
                )

            self.check_limits_info(response_headers)

            if response_body:
                # mainly for chat/completions and embedding endpoints
                # update rate limit
                if response_body.get("usage"):
                    total_token_usage = response_body["usage"]["total_tokens"]
                    self.rate_limiter.update_rate_limit(
                        response_headers.get("Date"), total_token_usage
                    )
                else:  # No Token limits condition (request limit only)
                    self.rate_limiter.update_rate_limit(
                        response_headers.get("Date")
                    )
            else:
                # for audio/speech endpoint (without response body object)
                self.rate_limiter.update_rate_limit(
                    response_headers.get("Date")
                )

            self.check_remaining_info(response_headers)

            if parse_response:
                return match_response(self.request_model, response_body)
            else:
                return response_body

        except Exception as e:  # TODO: example
            raise e

    async def stream(
        self,
        request_body: OpenAIEndpointRequestBody,
        output_file=None,
        parse_response=True,
    ):
        """Stream a chat-completion request and collect all chunks.

        Forces ``stream_options.include_usage`` so a usage chunk arrives,
        updates the rate limiter from it, and returns the chunk list
        (parsed via ``match_response`` when ``parse_response`` is True).

        Raises:
            ValueError: if ``request_body`` is not a chat-completion body.
        """
        if not isinstance(request_body, OpenAIChatCompletionRequestBody):
            raise ValueError("Stream is only supported for chat completions")

        stream_options_included = bool(getattr(request_body, "stream_options"))
        if not stream_options_included:
            setattr(
                request_body,
                "stream_options",
                StreamOptions(include_usage=True),
            )

        response_list = []
        async for chunk in self.request_model.stream(
            json_data=request_body,
            output_file=output_file,
            with_response_header=True,
        ):
            response_list.append(chunk)

        # Last yielded item is the response headers; the usage chunk is
        # kept in the list only if the caller asked for stream_options.
        response_headers = response_list.pop()
        if stream_options_included:
            usage_chunk = response_list[-1]
        else:
            usage_chunk = response_list.pop()

        self.check_limits_info(response_headers)
        total_token_usage = usage_chunk["usage"]["total_tokens"]
        self.rate_limiter.update_rate_limit(
            response_headers.get("Date"), total_token_usage
        )

        if parse_response:
            return match_response(self.request_model, response_list)
        else:
            return response_list

    async def get_input_token_len(
        self, request_body: OpenAIEndpointRequestBody
    ):
        """Estimate input token count for a request body.

        Chat completions: text tokens plus image tokens per attached image.
        Embeddings: handles str, list[int], list[str], and list[list[int]]
        inputs. Other endpoints return 0 (no token-based limiting).

        Raises:
            ValueError: model mismatch, or images supplied for a model
                without an image token calculator.
        """
        if request_model := getattr(request_body, "model"):
            if request_model != self.model:
                raise ValueError(
                    f"Request model does not match. Model is {self.model}, but request is made for {request_model}."
                )
        # TODO: match with new Request Body format
        if isinstance(request_body, OpenAIChatCompletionRequestBody):
            messages_text = get_text_messages(request_body)
            # text_token_calculator should always be available for chat completions
            text_tokens = self.text_token_calculator.calculate(messages_text)

            image_urls = get_images(request_body)
            image_tokens = 0
            for url, detail in image_urls:
                if self.image_token_calculator:
                    image_tokens += (
                        await self.image_token_calculator.calculate(
                            url, detail
                        )
                    )
                else:
                    raise ValueError(
                        "The model does not have vision capabilities."
                    )

            return text_tokens + image_tokens
        elif isinstance(request_body, OpenAIEmbeddingRequestBody):
            text = request_body.input
            if isinstance(text, str):  # str
                return self.text_token_calculator.calculate(text)
            elif isinstance(text[0], int):  # List[int]
                return len(text)
            elif isinstance(text[0], str):  # List[str]
                total_totkens = 0
                for t in text:
                    total_totkens += self.text_token_calculator.calculate(t)
                return total_totkens
            else:  # List[List[int]]
                total_tokens = 0
                for t in text:
                    total_tokens += len(t)
                return total_tokens

        # TODO: add other rules for other endpoints if input tokens should be calculated
        return 0  # no tokens rate limit

    def verify_invoke_viability(
        self, input_tokens_len: int = 0, estimated_output_len: int = 0
    ):
        """Return True if the rate limiter can admit this request.

        When no output estimate is given, falls back to the instance
        default, and failing that, to the per-model value loaded from the
        max-output-token YAML config (cached into ``estimated_output_len``).
        """
        self.rate_limiter.release_tokens()

        estimated_output_len = (
            estimated_output_len
            if estimated_output_len != 0
            else self.estimated_output_len
        )
        if estimated_output_len == 0:
            with open(max_output_token_file_name) as file:
                output_token_config = _ModuleImportClass.yaml.safe_load(file)
                estimated_output_len = output_token_config.get(self.model, 0)
                self.estimated_output_len = (
                    estimated_output_len  # update to default max output len
                )

        if self.rate_limiter.check_availability(
            input_tokens_len, estimated_output_len
        ):
            return True
        else:
            return False

    def check_limits_info(self, response_headers):
        """Sync limiter limits with the account limits in response headers.

        Adopts ``x-ratelimit-limit-*`` values when the limiter has none
        configured, and warns when the configured limits exceed what the
        account actually allows.
        """
        if response_headers.get("x-ratelimit-limit-requests"):
            if self.rate_limiter.limit_requests is None:
                self.rate_limiter.limit_requests = int(
                    response_headers.get("x-ratelimit-limit-requests")
                )
            else:
                if self.rate_limiter.limit_requests > int(
                    response_headers.get("x-ratelimit-limit-requests")
                ):
                    warnings.warn(
                        "The configured request limit exceeds the account's allowed request limit."
                        "This may lead to unexpected throttling or rejection of requests.",
                        UserWarning,
                    )
        if response_headers.get("x-ratelimit-limit-tokens"):
            if self.rate_limiter.limit_tokens is None:
                self.rate_limiter.limit_tokens = int(
                    response_headers.get("x-ratelimit-limit-tokens")
                )
            else:
                if self.rate_limiter.limit_tokens > int(
                    response_headers.get("x-ratelimit-limit-tokens")
                ):
                    warnings.warn(
                        "The configured token limit exceeds the account's allowed token limit."
                        "This may lead to unexpected throttling or rejection of requests.",
                        UserWarning,
                    )

    def check_remaining_info(self, response_headers):
        """Reconcile local limiter state with server-reported remainders.

        If the server reports fewer remaining tokens/requests than tracked
        locally, consume the difference so the limiter converges on the
        server's view.
        """
        if response_headers.get("x-ratelimit-remaining-tokens"):
            if (
                int(response_headers.get("x-ratelimit-remaining-tokens"))
                < self.rate_limiter.remaining_tokens
            ):
                token_diff = self.rate_limiter.remaining_tokens - int(
                    response_headers.get("x-ratelimit-remaining-tokens")
                )
                self.rate_limiter.update_rate_limit(
                    response_headers.get("Date"), token_diff
                )

        if response_headers.get("x-ratelimit-remaining-requests"):
            if (
                int(response_headers.get("x-ratelimit-remaining-requests"))
                < self.rate_limiter.remaining_requests
            ):
                request_diff = self.rate_limiter.remaining_requests - int(
                    response_headers.get("x-ratelimit-remaining-requests")
                )
                for i in range(request_diff):
                    self.rate_limiter.update_rate_limit(
                        response_headers.get("Date")
                    )

    def estimate_text_price(
        self,
        input_text: str,
        with_batch: bool = False,
        estimated_num_of_output_tokens: int = 0,
    ):
        """Estimate request price in USD from the price-config YAML.

        Only supported for the chat/completions endpoint; uses per-token
        input/output prices (batch or regular) for ``self.model``.

        Raises:
            ValueError: no text token calculator, or unsupported endpoint.
        """
        if self.text_token_calculator is None:
            raise ValueError(
                "Estimating price currently only supports chat/completions endpoint"
            )

        # only if text_token_calculator is available
        num_of_input_tokens = self.text_token_calculator.calculate(input_text)

        # read openai price info from config file
        with open(price_config_file_name) as file:
            price_config = _ModuleImportClass.yaml.safe_load(file)

        if self.request_model.endpoint == "chat/completions":
            model_price_info_dict = price_config["model"][self.model]
            if with_batch:
                estimated_price = (
                    model_price_info_dict["input_tokens_with_batch"]
                    * num_of_input_tokens
                    + model_price_info_dict["output_tokens_with_batch"]
                    * estimated_num_of_output_tokens
                )
            else:
                estimated_price = (
                    model_price_info_dict["input_tokens"] * num_of_input_tokens
                    + model_price_info_dict["output_tokens"]
                    * estimated_num_of_output_tokens
                )

        else:
            # TODO: add price config for other endpoints
            raise ValueError(
                "Estimating price currently only supports chat/completions endpoint"
            )
        return estimated_price