lionagi 0.5.5__py3-none-any.whl → 0.6.0__py3-none-any.whl
- lionagi/__init__.py +16 -24
- lionagi/{core/_class_registry.py → _class_registry.py} +51 -10
- lionagi/_errors.py +35 -0
- lionagi/libs/__init__.py +3 -0
- lionagi/libs/compress/__init__.py +3 -0
- lionagi/libs/compress/models.py +6 -2
- lionagi/libs/compress/utils.py +4 -16
- lionagi/libs/file/__init__.py +3 -0
- lionagi/libs/file/chunk.py +4 -0
- lionagi/libs/file/file_ops.py +4 -0
- lionagi/libs/file/params.py +4 -41
- lionagi/libs/file/process.py +4 -0
- lionagi/libs/file/save.py +5 -1
- lionagi/libs/{parse/flatten → nested}/flatten.py +4 -0
- lionagi/libs/{parse/nested → nested}/nfilter.py +4 -0
- lionagi/libs/{parse/nested → nested}/nget.py +6 -1
- lionagi/libs/{parse/nested → nested}/ninsert.py +5 -1
- lionagi/libs/{parse/nested → nested}/nmerge.py +4 -0
- lionagi/libs/{parse/nested → nested}/npop.py +5 -2
- lionagi/libs/{parse/nested → nested}/nset.py +6 -1
- lionagi/libs/{parse/flatten → nested}/unflatten.py +4 -0
- lionagi/libs/{parse/nested → nested}/utils.py +5 -1
- lionagi/libs/package/__init__.py +3 -0
- lionagi/libs/package/imports.py +6 -2
- lionagi/libs/package/management.py +7 -3
- lionagi/libs/package/params.py +4 -0
- lionagi/libs/package/system.py +4 -0
- lionagi/libs/parse.py +30 -0
- lionagi/libs/{parse/json → schema}/as_readable.py +10 -4
- lionagi/libs/{parse/string_parse/code_block.py → schema/extract_code_block.py} +4 -0
- lionagi/libs/{parse/string_parse/docstring.py → schema/extract_docstring.py} +4 -0
- lionagi/libs/{parse/string_parse/function_.py → schema/function_to_schema.py} +21 -9
- lionagi/libs/{parse/json/schema.py → schema/json_schema.py} +5 -1
- lionagi/libs/validate/common_field_validators.py +170 -0
- lionagi/libs/{parse/validate/keys.py → validate/fuzzy_match_keys.py} +42 -8
- lionagi/libs/{parse/validate/mapping.py → validate/fuzzy_validate_mapping.py} +41 -6
- lionagi/libs/{string_similarity/algorithms.py → validate/string_similarity.py} +115 -1
- lionagi/libs/{parse/validate/boolean.py → validate/validate_boolean.py} +42 -3
- lionagi/operations/__init__.py +13 -3
- lionagi/operations/brainstorm/__init__.py +3 -3
- lionagi/operations/brainstorm/brainstorm.py +33 -19
- lionagi/operations/brainstorm/prompt.py +4 -0
- lionagi/operations/plan/__init__.py +4 -0
- lionagi/operations/plan/plan.py +16 -13
- lionagi/operations/plan/prompt.py +4 -0
- lionagi/operations/select/__init__.py +4 -0
- lionagi/operations/select/prompt.py +4 -0
- lionagi/operations/select/select.py +1 -1
- lionagi/operations/select/utils.py +4 -4
- lionagi/{strategies → operations/strategies}/base.py +6 -2
- lionagi/{strategies → operations/strategies}/concurrent.py +8 -5
- lionagi/{strategies → operations/strategies}/concurrent_chunk.py +6 -3
- lionagi/{strategies → operations/strategies}/concurrent_sequential_chunk.py +8 -4
- lionagi/{strategies → operations/strategies}/params.py +10 -6
- lionagi/{strategies → operations/strategies}/sequential.py +6 -2
- lionagi/{strategies → operations/strategies}/sequential_chunk.py +7 -3
- lionagi/{strategies → operations/strategies}/sequential_concurrent_chunk.py +9 -4
- lionagi/{strategies → operations/strategies}/utils.py +6 -3
- lionagi/{core/models/__init__.py → operations/types.py} +3 -1
- lionagi/operations/utils.py +6 -3
- lionagi/operatives/action/function_calling.py +136 -0
- lionagi/operatives/action/manager.py +239 -0
- lionagi/operatives/action/request_response_model.py +90 -0
- lionagi/operatives/action/tool.py +141 -0
- lionagi/{protocols/operatives/action.py → operatives/action/utils.py} +52 -90
- lionagi/{core → operatives}/forms/base.py +9 -4
- lionagi/{core → operatives}/forms/form.py +8 -13
- lionagi/{core → operatives}/forms/report.py +5 -3
- lionagi/operatives/instruct/base.py +79 -0
- lionagi/operatives/instruct/instruct.py +105 -0
- lionagi/operatives/instruct/instruct_collection.py +52 -0
- lionagi/operatives/instruct/node.py +13 -0
- lionagi/{protocols/operatives → operatives/instruct}/prompts.py +0 -34
- lionagi/{protocols/operatives → operatives/instruct}/reason.py +14 -7
- lionagi/{integrations/anthropic_/version.py → operatives/manager.py} +5 -1
- lionagi/operatives/models/field_model.py +194 -0
- lionagi/operatives/models/model_params.py +307 -0
- lionagi/{core → operatives}/models/note.py +20 -28
- lionagi/{core → operatives}/models/operable_model.py +153 -71
- lionagi/{core → operatives}/models/schema_model.py +4 -3
- lionagi/{protocols/operatives → operatives}/operative.py +10 -7
- lionagi/{protocols/operatives → operatives}/step.py +67 -26
- lionagi/operatives/types.py +69 -0
- lionagi/protocols/_adapter.py +224 -0
- lionagi/protocols/_concepts.py +94 -0
- lionagi/protocols/generic/element.py +460 -0
- lionagi/protocols/generic/event.py +177 -0
- lionagi/protocols/generic/log.py +237 -0
- lionagi/{core → protocols}/generic/pile.py +172 -131
- lionagi/protocols/generic/processor.py +316 -0
- lionagi/protocols/generic/progression.py +500 -0
- lionagi/protocols/graph/edge.py +166 -0
- lionagi/protocols/graph/graph.py +290 -0
- lionagi/protocols/graph/node.py +109 -0
- lionagi/protocols/mail/exchange.py +116 -0
- lionagi/protocols/mail/mail.py +25 -0
- lionagi/protocols/mail/mailbox.py +47 -0
- lionagi/protocols/mail/manager.py +168 -0
- lionagi/protocols/mail/package.py +55 -0
- lionagi/protocols/messages/action_request.py +165 -0
- lionagi/protocols/messages/action_response.py +132 -0
- lionagi/{core/communication → protocols/messages}/assistant_response.py +55 -79
- lionagi/protocols/messages/base.py +73 -0
- lionagi/protocols/messages/instruction.py +582 -0
- lionagi/protocols/messages/manager.py +429 -0
- lionagi/protocols/messages/message.py +216 -0
- lionagi/protocols/messages/system.py +115 -0
- lionagi/protocols/messages/templates/assistant_response.jinja2 +6 -0
- lionagi/{core/communication → protocols/messages}/templates/instruction_message.jinja2 +2 -2
- lionagi/protocols/types.py +96 -0
- lionagi/service/__init__.py +10 -12
- lionagi/service/endpoints/base.py +517 -0
- lionagi/service/endpoints/chat_completion.py +102 -0
- lionagi/service/endpoints/match_endpoint.py +60 -0
- lionagi/service/endpoints/rate_limited_processor.py +145 -0
- lionagi/service/endpoints/token_calculator.py +209 -0
- lionagi/service/imodel.py +263 -96
- lionagi/service/manager.py +45 -0
- lionagi/service/providers/anthropic_/messages.py +64 -0
- lionagi/service/providers/groq_/chat_completions.py +56 -0
- lionagi/service/providers/openai_/chat_completions.py +62 -0
- lionagi/service/providers/openrouter_/chat_completions.py +62 -0
- lionagi/service/providers/perplexity_/__init__.py +3 -0
- lionagi/service/providers/perplexity_/chat_completions.py +40 -0
- lionagi/session/__init__.py +3 -0
- lionagi/session/branch.py +1287 -0
- lionagi/session/session.py +296 -0
- lionagi/settings.py +62 -118
- lionagi/utils.py +2386 -0
- lionagi/version.py +1 -1
- {lionagi-0.5.5.dist-info → lionagi-0.6.0.dist-info}/METADATA +7 -6
- lionagi-0.6.0.dist-info/RECORD +160 -0
- lionagi/core/action/action_manager.py +0 -289
- lionagi/core/action/base.py +0 -109
- lionagi/core/action/function_calling.py +0 -153
- lionagi/core/action/tool.py +0 -202
- lionagi/core/action/types.py +0 -16
- lionagi/core/communication/action_request.py +0 -163
- lionagi/core/communication/action_response.py +0 -149
- lionagi/core/communication/base_mail.py +0 -49
- lionagi/core/communication/instruction.py +0 -376
- lionagi/core/communication/message.py +0 -286
- lionagi/core/communication/message_manager.py +0 -543
- lionagi/core/communication/system.py +0 -116
- lionagi/core/communication/templates/assistant_response.jinja2 +0 -2
- lionagi/core/communication/types.py +0 -27
- lionagi/core/communication/utils.py +0 -256
- lionagi/core/forms/types.py +0 -13
- lionagi/core/generic/component.py +0 -422
- lionagi/core/generic/edge.py +0 -163
- lionagi/core/generic/element.py +0 -199
- lionagi/core/generic/graph.py +0 -377
- lionagi/core/generic/log.py +0 -151
- lionagi/core/generic/log_manager.py +0 -320
- lionagi/core/generic/node.py +0 -11
- lionagi/core/generic/progression.py +0 -395
- lionagi/core/generic/types.py +0 -23
- lionagi/core/generic/utils.py +0 -53
- lionagi/core/models/base.py +0 -28
- lionagi/core/models/field_model.py +0 -145
- lionagi/core/models/model_params.py +0 -194
- lionagi/core/models/types.py +0 -19
- lionagi/core/session/branch.py +0 -130
- lionagi/core/session/branch_mixins.py +0 -581
- lionagi/core/session/session.py +0 -163
- lionagi/core/session/types.py +0 -8
- lionagi/core/typing/__init__.py +0 -9
- lionagi/core/typing/_concepts.py +0 -173
- lionagi/core/typing/_id.py +0 -104
- lionagi/core/typing/_pydantic.py +0 -33
- lionagi/core/typing/_typing.py +0 -54
- lionagi/integrations/__init__.py +0 -0
- lionagi/integrations/_services.py +0 -17
- lionagi/integrations/anthropic_/AnthropicModel.py +0 -268
- lionagi/integrations/anthropic_/AnthropicService.py +0 -127
- lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +0 -12
- lionagi/integrations/anthropic_/anthropic_price_data.yaml +0 -34
- lionagi/integrations/anthropic_/api_endpoints/api_request.py +0 -277
- lionagi/integrations/anthropic_/api_endpoints/data_models.py +0 -40
- lionagi/integrations/anthropic_/api_endpoints/match_response.py +0 -119
- lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +0 -14
- lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +0 -74
- lionagi/integrations/anthropic_/api_endpoints/messages/response/__init__.py +0 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +0 -32
- lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +0 -101
- lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +0 -25
- lionagi/integrations/groq_/GroqModel.py +0 -325
- lionagi/integrations/groq_/GroqService.py +0 -156
- lionagi/integrations/groq_/api_endpoints/__init__.py +0 -0
- lionagi/integrations/groq_/api_endpoints/data_models.py +0 -187
- lionagi/integrations/groq_/api_endpoints/groq_request.py +0 -288
- lionagi/integrations/groq_/api_endpoints/match_response.py +0 -106
- lionagi/integrations/groq_/api_endpoints/response_utils.py +0 -105
- lionagi/integrations/groq_/groq_max_output_token_data.yaml +0 -21
- lionagi/integrations/groq_/groq_price_data.yaml +0 -58
- lionagi/integrations/groq_/groq_rate_limits.yaml +0 -105
- lionagi/integrations/groq_/version.py +0 -5
- lionagi/integrations/litellm_/imodel.py +0 -76
- lionagi/integrations/ollama_/OllamaModel.py +0 -244
- lionagi/integrations/ollama_/OllamaService.py +0 -142
- lionagi/integrations/ollama_/api_endpoints/api_request.py +0 -179
- lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +0 -31
- lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +0 -46
- lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +0 -67
- lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +0 -49
- lionagi/integrations/ollama_/api_endpoints/completion/__init__.py +0 -0
- lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +0 -72
- lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +0 -59
- lionagi/integrations/ollama_/api_endpoints/data_models.py +0 -15
- lionagi/integrations/ollama_/api_endpoints/embedding/__init__.py +0 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +0 -33
- lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +0 -29
- lionagi/integrations/ollama_/api_endpoints/match_data_model.py +0 -62
- lionagi/integrations/ollama_/api_endpoints/match_response.py +0 -190
- lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +0 -13
- lionagi/integrations/ollama_/api_endpoints/model/create_model.py +0 -28
- lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +0 -11
- lionagi/integrations/ollama_/api_endpoints/model/list_model.py +0 -60
- lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +0 -34
- lionagi/integrations/ollama_/api_endpoints/model/push_model.py +0 -35
- lionagi/integrations/ollama_/api_endpoints/model/show_model.py +0 -36
- lionagi/integrations/ollama_/api_endpoints/option_models.py +0 -68
- lionagi/integrations/openai_/OpenAIModel.py +0 -419
- lionagi/integrations/openai_/OpenAIService.py +0 -435
- lionagi/integrations/openai_/__init__.py +0 -0
- lionagi/integrations/openai_/api_endpoints/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/api_request.py +0 -277
- lionagi/integrations/openai_/api_endpoints/audio/__init__.py +0 -9
- lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +0 -34
- lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +0 -136
- lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +0 -41
- lionagi/integrations/openai_/api_endpoints/audio/types.py +0 -41
- lionagi/integrations/openai_/api_endpoints/batch/__init__.py +0 -17
- lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +0 -146
- lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +0 -7
- lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +0 -26
- lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +0 -37
- lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +0 -65
- lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +0 -7
- lionagi/integrations/openai_/api_endpoints/batch/types.py +0 -4
- lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +0 -1
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +0 -39
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +0 -121
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +0 -221
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +0 -71
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +0 -14
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +0 -17
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +0 -54
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +0 -18
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/__init__.py +0 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +0 -62
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +0 -16
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +0 -47
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +0 -25
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +0 -99
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +0 -8
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +0 -24
- lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +0 -46
- lionagi/integrations/openai_/api_endpoints/data_models.py +0 -23
- lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +0 -79
- lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +0 -67
- lionagi/integrations/openai_/api_endpoints/files/__init__.py +0 -11
- lionagi/integrations/openai_/api_endpoints/files/delete_file.py +0 -20
- lionagi/integrations/openai_/api_endpoints/files/file_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/files/list_files.py +0 -27
- lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +0 -9
- lionagi/integrations/openai_/api_endpoints/files/upload_file.py +0 -38
- lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +0 -37
- lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +0 -9
- lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +0 -133
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +0 -58
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +0 -31
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +0 -140
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +0 -51
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +0 -42
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +0 -31
- lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +0 -9
- lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +0 -30
- lionagi/integrations/openai_/api_endpoints/images/__init__.py +0 -9
- lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +0 -69
- lionagi/integrations/openai_/api_endpoints/images/image_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/images/response_body.py +0 -30
- lionagi/integrations/openai_/api_endpoints/match_data_model.py +0 -197
- lionagi/integrations/openai_/api_endpoints/match_response.py +0 -336
- lionagi/integrations/openai_/api_endpoints/models/__init__.py +0 -7
- lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +0 -17
- lionagi/integrations/openai_/api_endpoints/models/models_models.py +0 -31
- lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +0 -9
- lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +0 -20
- lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +0 -139
- lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +0 -19
- lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +0 -11
- lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +0 -7
- lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +0 -18
- lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +0 -17
- lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +0 -52
- lionagi/integrations/openai_/image_token_calculator/__init__.py +0 -0
- lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +0 -98
- lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +0 -15
- lionagi/integrations/openai_/openai_max_output_token_data.yaml +0 -12
- lionagi/integrations/openai_/openai_price_data.yaml +0 -26
- lionagi/integrations/openai_/version.py +0 -1
- lionagi/integrations/pandas_/__init__.py +0 -24
- lionagi/integrations/pandas_/extend_df.py +0 -61
- lionagi/integrations/pandas_/read.py +0 -103
- lionagi/integrations/pandas_/remove_rows.py +0 -61
- lionagi/integrations/pandas_/replace_keywords.py +0 -65
- lionagi/integrations/pandas_/save.py +0 -131
- lionagi/integrations/pandas_/search_keywords.py +0 -69
- lionagi/integrations/pandas_/to_df.py +0 -196
- lionagi/integrations/pandas_/update_cells.py +0 -54
- lionagi/integrations/perplexity_/PerplexityModel.py +0 -274
- lionagi/integrations/perplexity_/PerplexityService.py +0 -118
- lionagi/integrations/perplexity_/api_endpoints/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/api_request.py +0 -171
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +0 -121
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +0 -146
- lionagi/integrations/perplexity_/api_endpoints/data_models.py +0 -63
- lionagi/integrations/perplexity_/api_endpoints/match_response.py +0 -26
- lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +0 -3
- lionagi/integrations/perplexity_/perplexity_price_data.yaml +0 -10
- lionagi/integrations/perplexity_/version.py +0 -1
- lionagi/integrations/pydantic_/__init__.py +0 -8
- lionagi/integrations/pydantic_/break_down_annotation.py +0 -81
- lionagi/integrations/pydantic_/new_model.py +0 -208
- lionagi/libs/constants.py +0 -98
- lionagi/libs/file/path.py +0 -301
- lionagi/libs/file/types.py +0 -22
- lionagi/libs/func/__init__.py +0 -0
- lionagi/libs/func/async_calls/__init__.py +0 -24
- lionagi/libs/func/async_calls/alcall.py +0 -210
- lionagi/libs/func/async_calls/bcall.py +0 -130
- lionagi/libs/func/async_calls/mcall.py +0 -134
- lionagi/libs/func/async_calls/pcall.py +0 -149
- lionagi/libs/func/async_calls/rcall.py +0 -217
- lionagi/libs/func/async_calls/tcall.py +0 -114
- lionagi/libs/func/async_calls/ucall.py +0 -85
- lionagi/libs/func/decorators.py +0 -277
- lionagi/libs/func/lcall.py +0 -57
- lionagi/libs/func/params.py +0 -64
- lionagi/libs/func/throttle.py +0 -119
- lionagi/libs/func/types.py +0 -39
- lionagi/libs/func/utils.py +0 -96
- lionagi/libs/package/types.py +0 -26
- lionagi/libs/parse/__init__.py +0 -1
- lionagi/libs/parse/flatten/__init__.py +0 -9
- lionagi/libs/parse/flatten/params.py +0 -52
- lionagi/libs/parse/json/__init__.py +0 -27
- lionagi/libs/parse/json/extract.py +0 -102
- lionagi/libs/parse/json/parse.py +0 -179
- lionagi/libs/parse/json/to_json.py +0 -71
- lionagi/libs/parse/nested/__init__.py +0 -33
- lionagi/libs/parse/nested/to_flat_list.py +0 -64
- lionagi/libs/parse/params.py +0 -0
- lionagi/libs/parse/string_parse/__init__.py +0 -11
- lionagi/libs/parse/type_convert/__init__.py +0 -19
- lionagi/libs/parse/type_convert/params.py +0 -145
- lionagi/libs/parse/type_convert/to_dict.py +0 -333
- lionagi/libs/parse/type_convert/to_list.py +0 -186
- lionagi/libs/parse/type_convert/to_num.py +0 -358
- lionagi/libs/parse/type_convert/to_str.py +0 -195
- lionagi/libs/parse/types.py +0 -9
- lionagi/libs/parse/validate/__init__.py +0 -14
- lionagi/libs/parse/validate/params.py +0 -62
- lionagi/libs/parse/xml/__init__.py +0 -10
- lionagi/libs/parse/xml/convert.py +0 -56
- lionagi/libs/parse/xml/parser.py +0 -93
- lionagi/libs/string_similarity/__init__.py +0 -32
- lionagi/libs/string_similarity/matcher.py +0 -102
- lionagi/libs/string_similarity/utils.py +0 -15
- lionagi/libs/utils.py +0 -266
- lionagi/protocols/adapters/__init__.py +0 -0
- lionagi/protocols/adapters/adapter.py +0 -79
- lionagi/protocols/adapters/json_adapter.py +0 -43
- lionagi/protocols/adapters/pandas_adapter.py +0 -96
- lionagi/protocols/configs/__init__.py +0 -0
- lionagi/protocols/configs/branch_config.py +0 -86
- lionagi/protocols/configs/id_config.py +0 -15
- lionagi/protocols/configs/imodel_config.py +0 -73
- lionagi/protocols/configs/log_config.py +0 -93
- lionagi/protocols/configs/retry_config.py +0 -29
- lionagi/protocols/configs/types.py +0 -15
- lionagi/protocols/operatives/instruct.py +0 -194
- lionagi/protocols/operatives/types.py +0 -19
- lionagi/protocols/registries/_component_registry.py +0 -23
- lionagi/protocols/registries/_pile_registry.py +0 -30
- lionagi/service/complete_request_info.py +0 -11
- lionagi/service/rate_limiter.py +0 -108
- lionagi/service/service.py +0 -41
- lionagi/service/service_match_util.py +0 -131
- lionagi/service/service_util.py +0 -72
- lionagi/service/token_calculator.py +0 -51
- lionagi/strategies/__init__.py +0 -0
- lionagi/strategies/types.py +0 -21
- lionagi-0.5.5.dist-info/RECORD +0 -374
- /lionagi/{core → libs/nested}/__init__.py +0 -0
- /lionagi/{core/action → libs/schema}/__init__.py +0 -0
- /lionagi/{core/communication → libs/validate}/__init__.py +0 -0
- /lionagi/{core/forms → operations/strategies}/__init__.py +0 -0
- /lionagi/{core/generic → operatives}/__init__.py +0 -0
- /lionagi/{core/session → operatives/action}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_ → operatives/forms}/__init__.py +0 -0
- /lionagi/{core → operatives}/forms/utils.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints → operatives/instruct}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints/messages → operatives/models}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints/messages/request → protocols/generic}/__init__.py +0 -0
- /lionagi/{integrations/groq_ → protocols/graph}/__init__.py +0 -0
- /lionagi/{integrations/litellm_ → protocols/mail}/__init__.py +0 -0
- /lionagi/{integrations/ollama_ → protocols/messages}/__init__.py +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/README.md +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/action_request.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/action_response.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/system_message.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/tool_schemas.jinja2 +0 -0
- /lionagi/{integrations/ollama_/api_endpoints → service/endpoints}/__init__.py +0 -0
- /lionagi/{integrations/ollama_/api_endpoints/chat_completion → service/providers}/__init__.py +0 -0
- /lionagi/{integrations/ollama_/api_endpoints/model → service/providers/anthropic_}/__init__.py +0 -0
- /lionagi/{integrations/perplexity_ → service/providers/groq_}/__init__.py +0 -0
- /lionagi/{protocols/operatives → service/providers/openai_}/__init__.py +0 -0
- /lionagi/{protocols/registries → service/providers/openrouter_}/__init__.py +0 -0
- {lionagi-0.5.5.dist-info → lionagi-0.6.0.dist-info}/WHEEL +0 -0
- {lionagi-0.5.5.dist-info → lionagi-0.6.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,71 +0,0 @@
-import warnings
-from typing import Any, Dict, Literal, Optional
-
-from pydantic import BaseModel, Field, model_validator
-
-# Suppress the specific warning about field name shadowing
-warnings.filterwarnings(
-    "ignore",
-    message='Field name "schema" in "JSONSchema" '
-    'shadows an attribute in parent "BaseModel"',
-)
-
-
-class JSONSchema(BaseModel):
-    description: str | None = Field(
-        None,
-        description=(
-            "A description of what the response format is for, used by the "
-            "model to determine how to respond in the format."
-        ),
-    )
-
-    name: str = Field(
-        max_length=64,
-        pattern="^[a-zA-Z0-9_-]+$",
-        description=(
-            "The name of the response format. Must be a-z, A-Z, 0-9, or "
-            "contain underscores and dashes, with a maximum length of 64."
-        ),
-    )
-
-    schema: dict[str, Any] | None = Field(
-        None,
-        description="The schema for the response "
-        "format, described as a JSON Schema object.",
-    )
-
-    strict: bool | None = Field(
-        False,
-        description=(
-            "Whether to enable strict schema adherence when generating the "
-            "output. If set to true, the model will always follow the exact "
-            "schema defined in the `schema` field. Only a subset of JSON "
-            "Schema is supported when `strict` is `true`."
-        ),
-    )
-
-
-class ResponseFormat(BaseModel):
-    type: Literal["text", "json_object", "json_schema"] = Field(
-        description="The type of response format being defined."
-    )
-    json_schema: JSONSchema | None = Field(
-        None,
-        description=(
-            "The JSON schema to use when type is 'json_schema'. Required "
-            "when type is 'json_schema'."
-        ),
-    )
-
-    @model_validator(mode="after")
-    def validate_response_format(self) -> "ResponseFormat":
-        if self.type == "json_schema" and not self.json_schema:
-            raise ValueError(
-                "json_schema is required when type is 'json_schema'"
-            )
-        if self.type != "json_schema" and self.json_schema:
-            raise ValueError(
-                "json_schema should only be set when type is 'json_schema'"
-            )
-        return self
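For context, the deleted module above paired a type literal with an optional JSONSchema payload and enforced their consistency in validate_response_format. A minimal sketch of that behavior, assuming pydantic v2 and the class definitions from the removed file above; the field values are hypothetical:

from pydantic import ValidationError

# Hypothetical values; exercises the deleted JSONSchema / ResponseFormat models above.
fmt = ResponseFormat(
    type="json_schema",
    json_schema=JSONSchema(name="step_output", schema={"type": "object"}),
)

try:
    ResponseFormat(type="json_schema")  # json_schema missing -> rejected
except ValidationError as exc:
    print(exc)  # "json_schema is required when type is 'json_schema'"

try:
    ResponseFormat(type="text", json_schema=fmt.json_schema)  # mismatched -> rejected
except ValidationError as exc:
    print(exc)  # "json_schema should only be set when type is 'json_schema'"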
@@ -1,14 +0,0 @@
-from pydantic import BaseModel, Field
-
-
-class StreamOptions(BaseModel):
-    include_usage: bool | None = Field(
-        None,
-        description=(
-            "If set, an additional chunk will be streamed before the "
-            "`data: [DONE]` message. The `usage` field on this chunk shows "
-            "the token usage statistics for the entire request, and the "
-            "`choices` field will always be an empty array. All other chunks "
-            "will also include a `usage` field, but with a null value."
-        ),
-    )
@@ -1,17 +0,0 @@
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-
-class Function(BaseModel):
-    name: str = Field(description="The name of the function to call.")
-
-
-class ToolChoice(BaseModel):
-    type: Literal["function"] = Field(
-        description="The type of the tool. Currently,"
-        " only function is supported."
-    )
-    function: Function = Field(
-        description="Specifies the function to be called."
-    )
@@ -1,54 +0,0 @@
-from typing import Any, Dict, List, Literal, Optional
-
-from pydantic import BaseModel, Field
-
-
-class FunctionParameters(BaseModel):
-    type: Literal["object"] = "object"
-    properties: dict[str, dict[str, Any]]
-    required: list[str] | None = None
-
-
-class Function(BaseModel):
-    description: str | None = Field(
-        None,
-        description=(
-            "A description of what the function does, used by the model to "
-            "choose when and how to call the function."
-        ),
-    )
-
-    name: str = Field(
-        max_length=64,
-        pattern="^[a-zA-Z0-9_-]+$",
-        description=(
-            "The name of the function to be called. Must be a-z, A-Z, 0-9, "
-            "or contain underscores and dashes, with a maximum length of 64."
-        ),
-    )
-
-    parameters: FunctionParameters | None = Field(
-        None,
-        description=(
-            "The parameters the functions accepts, described as a JSON Schema "
-            "object. See the guide for examples, and the JSON Schema "
-            "reference for documentation about the format."
-        ),
-    )
-
-    strict: bool | None = Field(
-        False,
-        description="Whether to enable strict schema"
-        " adherence when generating the function call. "
-        "If set to true, the model will follow the exact"
-        " schema defined in the parameters field. "
-        "Only a subset of JSON Schema is supported when strict is true.",
-    )
-
-
-class Tool(BaseModel):
-    type: Literal["function"] = Field(
-        description="The type of the tool. Currently,"
-        " only function is supported."
-    )
-    function: Function = Field(description="The function definition.")
@@ -1,18 +0,0 @@
-from enum import Enum
-
-
-class Detail(str, Enum):
-    AUTO = "auto"
-    LOW = "low"
-    HIGH = "high"
-
-
-class ServiceTier(str, Enum):
-    AUTO = "auto"
-    DEFAULT = "default"
-
-
-class ToolChoice(str, Enum):
-    NONE = "none"
-    AUTO = "auto"
-    REQUIRED = "required"
File without changes
@@ -1,62 +0,0 @@
-from pydantic import BaseModel, ConfigDict, Field
-
-from .log_prob_models import LogProbs
-from .message_models import Message
-from .types import FinishReason
-
-
-class Choice(BaseModel):
-    finish_reason: FinishReason = Field(
-        description=(
-            "The reason the model stopped generating tokens. This will be "
-            "stop if the model hit a natural stop point or a provided stop "
-            "sequence, length if the maximum number of tokens specified in "
-            "the request was reached, content_filter if content was omitted "
-            "due to a flag from our content filters, tool_calls if the model "
-            "called a tool, or function_call (deprecated) if the model "
-            "called a function."
-        )
-    )
-
-    index: int = Field(
-        description="The index of the choice in the list of choices."
-    )
-
-    message: Message = Field(
-        description="A chat completion message generated by the model."
-    )
-
-    logprobs: LogProbs | None = Field(
-        None, description="Log probability information for the choice."
-    )
-
-    model_config = ConfigDict(use_enum_values=True)
-
-
-class ChunkChoice(BaseModel):
-    delta: Message = Field(
-        description="A chat completion delta generated"
-        " by streamed model responses."
-    )
-
-    logprobs: LogProbs | None = Field(
-        description="Log probability information for the choice."
-    )
-
-    finish_reason: FinishReason | None = Field(
-        description=(
-            "The reason the model stopped generating tokens. This will be "
-            "stop if the model hit a natural stop point or a provided stop "
-            "sequence, length if the maximum number of tokens specified in "
-            "the request was reached, content_filter if content was omitted "
-            "due to a flag from our content filters, tool_calls if the model "
-            "called a tool, or function_call (deprecated) if the model "
-            "called a function."
-        )
-    )
-
-    index: int = Field(
-        description="The index of the choice in the list of choices."
-    )
-
-    model_config = ConfigDict(use_enum_values=True)
@@ -1,16 +0,0 @@
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-
-class Function(BaseModel):
-    name: str = Field(description="The name of the function to call.")
-    arguments: str = Field(
-        description="The arguments to pass to the function."
-    )
-
-
-class ToolCall(BaseModel):
-    id: str = Field(description="The ID of the tool call.")
-    type: Literal["function"] = Field(description="The type of the tool call.")
-    function: Function = Field(description="The function call details.")
@@ -1,47 +0,0 @@
-from typing import List, Optional
-
-from pydantic import BaseModel, Field
-
-
-class TokenLogProb(BaseModel):
-    token: str = Field(description="The token.")
-    logprob: float = Field(
-        description=(
-            "The log probability of this token, if it is within the top 20 "
-            "most likely tokens. Otherwise, the value -9999.0 is used to "
-            "signify that the token is very unlikely."
-        )
-    )
-    bytes: list[int] | None = Field(
-        None,
-        description=(
-            "A list of integers representing the UTF-8 bytes representation "
-            "of the token. Useful in instances where characters are "
-            "represented by multiple tokens and their byte representations "
-            "must be combined to generate the correct text representation. "
-            "Can be null if there is no bytes representation for the token."
-        ),
-    )
-
-
-class LogProbContent(TokenLogProb):
-    top_logprobs: list[TokenLogProb] = Field(
-        description=(
-            "List of the most likely tokens and their log probability, at "
-            "this token position. In rare cases, there may be fewer than the "
-            "number of requested top_logprobs returned."
-        )
-    )
-
-
-class LogProbs(BaseModel):
-    content: list[LogProbContent] | None = Field(
-        None,
-        description="A list of message content "
-        "tokens with log probability information.",
-    )
-    refusal: list[LogProbContent] | None = Field(
-        None,
-        description="A list of message refusal "
-        "tokens with log probability information.",
-    )
@@ -1,25 +0,0 @@
-from typing import List, Optional
-
-from pydantic import BaseModel, Field
-
-from .function_models import ToolCall
-
-
-class Message(BaseModel):
-    content: str | None = Field(
-        None, description="The contents of the message."
-    )
-
-    refusal: str | None = Field(
-        None, description="The refusal message generated by the model."
-    )
-
-    tool_calls: list[ToolCall] | None = Field(
-        None,
-        description="The tool calls generated by the model, "
-        "such as function calls.",
-    )
-
-    role: str | None = Field(
-        None, description="The role of the author of this message."
-    )
@@ -1,99 +0,0 @@
-from typing import List, Literal, Optional
-
-from pydantic import Field
-
-from ...data_models import OpenAIEndpointResponseBody
-from .choice_models import Choice, ChunkChoice
-from .usage_models import Usage
-
-
-class OpenAIChatCompletionResponseBody(OpenAIEndpointResponseBody):
-    id: str = Field(description="A unique identifier for the chat completion.")
-
-    choices: list[Choice] = Field(
-        description=(
-            "A list of chat completion choices. Can be more than one if n "
-            "is greater than 1."
-        )
-    )
-
-    created: int = Field(
-        description=(
-            "The Unix timestamp (in seconds) of when the chat completion "
-            "was created."
-        )
-    )
-
-    model: str = Field(description="The model used for the chat completion.")
-
-    service_tier: str | None = Field(
-        None,
-        description=(
-            "The service tier used for processing the request. This field is "
-            "only included if the service_tier parameter is specified in the "
-            "request."
-        ),
-    )
-
-    system_fingerprint: str = Field(
-        description=(
-            "This fingerprint represents the backend configuration that the "
-            "model runs with. Can be used in conjunction with the seed "
-            "request parameter to understand when backend changes have been "
-            "made that might impact determinism."
-        )
-    )
-
-    object: Literal["chat.completion"] = Field(
-        description="The object type, which is always chat.completion."
-    )
-
-    usage: Usage = Field(
-        description="Usage statistics for the completion request."
-    )
-
-
-class OpenAIChatCompletionChunkResponseBody(OpenAIEndpointResponseBody):
-    id: str = Field(description="A unique identifier for the chat completion.")
-
-    choices: list[ChunkChoice] = Field(
-        description=(
-            "A list of chat completion choices. Can be more than one if n "
-            "is greater than 1."
-        )
-    )
-
-    created: int = Field(
-        description=(
-            "The Unix timestamp (in seconds) of when the chat completion "
-            "was created."
-        )
-    )
-
-    model: str = Field(description="The model used for the chat completion.")
-
-    service_tier: str | None = Field(
-        None,
-        description=(
-            "The service tier used for processing the request. This field is "
-            "only included if the service_tier parameter is specified in the "
-            "request."
-        ),
-    )
-
-    system_fingerprint: str = Field(
-        description=(
-            "This fingerprint represents the backend configuration that the "
-            "model runs with. Can be used in conjunction with the seed "
-            "request parameter to understand when backend changes have been "
-            "made that might impact determinism."
-        )
-    )
-
-    object: Literal["chat.completion.chunk"] = Field(
-        description="The object type, which is always chat.completion."
-    )
-
-    usage: Usage | None = Field(
-        None, description="Usage statistics for the completion request."
-    )
@@ -1,24 +0,0 @@
-from pydantic import BaseModel, Field
-
-
-class CompletionTokensDetails(BaseModel):
-    reasoning_tokens: int = Field(
-        description="Tokens generated by the model for reasoning."
-    )
-
-
-class Usage(BaseModel):
-    completion_tokens: int = Field(
-        description="Number of tokens in the generated completion."
-    )
-
-    prompt_tokens: int = Field(description="Number of tokens in the prompt.")
-
-    total_tokens: int = Field(
-        description="Total number of tokens used in the"
-        " request (prompt + completion)"
-    )
-
-    completion_tokens_details: CompletionTokensDetails = Field(
-        description="Breakdown of tokens used in a completion."
-    )
@@ -1,46 +0,0 @@
-from .request.request_body import OpenAIChatCompletionRequestBody
-
-
-def get_text_messages(request_body: OpenAIChatCompletionRequestBody):
-    messages_list = request_body.model_dump(exclude_unset=True).get("messages")
-    parsed_str = "["
-
-    for msg in messages_list:
-        role = msg.get("role", "")
-        content = msg.get("content", "")
-
-        if isinstance(
-            content, list
-        ):  # Check if content is a list (second example)
-            content_str = []
-            for sub_content in content:
-                if sub_content.get("type") == "text":
-                    content_str.append(f'{sub_content.get("text", "")}')
-            content = " ".join(
-                content_str
-            )  # Combine all sub_content items into a single string
-        parsed_str += f"role: {role} content: {content} "
-
-    parsed_str = parsed_str.strip()  # Remove trailing space
-    parsed_str += "]"
-
-    return parsed_str
-
-
-def get_images(request_body: OpenAIChatCompletionRequestBody):
-    messages_list = request_body.model_dump(exclude_unset=True).get("messages")
-    image_urls = []
-
-    for msg in messages_list:
-        content = msg.get("content", "")
-
-        if isinstance(
-            content, list
-        ):  # Check if content is a list (second example)
-            for sub_content in content:
-                if sub_content.get("type") == "image_url":
-                    image_url = sub_content.get("image_url")
-                    url = image_url.get("url")
-                    detail = image_url.get("detail", "auto")
-                    image_urls.append((url, detail))
-    return image_urls
@@ -1,23 +0,0 @@
-from pydantic import BaseModel, ConfigDict
-
-
-class OpenAIEndpointRequestBody(BaseModel):
-    model_config = ConfigDict(
-        extra="forbid", use_enum_values=True, validate_assignment=True
-    )
-
-
-class OpenAIEndpointResponseBody(BaseModel):
-    model_config = ConfigDict(use_enum_values=True, validate_assignment=True)
-
-
-class OpenAIEndpointQueryParam(BaseModel):
-    model_config = ConfigDict(
-        extra="forbid", use_enum_values=True, validate_assignment=True
-    )
-
-
-class OpenAIEndpointPathParam(BaseModel):
-    model_config = ConfigDict(
-        extra="forbid", use_enum_values=True, validate_assignment=True
-    )
@@ -1,79 +0,0 @@
-from typing import List, Literal, Optional, Union
-
-from pydantic import ConfigDict, Field, model_validator
-
-from ..data_models import OpenAIEndpointRequestBody
-
-InputType = Union[str, list[str], list[int], list[list[int]]]
-EncodingFormat = Literal["float", "base64"]
-
-
-class OpenAIEmbeddingRequestBody(OpenAIEndpointRequestBody):
-    input: InputType = Field(
-        description=(
-            "Input text to embed, encoded as a string or array of tokens. "
-            "To embed multiple inputs in a single request, pass an array of "
-            "strings or array of token arrays. The input must not exceed "
-            "the max input tokens for the model (8192 tokens for "
-            "text-embedding-ada-002), cannot be an empty string, and any "
-            "array must be 2048 dimensions or less."
-        ),
-    )
-
-    model: str = Field(
-        description=(
-            "ID of the model to use. You can use the List models API to see "
-            "all of your available models, or see our Model overview for "
-            "descriptions of them."
-        ),
-    )
-
-    encoding_format: EncodingFormat | None = Field(
-        "float",
-        description=(
-            "The format to return the embeddings in. Can be either `float` "
-            "or `base64`."
-        ),
-    )
-
-    dimensions: int | None = Field(
-        None,
-        description=(
-            "The number of dimensions the resulting output embeddings "
-            "should have. Only supported in `text-embedding-3` and later "
-            "models."
-        ),
-    )
-
-    user: str | None = Field(
-        None,
-        description=(
-            "A unique identifier representing your end-user, which can help "
-            "OpenAI to monitor and detect abuse."
-        ),
-    )
-
-    @model_validator(mode="after")
-    def validate_input(self) -> "OpenAIEmbeddingRequestBody":
-        if isinstance(self.input, str) and self.input.strip() == "":
-            raise ValueError("Input cannot be an empty string.")
-        if isinstance(self.input, list):
-            if len(self.input) == 0:
-                raise ValueError("Input array cannot be empty.")
-            if isinstance(self.input[0], list) and len(self.input) > 2048:
-                raise ValueError(
-                    "Input array must be 2048 dimensions or less."
-                )
-        return self
-
-    model_config = ConfigDict(
-        json_schema_extra={
-            "example": {
-                "input": "The food was delicious and the waiter...",
-                "model": "text-embedding-ada-002",
-                "encoding_format": "float",
-                "dimensions": None,
-                "user": "user123",
-            }
-        }
-    )