lionagi 0.5.5__py3-none-any.whl → 0.6.0__py3-none-any.whl
- lionagi/__init__.py +16 -24
- lionagi/{core/_class_registry.py → _class_registry.py} +51 -10
- lionagi/_errors.py +35 -0
- lionagi/libs/__init__.py +3 -0
- lionagi/libs/compress/__init__.py +3 -0
- lionagi/libs/compress/models.py +6 -2
- lionagi/libs/compress/utils.py +4 -16
- lionagi/libs/file/__init__.py +3 -0
- lionagi/libs/file/chunk.py +4 -0
- lionagi/libs/file/file_ops.py +4 -0
- lionagi/libs/file/params.py +4 -41
- lionagi/libs/file/process.py +4 -0
- lionagi/libs/file/save.py +5 -1
- lionagi/libs/{parse/flatten → nested}/flatten.py +4 -0
- lionagi/libs/{parse/nested → nested}/nfilter.py +4 -0
- lionagi/libs/{parse/nested → nested}/nget.py +6 -1
- lionagi/libs/{parse/nested → nested}/ninsert.py +5 -1
- lionagi/libs/{parse/nested → nested}/nmerge.py +4 -0
- lionagi/libs/{parse/nested → nested}/npop.py +5 -2
- lionagi/libs/{parse/nested → nested}/nset.py +6 -1
- lionagi/libs/{parse/flatten → nested}/unflatten.py +4 -0
- lionagi/libs/{parse/nested → nested}/utils.py +5 -1
- lionagi/libs/package/__init__.py +3 -0
- lionagi/libs/package/imports.py +6 -2
- lionagi/libs/package/management.py +7 -3
- lionagi/libs/package/params.py +4 -0
- lionagi/libs/package/system.py +4 -0
- lionagi/libs/parse.py +30 -0
- lionagi/libs/{parse/json → schema}/as_readable.py +10 -4
- lionagi/libs/{parse/string_parse/code_block.py → schema/extract_code_block.py} +4 -0
- lionagi/libs/{parse/string_parse/docstring.py → schema/extract_docstring.py} +4 -0
- lionagi/libs/{parse/string_parse/function_.py → schema/function_to_schema.py} +21 -9
- lionagi/libs/{parse/json/schema.py → schema/json_schema.py} +5 -1
- lionagi/libs/validate/common_field_validators.py +170 -0
- lionagi/libs/{parse/validate/keys.py → validate/fuzzy_match_keys.py} +42 -8
- lionagi/libs/{parse/validate/mapping.py → validate/fuzzy_validate_mapping.py} +41 -6
- lionagi/libs/{string_similarity/algorithms.py → validate/string_similarity.py} +115 -1
- lionagi/libs/{parse/validate/boolean.py → validate/validate_boolean.py} +42 -3
- lionagi/operations/__init__.py +13 -3
- lionagi/operations/brainstorm/__init__.py +3 -3
- lionagi/operations/brainstorm/brainstorm.py +33 -19
- lionagi/operations/brainstorm/prompt.py +4 -0
- lionagi/operations/plan/__init__.py +4 -0
- lionagi/operations/plan/plan.py +16 -13
- lionagi/operations/plan/prompt.py +4 -0
- lionagi/operations/select/__init__.py +4 -0
- lionagi/operations/select/prompt.py +4 -0
- lionagi/operations/select/select.py +1 -1
- lionagi/operations/select/utils.py +4 -4
- lionagi/{strategies → operations/strategies}/base.py +6 -2
- lionagi/{strategies → operations/strategies}/concurrent.py +8 -5
- lionagi/{strategies → operations/strategies}/concurrent_chunk.py +6 -3
- lionagi/{strategies → operations/strategies}/concurrent_sequential_chunk.py +8 -4
- lionagi/{strategies → operations/strategies}/params.py +10 -6
- lionagi/{strategies → operations/strategies}/sequential.py +6 -2
- lionagi/{strategies → operations/strategies}/sequential_chunk.py +7 -3
- lionagi/{strategies → operations/strategies}/sequential_concurrent_chunk.py +9 -4
- lionagi/{strategies → operations/strategies}/utils.py +6 -3
- lionagi/{core/models/__init__.py → operations/types.py} +3 -1
- lionagi/operations/utils.py +6 -3
- lionagi/operatives/action/function_calling.py +136 -0
- lionagi/operatives/action/manager.py +239 -0
- lionagi/operatives/action/request_response_model.py +90 -0
- lionagi/operatives/action/tool.py +141 -0
- lionagi/{protocols/operatives/action.py → operatives/action/utils.py} +52 -90
- lionagi/{core → operatives}/forms/base.py +9 -4
- lionagi/{core → operatives}/forms/form.py +8 -13
- lionagi/{core → operatives}/forms/report.py +5 -3
- lionagi/operatives/instruct/base.py +79 -0
- lionagi/operatives/instruct/instruct.py +105 -0
- lionagi/operatives/instruct/instruct_collection.py +52 -0
- lionagi/operatives/instruct/node.py +13 -0
- lionagi/{protocols/operatives → operatives/instruct}/prompts.py +0 -34
- lionagi/{protocols/operatives → operatives/instruct}/reason.py +14 -7
- lionagi/{integrations/anthropic_/version.py → operatives/manager.py} +5 -1
- lionagi/operatives/models/field_model.py +194 -0
- lionagi/operatives/models/model_params.py +307 -0
- lionagi/{core → operatives}/models/note.py +20 -28
- lionagi/{core → operatives}/models/operable_model.py +153 -71
- lionagi/{core → operatives}/models/schema_model.py +4 -3
- lionagi/{protocols/operatives → operatives}/operative.py +10 -7
- lionagi/{protocols/operatives → operatives}/step.py +67 -26
- lionagi/operatives/types.py +69 -0
- lionagi/protocols/_adapter.py +224 -0
- lionagi/protocols/_concepts.py +94 -0
- lionagi/protocols/generic/element.py +460 -0
- lionagi/protocols/generic/event.py +177 -0
- lionagi/protocols/generic/log.py +237 -0
- lionagi/{core → protocols}/generic/pile.py +172 -131
- lionagi/protocols/generic/processor.py +316 -0
- lionagi/protocols/generic/progression.py +500 -0
- lionagi/protocols/graph/edge.py +166 -0
- lionagi/protocols/graph/graph.py +290 -0
- lionagi/protocols/graph/node.py +109 -0
- lionagi/protocols/mail/exchange.py +116 -0
- lionagi/protocols/mail/mail.py +25 -0
- lionagi/protocols/mail/mailbox.py +47 -0
- lionagi/protocols/mail/manager.py +168 -0
- lionagi/protocols/mail/package.py +55 -0
- lionagi/protocols/messages/action_request.py +165 -0
- lionagi/protocols/messages/action_response.py +132 -0
- lionagi/{core/communication → protocols/messages}/assistant_response.py +55 -79
- lionagi/protocols/messages/base.py +73 -0
- lionagi/protocols/messages/instruction.py +582 -0
- lionagi/protocols/messages/manager.py +429 -0
- lionagi/protocols/messages/message.py +216 -0
- lionagi/protocols/messages/system.py +115 -0
- lionagi/protocols/messages/templates/assistant_response.jinja2 +6 -0
- lionagi/{core/communication → protocols/messages}/templates/instruction_message.jinja2 +2 -2
- lionagi/protocols/types.py +96 -0
- lionagi/service/__init__.py +10 -12
- lionagi/service/endpoints/base.py +517 -0
- lionagi/service/endpoints/chat_completion.py +102 -0
- lionagi/service/endpoints/match_endpoint.py +60 -0
- lionagi/service/endpoints/rate_limited_processor.py +145 -0
- lionagi/service/endpoints/token_calculator.py +209 -0
- lionagi/service/imodel.py +263 -96
- lionagi/service/manager.py +45 -0
- lionagi/service/providers/anthropic_/messages.py +64 -0
- lionagi/service/providers/groq_/chat_completions.py +56 -0
- lionagi/service/providers/openai_/chat_completions.py +62 -0
- lionagi/service/providers/openrouter_/chat_completions.py +62 -0
- lionagi/service/providers/perplexity_/__init__.py +3 -0
- lionagi/service/providers/perplexity_/chat_completions.py +40 -0
- lionagi/session/__init__.py +3 -0
- lionagi/session/branch.py +1287 -0
- lionagi/session/session.py +296 -0
- lionagi/settings.py +62 -118
- lionagi/utils.py +2386 -0
- lionagi/version.py +1 -1
- {lionagi-0.5.5.dist-info → lionagi-0.6.0.dist-info}/METADATA +7 -6
- lionagi-0.6.0.dist-info/RECORD +160 -0
- lionagi/core/action/action_manager.py +0 -289
- lionagi/core/action/base.py +0 -109
- lionagi/core/action/function_calling.py +0 -153
- lionagi/core/action/tool.py +0 -202
- lionagi/core/action/types.py +0 -16
- lionagi/core/communication/action_request.py +0 -163
- lionagi/core/communication/action_response.py +0 -149
- lionagi/core/communication/base_mail.py +0 -49
- lionagi/core/communication/instruction.py +0 -376
- lionagi/core/communication/message.py +0 -286
- lionagi/core/communication/message_manager.py +0 -543
- lionagi/core/communication/system.py +0 -116
- lionagi/core/communication/templates/assistant_response.jinja2 +0 -2
- lionagi/core/communication/types.py +0 -27
- lionagi/core/communication/utils.py +0 -256
- lionagi/core/forms/types.py +0 -13
- lionagi/core/generic/component.py +0 -422
- lionagi/core/generic/edge.py +0 -163
- lionagi/core/generic/element.py +0 -199
- lionagi/core/generic/graph.py +0 -377
- lionagi/core/generic/log.py +0 -151
- lionagi/core/generic/log_manager.py +0 -320
- lionagi/core/generic/node.py +0 -11
- lionagi/core/generic/progression.py +0 -395
- lionagi/core/generic/types.py +0 -23
- lionagi/core/generic/utils.py +0 -53
- lionagi/core/models/base.py +0 -28
- lionagi/core/models/field_model.py +0 -145
- lionagi/core/models/model_params.py +0 -194
- lionagi/core/models/types.py +0 -19
- lionagi/core/session/branch.py +0 -130
- lionagi/core/session/branch_mixins.py +0 -581
- lionagi/core/session/session.py +0 -163
- lionagi/core/session/types.py +0 -8
- lionagi/core/typing/__init__.py +0 -9
- lionagi/core/typing/_concepts.py +0 -173
- lionagi/core/typing/_id.py +0 -104
- lionagi/core/typing/_pydantic.py +0 -33
- lionagi/core/typing/_typing.py +0 -54
- lionagi/integrations/__init__.py +0 -0
- lionagi/integrations/_services.py +0 -17
- lionagi/integrations/anthropic_/AnthropicModel.py +0 -268
- lionagi/integrations/anthropic_/AnthropicService.py +0 -127
- lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +0 -12
- lionagi/integrations/anthropic_/anthropic_price_data.yaml +0 -34
- lionagi/integrations/anthropic_/api_endpoints/api_request.py +0 -277
- lionagi/integrations/anthropic_/api_endpoints/data_models.py +0 -40
- lionagi/integrations/anthropic_/api_endpoints/match_response.py +0 -119
- lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +0 -14
- lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +0 -74
- lionagi/integrations/anthropic_/api_endpoints/messages/response/__init__.py +0 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +0 -32
- lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +0 -101
- lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +0 -25
- lionagi/integrations/groq_/GroqModel.py +0 -325
- lionagi/integrations/groq_/GroqService.py +0 -156
- lionagi/integrations/groq_/api_endpoints/__init__.py +0 -0
- lionagi/integrations/groq_/api_endpoints/data_models.py +0 -187
- lionagi/integrations/groq_/api_endpoints/groq_request.py +0 -288
- lionagi/integrations/groq_/api_endpoints/match_response.py +0 -106
- lionagi/integrations/groq_/api_endpoints/response_utils.py +0 -105
- lionagi/integrations/groq_/groq_max_output_token_data.yaml +0 -21
- lionagi/integrations/groq_/groq_price_data.yaml +0 -58
- lionagi/integrations/groq_/groq_rate_limits.yaml +0 -105
- lionagi/integrations/groq_/version.py +0 -5
- lionagi/integrations/litellm_/imodel.py +0 -76
- lionagi/integrations/ollama_/OllamaModel.py +0 -244
- lionagi/integrations/ollama_/OllamaService.py +0 -142
- lionagi/integrations/ollama_/api_endpoints/api_request.py +0 -179
- lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +0 -31
- lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +0 -46
- lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +0 -67
- lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +0 -49
- lionagi/integrations/ollama_/api_endpoints/completion/__init__.py +0 -0
- lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +0 -72
- lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +0 -59
- lionagi/integrations/ollama_/api_endpoints/data_models.py +0 -15
- lionagi/integrations/ollama_/api_endpoints/embedding/__init__.py +0 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +0 -33
- lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +0 -29
- lionagi/integrations/ollama_/api_endpoints/match_data_model.py +0 -62
- lionagi/integrations/ollama_/api_endpoints/match_response.py +0 -190
- lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +0 -13
- lionagi/integrations/ollama_/api_endpoints/model/create_model.py +0 -28
- lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +0 -11
- lionagi/integrations/ollama_/api_endpoints/model/list_model.py +0 -60
- lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +0 -34
- lionagi/integrations/ollama_/api_endpoints/model/push_model.py +0 -35
- lionagi/integrations/ollama_/api_endpoints/model/show_model.py +0 -36
- lionagi/integrations/ollama_/api_endpoints/option_models.py +0 -68
- lionagi/integrations/openai_/OpenAIModel.py +0 -419
- lionagi/integrations/openai_/OpenAIService.py +0 -435
- lionagi/integrations/openai_/__init__.py +0 -0
- lionagi/integrations/openai_/api_endpoints/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/api_request.py +0 -277
- lionagi/integrations/openai_/api_endpoints/audio/__init__.py +0 -9
- lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +0 -34
- lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +0 -136
- lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +0 -41
- lionagi/integrations/openai_/api_endpoints/audio/types.py +0 -41
- lionagi/integrations/openai_/api_endpoints/batch/__init__.py +0 -17
- lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +0 -146
- lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +0 -7
- lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +0 -26
- lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +0 -37
- lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +0 -65
- lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +0 -7
- lionagi/integrations/openai_/api_endpoints/batch/types.py +0 -4
- lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +0 -1
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +0 -39
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +0 -121
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +0 -221
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +0 -71
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +0 -14
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +0 -17
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +0 -54
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +0 -18
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/__init__.py +0 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +0 -62
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +0 -16
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +0 -47
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +0 -25
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +0 -99
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +0 -8
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +0 -24
- lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +0 -46
- lionagi/integrations/openai_/api_endpoints/data_models.py +0 -23
- lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +0 -79
- lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +0 -67
- lionagi/integrations/openai_/api_endpoints/files/__init__.py +0 -11
- lionagi/integrations/openai_/api_endpoints/files/delete_file.py +0 -20
- lionagi/integrations/openai_/api_endpoints/files/file_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/files/list_files.py +0 -27
- lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +0 -9
- lionagi/integrations/openai_/api_endpoints/files/upload_file.py +0 -38
- lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +0 -37
- lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +0 -9
- lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +0 -133
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +0 -58
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +0 -31
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +0 -140
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +0 -51
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +0 -42
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +0 -31
- lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +0 -9
- lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +0 -30
- lionagi/integrations/openai_/api_endpoints/images/__init__.py +0 -9
- lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +0 -69
- lionagi/integrations/openai_/api_endpoints/images/image_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/images/response_body.py +0 -30
- lionagi/integrations/openai_/api_endpoints/match_data_model.py +0 -197
- lionagi/integrations/openai_/api_endpoints/match_response.py +0 -336
- lionagi/integrations/openai_/api_endpoints/models/__init__.py +0 -7
- lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +0 -17
- lionagi/integrations/openai_/api_endpoints/models/models_models.py +0 -31
- lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +0 -9
- lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +0 -20
- lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +0 -139
- lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +0 -19
- lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +0 -11
- lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +0 -7
- lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +0 -18
- lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +0 -17
- lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +0 -52
- lionagi/integrations/openai_/image_token_calculator/__init__.py +0 -0
- lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +0 -98
- lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +0 -15
- lionagi/integrations/openai_/openai_max_output_token_data.yaml +0 -12
- lionagi/integrations/openai_/openai_price_data.yaml +0 -26
- lionagi/integrations/openai_/version.py +0 -1
- lionagi/integrations/pandas_/__init__.py +0 -24
- lionagi/integrations/pandas_/extend_df.py +0 -61
- lionagi/integrations/pandas_/read.py +0 -103
- lionagi/integrations/pandas_/remove_rows.py +0 -61
- lionagi/integrations/pandas_/replace_keywords.py +0 -65
- lionagi/integrations/pandas_/save.py +0 -131
- lionagi/integrations/pandas_/search_keywords.py +0 -69
- lionagi/integrations/pandas_/to_df.py +0 -196
- lionagi/integrations/pandas_/update_cells.py +0 -54
- lionagi/integrations/perplexity_/PerplexityModel.py +0 -274
- lionagi/integrations/perplexity_/PerplexityService.py +0 -118
- lionagi/integrations/perplexity_/api_endpoints/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/api_request.py +0 -171
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +0 -121
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +0 -146
- lionagi/integrations/perplexity_/api_endpoints/data_models.py +0 -63
- lionagi/integrations/perplexity_/api_endpoints/match_response.py +0 -26
- lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +0 -3
- lionagi/integrations/perplexity_/perplexity_price_data.yaml +0 -10
- lionagi/integrations/perplexity_/version.py +0 -1
- lionagi/integrations/pydantic_/__init__.py +0 -8
- lionagi/integrations/pydantic_/break_down_annotation.py +0 -81
- lionagi/integrations/pydantic_/new_model.py +0 -208
- lionagi/libs/constants.py +0 -98
- lionagi/libs/file/path.py +0 -301
- lionagi/libs/file/types.py +0 -22
- lionagi/libs/func/__init__.py +0 -0
- lionagi/libs/func/async_calls/__init__.py +0 -24
- lionagi/libs/func/async_calls/alcall.py +0 -210
- lionagi/libs/func/async_calls/bcall.py +0 -130
- lionagi/libs/func/async_calls/mcall.py +0 -134
- lionagi/libs/func/async_calls/pcall.py +0 -149
- lionagi/libs/func/async_calls/rcall.py +0 -217
- lionagi/libs/func/async_calls/tcall.py +0 -114
- lionagi/libs/func/async_calls/ucall.py +0 -85
- lionagi/libs/func/decorators.py +0 -277
- lionagi/libs/func/lcall.py +0 -57
- lionagi/libs/func/params.py +0 -64
- lionagi/libs/func/throttle.py +0 -119
- lionagi/libs/func/types.py +0 -39
- lionagi/libs/func/utils.py +0 -96
- lionagi/libs/package/types.py +0 -26
- lionagi/libs/parse/__init__.py +0 -1
- lionagi/libs/parse/flatten/__init__.py +0 -9
- lionagi/libs/parse/flatten/params.py +0 -52
- lionagi/libs/parse/json/__init__.py +0 -27
- lionagi/libs/parse/json/extract.py +0 -102
- lionagi/libs/parse/json/parse.py +0 -179
- lionagi/libs/parse/json/to_json.py +0 -71
- lionagi/libs/parse/nested/__init__.py +0 -33
- lionagi/libs/parse/nested/to_flat_list.py +0 -64
- lionagi/libs/parse/params.py +0 -0
- lionagi/libs/parse/string_parse/__init__.py +0 -11
- lionagi/libs/parse/type_convert/__init__.py +0 -19
- lionagi/libs/parse/type_convert/params.py +0 -145
- lionagi/libs/parse/type_convert/to_dict.py +0 -333
- lionagi/libs/parse/type_convert/to_list.py +0 -186
- lionagi/libs/parse/type_convert/to_num.py +0 -358
- lionagi/libs/parse/type_convert/to_str.py +0 -195
- lionagi/libs/parse/types.py +0 -9
- lionagi/libs/parse/validate/__init__.py +0 -14
- lionagi/libs/parse/validate/params.py +0 -62
- lionagi/libs/parse/xml/__init__.py +0 -10
- lionagi/libs/parse/xml/convert.py +0 -56
- lionagi/libs/parse/xml/parser.py +0 -93
- lionagi/libs/string_similarity/__init__.py +0 -32
- lionagi/libs/string_similarity/matcher.py +0 -102
- lionagi/libs/string_similarity/utils.py +0 -15
- lionagi/libs/utils.py +0 -266
- lionagi/protocols/adapters/__init__.py +0 -0
- lionagi/protocols/adapters/adapter.py +0 -79
- lionagi/protocols/adapters/json_adapter.py +0 -43
- lionagi/protocols/adapters/pandas_adapter.py +0 -96
- lionagi/protocols/configs/__init__.py +0 -0
- lionagi/protocols/configs/branch_config.py +0 -86
- lionagi/protocols/configs/id_config.py +0 -15
- lionagi/protocols/configs/imodel_config.py +0 -73
- lionagi/protocols/configs/log_config.py +0 -93
- lionagi/protocols/configs/retry_config.py +0 -29
- lionagi/protocols/configs/types.py +0 -15
- lionagi/protocols/operatives/instruct.py +0 -194
- lionagi/protocols/operatives/types.py +0 -19
- lionagi/protocols/registries/_component_registry.py +0 -23
- lionagi/protocols/registries/_pile_registry.py +0 -30
- lionagi/service/complete_request_info.py +0 -11
- lionagi/service/rate_limiter.py +0 -108
- lionagi/service/service.py +0 -41
- lionagi/service/service_match_util.py +0 -131
- lionagi/service/service_util.py +0 -72
- lionagi/service/token_calculator.py +0 -51
- lionagi/strategies/__init__.py +0 -0
- lionagi/strategies/types.py +0 -21
- lionagi-0.5.5.dist-info/RECORD +0 -374
- /lionagi/{core → libs/nested}/__init__.py +0 -0
- /lionagi/{core/action → libs/schema}/__init__.py +0 -0
- /lionagi/{core/communication → libs/validate}/__init__.py +0 -0
- /lionagi/{core/forms → operations/strategies}/__init__.py +0 -0
- /lionagi/{core/generic → operatives}/__init__.py +0 -0
- /lionagi/{core/session → operatives/action}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_ → operatives/forms}/__init__.py +0 -0
- /lionagi/{core → operatives}/forms/utils.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints → operatives/instruct}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints/messages → operatives/models}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints/messages/request → protocols/generic}/__init__.py +0 -0
- /lionagi/{integrations/groq_ → protocols/graph}/__init__.py +0 -0
- /lionagi/{integrations/litellm_ → protocols/mail}/__init__.py +0 -0
- /lionagi/{integrations/ollama_ → protocols/messages}/__init__.py +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/README.md +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/action_request.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/action_response.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/system_message.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/tool_schemas.jinja2 +0 -0
- /lionagi/{integrations/ollama_/api_endpoints → service/endpoints}/__init__.py +0 -0
- /lionagi/{integrations/ollama_/api_endpoints/chat_completion → service/providers}/__init__.py +0 -0
- /lionagi/{integrations/ollama_/api_endpoints/model → service/providers/anthropic_}/__init__.py +0 -0
- /lionagi/{integrations/perplexity_ → service/providers/groq_}/__init__.py +0 -0
- /lionagi/{protocols/operatives → service/providers/openai_}/__init__.py +0 -0
- /lionagi/{protocols/registries → service/providers/openrouter_}/__init__.py +0 -0
- {lionagi-0.5.5.dist-info → lionagi-0.6.0.dist-info}/WHEEL +0 -0
- {lionagi-0.5.5.dist-info → lionagi-0.6.0.dist-info}/licenses/LICENSE +0 -0
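Taken together, the file moves above amount to a top-level reorganization: the old lionagi.core modules are split across lionagi.protocols, lionagi.operatives, and lionagi.session, and the per-provider lionagi.integrations packages are replaced by endpoint definitions under lionagi.service.providers. A minimal sketch of what this implies for import paths follows; the module paths come from the listing above, but the imported names (Branch, Pile, iModel) are assumptions and may differ in the actual 0.6.0 release.

# Hypothetical import migration, lionagi 0.5.5 -> 0.6.0
# (class names are assumed, not confirmed by this diff)

# 0.5.5 layout (modules deleted in this release):
# from lionagi.core.session.branch import Branch
# from lionagi.core.generic.pile import Pile
# from lionagi.integrations.openai_.OpenAIService import OpenAIService

# 0.6.0 layout (modules added or moved in this release):
from lionagi.session.branch import Branch        # lionagi/session/branch.py is new (+1287)
from lionagi.protocols.generic.pile import Pile  # core/generic/pile.py -> protocols/generic/pile.py
from lionagi.service.imodel import iModel        # provider endpoints now live under lionagi/service/providers/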
@@ -1,146 +0,0 @@
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-from ..data_models import OpenAIEndpointResponseBody
-
-
-class Data(BaseModel):
-    code: str = Field(description="An error code identifying the error type.")
-
-    message: str = Field(
-        description="A human-readable message providing more details"
-        " about the error."
-    )
-
-    param: str | None = Field(
-        None,
-        description="The name of the parameter that caused the "
-        "error, if applicable.",
-    )
-
-    line: int | None = Field(
-        None,
-        description="The line number of the input file where "
-        "the error occurred, if applicable.",
-    )
-
-
-class Error(BaseModel):
-    object: Literal["list"] = Field(
-        description="The object type, which is always 'list'."
-    )
-
-    data: Data = Field(description="A list of error data.")
-
-
-class RequestCounts(BaseModel):
-    total: int = Field(description="Total number of requests in the batch.")
-
-    completed: int = Field(
-        description="Number of requests that have been completed successfully."
-    )
-
-    failed: int = Field(description="Number of requests that have failed.")
-
-
-class OpenAIBatchResponseBody(OpenAIEndpointResponseBody):
-    id: str = Field(description="A unique identifier for the batch.")
-
-    object: Literal["batch"] = Field(
-        description="The object type, which is always 'batch'."
-    )
-
-    endpoint: str = Field(description="The API endpoint used for this batch.")
-
-    errors: Error | None = Field(
-        None, description="Errors encountered during batch processing."
-    )
-
-    input_file_id: str | None = Field(
-        None, description="The ID of the input file for the batch."
-    )
-
-    completion_window: str | None = Field(
-        None,
-        description="The time frame within which the batch "
-        "should be processed.",
-    )
-
-    status: str = Field(description="The current status of the batch.")
-
-    output_file_id: str | None = Field(
-        None,
-        description="The ID of the file containing the outputs of "
-        "successfully executed requests.",
-    )
-
-    error_file_id: str | None = Field(
-        None,
-        description="The ID of the file containing the outputs "
-        "of requests with errors.",
-    )
-
-    created_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for when "
-        "the batch was created.",
-    )
-
-    in_progress_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for when the"
-        " batch started processing.",
-    )
-
-    expires_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch will expire.",
-    )
-
-    finalizing_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch started finalizing.",
-    )
-
-    completed_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch was completed.",
-    )
-
-    failed_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch failed.",
-    )
-
-    expired_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch expired.",
-    )
-
-    cancelling_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch started cancelling.",
-    )
-
-    cancelled_at: int | None = Field(
-        None,
-        description="The Unix timestamp (in seconds) for"
-        " when the batch was cancelled.",
-    )
-
-    request_counts: RequestCounts = Field(
-        description="The request counts for different statuses"
-        " within the batch."
-    )
-
-    metadata: dict | None = Field(
-        description="Set of 16 key-value pairs that can be"
-        " attached to an object."
-    )
@@ -1,26 +0,0 @@
-from typing import Literal, Optional
-
-from pydantic import Field
-
-from ..data_models import OpenAIEndpointRequestBody
-from .types import Endpoint
-
-
-class OpenAIBatchRequestBody(OpenAIEndpointRequestBody):
-    input_file_id: str = Field(
-        description="The ID of an uploaded file that contains requests"
-        " for the new batch."
-    )
-
-    endpoint: Endpoint = Field(
-        description="The endpoint to be used for all requests in the batch."
-    )
-
-    completion_window: Literal["24h"] = Field(
-        description="The time frame within which the batch"
-        " should be processed."
-    )
-
-    metadata: dict | None = Field(
-        None, description="Optional custom metadata for the batch."
-    )
@@ -1,37 +0,0 @@
-from typing import List, Literal, Optional
-
-from pydantic import Field
-
-from ..data_models import OpenAIEndpointQueryParam, OpenAIEndpointResponseBody
-from .batch_models import OpenAIBatchResponseBody
-
-
-class OpenAIListBatchQueryParam(OpenAIEndpointQueryParam):
-    after: str | None = Field(
-        None, description="A cursor for use in pagination. "
-    )
-
-    limit: int | None = Field(
-        default=20,
-        description="A limit on the number of objects to be returned. "
-        "Limit can range between 1 and 100, and the default is 20.",
-    )
-
-
-class OpenAIListBatchResponseBody(OpenAIEndpointResponseBody):
-    data: list[OpenAIBatchResponseBody] = Field(
-        description="The list of batch objects."
-    )
-
-    object: Literal["list"] = Field(
-        description='The object type, which is always "list".'
-    )
-
-    first_id: str = Field(description="The first object id in the list")
-
-    last_id: str = Field(description="The last object id in the list")
-
-    has_more: bool = Field(
-        description="Whether there are more results "
-        "available after this batch."
-    )
@@ -1,65 +0,0 @@
-from typing import Literal
-
-from pydantic import BaseModel, Field, SerializeAsAny
-
-from ..data_models import OpenAIEndpointRequestBody
-from .types import Endpoint
-
-
-class Response(BaseModel):
-    status_code: int = Field(
-        description="The HTTP status code of the response."
-    )
-
-    request_id: str = Field(
-        description="An unique identifier for the OpenAI API request. "
-        "Please include this request ID when contacting support."
-    )
-
-    body: dict = Field(description="The JSON body of the response")
-
-
-class Error(BaseModel):
-    code: str = Field(description="A machine-readable error code.")
-
-    message: str = Field(description="A human-readable error message.")
-
-
-class OpenAIBatchRequestInputObject(BaseModel):
-    custom_id: str = Field(
-        description="A developer-provided per-request id "
-        "that will be used to match outputs to inputs. "
-        "Must be unique for each request in a batch."
-    )
-
-    method: Literal["POST"] = Field(
-        description="The HTTP method to be used for the "
-        "request. Currently only POST is supported."
-    )
-
-    url: Endpoint = Field(
-        description="The OpenAI API relative URL to be"
-        " used for the request."
-    )
-
-    body: SerializeAsAny[OpenAIEndpointRequestBody] = Field(
-        description="the parameters for the underlying endpoint."
-    )
-
-
-class OpenAIBatchRequestOutputObject(BaseModel):
-    id: str = Field(description="The output object id.")
-
-    custom_id: str = Field(
-        description="A developer-provided per-request id"
-        " that will be used to match outputs to inputs."
-    )
-
-    response: Response | None = Field(
-        description="The endpoint response body."
-    )
-
-    error: Error | None = Field(
-        description="For requests that failed with a non-HTTP error, "
-        "this will contain more information on the cause of the failure."
-    )
@@ -1 +0,0 @@
-from .request import * # noqa
@@ -1,39 +0,0 @@
-from .message_models import AssistantMessage
-from .message_models import Function as MessageFunction
-from .message_models import (
-    ImageContentPart,
-    ImageURL,
-    SystemMessage,
-    TextContentPart,
-    ToolCall,
-    ToolMessage,
-    UserMessage,
-)
-from .request_body import OpenAIChatCompletionRequestBody
-from .response_format import JSONSchema, ResponseFormat
-from .stream_options import StreamOptions
-from .tool_choice_models import Function as ToolChoiceFunction
-from .tool_choice_models import ToolChoice
-from .tool_models import Function as ToolFunction
-from .tool_models import FunctionParameters, Tool
-
-__all__ = [
-    "OpenAIChatCompletionRequestBody",
-    "SystemMessage",
-    "UserMessage",
-    "AssistantMessage",
-    "ToolMessage",
-    "TextContentPart",
-    "ImageContentPart",
-    "ImageURL",
-    "ToolCall",
-    "MessageFunction",
-    "ResponseFormat",
-    "JSONSchema",
-    "StreamOptions",
-    "Tool",
-    "ToolFunction",
-    "FunctionParameters",
-    "ToolChoice",
-    "ToolChoiceFunction",
-]
@@ -1,121 +0,0 @@
-from typing import List, Literal, Optional, TypeAlias
-
-from pydantic import BaseModel, ConfigDict, Field, model_validator
-
-from .types import Detail
-
-
-class TextContentPart(BaseModel):
-    type: Literal["text"] = Field(description="The type of the content part.")
-    text: str = Field(description="The text content.")
-
-
-class ImageURL(BaseModel):
-    url: str = Field(description="The URL of the image.")
-    detail: Detail = Field(
-        "auto", description="The detail level of the image."
-    )
-    model_config = ConfigDict(use_enum_values=True)
-
-
-class ImageContentPart(BaseModel):
-    type: Literal["image_url"] = Field(
-        description="The type of the content part."
-    )
-    image_url: ImageURL = Field(description="The image URL and detail level.")
-
-
-ContentPart: TypeAlias = TextContentPart | ImageContentPart
-
-
-class Function(BaseModel):
-    name: str = Field(description="The name of the function to call.")
-    arguments: str = Field(
-        description="The arguments to call the function with, as JSON."
-    )
-
-
-class ToolCall(BaseModel):
-    id: str = Field(description="The ID of the tool call.")
-    type: Literal["function"] = Field(
-        description="The type of the tool. Only 'function' is supported."
-    )
-    function: Function = Field(
-        description="The function that the model called."
-    )
-
-
-class SystemMessage(BaseModel):
-    role: Literal["system"]
-    content: str = Field(description="The content of the system message.")
-
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant."
-        " Provides the model information "
-        "to differentiate between participants of the same role.",
-    )
-
-
-class UserMessage(BaseModel):
-    role: Literal["user"]
-    content: str | list[ContentPart] = Field(
-        description="The content of the user message"
-    )
-
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant."
-        " Provides the model information "
-        "to differentiate between participants of the same role.",
-    )
-
-
-class AssistantMessage(BaseModel):
-    role: Literal["assistant"]
-    content: str | None = Field(
-        None,
-        description="The contents of the assistant message."
-        " Required unless tool_calls is specified.",
-    )
-
-    refusal: str | None = Field(
-        None, description="The refusal message by the assistant."
-    )
-
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant."
-        " Provides the model information "
-        "to differentiate between participants of the same role.",
-    )
-
-    tool_calls: list[ToolCall] | None = Field(
-        None,
-        description="The tool calls generated by the model,"
-        " such as function calls.",
-    )
-
-    @model_validator(mode="after")
-    def validate_content(self):
-        if not self.content and not self.tool_calls:
-            raise ValueError(
-                "Assistant messages require content or tool_calls"
-            )
-        return self
-
-
-class ToolMessage(BaseModel):
-    role: Literal["tool"]
-    content: str | list = Field(
-        description="The contents of the tool message."
-    )
-
-    tool_calls_id: str = Field(
-        description="Tool call that this message is responding to."
-    )
-
-
-Message: TypeAlias = (
-    SystemMessage | UserMessage | AssistantMessage | ToolMessage
-)
@@ -1,221 +0,0 @@
-from typing import Dict, List, Optional
-
-from pydantic import Field, SerializeAsAny, model_validator
-
-from ...data_models import OpenAIEndpointRequestBody
-from .message_models import Message
-from .response_format import ResponseFormat
-from .stream_options import StreamOptions
-from .tool_choice_models import ToolChoice as ToolChoiceObj
-from .tool_models import Tool
-from .types import ServiceTier
-from .types import ToolChoice as ToolChoiceStr
-
-
-class OpenAIChatCompletionRequestBody(OpenAIEndpointRequestBody):
-    messages: SerializeAsAny[list[Message]] = Field(
-        description="A list of messages comprising the conversation so far."
-    )
-
-    model: str = Field(description="ID of the model to use.")
-
-    frequency_penalty: float | None = Field(
-        0,
-        ge=-2.0,
-        le=2.0,
-        description=(
-            "Number between -2.0 and 2.0. Positive values penalize new tokens "
-            "based on their existing frequency in the text so far, decreasing "
-            "the model's likelihood to repeat the same line verbatim."
-        ),
-    )
-
-    logit_bias: dict[str, float] | None = Field(
-        None,
-        description=(
-            "Modify the likelihood of specified tokens appearing in the "
-            "completion. Accepts a JSON object that maps tokens (specified by "
-            "their token ID in the tokenizer) to an associated "
-            "bias value from -100 to 100."
-        ),
-    )
-
-    logprobs: bool | None = Field(
-        False,
-        description=(
-            "Whether to return log probabilities of the output tokens or not. "
-            "If true, returns the log probabilities of each output token "
-            "returned in the content of message."
-        ),
-    )
-
-    top_logprobs: int | None = Field(
-        None,
-        ge=0,
-        le=20,
-        description=(
-            "An integer between 0 and 20 specifying the number of most likely "
-            "tokens to return at each token position, each with an associated "
-            "log probability. logprobs must be true if this parameter "
-            "is used."
-        ),
-    )
-
-    max_completion_tokens: int | None = Field(
-        None,
-        description=(
-            "The maximum number of tokens that can be generated in the chat "
-            "completion. The total length of input tokens and generated "
-            "tokens is limited by the model's context length."
-        ),
-    )
-
-    n: int | None = Field(
-        1,
-        ge=1,
-        description=(
-            "How many chat completion choices to generate for each input "
-            "message. Note that you will be charged based on the number of "
-            "generated tokens across all of the choices. Keep n as 1 to "
-            "minimize costs."
-        ),
-    )
-
-    presence_penalty: float | None = Field(
-        0,
-        ge=-2.0,
-        le=2.0,
-        description=(
-            "Number between -2.0 and 2.0. Positive values penalize new tokens "
-            "based on whether they appear in the text so far, increasing the "
-            "model's likelihood to talk about new topics."
-        ),
-    )
-
-    response_format: ResponseFormat | None = Field(
-        None,
-        description=(
-            "An object specifying the format that the model must output. "
-            "Compatible with GPT-4o, GPT-4o mini, GPT-4 Turbo and all GPT-3.5 "
-            "Turbo models newer than gpt-3.5-turbo-1106. Setting to "
-            "{ 'type': 'json_schema', 'json_schema': {...} } enables "
-            "Structured Outputs. Setting to { 'type': 'json_object' } enables "
-            "JSON mode. Important: when using JSON mode, you must also "
-            "instruct the model to produce JSON via a system or user message."
-        ),
-    )
-
-    seed: int | None = Field(
-        None,
-        description=(
-            "This feature is in Beta. If specified, our system will make a "
-            "best effort to sample deterministically, such that repeated "
-            "requests with the same seed and parameters should return the "
-            "same result."
-        ),
-    )
-
-    service_tier: ServiceTier | None = Field(
-        None,
-        description=(
-            "Specifies the latency tier to use for processing the request. "
-            "This parameter is relevant for customers subscribed to the scale "
-            "tier service."
-        ),
-    )
-
-    stop: str | list[str] | None = Field(
-        None,
-        max_items=4,
-        description=(
-            "Up to 4 sequences where the API will stop generating further "
-            "tokens."
-        ),
-    )
-
-    stream: bool | None = Field(
-        False,
-        description=(
-            "If set, partial message deltas will be sent, like in ChatGPT. "
-            "Tokens will be sent as data-only server-sent events as they "
-            "become available, with the stream terminated by a data: [DONE] "
-            "message."
-        ),
-    )
-
-    stream_options: StreamOptions | None = Field(
-        None,
-        description=(
-            "Options for streaming response. Only set this when you set "
-            "stream: true."
-        ),
-    )
-
-    temperature: float | None = Field(
-        1.0,
-        ge=0,
-        le=2,
-        description=(
-            "What sampling temperature to use, between 0 and 2. Higher values "
-            "like 0.8 will make the output more random, while lower values "
-            "like 0.2 will make it more focused and deterministic."
-        ),
-    )
-
-    top_p: float | None = Field(
-        1.0,
-        ge=0,
-        le=1,
-        description=(
-            "An alternative to sampling with temperature, called nucleus "
-            "sampling, where the model considers the results of the tokens "
-            "with top_p probability mass. So 0.1 means only the tokens "
-            "comprising the top 10% probability mass are considered."
-        ),
-    )
-
-    tools: list[Tool] | None = Field(
-        None,
-        max_items=128,
-        description=(
-            "A list of tools the model may call. Currently, only functions "
-            "are supported as a tool. Use this to provide a list of functions "
-            "the model may generate JSON inputs for."
-        ),
-    )
-
-    tool_choice: ToolChoiceStr | ToolChoiceObj | None = Field(
-        None,
-        description=(
-            "Controls which (if any) tool is called by the model. 'none' "
-            "means the model will not call a tool and instead generates a "
-            "message. 'auto' means the model can pick between generating a "
-            "message or calling a tool. 'required' means the model must call "
-            "a tool."
-        ),
-    )
-
-    parallel_tool_calls: bool | None = Field(
-        True,
-        decription="Whether to enable parallel "
-        "function calling during tool use.",
-    )
-
-    user: str | None = Field(
-        None,
-        description=(
-            "A unique identifier representing your end-user, which can help "
-            "OpenAI to monitor and detect abuse."
-        ),
-    )
-
-    @model_validator(mode="after")
-    def validate_request(self) -> "OpenAIChatCompletionRequestBody":
-        if self.tools:
-            self.model_fields["tool_choice"].default = "auto"
-        else:
-            self.model_fields["tool_choice"].default = "none"
-
-        if self.top_logprobs is not None and not self.logprobs:
-            raise ValueError("logprobs must be true when top_logprobs is set")
-        return self