lionagi 0.3.8__py3-none-any.whl → 0.5.0__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- lionagi/__init__.py +14 -46
- lionagi/core/__init__.py +3 -1
- lionagi/core/_class_registry.py +69 -0
- lionagi/core/action/__init__.py +3 -13
- lionagi/core/action/action_manager.py +287 -0
- lionagi/core/action/base.py +109 -0
- lionagi/core/action/function_calling.py +127 -92
- lionagi/core/action/tool.py +172 -70
- lionagi/core/action/types.py +16 -0
- lionagi/core/communication/__init__.py +3 -0
- lionagi/core/communication/action_request.py +163 -0
- lionagi/core/communication/action_response.py +149 -0
- lionagi/core/communication/assistant_response.py +161 -0
- lionagi/core/communication/base_mail.py +49 -0
- lionagi/core/communication/instruction.py +376 -0
- lionagi/core/communication/message.py +286 -0
- lionagi/core/communication/message_manager.py +530 -0
- lionagi/core/communication/system.py +116 -0
- lionagi/core/communication/templates/README.md +28 -0
- lionagi/core/communication/templates/action_request.jinja2 +5 -0
- lionagi/core/communication/templates/action_response.jinja2 +9 -0
- lionagi/core/communication/templates/assistant_response.jinja2 +2 -0
- lionagi/core/communication/templates/instruction_message.jinja2 +61 -0
- lionagi/core/communication/templates/system_message.jinja2 +11 -0
- lionagi/core/communication/templates/tool_schemas.jinja2 +7 -0
- lionagi/core/communication/types.py +27 -0
- lionagi/core/communication/utils.py +254 -0
- lionagi/core/forms/__init__.py +3 -0
- lionagi/core/forms/base.py +232 -0
- lionagi/core/forms/form.py +791 -0
- lionagi/core/forms/report.py +321 -0
- lionagi/core/forms/types.py +13 -0
- lionagi/core/forms/utils.py +26 -0
- lionagi/core/generic/__init__.py +3 -6
- lionagi/core/generic/component.py +422 -0
- lionagi/core/generic/edge.py +143 -101
- lionagi/core/generic/element.py +195 -0
- lionagi/core/generic/graph.py +297 -180
- lionagi/core/generic/log.py +151 -0
- lionagi/core/generic/log_manager.py +320 -0
- lionagi/core/generic/node.py +7 -229
- lionagi/core/generic/pile.py +1017 -0
- lionagi/core/generic/progression.py +388 -0
- lionagi/core/generic/types.py +23 -0
- lionagi/core/generic/utils.py +50 -0
- lionagi/core/models/__init__.py +5 -0
- lionagi/core/models/base.py +85 -0
- lionagi/core/models/field_model.py +122 -0
- lionagi/core/models/new_model_params.py +195 -0
- lionagi/core/models/note.py +351 -0
- lionagi/core/models/operable_model.py +392 -0
- lionagi/core/models/schema_model.py +50 -0
- lionagi/core/models/types.py +10 -0
- lionagi/core/session/__init__.py +3 -0
- lionagi/core/session/branch.py +115 -415
- lionagi/core/session/branch_mixins.py +507 -0
- lionagi/core/session/session.py +122 -257
- lionagi/core/session/types.py +8 -0
- lionagi/core/typing/__init__.py +9 -0
- lionagi/core/typing/concepts.py +132 -0
- lionagi/core/typing/config.py +15 -0
- lionagi/core/typing/id.py +221 -0
- lionagi/core/typing/pydantic_.py +33 -0
- lionagi/core/typing/typing_.py +54 -0
- lionagi/integrations/__init__.py +0 -1
- lionagi/integrations/anthropic_/AnthropicModel.py +268 -0
- lionagi/integrations/anthropic_/AnthropicService.py +113 -0
- lionagi/integrations/anthropic_/__init__.py +3 -0
- lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +7 -0
- lionagi/integrations/anthropic_/anthropic_price_data.yaml +14 -0
- lionagi/integrations/anthropic_/api_endpoints/__init__.py +3 -0
- lionagi/integrations/anthropic_/api_endpoints/api_request.py +277 -0
- lionagi/integrations/anthropic_/api_endpoints/data_models.py +40 -0
- lionagi/integrations/anthropic_/api_endpoints/match_response.py +119 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/__init__.py +3 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/request/__init__.py +3 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +14 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +74 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +32 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +101 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +25 -0
- lionagi/integrations/anthropic_/version.py +5 -0
- lionagi/integrations/groq_/GroqModel.py +318 -0
- lionagi/integrations/groq_/GroqService.py +147 -0
- lionagi/integrations/groq_/__init__.py +3 -0
- lionagi/integrations/groq_/api_endpoints/data_models.py +187 -0
- lionagi/integrations/groq_/api_endpoints/groq_request.py +288 -0
- lionagi/integrations/groq_/api_endpoints/match_response.py +106 -0
- lionagi/integrations/groq_/api_endpoints/response_utils.py +105 -0
- lionagi/integrations/groq_/groq_max_output_token_data.yaml +21 -0
- lionagi/integrations/groq_/groq_price_data.yaml +58 -0
- lionagi/integrations/groq_/groq_rate_limits.yaml +105 -0
- lionagi/integrations/groq_/version.py +5 -0
- lionagi/integrations/litellm_/__init__.py +3 -0
- lionagi/integrations/litellm_/imodel.py +69 -0
- lionagi/integrations/ollama_/OllamaModel.py +244 -0
- lionagi/integrations/ollama_/OllamaService.py +138 -0
- lionagi/integrations/ollama_/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/api_request.py +179 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +31 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +46 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +67 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +49 -0
- lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +72 -0
- lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +59 -0
- lionagi/integrations/ollama_/api_endpoints/data_models.py +15 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +33 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +29 -0
- lionagi/integrations/ollama_/api_endpoints/match_data_model.py +62 -0
- lionagi/integrations/ollama_/api_endpoints/match_response.py +190 -0
- lionagi/integrations/ollama_/api_endpoints/model/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +13 -0
- lionagi/integrations/ollama_/api_endpoints/model/create_model.py +28 -0
- lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +11 -0
- lionagi/integrations/ollama_/api_endpoints/model/list_model.py +60 -0
- lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +34 -0
- lionagi/integrations/ollama_/api_endpoints/model/push_model.py +35 -0
- lionagi/integrations/ollama_/api_endpoints/model/show_model.py +36 -0
- lionagi/integrations/ollama_/api_endpoints/option_models.py +68 -0
- lionagi/integrations/openai_/OpenAIModel.py +414 -0
- lionagi/integrations/openai_/OpenAIService.py +426 -0
- lionagi/integrations/openai_/api_endpoints/__init__.py +3 -0
- lionagi/integrations/openai_/api_endpoints/api_request.py +277 -0
- lionagi/integrations/openai_/api_endpoints/audio/__init__.py +9 -0
- lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +34 -0
- lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +136 -0
- lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +41 -0
- lionagi/integrations/openai_/api_endpoints/audio/types.py +41 -0
- lionagi/integrations/openai_/api_endpoints/batch/__init__.py +17 -0
- lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +146 -0
- lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +7 -0
- lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +26 -0
- lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +37 -0
- lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +65 -0
- lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +7 -0
- lionagi/integrations/openai_/api_endpoints/batch/types.py +4 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +1 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +39 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +121 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +221 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +71 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +14 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +17 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +54 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +18 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +62 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +16 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +47 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +25 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +99 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +8 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +24 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +46 -0
- lionagi/integrations/openai_/api_endpoints/data_models.py +23 -0
- lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +3 -0
- lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +79 -0
- lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +67 -0
- lionagi/integrations/openai_/api_endpoints/files/__init__.py +11 -0
- lionagi/integrations/openai_/api_endpoints/files/delete_file.py +20 -0
- lionagi/integrations/openai_/api_endpoints/files/file_models.py +56 -0
- lionagi/integrations/openai_/api_endpoints/files/list_files.py +27 -0
- lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +9 -0
- lionagi/integrations/openai_/api_endpoints/files/upload_file.py +38 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +37 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +9 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +133 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +58 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +31 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +140 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +51 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +42 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +31 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +9 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +30 -0
- lionagi/integrations/openai_/api_endpoints/images/__init__.py +9 -0
- lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +69 -0
- lionagi/integrations/openai_/api_endpoints/images/image_models.py +56 -0
- lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +56 -0
- lionagi/integrations/openai_/api_endpoints/images/response_body.py +30 -0
- lionagi/integrations/openai_/api_endpoints/match_data_model.py +197 -0
- lionagi/integrations/openai_/api_endpoints/match_response.py +336 -0
- lionagi/integrations/openai_/api_endpoints/models/__init__.py +7 -0
- lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +17 -0
- lionagi/integrations/openai_/api_endpoints/models/models_models.py +31 -0
- lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +9 -0
- lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +3 -0
- lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +20 -0
- lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +139 -0
- lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +19 -0
- lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +11 -0
- lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +7 -0
- lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +18 -0
- lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +17 -0
- lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +52 -0
- lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +92 -0
- lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +15 -0
- lionagi/integrations/openai_/openai_max_output_token_data.yaml +12 -0
- lionagi/integrations/openai_/openai_price_data.yaml +26 -0
- lionagi/integrations/openai_/version.py +1 -0
- lionagi/integrations/pandas_/__init__.py +24 -0
- lionagi/integrations/pandas_/extend_df.py +61 -0
- lionagi/integrations/pandas_/read.py +103 -0
- lionagi/integrations/pandas_/remove_rows.py +61 -0
- lionagi/integrations/pandas_/replace_keywords.py +65 -0
- lionagi/integrations/pandas_/save.py +131 -0
- lionagi/integrations/pandas_/search_keywords.py +69 -0
- lionagi/integrations/pandas_/to_df.py +196 -0
- lionagi/integrations/pandas_/update_cells.py +54 -0
- lionagi/integrations/perplexity_/PerplexityModel.py +269 -0
- lionagi/integrations/perplexity_/PerplexityService.py +109 -0
- lionagi/integrations/perplexity_/__init__.py +3 -0
- lionagi/integrations/perplexity_/api_endpoints/api_request.py +171 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +121 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +146 -0
- lionagi/integrations/perplexity_/api_endpoints/data_models.py +63 -0
- lionagi/integrations/perplexity_/api_endpoints/match_response.py +26 -0
- lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +3 -0
- lionagi/integrations/perplexity_/perplexity_price_data.yaml +10 -0
- lionagi/integrations/perplexity_/version.py +1 -0
- lionagi/integrations/pydantic_/__init__.py +8 -0
- lionagi/integrations/pydantic_/break_down_annotation.py +81 -0
- lionagi/integrations/pydantic_/new_model.py +208 -0
- lionagi/integrations/services.py +17 -0
- lionagi/libs/__init__.py +0 -55
- lionagi/libs/compress/models.py +62 -0
- lionagi/libs/compress/utils.py +81 -0
- lionagi/libs/constants.py +98 -0
- lionagi/libs/file/chunk.py +265 -0
- lionagi/libs/file/file_ops.py +114 -0
- lionagi/libs/file/params.py +212 -0
- lionagi/libs/file/path.py +301 -0
- lionagi/libs/file/process.py +139 -0
- lionagi/libs/file/save.py +90 -0
- lionagi/libs/file/types.py +22 -0
- lionagi/libs/func/async_calls/__init__.py +21 -0
- lionagi/libs/func/async_calls/alcall.py +157 -0
- lionagi/libs/func/async_calls/bcall.py +82 -0
- lionagi/libs/func/async_calls/mcall.py +134 -0
- lionagi/libs/func/async_calls/pcall.py +149 -0
- lionagi/libs/func/async_calls/rcall.py +185 -0
- lionagi/libs/func/async_calls/tcall.py +114 -0
- lionagi/libs/func/async_calls/ucall.py +85 -0
- lionagi/libs/func/decorators.py +277 -0
- lionagi/libs/func/lcall.py +57 -0
- lionagi/libs/func/params.py +64 -0
- lionagi/libs/func/throttle.py +119 -0
- lionagi/libs/func/types.py +39 -0
- lionagi/libs/func/utils.py +96 -0
- lionagi/libs/package/imports.py +162 -0
- lionagi/libs/package/management.py +58 -0
- lionagi/libs/package/params.py +26 -0
- lionagi/libs/package/system.py +18 -0
- lionagi/libs/package/types.py +26 -0
- lionagi/libs/parse/__init__.py +1 -0
- lionagi/libs/parse/flatten/__init__.py +9 -0
- lionagi/libs/parse/flatten/flatten.py +168 -0
- lionagi/libs/parse/flatten/params.py +52 -0
- lionagi/libs/parse/flatten/unflatten.py +79 -0
- lionagi/libs/parse/json/__init__.py +27 -0
- lionagi/libs/parse/json/as_readable.py +104 -0
- lionagi/libs/parse/json/extract.py +102 -0
- lionagi/libs/parse/json/parse.py +179 -0
- lionagi/libs/parse/json/schema.py +227 -0
- lionagi/libs/parse/json/to_json.py +71 -0
- lionagi/libs/parse/nested/__init__.py +33 -0
- lionagi/libs/parse/nested/nfilter.py +55 -0
- lionagi/libs/parse/nested/nget.py +40 -0
- lionagi/libs/parse/nested/ninsert.py +103 -0
- lionagi/libs/parse/nested/nmerge.py +155 -0
- lionagi/libs/parse/nested/npop.py +66 -0
- lionagi/libs/parse/nested/nset.py +89 -0
- lionagi/libs/parse/nested/to_flat_list.py +64 -0
- lionagi/libs/parse/nested/utils.py +185 -0
- lionagi/libs/parse/string_parse/__init__.py +11 -0
- lionagi/libs/parse/string_parse/code_block.py +73 -0
- lionagi/libs/parse/string_parse/docstring.py +179 -0
- lionagi/libs/parse/string_parse/function_.py +92 -0
- lionagi/libs/parse/type_convert/__init__.py +19 -0
- lionagi/libs/parse/type_convert/params.py +145 -0
- lionagi/libs/parse/type_convert/to_dict.py +333 -0
- lionagi/libs/parse/type_convert/to_list.py +186 -0
- lionagi/libs/parse/type_convert/to_num.py +358 -0
- lionagi/libs/parse/type_convert/to_str.py +195 -0
- lionagi/libs/parse/types.py +9 -0
- lionagi/libs/parse/validate/__init__.py +14 -0
- lionagi/libs/parse/validate/boolean.py +96 -0
- lionagi/libs/parse/validate/keys.py +150 -0
- lionagi/libs/parse/validate/mapping.py +109 -0
- lionagi/libs/parse/validate/params.py +62 -0
- lionagi/libs/parse/xml/__init__.py +10 -0
- lionagi/libs/parse/xml/convert.py +56 -0
- lionagi/libs/parse/xml/parser.py +93 -0
- lionagi/libs/string_similarity/__init__.py +32 -0
- lionagi/libs/string_similarity/algorithms.py +219 -0
- lionagi/libs/string_similarity/matcher.py +102 -0
- lionagi/libs/string_similarity/utils.py +15 -0
- lionagi/libs/utils.py +255 -0
- lionagi/operations/__init__.py +3 -6
- lionagi/operations/brainstorm/__init__.py +3 -0
- lionagi/operations/brainstorm/brainstorm.py +204 -0
- lionagi/operations/brainstorm/prompt.py +1 -0
- lionagi/operations/plan/__init__.py +3 -0
- lionagi/operations/plan/plan.py +172 -0
- lionagi/operations/plan/prompt.py +21 -0
- lionagi/operations/select/__init__.py +3 -0
- lionagi/operations/select/prompt.py +1 -0
- lionagi/operations/select/select.py +100 -0
- lionagi/operations/select/utils.py +107 -0
- lionagi/operations/utils.py +35 -0
- lionagi/protocols/adapters/adapter.py +79 -0
- lionagi/protocols/adapters/json_adapter.py +43 -0
- lionagi/protocols/adapters/pandas_adapter.py +96 -0
- lionagi/protocols/configs/__init__.py +15 -0
- lionagi/protocols/configs/branch_config.py +86 -0
- lionagi/protocols/configs/id_config.py +15 -0
- lionagi/protocols/configs/imodel_config.py +73 -0
- lionagi/protocols/configs/log_config.py +93 -0
- lionagi/protocols/configs/retry_config.py +29 -0
- lionagi/protocols/operatives/__init__.py +15 -0
- lionagi/protocols/operatives/action.py +181 -0
- lionagi/protocols/operatives/instruct.py +196 -0
- lionagi/protocols/operatives/operative.py +182 -0
- lionagi/protocols/operatives/prompts.py +232 -0
- lionagi/protocols/operatives/reason.py +56 -0
- lionagi/protocols/operatives/step.py +217 -0
- lionagi/protocols/registries/_component_registry.py +19 -0
- lionagi/protocols/registries/_pile_registry.py +26 -0
- lionagi/service/__init__.py +13 -0
- lionagi/service/complete_request_info.py +11 -0
- lionagi/service/imodel.py +110 -0
- lionagi/service/rate_limiter.py +108 -0
- lionagi/service/service.py +37 -0
- lionagi/service/service_match_util.py +131 -0
- lionagi/service/service_util.py +72 -0
- lionagi/service/token_calculator.py +51 -0
- lionagi/settings.py +136 -0
- lionagi/strategies/base.py +53 -0
- lionagi/strategies/concurrent.py +71 -0
- lionagi/strategies/concurrent_chunk.py +43 -0
- lionagi/strategies/concurrent_sequential_chunk.py +104 -0
- lionagi/strategies/params.py +128 -0
- lionagi/strategies/sequential.py +23 -0
- lionagi/strategies/sequential_chunk.py +89 -0
- lionagi/strategies/sequential_concurrent_chunk.py +100 -0
- lionagi/strategies/types.py +21 -0
- lionagi/strategies/utils.py +49 -0
- lionagi/version.py +1 -1
- lionagi-0.5.0.dist-info/METADATA +348 -0
- lionagi-0.5.0.dist-info/RECORD +373 -0
- {lionagi-0.3.8.dist-info → lionagi-0.5.0.dist-info}/WHEEL +1 -1
- lionagi/core/_setting/_setting.py +0 -59
- lionagi/core/action/README.md +0 -20
- lionagi/core/action/manual.py +0 -1
- lionagi/core/action/node.py +0 -94
- lionagi/core/action/tool_manager.py +0 -342
- lionagi/core/agent/README.md +0 -1
- lionagi/core/agent/base_agent.py +0 -82
- lionagi/core/agent/eval/README.md +0 -1
- lionagi/core/agent/eval/evaluator.py +0 -1
- lionagi/core/agent/eval/vote.py +0 -40
- lionagi/core/agent/learn/learner.py +0 -59
- lionagi/core/agent/plan/unit_template.py +0 -1
- lionagi/core/collections/README.md +0 -23
- lionagi/core/collections/__init__.py +0 -16
- lionagi/core/collections/_logger.py +0 -312
- lionagi/core/collections/abc/README.md +0 -63
- lionagi/core/collections/abc/__init__.py +0 -53
- lionagi/core/collections/abc/component.py +0 -620
- lionagi/core/collections/abc/concepts.py +0 -277
- lionagi/core/collections/abc/exceptions.py +0 -136
- lionagi/core/collections/abc/util.py +0 -45
- lionagi/core/collections/exchange.py +0 -146
- lionagi/core/collections/flow.py +0 -416
- lionagi/core/collections/model.py +0 -465
- lionagi/core/collections/pile.py +0 -1232
- lionagi/core/collections/progression.py +0 -221
- lionagi/core/collections/util.py +0 -73
- lionagi/core/director/README.md +0 -1
- lionagi/core/director/direct.py +0 -298
- lionagi/core/director/director.py +0 -2
- lionagi/core/director/operations/select.py +0 -3
- lionagi/core/director/operations/utils.py +0 -6
- lionagi/core/engine/branch_engine.py +0 -361
- lionagi/core/engine/instruction_map_engine.py +0 -213
- lionagi/core/engine/sandbox_.py +0 -16
- lionagi/core/engine/script_engine.py +0 -89
- lionagi/core/executor/base_executor.py +0 -97
- lionagi/core/executor/graph_executor.py +0 -335
- lionagi/core/executor/neo4j_executor.py +0 -394
- lionagi/core/generic/README.md +0 -0
- lionagi/core/generic/edge_condition.py +0 -17
- lionagi/core/generic/hyperedge.py +0 -1
- lionagi/core/generic/tree.py +0 -49
- lionagi/core/generic/tree_node.py +0 -85
- lionagi/core/mail/__init__.py +0 -11
- lionagi/core/mail/mail.py +0 -26
- lionagi/core/mail/mail_manager.py +0 -185
- lionagi/core/mail/package.py +0 -49
- lionagi/core/mail/start_mail.py +0 -36
- lionagi/core/message/__init__.py +0 -18
- lionagi/core/message/action_request.py +0 -114
- lionagi/core/message/action_response.py +0 -121
- lionagi/core/message/assistant_response.py +0 -80
- lionagi/core/message/instruction.py +0 -194
- lionagi/core/message/message.py +0 -86
- lionagi/core/message/system.py +0 -71
- lionagi/core/message/util.py +0 -274
- lionagi/core/report/__init__.py +0 -4
- lionagi/core/report/base.py +0 -201
- lionagi/core/report/form.py +0 -212
- lionagi/core/report/report.py +0 -150
- lionagi/core/report/util.py +0 -15
- lionagi/core/rule/_default.py +0 -17
- lionagi/core/rule/action.py +0 -87
- lionagi/core/rule/base.py +0 -234
- lionagi/core/rule/boolean.py +0 -56
- lionagi/core/rule/choice.py +0 -48
- lionagi/core/rule/mapping.py +0 -82
- lionagi/core/rule/number.py +0 -73
- lionagi/core/rule/rulebook.py +0 -45
- lionagi/core/rule/string.py +0 -43
- lionagi/core/rule/util.py +0 -0
- lionagi/core/session/directive_mixin.py +0 -307
- lionagi/core/structure/__init__.py +0 -1
- lionagi/core/structure/chain.py +0 -1
- lionagi/core/structure/forest.py +0 -1
- lionagi/core/structure/graph.py +0 -1
- lionagi/core/structure/tree.py +0 -1
- lionagi/core/unit/__init__.py +0 -4
- lionagi/core/unit/parallel_unit.py +0 -234
- lionagi/core/unit/template/action.py +0 -65
- lionagi/core/unit/template/base.py +0 -35
- lionagi/core/unit/template/plan.py +0 -69
- lionagi/core/unit/template/predict.py +0 -95
- lionagi/core/unit/template/score.py +0 -108
- lionagi/core/unit/template/select.py +0 -91
- lionagi/core/unit/unit.py +0 -452
- lionagi/core/unit/unit_form.py +0 -290
- lionagi/core/unit/unit_mixin.py +0 -1166
- lionagi/core/unit/util.py +0 -103
- lionagi/core/validator/validator.py +0 -376
- lionagi/core/work/work.py +0 -59
- lionagi/core/work/work_edge.py +0 -102
- lionagi/core/work/work_function.py +0 -114
- lionagi/core/work/work_function_node.py +0 -50
- lionagi/core/work/work_queue.py +0 -90
- lionagi/core/work/work_task.py +0 -151
- lionagi/core/work/worker.py +0 -410
- lionagi/core/work/worker_engine.py +0 -208
- lionagi/core/work/worklog.py +0 -108
- lionagi/experimental/compressor/base.py +0 -47
- lionagi/experimental/compressor/llm_compressor.py +0 -265
- lionagi/experimental/compressor/llm_summarizer.py +0 -61
- lionagi/experimental/compressor/util.py +0 -70
- lionagi/experimental/directive/README.md +0 -1
- lionagi/experimental/directive/__init__.py +0 -19
- lionagi/experimental/directive/parser/base_parser.py +0 -294
- lionagi/experimental/directive/parser/base_syntax.txt +0 -200
- lionagi/experimental/directive/template/base_template.py +0 -71
- lionagi/experimental/directive/template/schema.py +0 -36
- lionagi/experimental/directive/tokenizer.py +0 -59
- lionagi/experimental/evaluator/README.md +0 -1
- lionagi/experimental/evaluator/ast_evaluator.py +0 -119
- lionagi/experimental/evaluator/base_evaluator.py +0 -213
- lionagi/experimental/knowledge/__init__.py +0 -0
- lionagi/experimental/knowledge/base.py +0 -10
- lionagi/experimental/knowledge/graph.py +0 -0
- lionagi/experimental/memory/__init__.py +0 -0
- lionagi/experimental/strategies/__init__.py +0 -0
- lionagi/experimental/strategies/base.py +0 -1
- lionagi/integrations/bridge/__init__.py +0 -4
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +0 -127
- lionagi/integrations/bridge/langchain_/__init__.py +0 -0
- lionagi/integrations/bridge/langchain_/documents.py +0 -138
- lionagi/integrations/bridge/langchain_/langchain_bridge.py +0 -68
- lionagi/integrations/bridge/llamaindex_/__init__.py +0 -0
- lionagi/integrations/bridge/llamaindex_/index.py +0 -36
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +0 -108
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +0 -256
- lionagi/integrations/bridge/llamaindex_/node_parser.py +0 -92
- lionagi/integrations/bridge/llamaindex_/reader.py +0 -201
- lionagi/integrations/bridge/llamaindex_/textnode.py +0 -59
- lionagi/integrations/bridge/pydantic_/__init__.py +0 -0
- lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +0 -7
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +0 -39
- lionagi/integrations/chunker/__init__.py +0 -0
- lionagi/integrations/chunker/chunk.py +0 -314
- lionagi/integrations/config/__init__.py +0 -4
- lionagi/integrations/config/mlx_configs.py +0 -1
- lionagi/integrations/config/oai_configs.py +0 -154
- lionagi/integrations/config/ollama_configs.py +0 -1
- lionagi/integrations/config/openrouter_configs.py +0 -74
- lionagi/integrations/langchain_/__init__.py +0 -0
- lionagi/integrations/llamaindex_/__init__.py +0 -0
- lionagi/integrations/loader/__init__.py +0 -0
- lionagi/integrations/loader/load.py +0 -257
- lionagi/integrations/loader/load_util.py +0 -214
- lionagi/integrations/provider/__init__.py +0 -11
- lionagi/integrations/provider/_mapping.py +0 -47
- lionagi/integrations/provider/litellm.py +0 -53
- lionagi/integrations/provider/mistralai.py +0 -1
- lionagi/integrations/provider/mlx_service.py +0 -55
- lionagi/integrations/provider/oai.py +0 -196
- lionagi/integrations/provider/ollama.py +0 -55
- lionagi/integrations/provider/openrouter.py +0 -170
- lionagi/integrations/provider/services.py +0 -138
- lionagi/integrations/provider/transformers.py +0 -108
- lionagi/integrations/storage/__init__.py +0 -3
- lionagi/integrations/storage/neo4j.py +0 -681
- lionagi/integrations/storage/storage_util.py +0 -302
- lionagi/integrations/storage/structure_excel.py +0 -291
- lionagi/integrations/storage/to_csv.py +0 -70
- lionagi/integrations/storage/to_excel.py +0 -91
- lionagi/libs/ln_api.py +0 -944
- lionagi/libs/ln_async.py +0 -208
- lionagi/libs/ln_context.py +0 -37
- lionagi/libs/ln_convert.py +0 -671
- lionagi/libs/ln_dataframe.py +0 -187
- lionagi/libs/ln_func_call.py +0 -1328
- lionagi/libs/ln_image.py +0 -114
- lionagi/libs/ln_knowledge_graph.py +0 -422
- lionagi/libs/ln_nested.py +0 -822
- lionagi/libs/ln_parse.py +0 -750
- lionagi/libs/ln_queue.py +0 -107
- lionagi/libs/ln_tokenize.py +0 -179
- lionagi/libs/ln_validate.py +0 -299
- lionagi/libs/special_tokens.py +0 -172
- lionagi/libs/sys_util.py +0 -710
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +0 -20
- lionagi/lions/coder/base_prompts.py +0 -22
- lionagi/lions/coder/code_form.py +0 -15
- lionagi/lions/coder/coder.py +0 -184
- lionagi/lions/coder/util.py +0 -101
- lionagi/lions/director/__init__.py +0 -0
- lionagi/lions/judge/__init__.py +0 -0
- lionagi/lions/judge/config.py +0 -8
- lionagi/lions/judge/data/__init__.py +0 -0
- lionagi/lions/judge/data/sample_codes.py +0 -526
- lionagi/lions/judge/data/sample_rurbic.py +0 -48
- lionagi/lions/judge/forms/__init__.py +0 -0
- lionagi/lions/judge/forms/code_analysis_form.py +0 -126
- lionagi/lions/judge/rubric.py +0 -34
- lionagi/lions/judge/services/__init__.py +0 -0
- lionagi/lions/judge/services/judge_code.py +0 -49
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +0 -192
- lionagi/lions/researcher/data_source/google_.py +0 -207
- lionagi/lions/researcher/data_source/wiki_.py +0 -98
- lionagi/lions/researcher/data_source/yfinance_.py +0 -21
- lionagi/operations/brainstorm.py +0 -87
- lionagi/operations/config.py +0 -6
- lionagi/operations/rank.py +0 -102
- lionagi/operations/score.py +0 -144
- lionagi/operations/select.py +0 -141
- lionagi-0.3.8.dist-info/METADATA +0 -241
- lionagi-0.3.8.dist-info/RECORD +0 -249
- /lionagi/{core/_setting → integrations/anthropic_/api_endpoints/messages/response}/__init__.py +0 -0
- /lionagi/{core/agent → integrations/groq_/api_endpoints}/__init__.py +0 -0
- /lionagi/{core/agent/eval → integrations/ollama_/api_endpoints/completion}/__init__.py +0 -0
- /lionagi/{core/agent/learn → integrations/ollama_/api_endpoints/embedding}/__init__.py +0 -0
- /lionagi/{core/agent/plan → integrations/openai_}/__init__.py +0 -0
- /lionagi/{core/director → integrations/openai_/api_endpoints/chat_completions/response}/__init__.py +0 -0
- /lionagi/{core/director/operations → integrations/openai_/image_token_calculator}/__init__.py +0 -0
- /lionagi/{core/engine → integrations/perplexity_/api_endpoints}/__init__.py +0 -0
- /lionagi/{core/executor → integrations/perplexity_/api_endpoints/chat_completions}/__init__.py +0 -0
- /lionagi/{core/generic/registry/component_registry → integrations/perplexity_/api_endpoints/chat_completions/request}/__init__.py +0 -0
- /lionagi/{core/rule → integrations/perplexity_/api_endpoints/chat_completions/response}/__init__.py +0 -0
- /lionagi/{core/unit/template → libs/compress}/__init__.py +0 -0
- /lionagi/{core/validator → libs/file}/__init__.py +0 -0
- /lionagi/{core/work → libs/func}/__init__.py +0 -0
- /lionagi/{experimental → libs/package}/__init__.py +0 -0
- /lionagi/{core/agent/plan/plan.py → libs/parse/params.py} +0 -0
- /lionagi/{experimental/compressor → protocols}/__init__.py +0 -0
- /lionagi/{experimental/directive/parser → protocols/adapters}/__init__.py +0 -0
- /lionagi/{experimental/directive/template → protocols/registries}/__init__.py +0 -0
- /lionagi/{experimental/evaluator → strategies}/__init__.py +0 -0
- {lionagi-0.3.8.dist-info → lionagi-0.5.0.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,46 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
from typing import Literal
|
6
|
+
|
7
|
+
from pydantic import Field
|
8
|
+
|
9
|
+
from ..data_models import OllamaEndpointRequestBody
|
10
|
+
from ..option_models import Option
|
11
|
+
from .message_models import Message
|
12
|
+
from .tool_models import Tool
|
13
|
+
|
14
|
+
|
15
|
+
class OllamaChatCompletionRequestBody(OllamaEndpointRequestBody):
    """Request body for Ollama's chat-completion endpoint."""

    # Required fields.
    model: str = Field(description="The model name")
    messages: list[Message] = Field(
        description="The messages of the chat, this can be used to keep a chat memory"
    )

    # Optional behavior knobs.
    tools: list[Tool] | None = Field(
        default=None,
        description="Tools for the model to use if supported. Requires 'stream' to be set to false",
    )
    format: Literal["json"] | None = Field(
        default=None,
        description="The format to return a response in. Currently the only accepted value is 'json'",
    )
    options: Option | None = Field(
        default=None,
        description="Additional model parameters listed in the documentation for the Modelfile",
    )
    stream: bool = Field(
        default=True,
        description="If false the response will be returned as a single response object, "
        "rather than a stream of objects",
    )
    keep_alive: str | Literal[0] = Field(
        default="5m",
        description="Controls how long the model will stay loaded into memory following the request.",
    )
|
@@ -0,0 +1,67 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from pydantic import BaseModel, Field
|
7
|
+
|
8
|
+
from ..data_models import OllamaEndpointResponseBody
|
9
|
+
from .message_models import Message
|
10
|
+
|
11
|
+
|
12
|
+
class Function(BaseModel):
    """A single function invocation requested by the model."""

    name: str = Field(default=None, description="The name of the function to call.")
    arguments: str = Field(
        default=None, description="The arguments to pass to the function."
    )
|
17
|
+
|
18
|
+
|
19
|
+
class ToolCall(BaseModel):
    """Wrapper around one function call emitted by the model."""

    function: Function = Field(default=None, description="The function call details.")
|
21
|
+
|
22
|
+
|
23
|
+
class OllamaStreamChatCompletionResponseBody(OllamaEndpointResponseBody):
    """One streamed chunk of an Ollama chat-completion response.

    ``OllamaChatCompletionResponseBody`` extends this model with the
    timing/token statistics present only on the final response object.
    """

    model: str = Field(None, description="The model name")

    created_at: str = Field(
        None, description="The timestamp when the response was created"
    )

    # Fixed description typo ("he partial..." -> "The partial...").
    message: Message = Field(
        None, description="The partial or full response generated by the model"
    )

    done: bool = Field(
        None,
        description="A flag indicating whether the response generation is complete",
    )

    done_reason: str = Field(
        None, description="The response generation complete reason"
    )
|
42
|
+
|
43
|
+
|
44
|
+
class OllamaChatCompletionResponseBody(OllamaStreamChatCompletionResponseBody):
    """Final chat-completion response object, carrying usage statistics."""

    total_duration: int = Field(
        default=None, description="Time spent generating the response"
    )
    load_duration: int = Field(
        default=None, description="Time spent in nanoseconds loading the model"
    )
    prompt_eval_count: int = Field(
        default=None, description="number of tokens in the prompt"
    )
    prompt_eval_duration: int = Field(
        default=None, description="time spent in nanoseconds evaluating the prompt"
    )
    eval_count: int = Field(
        default=None, description="number of tokens in the response"
    )
    eval_duration: int = Field(
        default=None, description="time in nanoseconds spent generating the response"
    )
|
@@ -0,0 +1,49 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from typing import Any, Literal
|
7
|
+
|
8
|
+
from pydantic import BaseModel, Field
|
9
|
+
|
10
|
+
|
11
|
+
class FunctionParameters(BaseModel):
    """JSON-Schema-style parameter description for a callable function."""

    # The schema root is constrained to a JSON object.
    type: Literal["object"] = "object"
    properties: dict[str, dict[str, Any]]
    required: list[str] | None = None
|
15
|
+
|
16
|
+
|
17
|
+
class Function(BaseModel):
    """Declaration of a function the model may choose to call."""

    description: str | None = Field(
        default=None,
        description=(
            "A description of what the function does, used by the model to "
            "choose when and how to call the function."
        ),
    )
    name: str = Field(
        max_length=64,
        pattern="^[a-zA-Z0-9_-]+$",
        description=(
            "The name of the function to be called. Must be a-z, A-Z, 0-9, "
            "or contain underscores and dashes, with a maximum length of 64."
        ),
    )
    parameters: FunctionParameters | None = Field(
        default=None,
        description=(
            "The parameters the functions accepts, described as a JSON Schema "
            "object. See the guide for examples, and the JSON Schema "
            "reference for documentation about the format."
        ),
    )
|
43
|
+
|
44
|
+
|
45
|
+
class Tool(BaseModel):
    """A tool definition offered to the model (function tools only)."""

    type: Literal["function"] = Field(
        description="The type of the tool. Currently, only function is supported."
    )
    function: Function = Field(description="The function definition.")
|
@@ -0,0 +1,72 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from typing import List, Literal, Optional
|
7
|
+
|
8
|
+
from pydantic import Field
|
9
|
+
|
10
|
+
from ..data_models import OllamaEndpointRequestBody
|
11
|
+
from ..option_models import Option
|
12
|
+
|
13
|
+
|
14
|
+
class OllamaCompletionRequestBody(OllamaEndpointRequestBody):
    """Request body for Ollama's text-completion ('/generate') endpoint."""

    model: str = Field(description="The model name")

    prompt: str = Field(
        default=None, description="The prompt to generate a response for"
    )
    suffix: str | None = Field(
        default=None, description="The text after the model response"
    )
    images: list[str] | None = Field(
        default=None,
        description="A list of base64-encoded images (for multimodal models such as 'llava')",
    )
    format: Literal["json"] | None = Field(
        default=None,
        description="The format to return a response in. Currently the only accepted value is 'json'",
    )
    options: Option | dict | None = Field(
        default=None,
        description="Additional model parameters listed in the documentation for the 'Modelfile'",
    )
    system: str | None = Field(
        default=None,
        description="System message to (overrides what is defined in the 'Modelfile')",
    )
    template: str | None = Field(
        default=None,
        description="The prompt template to use (overrides what is defined in the 'Modelfile')",
    )
    context: list[int] | None = Field(
        default=None,
        description="The context parameter returned from a previous request to '/generate', "
        "this can be used to keep a short conversational memory",
    )
    stream: bool = Field(
        default=True,
        description="If 'false' the response will be returned as a single response object, "
        "rather than a stream of objects",
    )
    raw: bool = Field(
        default=False,
        description="if 'true' no formatting will be applied to the prompt. "
        "You may choose to use the 'raw' parameter if you are specifying a full templated "
        "prompt in your request to the API",
    )
    keep_alive: str | Literal[0] = Field(
        default="5m",
        description="Controls how long the model will stay loaded into memory following the request.",
    )
|
@@ -0,0 +1,59 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from typing import Optional
|
7
|
+
|
8
|
+
from pydantic import Field
|
9
|
+
|
10
|
+
from ..data_models import OllamaEndpointResponseBody
|
11
|
+
|
12
|
+
|
13
|
+
class OllamaStreamCompletionResponseBody(OllamaEndpointResponseBody):
    """One streamed chunk of an Ollama text-completion response.

    ``OllamaCompletionResponseBody`` extends this model with the
    timing/token statistics present only on the final response object.
    """

    model: str = Field(None, description="The model name")

    created_at: str = Field(
        None, description="The timestamp when the response was created"
    )

    # Fixed description typo ("he partial..." -> "The partial...").
    response: str = Field(
        None, description="The partial or full response generated by the model"
    )

    done: bool = Field(
        None,
        description="A flag indicating whether the response generation is complete",
    )
|
28
|
+
|
29
|
+
|
30
|
+
class OllamaCompletionResponseBody(OllamaStreamCompletionResponseBody):
    """Final text-completion response object, carrying usage statistics."""

    total_duration: int = Field(
        default=None, description="Time spent generating the response"
    )
    load_duration: int = Field(
        default=None, description="Time spent in nanoseconds loading the model"
    )
    prompt_eval_count: int = Field(
        default=None, description="number of tokens in the prompt"
    )
    prompt_eval_duration: int = Field(
        default=None, description="time spent in nanoseconds evaluating the prompt"
    )
    eval_count: int = Field(
        default=None, description="number of tokens in the response"
    )
    eval_duration: int = Field(
        default=None, description="time in nanoseconds spent generating the response"
    )
    context: list | None = Field(
        default=None,
        description="an encoding of the conversation used in this response, "
        "this can be sent in the next request to keep a conversational memory",
    )
|
@@ -0,0 +1,15 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
from pydantic import BaseModel, ConfigDict
|
6
|
+
|
7
|
+
|
8
|
+
class OllamaEndpointRequestBody(BaseModel):
    """Shared base for every Ollama request body; unknown fields are rejected."""

    model_config = ConfigDict(
        extra="forbid",
        use_enum_values=True,
        validate_assignment=True,
    )
|
12
|
+
|
13
|
+
|
14
|
+
class OllamaEndpointResponseBody(BaseModel):
    """Shared base for every Ollama response body."""

    model_config = ConfigDict(
        use_enum_values=True,
        validate_assignment=True,
    )
|
@@ -0,0 +1,33 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from pydantic import Field
|
7
|
+
|
8
|
+
from ..data_models import OllamaEndpointRequestBody
|
9
|
+
from ..option_models import Option
|
10
|
+
|
11
|
+
|
12
|
+
class OllamaEmbeddingRequestBody(OllamaEndpointRequestBody):
    """Request body for Ollama's embedding endpoint."""

    model: str = Field(description="Name of model to generate embeddings from")
    input: str | list[str] = Field(
        description="Text or list of text to generate embeddings for"
    )

    truncate: bool = Field(
        default=True,
        description="Truncates the end of each input to fit within context length. "
        "Returns error if 'false' and context length is exceeded.",
    )
    options: Option | None = Field(
        default=None,
        description="Additional model parameters listed in the documentation for the Modelfile",
    )
    keep_alive: str = Field(
        default="5m",
        description="Controls how long the model will stay loaded into memory following the request.",
    )
|
@@ -0,0 +1,29 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from pydantic import Field
|
7
|
+
|
8
|
+
from ..data_models import OllamaEndpointResponseBody
|
9
|
+
|
10
|
+
|
11
|
+
class OllamaEmbeddingResponseBody(OllamaEndpointResponseBody):
    """Response body returned by Ollama's embedding endpoint."""

    model: str = Field(default=None, description="The model name")
    embeddings: list = Field(
        default=None,
        description="The generated embeddings for the text or list of text",
    )
    total_duration: int = Field(
        default=None, description="Time spent generating the response"
    )
    load_duration: int = Field(
        default=None, description="Time spent in nanoseconds loading the model"
    )
    prompt_eval_count: int = Field(
        default=None, description="Number of tokens in the prompt"
    )
|
@@ -0,0 +1,62 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
def match_data_model(task_name):
|
7
|
+
if task_name == "generate_completion":
|
8
|
+
from .completion.request_body import OllamaCompletionRequestBody
|
9
|
+
|
10
|
+
return {"request_body": OllamaCompletionRequestBody}
|
11
|
+
|
12
|
+
elif task_name == "generate_chat_completion":
|
13
|
+
from .chat_completion.request_body import (
|
14
|
+
OllamaChatCompletionRequestBody,
|
15
|
+
)
|
16
|
+
|
17
|
+
return {"request_body": OllamaChatCompletionRequestBody}
|
18
|
+
|
19
|
+
elif task_name == "generate_embeddings":
|
20
|
+
from .embedding.request_body import OllamaEmbeddingRequestBody
|
21
|
+
|
22
|
+
return {"request_body": OllamaEmbeddingRequestBody}
|
23
|
+
|
24
|
+
elif task_name == "create_model":
|
25
|
+
from .model.create_model import OllamaCreateModelRequestBody
|
26
|
+
|
27
|
+
return {"json_data": OllamaCreateModelRequestBody}
|
28
|
+
|
29
|
+
elif (
|
30
|
+
task_name == "list_local_models" or task_name == "list_running_models"
|
31
|
+
):
|
32
|
+
return {}
|
33
|
+
|
34
|
+
elif task_name == "show_model_information":
|
35
|
+
from .model.show_model import OllamaShowModelRequestBody
|
36
|
+
|
37
|
+
return {"json_data": OllamaShowModelRequestBody}
|
38
|
+
|
39
|
+
elif task_name == "copy_model":
|
40
|
+
from .model.copy_model import OllamaCopyModelRequestBody
|
41
|
+
|
42
|
+
return {"json_data": OllamaCopyModelRequestBody}
|
43
|
+
|
44
|
+
elif task_name == "delete_model":
|
45
|
+
from .model.delete_model import OllamaDeleteModelRequestBody
|
46
|
+
|
47
|
+
return {"json_data": OllamaDeleteModelRequestBody}
|
48
|
+
|
49
|
+
elif task_name == "pull_model":
|
50
|
+
from .model.pull_model import OllamaPullModelRequestBody
|
51
|
+
|
52
|
+
return {"json_data": OllamaPullModelRequestBody}
|
53
|
+
|
54
|
+
elif task_name == "push_model":
|
55
|
+
from .model.push_model import OllamaPushModelRequestBody
|
56
|
+
|
57
|
+
return {"json_data": OllamaPushModelRequestBody}
|
58
|
+
|
59
|
+
else:
|
60
|
+
raise ValueError(
|
61
|
+
f"Invalid task: {task_name}. Not supported in the service."
|
62
|
+
)
|
@@ -0,0 +1,190 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
imported_models = {}
|
6
|
+
|
7
|
+
|
8
|
+
def match_response(request_model, response: dict | list):
|
9
|
+
global imported_models
|
10
|
+
|
11
|
+
endpoint = request_model.endpoint
|
12
|
+
|
13
|
+
if endpoint == "generate":
|
14
|
+
if isinstance(response, list):
|
15
|
+
if "OllamaStreamCompletionResponseBody" not in imported_models:
|
16
|
+
from .completion.response_body import (
|
17
|
+
OllamaStreamCompletionResponseBody,
|
18
|
+
)
|
19
|
+
|
20
|
+
imported_models["OllamaStreamCompletionResponseBody"] = (
|
21
|
+
OllamaStreamCompletionResponseBody
|
22
|
+
)
|
23
|
+
if "OllamaCompletionResponseBody" not in imported_models:
|
24
|
+
from .completion.response_body import (
|
25
|
+
OllamaCompletionResponseBody,
|
26
|
+
)
|
27
|
+
|
28
|
+
imported_models["OllamaCompletionResponseBody"] = (
|
29
|
+
OllamaCompletionResponseBody
|
30
|
+
)
|
31
|
+
|
32
|
+
result = []
|
33
|
+
for item in response[:-1]:
|
34
|
+
result.append(
|
35
|
+
imported_models["OllamaStreamCompletionResponseBody"](
|
36
|
+
**item
|
37
|
+
)
|
38
|
+
)
|
39
|
+
|
40
|
+
result.append(
|
41
|
+
imported_models["OllamaCompletionResponseBody"](**response[-1])
|
42
|
+
)
|
43
|
+
return result
|
44
|
+
else:
|
45
|
+
if "OllamaCompletionResponseBody" not in imported_models:
|
46
|
+
from .completion.response_body import (
|
47
|
+
OllamaCompletionResponseBody,
|
48
|
+
)
|
49
|
+
|
50
|
+
imported_models["OllamaCompletionResponseBody"] = (
|
51
|
+
OllamaCompletionResponseBody
|
52
|
+
)
|
53
|
+
return imported_models["OllamaCompletionResponseBody"](**response)
|
54
|
+
|
55
|
+
elif endpoint == "chat":
|
56
|
+
if isinstance(response, list):
|
57
|
+
if "OllamaStreamChatCompletionResponseBody" not in imported_models:
|
58
|
+
from .chat_completion.response_body import (
|
59
|
+
OllamaStreamChatCompletionResponseBody,
|
60
|
+
)
|
61
|
+
|
62
|
+
imported_models["OllamaStreamChatCompletionResponseBody"] = (
|
63
|
+
OllamaStreamChatCompletionResponseBody
|
64
|
+
)
|
65
|
+
if "OllamaChatCompletionResponseBody" not in imported_models:
|
66
|
+
from .chat_completion.response_body import (
|
67
|
+
OllamaChatCompletionResponseBody,
|
68
|
+
)
|
69
|
+
|
70
|
+
imported_models["OllamaChatCompletionResponseBody"] = (
|
71
|
+
OllamaChatCompletionResponseBody
|
72
|
+
)
|
73
|
+
|
74
|
+
result = []
|
75
|
+
for item in response[:-1]:
|
76
|
+
result.append(
|
77
|
+
imported_models["OllamaStreamChatCompletionResponseBody"](
|
78
|
+
**item
|
79
|
+
)
|
80
|
+
)
|
81
|
+
|
82
|
+
result.append(
|
83
|
+
imported_models["OllamaChatCompletionResponseBody"](
|
84
|
+
**response[-1]
|
85
|
+
)
|
86
|
+
)
|
87
|
+
return result
|
88
|
+
else:
|
89
|
+
if "OllamaChatCompletionResponseBody" not in imported_models:
|
90
|
+
from .chat_completion.response_body import (
|
91
|
+
OllamaChatCompletionResponseBody,
|
92
|
+
)
|
93
|
+
|
94
|
+
imported_models["OllamaChatCompletionResponseBody"] = (
|
95
|
+
OllamaChatCompletionResponseBody
|
96
|
+
)
|
97
|
+
return imported_models["OllamaChatCompletionResponseBody"](
|
98
|
+
**response
|
99
|
+
)
|
100
|
+
|
101
|
+
elif endpoint == "embed":
|
102
|
+
if "OllamaEmbeddingResponseBody" not in imported_models:
|
103
|
+
from .embedding.response_body import OllamaEmbeddingResponseBody
|
104
|
+
|
105
|
+
imported_models["OllamaEmbeddingResponseBody"] = (
|
106
|
+
OllamaEmbeddingResponseBody
|
107
|
+
)
|
108
|
+
return imported_models["OllamaEmbeddingResponseBody"](**response)
|
109
|
+
|
110
|
+
elif endpoint == "create":
|
111
|
+
if "OllamaCreateModelResponseBody" not in imported_models:
|
112
|
+
from .model.create_model import OllamaCreateModelResponseBody
|
113
|
+
|
114
|
+
imported_models["OllamaCreateModelResponseBody"] = (
|
115
|
+
OllamaCreateModelResponseBody
|
116
|
+
)
|
117
|
+
if isinstance(response, list):
|
118
|
+
return [
|
119
|
+
imported_models["OllamaCreateModelResponseBody"](**res)
|
120
|
+
for res in response
|
121
|
+
]
|
122
|
+
else: # dict
|
123
|
+
return imported_models["OllamaCreateModelResponseBody"](**response)
|
124
|
+
|
125
|
+
elif endpoint == "tags":
|
126
|
+
if "OllamaListLocalModelsResponseBody" not in imported_models:
|
127
|
+
from .model.list_model import OllamaListLocalModelsResponseBody
|
128
|
+
|
129
|
+
imported_models["OllamaListLocalModelsResponseBody"] = (
|
130
|
+
OllamaListLocalModelsResponseBody
|
131
|
+
)
|
132
|
+
return imported_models["OllamaListLocalModelsResponseBody"](**response)
|
133
|
+
|
134
|
+
elif endpoint == "show":
|
135
|
+
if "OllamaShowModelResponseBody" not in imported_models:
|
136
|
+
from .model.show_model import OllamaShowModelResponseBody
|
137
|
+
|
138
|
+
imported_models["OllamaShowModelResponseBody"] = (
|
139
|
+
OllamaShowModelResponseBody
|
140
|
+
)
|
141
|
+
return imported_models["OllamaShowModelResponseBody"](**response)
|
142
|
+
|
143
|
+
elif endpoint == "pull":
|
144
|
+
if "OllamaPullModelResponseBody" not in imported_models:
|
145
|
+
from .model.pull_model import OllamaPullModelResponseBody
|
146
|
+
|
147
|
+
imported_models["OllamaPullModelResponseBody"] = (
|
148
|
+
OllamaPullModelResponseBody
|
149
|
+
)
|
150
|
+
if isinstance(response, list):
|
151
|
+
return [
|
152
|
+
imported_models["OllamaPullModelResponseBody"](**res)
|
153
|
+
for res in response
|
154
|
+
]
|
155
|
+
else: # dict
|
156
|
+
return imported_models["OllamaPullModelResponseBody"](**response)
|
157
|
+
|
158
|
+
elif endpoint == "push":
|
159
|
+
if "OllamaPushModelResponseBody" not in imported_models:
|
160
|
+
from .model.push_model import OllamaPushModelResponseBody
|
161
|
+
|
162
|
+
imported_models["OllamaPushModelResponseBody"] = (
|
163
|
+
OllamaPushModelResponseBody
|
164
|
+
)
|
165
|
+
if isinstance(response, list):
|
166
|
+
return [
|
167
|
+
imported_models["OllamaPushModelResponseBody"](**res)
|
168
|
+
for res in response
|
169
|
+
]
|
170
|
+
else: # dict
|
171
|
+
return imported_models["OllamaPushModelResponseBody"](**response)
|
172
|
+
|
173
|
+
elif endpoint == "ps":
|
174
|
+
if "OllamaListRunningModelsResponseBody" not in imported_models:
|
175
|
+
from .model.list_model import OllamaListRunningModelsResponseBody
|
176
|
+
|
177
|
+
imported_models["OllamaListRunningModelsResponseBody"] = (
|
178
|
+
OllamaListRunningModelsResponseBody
|
179
|
+
)
|
180
|
+
return imported_models["OllamaListRunningModelsResponseBody"](
|
181
|
+
**response
|
182
|
+
)
|
183
|
+
|
184
|
+
elif not response:
|
185
|
+
return
|
186
|
+
|
187
|
+
else:
|
188
|
+
raise ValueError(
|
189
|
+
"There is no standard response model for the provided request and response"
|
190
|
+
)
|
@@ -0,0 +1,13 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
from pydantic import Field
|
6
|
+
|
7
|
+
from ..data_models import OllamaEndpointRequestBody
|
8
|
+
|
9
|
+
|
10
|
+
class OllamaCopyModelRequestBody(OllamaEndpointRequestBody):
    """Request body for duplicating an existing model under a new name."""

    source: str = Field(description="Name of the source existing model")
    destination: str = Field(description="Name of the copy model")
|
@@ -0,0 +1,28 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
|
6
|
+
from pydantic import Field
|
7
|
+
|
8
|
+
from ..data_models import OllamaEndpointRequestBody, OllamaEndpointResponseBody
|
9
|
+
|
10
|
+
|
11
|
+
class OllamaCreateModelRequestBody(OllamaEndpointRequestBody):
    """Request body for creating a model from a Modelfile."""

    name: str = Field(description="name of the model to create")

    modelfile: str | None = Field(
        default=None, description="Contents of the Modelfile"
    )
    stream: bool | None = Field(
        default=True,
        description="if 'false' the response will be returned as a single response object, "
        "rather than a stream of objects",
    )
    path: str | None = Field(default=None, description="path to the Modelfile")
|
25
|
+
|
26
|
+
|
27
|
+
class OllamaCreateModelResponseBody(OllamaEndpointResponseBody):
    """Response body for model creation; carries only a progress status."""

    status: str = Field(default=None)
|
@@ -0,0 +1,11 @@
|
|
1
|
+
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
from pydantic import Field
|
6
|
+
|
7
|
+
from ..data_models import OllamaEndpointRequestBody
|
8
|
+
|
9
|
+
|
10
|
+
class OllamaDeleteModelRequestBody(OllamaEndpointRequestBody):
    """Request body for deleting a model by name."""

    name: str = Field(description="Name of the model to delete")
|