lionagi 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
- lionagi/__init__.py +14 -46
- lionagi/core/__init__.py +3 -1
- lionagi/core/_class_registry.py +69 -0
- lionagi/core/action/__init__.py +3 -13
- lionagi/core/action/action_manager.py +287 -0
- lionagi/core/action/base.py +109 -0
- lionagi/core/action/function_calling.py +127 -92
- lionagi/core/action/tool.py +172 -70
- lionagi/core/action/types.py +16 -0
- lionagi/core/communication/__init__.py +3 -0
- lionagi/core/communication/action_request.py +163 -0
- lionagi/core/communication/action_response.py +149 -0
- lionagi/core/communication/assistant_response.py +161 -0
- lionagi/core/communication/base_mail.py +49 -0
- lionagi/core/communication/instruction.py +376 -0
- lionagi/core/communication/message.py +286 -0
- lionagi/core/communication/message_manager.py +530 -0
- lionagi/core/communication/system.py +116 -0
- lionagi/core/communication/templates/README.md +28 -0
- lionagi/core/communication/templates/action_request.jinja2 +5 -0
- lionagi/core/communication/templates/action_response.jinja2 +9 -0
- lionagi/core/communication/templates/assistant_response.jinja2 +2 -0
- lionagi/core/communication/templates/instruction_message.jinja2 +61 -0
- lionagi/core/communication/templates/system_message.jinja2 +11 -0
- lionagi/core/communication/templates/tool_schemas.jinja2 +7 -0
- lionagi/core/communication/types.py +27 -0
- lionagi/core/communication/utils.py +254 -0
- lionagi/core/forms/__init__.py +3 -0
- lionagi/core/forms/base.py +232 -0
- lionagi/core/forms/form.py +791 -0
- lionagi/core/forms/report.py +321 -0
- lionagi/core/forms/types.py +13 -0
- lionagi/core/forms/utils.py +26 -0
- lionagi/core/generic/__init__.py +3 -6
- lionagi/core/generic/component.py +422 -0
- lionagi/core/generic/edge.py +143 -101
- lionagi/core/generic/element.py +195 -0
- lionagi/core/generic/graph.py +297 -180
- lionagi/core/generic/log.py +151 -0
- lionagi/core/generic/log_manager.py +320 -0
- lionagi/core/generic/node.py +7 -229
- lionagi/core/generic/pile.py +1017 -0
- lionagi/core/generic/progression.py +388 -0
- lionagi/core/generic/types.py +23 -0
- lionagi/core/generic/utils.py +50 -0
- lionagi/core/models/__init__.py +5 -0
- lionagi/core/models/base.py +85 -0
- lionagi/core/models/field_model.py +122 -0
- lionagi/core/models/new_model_params.py +195 -0
- lionagi/core/models/note.py +351 -0
- lionagi/core/models/operable_model.py +392 -0
- lionagi/core/models/schema_model.py +50 -0
- lionagi/core/models/types.py +10 -0
- lionagi/core/session/__init__.py +3 -0
- lionagi/core/session/branch.py +115 -415
- lionagi/core/session/branch_mixins.py +507 -0
- lionagi/core/session/session.py +122 -257
- lionagi/core/session/types.py +8 -0
- lionagi/core/typing/__init__.py +9 -0
- lionagi/core/typing/concepts.py +132 -0
- lionagi/core/typing/config.py +15 -0
- lionagi/core/typing/id.py +221 -0
- lionagi/core/typing/pydantic_.py +33 -0
- lionagi/core/typing/typing_.py +54 -0
- lionagi/integrations/__init__.py +0 -1
- lionagi/integrations/anthropic_/AnthropicModel.py +268 -0
- lionagi/integrations/anthropic_/AnthropicService.py +113 -0
- lionagi/integrations/anthropic_/__init__.py +3 -0
- lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +7 -0
- lionagi/integrations/anthropic_/anthropic_price_data.yaml +14 -0
- lionagi/integrations/anthropic_/api_endpoints/__init__.py +3 -0
- lionagi/integrations/anthropic_/api_endpoints/api_request.py +277 -0
- lionagi/integrations/anthropic_/api_endpoints/data_models.py +40 -0
- lionagi/integrations/anthropic_/api_endpoints/match_response.py +119 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/__init__.py +3 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/request/__init__.py +3 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +14 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +74 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +32 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +101 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +25 -0
- lionagi/integrations/anthropic_/version.py +5 -0
- lionagi/integrations/groq_/GroqModel.py +318 -0
- lionagi/integrations/groq_/GroqService.py +147 -0
- lionagi/integrations/groq_/__init__.py +3 -0
- lionagi/integrations/groq_/api_endpoints/data_models.py +187 -0
- lionagi/integrations/groq_/api_endpoints/groq_request.py +288 -0
- lionagi/integrations/groq_/api_endpoints/match_response.py +106 -0
- lionagi/integrations/groq_/api_endpoints/response_utils.py +105 -0
- lionagi/integrations/groq_/groq_max_output_token_data.yaml +21 -0
- lionagi/integrations/groq_/groq_price_data.yaml +58 -0
- lionagi/integrations/groq_/groq_rate_limits.yaml +105 -0
- lionagi/integrations/groq_/version.py +5 -0
- lionagi/integrations/litellm_/__init__.py +3 -0
- lionagi/integrations/litellm_/imodel.py +69 -0
- lionagi/integrations/ollama_/OllamaModel.py +244 -0
- lionagi/integrations/ollama_/OllamaService.py +138 -0
- lionagi/integrations/ollama_/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/api_request.py +179 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +31 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +46 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +67 -0
- lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +49 -0
- lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +72 -0
- lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +59 -0
- lionagi/integrations/ollama_/api_endpoints/data_models.py +15 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +33 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +29 -0
- lionagi/integrations/ollama_/api_endpoints/match_data_model.py +62 -0
- lionagi/integrations/ollama_/api_endpoints/match_response.py +190 -0
- lionagi/integrations/ollama_/api_endpoints/model/__init__.py +3 -0
- lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +13 -0
- lionagi/integrations/ollama_/api_endpoints/model/create_model.py +28 -0
- lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +11 -0
- lionagi/integrations/ollama_/api_endpoints/model/list_model.py +60 -0
- lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +34 -0
- lionagi/integrations/ollama_/api_endpoints/model/push_model.py +35 -0
- lionagi/integrations/ollama_/api_endpoints/model/show_model.py +36 -0
- lionagi/integrations/ollama_/api_endpoints/option_models.py +68 -0
- lionagi/integrations/openai_/OpenAIModel.py +414 -0
- lionagi/integrations/openai_/OpenAIService.py +426 -0
- lionagi/integrations/openai_/api_endpoints/__init__.py +3 -0
- lionagi/integrations/openai_/api_endpoints/api_request.py +277 -0
- lionagi/integrations/openai_/api_endpoints/audio/__init__.py +9 -0
- lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +34 -0
- lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +136 -0
- lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +41 -0
- lionagi/integrations/openai_/api_endpoints/audio/types.py +41 -0
- lionagi/integrations/openai_/api_endpoints/batch/__init__.py +17 -0
- lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +146 -0
- lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +7 -0
- lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +26 -0
- lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +37 -0
- lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +65 -0
- lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +7 -0
- lionagi/integrations/openai_/api_endpoints/batch/types.py +4 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +1 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +39 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +121 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +221 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +71 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +14 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +17 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +54 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +18 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +62 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +16 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +47 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +25 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +99 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +8 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +24 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +46 -0
- lionagi/integrations/openai_/api_endpoints/data_models.py +23 -0
- lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +3 -0
- lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +79 -0
- lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +67 -0
- lionagi/integrations/openai_/api_endpoints/files/__init__.py +11 -0
- lionagi/integrations/openai_/api_endpoints/files/delete_file.py +20 -0
- lionagi/integrations/openai_/api_endpoints/files/file_models.py +56 -0
- lionagi/integrations/openai_/api_endpoints/files/list_files.py +27 -0
- lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +9 -0
- lionagi/integrations/openai_/api_endpoints/files/upload_file.py +38 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +37 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +9 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +133 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +58 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +31 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +140 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +51 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +42 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +31 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +9 -0
- lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +30 -0
- lionagi/integrations/openai_/api_endpoints/images/__init__.py +9 -0
- lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +69 -0
- lionagi/integrations/openai_/api_endpoints/images/image_models.py +56 -0
- lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +56 -0
- lionagi/integrations/openai_/api_endpoints/images/response_body.py +30 -0
- lionagi/integrations/openai_/api_endpoints/match_data_model.py +197 -0
- lionagi/integrations/openai_/api_endpoints/match_response.py +336 -0
- lionagi/integrations/openai_/api_endpoints/models/__init__.py +7 -0
- lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +17 -0
- lionagi/integrations/openai_/api_endpoints/models/models_models.py +31 -0
- lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +9 -0
- lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +3 -0
- lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +20 -0
- lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +139 -0
- lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +19 -0
- lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +11 -0
- lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +7 -0
- lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +18 -0
- lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +17 -0
- lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +52 -0
- lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +92 -0
- lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +15 -0
- lionagi/integrations/openai_/openai_max_output_token_data.yaml +12 -0
- lionagi/integrations/openai_/openai_price_data.yaml +26 -0
- lionagi/integrations/openai_/version.py +1 -0
- lionagi/integrations/pandas_/__init__.py +24 -0
- lionagi/integrations/pandas_/extend_df.py +61 -0
- lionagi/integrations/pandas_/read.py +103 -0
- lionagi/integrations/pandas_/remove_rows.py +61 -0
- lionagi/integrations/pandas_/replace_keywords.py +65 -0
- lionagi/integrations/pandas_/save.py +131 -0
- lionagi/integrations/pandas_/search_keywords.py +69 -0
- lionagi/integrations/pandas_/to_df.py +196 -0
- lionagi/integrations/pandas_/update_cells.py +54 -0
- lionagi/integrations/perplexity_/PerplexityModel.py +269 -0
- lionagi/integrations/perplexity_/PerplexityService.py +109 -0
- lionagi/integrations/perplexity_/__init__.py +3 -0
- lionagi/integrations/perplexity_/api_endpoints/api_request.py +171 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +121 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +146 -0
- lionagi/integrations/perplexity_/api_endpoints/data_models.py +63 -0
- lionagi/integrations/perplexity_/api_endpoints/match_response.py +26 -0
- lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +3 -0
- lionagi/integrations/perplexity_/perplexity_price_data.yaml +10 -0
- lionagi/integrations/perplexity_/version.py +1 -0
- lionagi/integrations/pydantic_/__init__.py +8 -0
- lionagi/integrations/pydantic_/break_down_annotation.py +81 -0
- lionagi/integrations/pydantic_/new_model.py +208 -0
- lionagi/integrations/services.py +17 -0
- lionagi/libs/__init__.py +0 -55
- lionagi/libs/compress/models.py +62 -0
- lionagi/libs/compress/utils.py +81 -0
- lionagi/libs/constants.py +98 -0
- lionagi/libs/file/chunk.py +265 -0
- lionagi/libs/file/file_ops.py +114 -0
- lionagi/libs/file/params.py +212 -0
- lionagi/libs/file/path.py +301 -0
- lionagi/libs/file/process.py +139 -0
- lionagi/libs/file/save.py +90 -0
- lionagi/libs/file/types.py +22 -0
- lionagi/libs/func/async_calls/__init__.py +21 -0
- lionagi/libs/func/async_calls/alcall.py +157 -0
- lionagi/libs/func/async_calls/bcall.py +82 -0
- lionagi/libs/func/async_calls/mcall.py +134 -0
- lionagi/libs/func/async_calls/pcall.py +149 -0
- lionagi/libs/func/async_calls/rcall.py +185 -0
- lionagi/libs/func/async_calls/tcall.py +114 -0
- lionagi/libs/func/async_calls/ucall.py +85 -0
- lionagi/libs/func/decorators.py +277 -0
- lionagi/libs/func/lcall.py +57 -0
- lionagi/libs/func/params.py +64 -0
- lionagi/libs/func/throttle.py +119 -0
- lionagi/libs/func/types.py +39 -0
- lionagi/libs/func/utils.py +96 -0
- lionagi/libs/package/imports.py +162 -0
- lionagi/libs/package/management.py +58 -0
- lionagi/libs/package/params.py +26 -0
- lionagi/libs/package/system.py +18 -0
- lionagi/libs/package/types.py +26 -0
- lionagi/libs/parse/__init__.py +1 -0
- lionagi/libs/parse/flatten/__init__.py +9 -0
- lionagi/libs/parse/flatten/flatten.py +168 -0
- lionagi/libs/parse/flatten/params.py +52 -0
- lionagi/libs/parse/flatten/unflatten.py +79 -0
- lionagi/libs/parse/json/__init__.py +27 -0
- lionagi/libs/parse/json/as_readable.py +104 -0
- lionagi/libs/parse/json/extract.py +102 -0
- lionagi/libs/parse/json/parse.py +179 -0
- lionagi/libs/parse/json/schema.py +227 -0
- lionagi/libs/parse/json/to_json.py +71 -0
- lionagi/libs/parse/nested/__init__.py +33 -0
- lionagi/libs/parse/nested/nfilter.py +55 -0
- lionagi/libs/parse/nested/nget.py +40 -0
- lionagi/libs/parse/nested/ninsert.py +103 -0
- lionagi/libs/parse/nested/nmerge.py +155 -0
- lionagi/libs/parse/nested/npop.py +66 -0
- lionagi/libs/parse/nested/nset.py +89 -0
- lionagi/libs/parse/nested/to_flat_list.py +64 -0
- lionagi/libs/parse/nested/utils.py +185 -0
- lionagi/libs/parse/string_parse/__init__.py +11 -0
- lionagi/libs/parse/string_parse/code_block.py +73 -0
- lionagi/libs/parse/string_parse/docstring.py +179 -0
- lionagi/libs/parse/string_parse/function_.py +92 -0
- lionagi/libs/parse/type_convert/__init__.py +19 -0
- lionagi/libs/parse/type_convert/params.py +145 -0
- lionagi/libs/parse/type_convert/to_dict.py +333 -0
- lionagi/libs/parse/type_convert/to_list.py +186 -0
- lionagi/libs/parse/type_convert/to_num.py +358 -0
- lionagi/libs/parse/type_convert/to_str.py +195 -0
- lionagi/libs/parse/types.py +9 -0
- lionagi/libs/parse/validate/__init__.py +14 -0
- lionagi/libs/parse/validate/boolean.py +96 -0
- lionagi/libs/parse/validate/keys.py +150 -0
- lionagi/libs/parse/validate/mapping.py +109 -0
- lionagi/libs/parse/validate/params.py +62 -0
- lionagi/libs/parse/xml/__init__.py +10 -0
- lionagi/libs/parse/xml/convert.py +56 -0
- lionagi/libs/parse/xml/parser.py +93 -0
- lionagi/libs/string_similarity/__init__.py +32 -0
- lionagi/libs/string_similarity/algorithms.py +219 -0
- lionagi/libs/string_similarity/matcher.py +102 -0
- lionagi/libs/string_similarity/utils.py +15 -0
- lionagi/libs/utils.py +255 -0
- lionagi/operations/__init__.py +3 -6
- lionagi/operations/brainstorm/__init__.py +3 -0
- lionagi/operations/brainstorm/brainstorm.py +204 -0
- lionagi/operations/brainstorm/prompt.py +1 -0
- lionagi/operations/plan/__init__.py +3 -0
- lionagi/operations/plan/plan.py +172 -0
- lionagi/operations/plan/prompt.py +21 -0
- lionagi/operations/select/__init__.py +3 -0
- lionagi/operations/select/prompt.py +1 -0
- lionagi/operations/select/select.py +100 -0
- lionagi/operations/select/utils.py +107 -0
- lionagi/operations/utils.py +35 -0
- lionagi/protocols/adapters/adapter.py +79 -0
- lionagi/protocols/adapters/json_adapter.py +43 -0
- lionagi/protocols/adapters/pandas_adapter.py +96 -0
- lionagi/protocols/configs/__init__.py +15 -0
- lionagi/protocols/configs/branch_config.py +86 -0
- lionagi/protocols/configs/id_config.py +15 -0
- lionagi/protocols/configs/imodel_config.py +73 -0
- lionagi/protocols/configs/log_config.py +93 -0
- lionagi/protocols/configs/retry_config.py +29 -0
- lionagi/protocols/operatives/__init__.py +15 -0
- lionagi/protocols/operatives/action.py +181 -0
- lionagi/protocols/operatives/instruct.py +196 -0
- lionagi/protocols/operatives/operative.py +182 -0
- lionagi/protocols/operatives/prompts.py +232 -0
- lionagi/protocols/operatives/reason.py +56 -0
- lionagi/protocols/operatives/step.py +217 -0
- lionagi/protocols/registries/_component_registry.py +19 -0
- lionagi/protocols/registries/_pile_registry.py +26 -0
- lionagi/service/__init__.py +13 -0
- lionagi/service/complete_request_info.py +11 -0
- lionagi/service/imodel.py +110 -0
- lionagi/service/rate_limiter.py +108 -0
- lionagi/service/service.py +37 -0
- lionagi/service/service_match_util.py +131 -0
- lionagi/service/service_util.py +72 -0
- lionagi/service/token_calculator.py +51 -0
- lionagi/settings.py +136 -0
- lionagi/strategies/base.py +53 -0
- lionagi/strategies/concurrent.py +71 -0
- lionagi/strategies/concurrent_chunk.py +43 -0
- lionagi/strategies/concurrent_sequential_chunk.py +104 -0
- lionagi/strategies/params.py +128 -0
- lionagi/strategies/sequential.py +23 -0
- lionagi/strategies/sequential_chunk.py +89 -0
- lionagi/strategies/sequential_concurrent_chunk.py +100 -0
- lionagi/strategies/types.py +21 -0
- lionagi/strategies/utils.py +49 -0
- lionagi/version.py +1 -1
- lionagi-0.5.0.dist-info/METADATA +348 -0
- lionagi-0.5.0.dist-info/RECORD +373 -0
- {lionagi-0.4.0.dist-info → lionagi-0.5.0.dist-info}/WHEEL +1 -1
- lionagi/core/_setting/_setting.py +0 -59
- lionagi/core/action/README.md +0 -20
- lionagi/core/action/manual.py +0 -1
- lionagi/core/action/node.py +0 -94
- lionagi/core/action/tool_manager.py +0 -342
- lionagi/core/agent/README.md +0 -1
- lionagi/core/agent/base_agent.py +0 -82
- lionagi/core/agent/eval/README.md +0 -1
- lionagi/core/agent/eval/evaluator.py +0 -1
- lionagi/core/agent/eval/vote.py +0 -40
- lionagi/core/agent/learn/learner.py +0 -59
- lionagi/core/agent/plan/unit_template.py +0 -1
- lionagi/core/collections/README.md +0 -23
- lionagi/core/collections/__init__.py +0 -16
- lionagi/core/collections/_logger.py +0 -312
- lionagi/core/collections/abc/README.md +0 -63
- lionagi/core/collections/abc/__init__.py +0 -53
- lionagi/core/collections/abc/component.py +0 -620
- lionagi/core/collections/abc/concepts.py +0 -277
- lionagi/core/collections/abc/exceptions.py +0 -136
- lionagi/core/collections/abc/util.py +0 -45
- lionagi/core/collections/exchange.py +0 -146
- lionagi/core/collections/flow.py +0 -416
- lionagi/core/collections/model.py +0 -465
- lionagi/core/collections/pile.py +0 -1232
- lionagi/core/collections/progression.py +0 -221
- lionagi/core/collections/util.py +0 -73
- lionagi/core/director/README.md +0 -1
- lionagi/core/director/direct.py +0 -298
- lionagi/core/director/director.py +0 -2
- lionagi/core/director/operations/select.py +0 -3
- lionagi/core/director/operations/utils.py +0 -6
- lionagi/core/engine/branch_engine.py +0 -361
- lionagi/core/engine/instruction_map_engine.py +0 -213
- lionagi/core/engine/sandbox_.py +0 -16
- lionagi/core/engine/script_engine.py +0 -89
- lionagi/core/executor/base_executor.py +0 -97
- lionagi/core/executor/graph_executor.py +0 -335
- lionagi/core/executor/neo4j_executor.py +0 -394
- lionagi/core/generic/README.md +0 -0
- lionagi/core/generic/edge_condition.py +0 -17
- lionagi/core/generic/hyperedge.py +0 -1
- lionagi/core/generic/tree.py +0 -49
- lionagi/core/generic/tree_node.py +0 -85
- lionagi/core/mail/__init__.py +0 -11
- lionagi/core/mail/mail.py +0 -26
- lionagi/core/mail/mail_manager.py +0 -185
- lionagi/core/mail/package.py +0 -49
- lionagi/core/mail/start_mail.py +0 -36
- lionagi/core/message/__init__.py +0 -18
- lionagi/core/message/action_request.py +0 -114
- lionagi/core/message/action_response.py +0 -121
- lionagi/core/message/assistant_response.py +0 -80
- lionagi/core/message/instruction.py +0 -194
- lionagi/core/message/message.py +0 -86
- lionagi/core/message/system.py +0 -71
- lionagi/core/message/util.py +0 -274
- lionagi/core/report/__init__.py +0 -4
- lionagi/core/report/base.py +0 -201
- lionagi/core/report/form.py +0 -212
- lionagi/core/report/report.py +0 -150
- lionagi/core/report/util.py +0 -15
- lionagi/core/rule/_default.py +0 -17
- lionagi/core/rule/action.py +0 -87
- lionagi/core/rule/base.py +0 -234
- lionagi/core/rule/boolean.py +0 -56
- lionagi/core/rule/choice.py +0 -48
- lionagi/core/rule/mapping.py +0 -82
- lionagi/core/rule/number.py +0 -73
- lionagi/core/rule/rulebook.py +0 -45
- lionagi/core/rule/string.py +0 -43
- lionagi/core/rule/util.py +0 -0
- lionagi/core/session/directive_mixin.py +0 -307
- lionagi/core/structure/__init__.py +0 -1
- lionagi/core/structure/chain.py +0 -1
- lionagi/core/structure/forest.py +0 -1
- lionagi/core/structure/graph.py +0 -1
- lionagi/core/structure/tree.py +0 -1
- lionagi/core/unit/__init__.py +0 -4
- lionagi/core/unit/parallel_unit.py +0 -234
- lionagi/core/unit/template/action.py +0 -65
- lionagi/core/unit/template/base.py +0 -35
- lionagi/core/unit/template/plan.py +0 -69
- lionagi/core/unit/template/predict.py +0 -95
- lionagi/core/unit/template/score.py +0 -108
- lionagi/core/unit/template/select.py +0 -91
- lionagi/core/unit/unit.py +0 -452
- lionagi/core/unit/unit_form.py +0 -290
- lionagi/core/unit/unit_mixin.py +0 -1166
- lionagi/core/unit/util.py +0 -103
- lionagi/core/validator/validator.py +0 -376
- lionagi/core/work/work.py +0 -59
- lionagi/core/work/work_edge.py +0 -102
- lionagi/core/work/work_function.py +0 -114
- lionagi/core/work/work_function_node.py +0 -50
- lionagi/core/work/work_queue.py +0 -90
- lionagi/core/work/work_task.py +0 -151
- lionagi/core/work/worker.py +0 -410
- lionagi/core/work/worker_engine.py +0 -208
- lionagi/core/work/worklog.py +0 -108
- lionagi/experimental/compressor/base.py +0 -47
- lionagi/experimental/compressor/llm_compressor.py +0 -265
- lionagi/experimental/compressor/llm_summarizer.py +0 -61
- lionagi/experimental/compressor/util.py +0 -70
- lionagi/experimental/directive/README.md +0 -1
- lionagi/experimental/directive/__init__.py +0 -19
- lionagi/experimental/directive/parser/base_parser.py +0 -294
- lionagi/experimental/directive/parser/base_syntax.txt +0 -200
- lionagi/experimental/directive/template/base_template.py +0 -71
- lionagi/experimental/directive/template/schema.py +0 -36
- lionagi/experimental/directive/tokenizer.py +0 -59
- lionagi/experimental/evaluator/README.md +0 -1
- lionagi/experimental/evaluator/ast_evaluator.py +0 -119
- lionagi/experimental/evaluator/base_evaluator.py +0 -213
- lionagi/experimental/knowledge/__init__.py +0 -0
- lionagi/experimental/knowledge/base.py +0 -10
- lionagi/experimental/knowledge/graph.py +0 -0
- lionagi/experimental/memory/__init__.py +0 -0
- lionagi/experimental/strategies/__init__.py +0 -0
- lionagi/experimental/strategies/base.py +0 -1
- lionagi/integrations/bridge/__init__.py +0 -4
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +0 -127
- lionagi/integrations/bridge/langchain_/__init__.py +0 -0
- lionagi/integrations/bridge/langchain_/documents.py +0 -138
- lionagi/integrations/bridge/langchain_/langchain_bridge.py +0 -68
- lionagi/integrations/bridge/llamaindex_/__init__.py +0 -0
- lionagi/integrations/bridge/llamaindex_/index.py +0 -36
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +0 -108
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +0 -256
- lionagi/integrations/bridge/llamaindex_/node_parser.py +0 -92
- lionagi/integrations/bridge/llamaindex_/reader.py +0 -201
- lionagi/integrations/bridge/llamaindex_/textnode.py +0 -59
- lionagi/integrations/bridge/pydantic_/__init__.py +0 -0
- lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +0 -7
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +0 -39
- lionagi/integrations/chunker/__init__.py +0 -0
- lionagi/integrations/chunker/chunk.py +0 -314
- lionagi/integrations/config/__init__.py +0 -4
- lionagi/integrations/config/mlx_configs.py +0 -1
- lionagi/integrations/config/oai_configs.py +0 -154
- lionagi/integrations/config/ollama_configs.py +0 -1
- lionagi/integrations/config/openrouter_configs.py +0 -74
- lionagi/integrations/langchain_/__init__.py +0 -0
- lionagi/integrations/llamaindex_/__init__.py +0 -0
- lionagi/integrations/loader/__init__.py +0 -0
- lionagi/integrations/loader/load.py +0 -257
- lionagi/integrations/loader/load_util.py +0 -214
- lionagi/integrations/provider/__init__.py +0 -11
- lionagi/integrations/provider/_mapping.py +0 -47
- lionagi/integrations/provider/litellm.py +0 -53
- lionagi/integrations/provider/mistralai.py +0 -1
- lionagi/integrations/provider/mlx_service.py +0 -55
- lionagi/integrations/provider/oai.py +0 -196
- lionagi/integrations/provider/ollama.py +0 -55
- lionagi/integrations/provider/openrouter.py +0 -170
- lionagi/integrations/provider/services.py +0 -138
- lionagi/integrations/provider/transformers.py +0 -108
- lionagi/integrations/storage/__init__.py +0 -3
- lionagi/integrations/storage/neo4j.py +0 -681
- lionagi/integrations/storage/storage_util.py +0 -302
- lionagi/integrations/storage/structure_excel.py +0 -291
- lionagi/integrations/storage/to_csv.py +0 -70
- lionagi/integrations/storage/to_excel.py +0 -91
- lionagi/libs/ln_api.py +0 -944
- lionagi/libs/ln_async.py +0 -208
- lionagi/libs/ln_context.py +0 -37
- lionagi/libs/ln_convert.py +0 -671
- lionagi/libs/ln_dataframe.py +0 -187
- lionagi/libs/ln_func_call.py +0 -1328
- lionagi/libs/ln_image.py +0 -114
- lionagi/libs/ln_knowledge_graph.py +0 -422
- lionagi/libs/ln_nested.py +0 -822
- lionagi/libs/ln_parse.py +0 -750
- lionagi/libs/ln_queue.py +0 -107
- lionagi/libs/ln_tokenize.py +0 -179
- lionagi/libs/ln_validate.py +0 -299
- lionagi/libs/special_tokens.py +0 -172
- lionagi/libs/sys_util.py +0 -710
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +0 -20
- lionagi/lions/coder/base_prompts.py +0 -22
- lionagi/lions/coder/code_form.py +0 -15
- lionagi/lions/coder/coder.py +0 -184
- lionagi/lions/coder/util.py +0 -101
- lionagi/lions/director/__init__.py +0 -0
- lionagi/lions/judge/__init__.py +0 -0
- lionagi/lions/judge/config.py +0 -8
- lionagi/lions/judge/data/__init__.py +0 -0
- lionagi/lions/judge/data/sample_codes.py +0 -526
- lionagi/lions/judge/data/sample_rurbic.py +0 -48
- lionagi/lions/judge/forms/__init__.py +0 -0
- lionagi/lions/judge/forms/code_analysis_form.py +0 -126
- lionagi/lions/judge/rubric.py +0 -34
- lionagi/lions/judge/services/__init__.py +0 -0
- lionagi/lions/judge/services/judge_code.py +0 -49
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +0 -192
- lionagi/lions/researcher/data_source/google_.py +0 -207
- lionagi/lions/researcher/data_source/wiki_.py +0 -98
- lionagi/lions/researcher/data_source/yfinance_.py +0 -21
- lionagi/operations/brainstorm.py +0 -87
- lionagi/operations/config.py +0 -6
- lionagi/operations/rank.py +0 -102
- lionagi/operations/score.py +0 -144
- lionagi/operations/select.py +0 -141
- lionagi-0.4.0.dist-info/METADATA +0 -241
- lionagi-0.4.0.dist-info/RECORD +0 -249
- /lionagi/{core/_setting → integrations/anthropic_/api_endpoints/messages/response}/__init__.py +0 -0
- /lionagi/{core/agent → integrations/groq_/api_endpoints}/__init__.py +0 -0
- /lionagi/{core/agent/eval → integrations/ollama_/api_endpoints/completion}/__init__.py +0 -0
- /lionagi/{core/agent/learn → integrations/ollama_/api_endpoints/embedding}/__init__.py +0 -0
- /lionagi/{core/agent/plan → integrations/openai_}/__init__.py +0 -0
- /lionagi/{core/director → integrations/openai_/api_endpoints/chat_completions/response}/__init__.py +0 -0
- /lionagi/{core/director/operations → integrations/openai_/image_token_calculator}/__init__.py +0 -0
- /lionagi/{core/engine → integrations/perplexity_/api_endpoints}/__init__.py +0 -0
- /lionagi/{core/executor → integrations/perplexity_/api_endpoints/chat_completions}/__init__.py +0 -0
- /lionagi/{core/generic/registry/component_registry → integrations/perplexity_/api_endpoints/chat_completions/request}/__init__.py +0 -0
- /lionagi/{core/rule → integrations/perplexity_/api_endpoints/chat_completions/response}/__init__.py +0 -0
- /lionagi/{core/unit/template → libs/compress}/__init__.py +0 -0
- /lionagi/{core/validator → libs/file}/__init__.py +0 -0
- /lionagi/{core/work → libs/func}/__init__.py +0 -0
- /lionagi/{experimental → libs/package}/__init__.py +0 -0
- /lionagi/{core/agent/plan/plan.py → libs/parse/params.py} +0 -0
- /lionagi/{experimental/compressor → protocols}/__init__.py +0 -0
- /lionagi/{experimental/directive/parser → protocols/adapters}/__init__.py +0 -0
- /lionagi/{experimental/directive/template → protocols/registries}/__init__.py +0 -0
- /lionagi/{experimental/evaluator → strategies}/__init__.py +0 -0
- {lionagi-0.4.0.dist-info → lionagi-0.5.0.dist-info/licenses}/LICENSE +0 -0

lionagi/operations/brainstorm/brainstorm.py (new file)
@@ -0,0 +1,204 @@
+# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+from lionagi.core.session.branch import Branch
+from lionagi.core.session.session import Session
+from lionagi.core.typing import ID, Any, BaseModel
+from lionagi.libs.func import alcall
+from lionagi.libs.parse import to_flat_list
+from lionagi.protocols.operatives.instruct import (
+    INSTRUCT_MODEL_FIELD,
+    Instruct,
+    InstructResponse,
+)
+
+from ..utils import prepare_instruct, prepare_session
+from .prompt import PROMPT
+
+
+class BrainstormOperation(BaseModel):
+    initial: Any
+    brainstorm: list[Instruct] | None = None
+    explore: list[InstructResponse] | None = None
+
+
+async def run_instruct(
+    ins: Instruct,
+    session: Session,
+    branch: Branch,
+    auto_run: bool,
+    verbose: bool = True,
+    **kwargs: Any,
+) -> Any:
+    """Execute an instruction within a brainstorming session.
+
+    Args:
+        ins: The instruction model to run.
+        session: The current session.
+        branch: The branch to operate on.
+        auto_run: Whether to automatically run nested instructions.
+        verbose: Whether to enable verbose output.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        The result of the instruction execution.
+    """
+
+    async def run(ins_):
+        if verbose:
+            msg_ = (
+                ins_.guidance[:100] + "..."
+                if len(ins_.guidance) > 100
+                else ins_.guidance
+            )
+            print(f"\n-----Running instruction-----\n{msg_}")
+        b_ = session.split(branch)
+        return await run_instruct(
+            ins_, session, b_, False, verbose=verbose, **kwargs
+        )
+
+    config = {**ins.model_dump(), **kwargs}
+    res = await branch.operate(**config)
+    branch.msgs.logger.dump()
+    instructs = []
+
+    if hasattr(res, "instruct_models"):
+        instructs = res.instruct_models
+
+    if auto_run is True and instructs:
+        ress = await alcall(instructs, run)
+        response_ = []
+        for res in ress:
+            if isinstance(res, list):
+                response_.extend(res)
+            else:
+                response_.append(res)
+        response_.insert(0, res)
+        return response_
+
+    return res
+
+
+async def brainstorm(
+    instruct: Instruct | dict[str, Any],
+    num_instruct: int = 2,
+    session: Session | None = None,
+    branch: Branch | ID.Ref | None = None,
+    auto_run: bool = True,
+    auto_explore: bool = False,
+    explore_kwargs: dict[str, Any] | None = None,
+    branch_kwargs: dict[str, Any] | None = None,
+    return_session: bool = False,
+    verbose: bool = False,
+    **kwargs: Any,
+) -> Any:
+    """Perform a brainstorming session.
+
+    Args:
+        instruct: Instruction model or dictionary.
+        num_instruct: Number of instructions to generate.
+        session: Existing session or None to create a new one.
+        branch: Existing branch or reference.
+        auto_run: If True, automatically run generated instructions.
+        branch_kwargs: Additional arguments for branch creation.
+        return_session: If True, return the session with results.
+        verbose: Whether to enable verbose output.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        The results of the brainstorming session, optionally with the session.
+    """
+
+    if auto_explore and not auto_run:
+        raise ValueError("auto_explore requires auto_run to be True.")
+
+    if verbose:
+        print(f"Starting brainstorming...")
+
+    field_models: list = kwargs.get("field_models", [])
+    if INSTRUCT_MODEL_FIELD not in field_models:
+        field_models.append(INSTRUCT_MODEL_FIELD)
+
+    kwargs["field_models"] = field_models
+    session, branch = prepare_session(session, branch, branch_kwargs)
+    instruct = prepare_instruct(
+        instruct, PROMPT.format(num_instruct=num_instruct)
+    )
+    res1 = await branch.operate(**instruct, **kwargs)
+    out = BrainstormOperation(initial=res1)
+
+    if verbose:
+        print("Initial brainstorming complete.")
+
+    instructs = None
+
+    async def run(ins_):
+        if verbose:
+            msg_ = (
+                ins_.guidance[:100] + "..."
+                if len(ins_.guidance) > 100
+                else ins_.guidance
+            )
+            print(f"\n-----Running instruction-----\n{msg_}")
+        b_ = session.split(branch)
+        return await run_instruct(
+            ins_, session, b_, auto_run, verbose=verbose, **kwargs
+        )
+
+    if not auto_run:
+        if return_session:
+            return out, session
+        return out
+
+    async with session.branches:
+        response_ = []
+        if hasattr(res1, "instruct_models"):
+            instructs: list[Instruct] = res1.instruct_models
+            ress = await alcall(instructs, run)
+            ress = to_flat_list(ress, dropna=True)
+
+            response_ = [
+                res if not isinstance(res, str | dict) else None
+                for res in ress
+            ]
+            response_ = to_flat_list(response_, unique=True, dropna=True)
+            out.brainstorm = (
+                response_ if isinstance(response_, list) else [response_]
+            )
+            response_.insert(0, res1)
+
+        if response_ and auto_explore:
+
+            async def explore(ins_: Instruct):
+                if verbose:
+                    msg_ = (
+                        ins_.guidance[:100] + "..."
+                        if len(ins_.guidance) > 100
+                        else ins_.guidance
+                    )
+                    print(f"\n-----Exploring Idea-----\n{msg_}")
+                b_ = session.split(branch)
+                res = await b_.instruct(ins_, **(explore_kwargs or {}))
+                return InstructResponse(
+                    instruct=ins_,
+                    response=res,
+                )
+
+            response_ = to_flat_list(
+                [
+                    i.instruct_models
+                    for i in response_
+                    if hasattr(i, "instruct_models")
+                ],
+                dropna=True,
+                unique=True,
+            )
+            res_explore = await alcall(response_, explore)
+            out.explore = res_explore
+
+        if return_session:
+            return out, session
+
+    return out

lionagi/operations/brainstorm/prompt.py (new file)
@@ -0,0 +1 @@
+PROMPT = """Perform a brainstorm session. Fill in {num_instruct} Instruct for the appropriate next step, we will run them separately and concurrently with same external context, but you should supplement each idea with certain amount of uniqueness while adhering to the guidelines and standards of the project. The Instruct should be concisely informational. If you think a particular step requries further extension, you should mention it in the instruct"""
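
The two hunks above make up the new brainstorm operation: `brainstorm()` asks the model to fill in `num_instruct` follow-up `Instruct` items and, with `auto_run`/`auto_explore`, runs and expands them on split branches. A minimal usage sketch, not part of the release diff, assuming a model provider is already configured for the default branch (e.g. OpenAI credentials in the environment):

# Illustrative sketch only: assumes a configured model provider for the default Branch.
import asyncio

from lionagi.operations.brainstorm.brainstorm import brainstorm


async def main():
    result = await brainstorm(
        instruct={"instruction": "Propose ways to speed up our CI pipeline."},
        num_instruct=3,  # ask for three follow-up Instruct items
        auto_run=True,   # run each generated instruction on a split branch
        verbose=True,
    )
    # result is a BrainstormOperation with .initial, .brainstorm and .explore
    print(result.brainstorm)


asyncio.run(main())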

lionagi/operations/plan/plan.py (new file)
@@ -0,0 +1,172 @@
+# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+from lionagi.core.session.branch import Branch
+from lionagi.core.session.session import Session
+from lionagi.core.typing import ID, Any, BaseModel, Literal
+from lionagi.protocols.operatives.instruct import (
+    INSTRUCT_MODEL_FIELD,
+    Instruct,
+    InstructResponse,
+)
+
+from ..utils import prepare_instruct, prepare_session
+from .prompt import EXPANSION_PROMPT, PLAN_PROMPT
+
+
+class PlanOperation(BaseModel):
+    initial: Any
+    plan: list[Instruct] | None = None
+    execute: list[InstructResponse] | None = None
+
+
+async def run_step(
+    ins: Instruct,
+    session: Session,
+    branch: Branch,
+    verbose: bool = True,
+    **kwargs: Any,
+) -> Any:
+    """Execute a single step of the plan.
+
+    Args:
+        ins: The instruction model for the step.
+        session: The current session.
+        branch: The branch to operate on.
+        verbose: Whether to enable verbose output.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        The result of the branch operation.
+    """
+    if verbose:
+        instruction = (
+            ins.instruction[:100] + "..."
+            if len(ins.instruction) > 100
+            else ins.instruction
+        )
+        print(f"Further planning: {instruction}")
+
+    config = {**ins.model_dump(), **kwargs}
+    guide = config.pop("guidance", "")
+    config["guidance"] = EXPANSION_PROMPT + "\n" + str(guide)
+
+    res = await branch.operate(**config)
+    branch.msgs.logger.dump()
+    return res
+
+
+async def plan(
+    instruct: Instruct | dict[str, Any],
+    num_steps: int = 2,
+    session: Session | None = None,
+    branch: Branch | ID.Ref | None = None,
+    auto_run: bool = True,
+    auto_execute: bool = False,
+    execution_strategy: Literal["sequential"] = "sequential",
+    execution_kwargs: dict[str, Any] | None = None,
+    branch_kwargs: dict[str, Any] | None = None,
+    return_session: bool = False,
+    verbose: bool = True,
+    **kwargs: Any,
+) -> PlanOperation | tuple[list[InstructResponse], Session]:
+    """Create and execute a multi-step plan.
+
+    Args:
+        instruct: Instruction model or dictionary.
+        num_steps: Number of steps in the plan.
+        session: Existing session or None to create a new one.
+        branch: Existing branch or reference.
+        auto_run: If True, automatically run the steps.
+        branch_kwargs: Additional keyword arguments for branch creation.
+        return_session: If True, return the session along with results.
+        verbose: Whether to enable verbose output.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        Results of the plan execution, optionally with the session.
+    """
+    if num_steps > 5:
+        raise ValueError("Number of steps must be 5 or less")
+
+    if verbose:
+        print(f"Planning execution with {num_steps} steps...")
+
+    field_models: list = kwargs.get("field_models", [])
+    if INSTRUCT_MODEL_FIELD not in field_models:
+        field_models.append(INSTRUCT_MODEL_FIELD)
+    kwargs["field_models"] = field_models
+    session, branch = prepare_session(session, branch, branch_kwargs)
+    execute_branch: Branch = session.split(branch)
+    instruct = prepare_instruct(
+        instruct, PLAN_PROMPT.format(num_steps=num_steps)
+    )
+
+    res1 = await branch.operate(**instruct, **kwargs)
+    out = PlanOperation(initial=res1)
+
+    if verbose:
+        print("Initial planning complete. Starting step planning...")
+
+    if not auto_run:
+        if return_session:
+            return res1, session
+        return res1
+
+    results = []
+    if hasattr(res1, "instruct_models"):
+        instructs: list[Instruct] = res1.instruct_models
+        for i, ins in enumerate(instructs, 1):
+            if verbose:
+                print(f"\n----- Planning step {i}/{len(instructs)} -----")
+            res = await run_step(
+                ins, session, branch, verbose=verbose, **kwargs
+            )
+            results.append(res)
+
+    if verbose:
+        print("\nAll planning completed successfully!")
+
+    all_plans = []
+    for res in results:
+        if hasattr(res, "instruct_models"):
+            for i in res.instruct_models:
+                if i and i not in all_plans:
+                    all_plans.append(i)
+    out.plan = all_plans
+
+    if auto_execute:
+        if verbose:
+            print("\nStarting execution of all steps...")
+        results = []
+        match execution_strategy:
+            case "sequential":
+                for i, ins in enumerate(all_plans, 1):
+                    if verbose:
+                        print(
+                            f"\n------ Executing step {i}/{len(all_plans)} ------"
+                        )
+                        msg = (
+                            ins.instruction[:100] + "..."
+                            if len(ins.instruction) > 100
+                            else ins.instruction
+                        )
+                        print(f"Instruction: {msg}")
+                    res = await execute_branch.instruct(
+                        ins, **(execution_kwargs or {})
+                    )
+                    res_ = InstructResponse(instruct=ins, response=res)
+                    results.append(res_)
+                out.execute = results
+                if verbose:
+                    print("\nAll steps executed successfully!")
+            case _:
+                raise ValueError(
+                    f"Invalid execution strategy: {execution_strategy}"
+                )
+
+    if return_session:
+        return out, session
+    return out

lionagi/operations/plan/prompt.py (new file)
@@ -0,0 +1,21 @@
+PLAN_PROMPT = """
+Develop a high-level plan with {num_steps} distinct steps. Each step should:
+1. Represent a major milestone or phase
+2. Be logically sequenced for dependencies
+3. Be clearly distinct from other steps
+4. Have measurable completion criteria
+5. Be suitable for further decomposition
+"""
+
+EXPANSION_PROMPT = """
+Break down a high-level plan into detailed concrete executable actions. Each step should:
+- Ensure actions are atomic and verifiable
+- Include necessary context and preconditions
+- Specify expected outcomes and validations
+- Maintain sequential dependencies
+- Be self-contained with clear scope
+- Include all required context/parameters
+- Have unambiguous success criteria
+- Specify error handling approach
+- Define expected outputs
+"""
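
Together these two hunks implement the new plan operation: `plan()` drafts at most five high-level steps using PLAN_PROMPT, optionally expands each step with EXPANSION_PROMPT (`auto_run`), and can execute the expanded steps sequentially (`auto_execute`). A minimal usage sketch, not part of the release diff, assuming a configured model provider:

# Illustrative sketch only: assumes a configured model provider.
import asyncio

from lionagi.operations.plan.plan import plan


async def main():
    result = await plan(
        instruct={"instruction": "Migrate the billing service from REST to gRPC."},
        num_steps=3,         # must be 5 or less
        auto_run=True,       # expand each high-level step with EXPANSION_PROMPT
        auto_execute=False,  # set True to run the expanded steps sequentially
        verbose=True,
    )
    # result is a PlanOperation with .initial, .plan and .execute
    for step in result.plan or []:
        print(step.instruction)


asyncio.run(main())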

lionagi/operations/select/prompt.py (new file)
@@ -0,0 +1 @@
+PROMPT = "Please select up to {max_num_selections} items from the following list {choices}. Provide the selection(s) into appropriate field in format required, and no comments from you"

lionagi/operations/select/select.py (new file)
@@ -0,0 +1,100 @@
+# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+from enum import Enum
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from lionagi import Branch
+from lionagi.protocols.operatives.instruct import Instruct
+
+from .prompt import PROMPT
+from .utils import parse_selection, parse_to_representation
+
+
+class SelectionModel(BaseModel):
+    """Model representing the selection output."""
+
+    selected: list[Any] = Field(default_factory=list)
+
+
+async def select(
+    instruct: Instruct | dict[str, Any],
+    choices: list[str] | type[Enum] | dict[str, Any],
+    max_num_selections: int = 1,
+    branch: Branch | None = None,
+    branch_kwargs: dict[str, Any] | None = None,
+    return_branch: bool = False,
+    verbose: bool = False,
+    **kwargs: Any,
+) -> SelectionModel | tuple[SelectionModel, Branch]:
+    """Perform a selection operation from given choices.
+
+    Args:
+        instruct: Instruction model or dictionary.
+        choices: Options to select from.
+        max_num_selections: Maximum selections allowed.
+        branch: Existing branch or None to create a new one.
+        branch_kwargs: Additional arguments for branch creation.
+        return_branch: If True, return the branch with the selection.
+        verbose: Whether to enable verbose output.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        A SelectionModel instance, optionally with the branch.
+    """
+    if verbose:
+        print(f"Starting selection with up to {max_num_selections} choices.")
+
+    branch = branch or Branch(**(branch_kwargs or {}))
+    selections, contents = parse_to_representation(choices)
+    prompt = PROMPT.format(
+        max_num_selections=max_num_selections, choices=selections
+    )
+
+    if isinstance(instruct, Instruct):
+        instruct = instruct.to_dict()
+
+    instruct = instruct or {}
+
+    if instruct.get("instruction", None) is not None:
+        instruct["instruction"] = (
+            f"{instruct['instruction']}\n\n{prompt} \n\n "
+        )
+    else:
+        instruct["instruction"] = prompt
+
+    context = instruct.get("context", None) or []
+    context = [context] if not isinstance(context, list) else context
+    context.extend([{k: v} for k, v in zip(selections, contents)])
+    instruct["context"] = context
+
+    response_model: SelectionModel = await branch.operate(
+        operative_model=SelectionModel,
+        **kwargs,
+        **instruct,
+    )
+    if verbose:
+        print(f"Received selection: {response_model.selected}")
+
+    selected = response_model
+    if isinstance(response_model, BaseModel) and hasattr(
+        response_model, "selected"
+    ):
+        selected = response_model.selected
+    selected = [selected] if not isinstance(selected, list) else selected
+
+    corrected_selections = [parse_selection(i, choices) for i in selected]
+
+    if isinstance(response_model, BaseModel):
+        response_model.selected = corrected_selections
+
+    elif isinstance(response_model, dict):
+        response_model["selected"] = corrected_selections
+
+    if return_branch:
+        return response_model, branch
+    return response_model
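
The `select()` operation above formats the choices into the selection prompt, asks the model for a `SelectionModel`, then maps the raw answers back onto the real choices via `parse_selection`. A minimal usage sketch, not part of the release diff, assuming a configured model provider for the default `Branch`:

# Illustrative sketch only: assumes a configured model provider.
import asyncio

from lionagi.operations.select.select import select


async def main():
    result = await select(
        instruct={"instruction": "Which store best fits an append-only event log?"},
        choices=["postgres", "sqlite", "rocksdb"],
        max_num_selections=1,
        verbose=True,
    )
    # result is a SelectionModel; .selected holds the fuzzy-corrected picks
    print(result.selected)


asyncio.run(main())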

lionagi/operations/select/utils.py (new file)
@@ -0,0 +1,107 @@
+# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+import inspect
+from enum import Enum
+from typing import Any
+
+from lionagi.core.typing import BaseModel, JsonValue
+from lionagi.libs.parse import is_same_dtype
+from lionagi.libs.string_similarity import string_similarity
+
+
+def parse_to_representation(
+    choices: Enum | dict | list | tuple | set,
+) -> tuple[list[str], JsonValue]:
+    """
+    should use
+    1. iterator of string | BaseModel
+    2. dict[str, JsonValue | BaseModel]
+    3. Enum[str, JsonValue | BaseModel]
+    """
+
+    if isinstance(choices, tuple | set | list):
+        choices = list(choices)
+        if is_same_dtype(choices, str):
+            return choices, choices
+
+    if isinstance(choices, list):
+        if is_same_dtype(choices, BaseModel):
+            choices = {i.__class__.__name__: i for i in choices}
+        if all(
+            inspect.isclass(i) and issubclass(i, BaseModel) for i in choices
+        ):
+            choices = {i.__name__: i for i in choices}
+    if isinstance(choices, type) and issubclass(choices, Enum):
+        keys = [i.name for i in choices]
+        contents = [get_choice_representation(i) for i in choices]
+        return keys, contents
+
+    if isinstance(choices, dict):
+        keys = list(choices.keys())
+        contents = list(choices.values())
+        contents = [get_choice_representation(v) for k, v in choices.items()]
+        return keys, contents
+
+    if isinstance(choices, tuple | set | list):
+        choices = list(choices)
+        if is_same_dtype(choices, str):
+            return choices, choices
+
+    raise NotImplementedError
+
+
+def get_choice_representation(choice: Any) -> str:
+
+    if isinstance(choice, str):
+        return choice
+
+    if isinstance(choice, BaseModel):
+        return f"{choice.__class__.__name__}:\n{choice.model_json_schema(indent=2)}"
+
+    if isinstance(choice, Enum):
+        return get_choice_representation(choice.value)
+
+
+def parse_selection(selection_str: str, choices: Any):
+
+    select_from = []
+
+    if isinstance(choices, dict):
+        select_from = list(choices.keys())
+
+    if inspect.isclass(choices) and issubclass(choices, Enum):
+        select_from = [choice.name for choice in choices]
+
+    if isinstance(choices, list | tuple | set):
+        if is_same_dtype(choices, BaseModel):
+            select_from = [i.__class__.__name__ for i in choices]
+        if is_same_dtype(choices, str):
+            select_from = list(choices)
+        if all(
+            inspect.isclass(i) and issubclass(i, BaseModel) for i in choices
+        ):
+            select_from = [i.__name__ for i in choices]
+
+    if not select_from:
+        raise ValueError("The values provided for choice is not valid")
+
+    selected = string_similarity(
+        selection_str, select_from, return_most_similar=True
+    )
+
+    if isinstance(choices, dict) and selected in choices:
+        return choices[selected]
+
+    if inspect.isclass(choices) and issubclass(choices, Enum):
+        for i in choices:
+            if i.name == selected:
+                return i
+
+    if isinstance(choices, list) and is_same_dtype(choices, str):
+        if selected in choices:
+            return selected
+
+    return selected
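
These helpers accept plain string lists, dicts, Enums, or Pydantic models as choices, and `parse_selection` fuzzy-matches the model's raw string back onto a concrete choice. A small sketch that needs no model call; the enum name and values below are made up for illustration:

# Illustrative sketch only: shows how choices are normalised and how a raw
# model string is mapped back onto a concrete choice. No LLM call involved.
from enum import Enum

from lionagi.operations.select.utils import parse_selection, parse_to_representation


class Storage(Enum):
    POSTGRES = "relational, ACID transactions"
    ROCKSDB = "embedded LSM key-value store"


keys, contents = parse_to_representation(Storage)
print(keys)      # ['POSTGRES', 'ROCKSDB']: the names shown to the model
print(contents)  # the string values, passed along as selection context

picked = parse_selection("rocksdb", Storage)  # fuzzy match against the names
print(picked)    # expected to resolve to Storage.ROCKSDB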

lionagi/operations/utils.py (new file)
@@ -0,0 +1,35 @@
+from lionagi.core.session.branch import Branch
+from lionagi.core.session.session import Session
+from lionagi.protocols.operatives.instruct import Instruct
+
+
+def prepare_session(
+    session=None, branch=None, branch_kwargs=None
+) -> tuple[Session, Branch]:
+    if session is not None:
+        if branch is not None:
+            branch: Branch = session.branches[branch]
+        else:
+            branch = session.new_branch(**(branch_kwargs or {}))
+    else:
+        session = Session()
+        if isinstance(branch, Branch):
+            session.branches.include(branch)
+            session.default_branch = branch
+        if branch is None:
+            branch = session.new_branch(**(branch_kwargs or {}))
+
+    return session, branch
+
+
+def prepare_instruct(instruct: Instruct | dict, prompt: str):
+    if isinstance(instruct, Instruct):
+        instruct = instruct.to_dict()
+    if not isinstance(instruct, dict):
+        raise ValueError(
+            "instruct needs to be an InstructModel object or a dictionary of valid parameters"
+        )
+
+    guidance = instruct.get("guidance", "")
+    instruct["guidance"] = f"\n{prompt}\n{guidance}"
+    return instruct
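
These shared helpers are what `brainstorm`, `plan` and `select` call first: `prepare_session` resolves or creates the `Session`/`Branch` pair, and `prepare_instruct` normalises the instruction to a dict and prepends the operation prompt to any caller-supplied guidance. A tiny sketch of the latter; the prompt string is a placeholder:

# Illustrative sketch only: the prompt text here is a placeholder.
from lionagi.operations.utils import prepare_instruct

cfg = prepare_instruct(
    {"instruction": "Draft a rollout plan.", "guidance": "Prefer small steps."},
    "OPERATION PROMPT GOES HERE",
)
print(cfg["guidance"])  # "\nOPERATION PROMPT GOES HERE\nPrefer small steps."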