lionagi 0.9.4__tar.gz → 0.9.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lionagi-0.9.4 → lionagi-0.9.6}/PKG-INFO +1 -1
- lionagi-0.9.6/lionagi/libs/schema/function_to_schema.py +164 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/ReAct/ReAct.py +35 -1
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/ReAct/utils.py +5 -2
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/chat/chat.py +7 -5
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/communicate/communicate.py +2 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/interpret/interpret.py +2 -1
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/operate/operate.py +2 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/base.py +47 -5
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/match_endpoint.py +7 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/imodel.py +34 -15
- lionagi-0.9.6/lionagi/service/providers/ollama_/chat_completions.py +134 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/openai_/chat_completions.py +2 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/session/branch.py +16 -0
- lionagi-0.9.6/lionagi/tools/query/__init__.py +3 -0
- lionagi-0.9.6/lionagi/version.py +1 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/pyproject.toml +7 -1
- {lionagi-0.9.4 → lionagi-0.9.6}/uv.lock +25 -4
- lionagi-0.9.4/lionagi/libs/schema/function_to_schema.py +0 -104
- lionagi-0.9.4/lionagi/version.py +0 -1
- {lionagi-0.9.4 → lionagi-0.9.6}/.env.example +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.github/FUNDING.yml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.github/dependabot.yml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.github/workflows/ci.yml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.github/workflows/codeql.yml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.github/workflows/docs.yml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.github/workflows/release.yml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.gitignore +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/.pre-commit-config.yaml +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/CODE_OF_CONDUCT.md +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/CONTRIBUTING.md +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/LICENSE +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/README.md +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/cookbooks/ch01_get_started.md +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/cookbooks/ch02_concepts.md +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/dev_tools/count_code_base_lines.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/Makefile +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/_static/custom.css +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/_templates/layout.html +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/conf.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/index.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/action.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/adapter.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/branch.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/branch_operations.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/concepts.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/element_id.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/event.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/form.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/graph.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/index.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/instruct.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/lib_file.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/lib_nested.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/lib_package.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/lib_schema.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/lib_validate.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/log.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/mail.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/message.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/models.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/operative_step.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/pile.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/processor.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/progression.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/service.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/session.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/modules/utils.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/tutorials/get_started.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/tutorials/get_started_pt2.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/tutorials/get_started_pt3.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/docs/tutorials/index.rst +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/_class_registry.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/_errors.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/_types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/file/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/file/chunk.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/file/file_ops.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/file/params.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/file/process.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/file/save.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/flatten.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/nfilter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/nget.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/ninsert.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/nmerge.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/npop.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/nset.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/unflatten.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/nested/utils.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/package/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/package/imports.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/package/management.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/package/params.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/package/system.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/parse.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/schema/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/schema/as_readable.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/schema/extract_code_block.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/schema/extract_docstring.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/schema/json_schema.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/token_transform/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/token_transform/llmlingua.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/token_transform/perplexity.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/token_transform/synthlang.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/validate/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/validate/common_field_validators.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/validate/fuzzy_match_keys.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/validate/fuzzy_validate_mapping.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/validate/string_similarity.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/libs/validate/validate_boolean.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/ReAct/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/_act/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/_act/act.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/brainstorm/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/brainstorm/brainstorm.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/brainstorm/prompt.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/chat/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/communicate/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/instruct/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/instruct/instruct.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/interpret/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/manager.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/operate/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/parse/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/parse/parse.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/plan/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/plan/plan.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/plan/prompt.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/select/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/select/select.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/select/utils.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/translate/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/translate/translate.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/utils.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/action/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/action/function_calling.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/action/manager.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/action/request_response_model.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/action/tool.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/action/utils.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/forms/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/forms/base.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/forms/flow.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/forms/form.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/forms/report.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/base.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/instruct.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/instruct_collection.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/node.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/prompts.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/instruct/reason.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/manager.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/models/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/models/field_model.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/models/model_params.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/models/note.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/models/operable_model.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/models/schema_model.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/operative.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/step.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/base.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/concurrent.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/concurrent_chunk.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/concurrent_sequential_chunk.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/params.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/sequential.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/sequential_chunk.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/sequential_concurrent_chunk.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/strategies/utils.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operatives/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/_concepts.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/adapter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/json_adapter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/pandas_/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/pandas_/csv_adapter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/pandas_/excel_adapter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/pandas_/pd_series_adapter.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/adapters/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/element.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/event.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/log.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/pile.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/processor.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/generic/progression.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/graph/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/graph/edge.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/graph/graph.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/graph/node.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/mail/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/mail/exchange.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/mail/mail.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/mail/mailbox.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/mail/manager.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/mail/package.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/action_request.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/action_response.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/assistant_response.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/base.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/instruction.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/manager.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/message.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/system.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/README.md +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/action_request.jinja2 +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/action_response.jinja2 +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/system_message.jinja2 +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/protocols/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/chat_completion.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/rate_limited_processor.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/token_calculator.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/manager.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/anthropic_/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/anthropic_/messages.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/exa_/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/exa_/models.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/exa_/search.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/exa_/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/groq_/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/groq_/chat_completions.py +0 -0
- {lionagi-0.9.4/lionagi/service/providers/openai_ → lionagi-0.9.6/lionagi/service/providers/ollama_}/__init__.py +0 -0
- {lionagi-0.9.4/lionagi/service/providers/openrouter_ → lionagi-0.9.6/lionagi/service/providers/openai_}/__init__.py +0 -0
- {lionagi-0.9.4/lionagi/service/providers/perplexity_ → lionagi-0.9.6/lionagi/service/providers/openrouter_}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/openrouter_/chat_completions.py +0 -0
- {lionagi-0.9.4/lionagi/session → lionagi-0.9.6/lionagi/service/providers/perplexity_}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/perplexity_/chat_completions.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/perplexity_/models.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/providers/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/types.py +0 -0
- {lionagi-0.9.4/lionagi/tools → lionagi-0.9.6/lionagi/session}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/session/prompts.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/session/session.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/settings.py +0 -0
- {lionagi-0.9.4/lionagi/tools/browser/providers → lionagi-0.9.6/lionagi/tools}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/base.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/browser/__init__.py +0 -0
- {lionagi-0.9.4/lionagi/tools/code → lionagi-0.9.6/lionagi/tools/browser/providers}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/browser/providers/browser_use_.py +0 -0
- {lionagi-0.9.4/lionagi/tools/code/providers → lionagi-0.9.6/lionagi/tools/code}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/code/coder.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/code/manager.py +0 -0
- {lionagi-0.9.4/lionagi/tools/file → lionagi-0.9.6/lionagi/tools/code/providers}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/code/providers/aider_.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/code/providers/e2b_.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/code/sandbox.py +0 -0
- {lionagi-0.9.4/lionagi/tools/file/providers → lionagi-0.9.6/lionagi/tools/file}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/file/manager.py +0 -0
- {lionagi-0.9.4/lionagi/tools/query → lionagi-0.9.6/lionagi/tools/file/providers}/__init__.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/file/providers/docling_.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/file/reader.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/file/writer.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/tools/types.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/lionagi/utils.py +0 -0
- {lionagi-0.9.4 → lionagi-0.9.6}/prompts/doc_style.md +0 -0
lionagi-0.9.6/lionagi/libs/schema/function_to_schema.py
@@ -0,0 +1,164 @@
+# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import inspect
+from typing import Any, Literal
+
+from pydantic import Field, field_validator
+
+from lionagi.libs.schema.extract_docstring import extract_docstring
+from lionagi.libs.validate.common_field_validators import (
+    validate_model_to_type,
+)
+from lionagi.operatives.models.schema_model import SchemaModel
+
+py_json_msp = {
+    "str": "string",
+    "int": "number",
+    "float": "number",
+    "list": "array",
+    "tuple": "array",
+    "bool": "boolean",
+    "dict": "object",
+}
+
+
+class FunctionSchema(SchemaModel):
+    name: str
+    description: str | None = Field(
+        None,
+        description=(
+            "A description of what the function does, used by the "
+            "model to choose when and how to call the function."
+        ),
+    )
+    parameters: dict[str, Any] | None = Field(
+        None,
+        description=(
+            "The parameters the functions accepts, described as a JSON Schema object. "
+            "See the guide (https://platform.openai.com/docs/guides/function-calling) "
+            "for examples, and the JSON Schema reference for documentation about the "
+            "format. Omitting parameters defines a function with an empty parameter list."
+        ),
+        validation_alias="request_options",
+    )
+    strict: bool | None = Field(
+        None,
+        description=(
+            "Whether to enable strict schema adherence when generating the function call. "
+            "If set to true, the model will follow the exact schema defined in the parameters "
+            "field. Only a subset of JSON Schema is supported when strict is true."
+        ),
+    )
+
+    @field_validator("parameters", mode="before")
+    def _validate_parameters(cls, v):
+        if v is None:
+            return None
+        if isinstance(v, dict):
+            return v
+        try:
+            model_type = validate_model_to_type(cls, v)
+            return model_type.model_json_schema()
+        except Exception:
+            raise ValueError(f"Invalid model type: {v}")
+
+    def to_dict(self):
+        dict_ = super().to_dict()
+        return {"type": "function", "function": dict_}
+
+
+def function_to_schema(
+    f_,
+    style: Literal["google", "rest"] = "google",
+    *,
+    request_options: dict[str, Any] | None = None,
+    strict: bool = None,
+    func_description: str = None,
+    parametert_description: dict[str, str] = None,
+    return_obj: bool = False,
+) -> dict:
+    """
+    Generate a schema description for a given function. in openai format
+
+    This function generates a schema description for the given function
+    using typing hints and docstrings. The schema includes the function's
+    name, description, and parameter details.
+
+    Args:
+        func (Callable): The function to generate a schema for.
+        style (str): The docstring format. Can be 'google' (default) or
+            'reST'.
+        func_description (str, optional): A custom description for the
+            function. If not provided, the description will be extracted
+            from the function's docstring.
+        params_description (dict, optional): A dictionary mapping
+            parameter names to their descriptions. If not provided, the
+            parameter descriptions will be extracted from the function's
+            docstring.
+
+    Returns:
+        dict: A schema describing the function, including its name,
+            description, and parameter details.
+
+    Example:
+        >>> def example_func(param1: int, param2: str) -> bool:
+        ...     '''Example function.
+        ...
+        ...     Args:
+        ...         param1 (int): The first parameter.
+        ...         param2 (str): The second parameter.
+        ...     '''
+        ...     return True
+        >>> schema = function_to_schema(example_func)
+        >>> schema['function']['name']
+        'example_func'
+    """
+    # Extract function name
+    func_name = f_.__name__
+
+    # Extract function description and parameter descriptions
+    if not func_description or not parametert_description:
+        func_desc, params_desc = extract_docstring(f_, style)
+        func_description = func_description or func_desc
+        parametert_description = parametert_description or params_desc
+
+    # Extract parameter details using typing hints
+    sig = inspect.signature(f_)
+    parameters: dict[str, Any] = {
+        "type": "object",
+        "properties": {},
+        "required": [],
+    }
+
+    if not request_options:
+        for name, param in sig.parameters.items():
+            # Default type to string and update if type hint is available
+            param_type = "string"
+            if param.annotation is not inspect.Parameter.empty:
+                param_type = py_json_msp[param.annotation.__name__]
+
+            # Extract parameter description from docstring, if available
+            param_description = parametert_description.get(name)
+
+            # Assuming all parameters are required for simplicity
+            parameters["required"].append(name)
+            parameters["properties"][name] = {
+                "type": param_type,
+                "description": param_description,
+            }
+    else:
+        parameters = request_options
+
+    params = {
+        "name": func_name,
+        "description": func_description,
+        "parameters": parameters,
+    }
+    if strict:
+        params["strict"] = strict
+
+    if return_obj:
+        return FunctionSchema(**params)
+    return FunctionSchema(**params).to_dict()
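The new module above turns a plain Python function into an OpenAI-style tool schema via the `FunctionSchema` model. A minimal sketch of how it can be exercised, based only on the source shown (the example function and its docstring are illustrative, not part of the package):

```python
from lionagi.libs.schema.function_to_schema import function_to_schema

def multiply(x: int, y: float) -> float:
    """Multiply two numbers.

    Args:
        x (int): The first factor.
        y (float): The second factor.
    """
    return x * y

schema = function_to_schema(multiply)
# Expected shape, derived from the type hints and docstring above:
# {"type": "function",
#  "function": {"name": "multiply",
#               "description": "Multiply two numbers.",
#               "parameters": {"type": "object",
#                              "properties": {"x": {"type": "number", ...},
#                                             "y": {"type": "number", ...}},
#                              "required": ["x", "y"]}}}
print(schema["function"]["name"])
```

Passing `return_obj=True` would return the `FunctionSchema` instance itself instead of the wrapped dict.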
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/ReAct/ReAct.py
@@ -47,6 +47,8 @@ async def ReAct(
     analysis_model: iModel | None = None,
     verbose_analysis: bool = False,
     verbose_length: int = None,
+    include_token_usage_to_model: bool = True,
+    continue_after_failed_response: bool = False,
     **kwargs,
 ):
     outs = []
@@ -73,6 +75,8 @@ async def ReAct(
         verbose_analysis=verbose_analysis,
         display_as=display_as,
         verbose_length=verbose_length,
+        include_token_usage_to_model=include_token_usage_to_model,
+        continue_after_failed_response=continue_after_failed_response,
         **kwargs,
     ):
         analysis, str_ = i
@@ -101,6 +105,8 @@ async def ReAct(
         analysis_model=analysis_model,
         display_as=display_as,
         verbose_length=verbose_length,
+        include_token_usage_to_model=include_token_usage_to_model,
+        continue_after_failed_response=continue_after_failed_response,
         **kwargs,
     ):
         outs.append(i)
@@ -131,6 +137,8 @@ async def ReActStream(
     verbose_analysis: bool = False,
     display_as: Literal["json", "yaml"] = "yaml",
     verbose_length: int = None,
+    include_token_usage_to_model: bool = True,
+    continue_after_failed_response: bool = False,
     **kwargs,
 ) -> AsyncGenerator:
     irfm: FieldModel | None = None
@@ -213,6 +221,9 @@ async def ReActStream(
     kwargs_for_operate = copy(kwargs)
     kwargs_for_operate["actions"] = True
     kwargs_for_operate["reason"] = True
+    kwargs_for_operate["include_token_usage_to_model"] = (
+        include_token_usage_to_model
+    )
 
     # Step 1: Generate initial ReAct analysis
     analysis: ReActAnalysis = await branch.operate(
@@ -255,7 +266,7 @@ async def ReActStream(
             if isinstance(analysis, dict)
             else False
         )
-        and (extensions if max_extensions else 0) > 0
+        and (extensions - 1 if max_extensions else 0) > 0
     ):
         new_instruction = None
         if extensions == max_extensions:
@@ -272,6 +283,9 @@ async def ReActStream(
         operate_kwargs["reason"] = True
         operate_kwargs["response_format"] = ReActAnalysis
         operate_kwargs["action_strategy"] = analysis.action_strategy
+        operate_kwargs["include_token_usage_to_model"] = (
+            include_token_usage_to_model
+        )
         if analysis.action_batch_size:
             operate_kwargs["action_batch_size"] = analysis.action_batch_size
         if irfm:
@@ -289,6 +303,7 @@ async def ReActStream(
             operate_kwargs["guidance"] = guide + operate_kwargs.get(
                 "guidance", ""
             )
+            operate_kwargs["reasoning_effort"] = reasoning_effort
 
         analysis = await branch.operate(
             instruction=new_instruction,
@@ -298,6 +313,16 @@ async def ReActStream(
         )
         round_count += 1
 
+        if isinstance(analysis, dict) and all(
+            i is None for i in analysis.values()
+        ):
+            if not continue_after_failed_response:
+                raise ValueError(
+                    "All values in the response are None. "
+                    "This might be due to a failed response. "
+                    "Set `continue_after_failed_response=True` to ignore this error."
+                )
+
         # If verbose, show round analysis
         if verbose_analysis:
             str_ = f"\n### ReAct Round No.{round_count} Analysis:\n"
@@ -329,6 +354,15 @@ async def ReActStream(
             response_format=response_format,
             **(response_kwargs or {}),
         )
+        if isinstance(analysis, dict) and all(
+            i is None for i in analysis.values()
+        ):
+            if not continue_after_failed_response:
+                raise ValueError(
+                    "All values in the response are None. "
+                    "This might be due to a failed response. "
+                    "Set `continue_after_failed_response=True` to ignore this error."
+                )
     except Exception:
         out = branch.msgs.last_response.response
 
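At the call site, the two new knobs surface roughly as follows. This is a sketch that assumes the `Branch.ReAct` wrapper forwards the new keywords (the branch.py changes are listed but not shown here); the instruction text and model name are placeholders:

```python
import asyncio
from lionagi import Branch, iModel

async def main():
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
    result = await branch.ReAct(
        instruct={"instruction": "Outline a migration plan for the docs."},
        max_extensions=3,
        # New in 0.9.6: append an estimated token-usage line to the outgoing prompt.
        include_token_usage_to_model=True,
        # New in 0.9.6: keep looping instead of raising when a round returns all-None values.
        continue_after_failed_response=True,
    )
    print(result)

asyncio.run(main())
```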
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/ReAct/utils.py
@@ -30,6 +30,8 @@ class ReActAnalysis(BaseModel):
     2) A list of planned actions to perform before finalizing,
     3) Indication whether more expansions/rounds are needed,
     4) Additional tuning knobs: how to handle validation, how to execute actions, etc.
+    Remember do not repeat yourself, and aim to use the most efficient way to achieve
+    the goal to user's satisfaction.
     """
 
     # Standard ReAct strings for controlling expansions:
@@ -38,11 +40,12 @@ class ReActAnalysis(BaseModel):
         "If you are not ready to finalize, set extension_needed to True. "
         "hint: you should set extension_needed to True if the overall goal"
         "is not yet achieved. Do not set it to False, if you are just providing"
-        "an interim answer. You have up to {extensions} expansions. Please
+        "an interim answer. You have up to {extensions} expansions. Please "
+        "strategize accordingly and continue."
     )
     CONTINUE_EXT_PROMPT: ClassVar[str] = (
         "Another round is available. You may do multiple actions if needed. "
-        "You have up to {extensions} expansions. Please continue."
+        "You have up to {extensions} expansions. Please strategize accordingly and continue."
     )
     ANSWER_PROMPT: ClassVar[str] = (
         "Given your reasoning and actions, please now provide the final answer "
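The prompt constants are plain `ClassVar` string templates with an `{extensions}` placeholder, so the wording change is easy to inspect directly; a small sketch:

```python
from lionagi.operations.ReAct.utils import ReActAnalysis

# The continuation template now nudges the model to strategize, not just continue.
print(ReActAnalysis.CONTINUE_EXT_PROMPT.format(extensions=2))
# -> "Another round is available. You may do multiple actions if needed. "
#    "You have up to 2 expansions. Please strategize accordingly and continue."
```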
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/chat/chat.py
@@ -36,6 +36,7 @@ async def chat(
     image_detail: Literal["low", "high", "auto"] = None,
     plain_content: str = None,
     return_ins_res_message: bool = False,
+    include_token_usage_to_model: bool = False,
     **kwargs,
 ) -> tuple[Instruction, AssistantResponse]:
     ins: Instruction = branch.msgs.create_instruction(
@@ -151,11 +152,12 @@ async def chat(
     kwargs["messages"] = [i.chat_msg for i in messages]
     imodel = imodel or branch.chat_model
 
-    meth =
-
-
-
-
+    meth = imodel.invoke
+    if "stream" not in kwargs or not kwargs["stream"]:
+        kwargs["include_token_usage_to_model"] = include_token_usage_to_model
+    else:
+        meth = imodel.stream
+
 
     api_call = await meth(**kwargs)
     branch._log_manager.log(Log.create(api_call))
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/communicate/communicate.py
@@ -35,6 +35,7 @@ async def communicate(
     fuzzy_match_kwargs=None,
     clear_messages=False,
     operative_model=None,
+    include_token_usage_to_model: bool = False,
     **kwargs,
 ):
     if operative_model:
@@ -80,6 +81,7 @@ async def communicate(
         image_detail=image_detail,
         plain_content=plain_content,
         return_ins_res_message=True,
+        include_token_usage_to_model=include_token_usage_to_model,
         **kwargs,
     )
     branch.msgs.add_message(instruction=ins)
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/interpret/interpret.py
@@ -20,7 +20,8 @@ async def interpret(
     instruction = (
         "You are given a user's raw instruction or question. Your task is to rewrite it into a clearer,"
         "more structured prompt for an LLM or system, making any implicit or missing details explicit. "
-        "Return only the re-written prompt."
+        "Return only the re-written prompt. Do not assume any details not mentioned in the input, nor "
+        "give additional instruction than what is explicitly stated."
     )
     guidance = (
         f"Domain hint: {domain or 'general'}. "
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/operations/operate/operate.py
@@ -63,6 +63,7 @@ async def operate(
     ] = "return_value",
     operative_model: type[BaseModel] = None,
     request_model: type[BaseModel] = None,
+    include_token_usage_to_model: bool = False,
     **kwargs,
 ) -> list | BaseModel | None | dict | str:
     if operative_model:
@@ -138,6 +139,7 @@ async def operate(
         image_detail=image_detail,
         tool_schemas=tool_schemas,
         return_ins_res_message=True,
+        include_token_usage_to_model=include_token_usage_to_model,
         **kwargs,
     )
     branch.msgs.add_message(instruction=ins)
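`communicate` and `operate` only add the flag to their signatures and pass it through to `chat`, so opting in is a one-argument change at the Branch level. A sketch, assuming the `Branch` wrappers forward the new keyword (instruction text, model name, and the `Summary` schema are placeholders):

```python
import asyncio
from pydantic import BaseModel
from lionagi import Branch, iModel

class Summary(BaseModel):
    title: str
    bullets: list[str]

async def main():
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))

    # Free-form reply; the token-usage estimate is appended to the outgoing user message.
    text = await branch.communicate(
        "Summarize the 0.9.6 changes.",
        include_token_usage_to_model=True,
    )

    # Structured reply; operate() forwards the same flag down to chat().
    summary = await branch.operate(
        instruction="Summarize the 0.9.6 changes.",
        response_format=Summary,
        include_token_usage_to_model=True,
    )
    print(text, summary)

asyncio.run(main())
```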
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/base.py
@@ -80,6 +80,7 @@ class EndpointConfig(BaseModel):
     api_version: str | None = None
     allowed_roles: list[str] | None = None
     request_options: type | None = Field(None, exclude=True)
+    invoke_with_endpoint: bool | None = None
 
 
 class EndPoint(ABC):
@@ -91,19 +92,28 @@ class EndPoint(ABC):
     HTTP requests.
     """
 
-    def __init__(
+    def __init__(
+        self, config: dict | EndpointConfig | type[EndpointConfig], **kwargs
+    ) -> None:
         """Initializes the EndPoint with a given configuration.
 
         Args:
-            config (dict): Configuration data that matches the EndpointConfig
+            config (dict | EndpointConfig): Configuration data that matches the EndpointConfig
                 schema.
         """
-
+        if isinstance(config, dict):
+            self.config = EndpointConfig(**config)
+        if isinstance(config, EndpointConfig):
+            self.config = config
+        if isinstance(config, type) and issubclass(config, EndpointConfig):
+            self.config = config()
+        if kwargs:
+            self.update_config(**kwargs)
 
     def update_config(self, **kwargs):
         config = self.config.model_dump()
         config.update(kwargs)
-        self.config =
+        self.config = self.config.model_validate(config)
 
     @property
     def name(self) -> str | None:
@@ -349,11 +359,38 @@ class APICalling(Event):
     endpoint: EndPoint = Field(exclude=True)
     is_cached: bool = Field(default=False, exclude=True)
     should_invoke_endpoint: bool = Field(default=True, exclude=True)
+    include_token_usage_to_model: bool = Field(
+        default=False,
+        exclude=True,
+        description="Whether to include token usage information into instruction messages",
+    )
+    response_obj: BaseModel | None = Field(None, exclude=True)
 
     @model_validator(mode="after")
     def _validate_streaming(self) -> Self:
         if self.payload.get("stream") is True:
             self.streaming = True
+
+        if self.include_token_usage_to_model:
+            if isinstance(self.payload["messages"][-1], dict):
+                required_tokens = self.required_tokens
+                self.payload["messages"][-1][
+                    "content"
+                ] += f"\n\nEstimated Current Token Usage: {required_tokens}"
+                if "model" in self.payload:
+                    if (
+                        self.payload["model"].startswith("gpt-4")
+                        or "o1mini" in self.payload["model"]
+                        or "o1-preview" in self.payload["model"]
+                    ):
+                        self.payload["messages"][-1]["content"] += "/128_000"
+                    elif "o1" in self.payload["model"]:
+                        self.payload["messages"][-1]["content"] += "/200_000"
+                    elif "sonnet" in self.payload["model"]:
+                        self.payload["messages"][-1]["content"] += "/200_000"
+                    elif "haiku" in self.payload["model"]:
+                        self.payload["messages"][-1]["content"] += "/200_000"
+
         return self
 
     @property
@@ -622,7 +659,12 @@ class APICalling(Event):
                 f"API call to {self.endpoint.full_url} failed: {e1}"
             )
         else:
-            self.
+            self.response_obj = response
+            self.execution.response = (
+                response.model_dump()
+                if isinstance(response, BaseModel)
+                else response
+            )
             self.execution.status = EventStatus.COMPLETED
 
     def __str__(self) -> str:
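The effect of `include_token_usage_to_model` is easiest to see on the payload itself: the validator above appends an estimated token count, plus a rough context-window denominator keyed off the model name, to the last message. A simplified standalone sketch of that string manipulation, assuming an OpenAI-style chat payload (the real code also distinguishes `o1mini`/`o1-preview` and takes `required_tokens` from the event):

```python
payload = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Draft the release notes."}],
}
required_tokens = 1234  # illustrative; APICalling computes this from the payload

suffix = f"\n\nEstimated Current Token Usage: {required_tokens}"
if payload["model"].startswith("gpt-4"):
    suffix += "/128_000"   # assumed 128k-token context window
elif any(k in payload["model"] for k in ("o1", "sonnet", "haiku")):
    suffix += "/200_000"   # assumed 200k-token context window
payload["messages"][-1]["content"] += suffix

# The last user message now ends with e.g.
# "Estimated Current Token Usage: 1234/128_000"
print(payload["messages"][-1]["content"])
```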
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/endpoints/match_endpoint.py
@@ -48,6 +48,13 @@ def match_endpoint(
 
         return OpenRouterChatCompletionEndPoint()
 
+    if provider == "ollama":
+        from ..providers.ollama_.chat_completions import (
+            OllamaChatCompletionEndPoint,
+        )
+
+        return OllamaChatCompletionEndPoint()
+
     return OpenAIChatCompletionEndPoint(
         config={
             "provider": provider,
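With this branch plus the `case "ollama"` stub in imodel.py (which fills in the literal string `"ollama"` as the API key), an Ollama-backed model can be selected by provider name alone. A sketch, assuming a locally running Ollama server and that the optional dependency added in pyproject.toml is installed; the model tag is a placeholder:

```python
from lionagi import Branch, iModel

# Routes through the new OllamaChatCompletionEndPoint; no real API key needed.
ollama_model = iModel(provider="ollama", model="llama3.2")
branch = Branch(chat_model=ollama_model)
```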
{lionagi-0.9.4 → lionagi-0.9.6}/lionagi/service/imodel.py
@@ -51,7 +51,7 @@ class iModel:
         interval: float | None = None,
         limit_requests: int = None,
         limit_tokens: int = None,
-        invoke_with_endpoint: bool =
+        invoke_with_endpoint: bool = None,
         concurrency_limit: int | None = None,
         streaming_process_func: Callable = None,
         requires_api_key: bool = True,
@@ -95,6 +95,16 @@ class iModel:
             Additional keyword arguments, such as `model`, or any other
             provider-specific fields.
         """
+        model = kwargs.get("model", None)
+        if model:
+            if not provider:
+                if "/" in model:
+                    provider = model.split("/")[0]
+                    model = model.replace(provider + "/", "")
+                    kwargs["model"] = model
+                else:
+                    raise ValueError("Provider must be provided")
+
         if api_key is None:
             provider = str(provider or "").strip().lower()
             match provider:
@@ -110,6 +120,8 @@ class iModel:
                     api_key = "GROQ_API_KEY"
                 case "exa":
                     api_key = "EXA_API_KEY"
+                case "ollama":
+                    api_key = "ollama"
                 case "":
                     if requires_api_key:
                         raise ValueError("API key must be provided")
@@ -121,16 +133,6 @@ class iModel:
             api_key = os.getenv(api_key)
 
         kwargs["api_key"] = api_key
-        model = kwargs.get("model", None)
-        if model:
-            if not provider:
-                if "/" in model:
-                    provider = model.split("/")[0]
-                    model = model.replace(provider + "/", "")
-                    kwargs["model"] = model
-                else:
-                    raise ValueError("Provider must be provided")
-
         if isinstance(endpoint, EndPoint):
             self.endpoint = endpoint
         else:
@@ -145,7 +147,13 @@ class iModel:
         if base_url:
             self.endpoint.config.base_url = base_url
 
-
+        if (
+            invoke_with_endpoint is None
+            and self.endpoint.config.invoke_with_endpoint is True
+        ):
+            invoke_with_endpoint = True
+
+        self.should_invoke_endpoint = invoke_with_endpoint or False
         self.kwargs = kwargs
         self.executor = RateLimitedAPIExecutor(
             queue_capacity=queue_capacity,
@@ -162,7 +170,9 @@ class iModel:
         else:
             self.streaming_process_func = streaming_process_func
 
-    def create_api_calling(
+    def create_api_calling(
+        self, include_token_usage_to_model: bool = False, **kwargs
+    ) -> APICalling:
         """Constructs an `APICalling` object from endpoint-specific payload.
 
         Args:
@@ -183,6 +193,7 @@ class iModel:
             endpoint=self.endpoint,
             is_cached=payload.get("is_cached", False),
             should_invoke_endpoint=self.should_invoke_endpoint,
+            include_token_usage_to_model=include_token_usage_to_model,
         )
 
     async def process_chunk(self, chunk) -> None:
@@ -200,7 +211,12 @@ class iModel:
             return await self.streaming_process_func(chunk)
         return self.streaming_process_func(chunk)
 
-    async def stream(
+    async def stream(
+        self,
+        api_call=None,
+        include_token_usage_to_model: bool = False,
+        **kwargs,
+    ) -> AsyncGenerator:
         """Performs a streaming API call with the given arguments.
 
         Args:
@@ -214,7 +230,10 @@ class iModel:
         """
         if api_call is None:
            kwargs["stream"] = True
-            api_call = self.create_api_calling(
+            api_call = self.create_api_calling(
+                include_token_usage_to_model=include_token_usage_to_model,
+                **kwargs,
+            )
             await self.executor.append(api_call)
 
         if (
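Because the `provider/model` shorthand is now parsed before the API-key lookup, the provider inferred from the model string also selects the right environment variable. A sketch (model names are placeholders):

```python
from lionagi import iModel

# Provider inferred from the "provider/model" prefix; kwargs["model"] keeps only the tail,
# and OPENAI_API_KEY is resolved from the environment.
m1 = iModel(model="openai/gpt-4o-mini")

# Equivalent explicit form.
m2 = iModel(provider="openai", model="gpt-4o-mini")
```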
|