lionagi 0.7.3__tar.gz → 0.7.4__tar.gz
- {lionagi-0.7.3 → lionagi-0.7.4}/PKG-INFO +2 -2
- lionagi-0.7.3/cookbooks/ch1_get_started.md → lionagi-0.7.4/cookbooks/ch01_get_started.md +116 -96
- lionagi-0.7.4/cookbooks/ch02_concepts.md +223 -0
- lionagi-0.7.4/lionagi/__init__.py +23 -0
- lionagi-0.7.4/lionagi/_types.py +2 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/session/branch.py +18 -0
- lionagi-0.7.4/lionagi/version.py +1 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/pyproject.toml +4 -2
- {lionagi-0.7.3 → lionagi-0.7.4}/uv.lock +329 -25
- lionagi-0.7.3/.python-version +0 -1
- lionagi-0.7.3/cookbooks/ch2_concepts.md +0 -470
- lionagi-0.7.3/lionagi/__init__.py +0 -23
- lionagi-0.7.3/lionagi/version.py +0 -1
- {lionagi-0.7.3 → lionagi-0.7.4}/.env.example +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.github/FUNDING.yml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.github/dependabot.yml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.github/workflows/ci.yml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.github/workflows/codeql.yml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.github/workflows/docs.yml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.github/workflows/release.yml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.gitignore +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/.pre-commit-config.yaml +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/CODE_OF_CONDUCT.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/CONTRIBUTING.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/LICENSE +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/README.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch10_e2e_project.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch11_performance.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch12_graph.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch3_internal_tools.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch4_structured_forms.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch5_react.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch6_multi_branch.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch7_multi_agent.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch8_rate_limiting.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/cookbooks/ch9_data_adapter.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/dev_tools/count_code_base_lines.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/Makefile +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/_static/custom.css +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/_templates/layout.html +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/conf.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/index.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/action.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/adapter.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/branch.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/branch_operations.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/concepts.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/element_id.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/event.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/form.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/graph.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/index.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/instruct.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/lib_file.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/lib_nested.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/lib_package.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/lib_schema.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/lib_validate.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/log.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/mail.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/message.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/models.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/operative_step.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/pile.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/processor.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/progression.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/service.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/session.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/modules/utils.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/tutorials/get_started.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/tutorials/get_started_pt2.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/tutorials/get_started_pt3.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/docs/tutorials/index.rst +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/_class_registry.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/_errors.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/file/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/file/chunk.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/file/file_ops.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/file/params.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/file/process.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/file/save.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/flatten.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/nfilter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/nget.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/ninsert.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/nmerge.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/npop.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/nset.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/unflatten.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/nested/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/package/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/package/imports.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/package/management.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/package/params.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/package/system.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/parse.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/schema/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/schema/as_readable.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/schema/extract_code_block.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/schema/extract_docstring.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/schema/function_to_schema.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/schema/json_schema.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/token_transform/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/token_transform/llmlingua.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/token_transform/perplexity.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/token_transform/synthlang.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/validate/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/validate/common_field_validators.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/validate/fuzzy_match_keys.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/validate/fuzzy_validate_mapping.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/validate/string_similarity.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/libs/validate/validate_boolean.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/ReAct/ReAct.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/ReAct/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/ReAct/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/_act/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/_act/act.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/brainstorm/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/brainstorm/brainstorm.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/brainstorm/prompt.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/chat/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/chat/chat.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/communicate/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/communicate/communicate.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/instruct/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/instruct/instruct.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/interpret/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/interpret/interpret.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/operate/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/operate/operate.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/parse/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/parse/parse.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/plan/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/plan/plan.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/plan/prompt.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/select/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/select/select.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/select/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/translate/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/translate/translate.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/types.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operations/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/action/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/action/function_calling.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/action/manager.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/action/request_response_model.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/action/tool.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/action/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/forms/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/forms/base.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/forms/form.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/forms/report.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/forms/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/base.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/instruct.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/instruct_collection.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/node.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/prompts.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/instruct/reason.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/manager.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/models/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/models/field_model.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/models/model_params.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/models/note.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/models/operable_model.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/models/schema_model.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/operative.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/step.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/base.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/concurrent.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/concurrent_chunk.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/concurrent_sequential_chunk.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/params.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/sequential.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/sequential_chunk.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/sequential_concurrent_chunk.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/strategies/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/operatives/types.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/_concepts.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/adapter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/json_adapter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/pandas_/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/pandas_/csv_adapter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/pandas_/excel_adapter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/pandas_/pd_series_adapter.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/adapters/types.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/element.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/event.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/log.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/pile.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/processor.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/generic/progression.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/graph/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/graph/edge.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/graph/graph.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/graph/node.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/mail/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/mail/exchange.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/mail/mail.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/mail/mailbox.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/mail/manager.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/mail/package.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/action_request.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/action_response.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/assistant_response.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/base.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/instruction.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/manager.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/message.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/system.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/README.md +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/action_request.jinja2 +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/action_response.jinja2 +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/system_message.jinja2 +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/protocols/types.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/endpoints/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/endpoints/base.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/endpoints/chat_completion.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/endpoints/match_endpoint.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/endpoints/rate_limited_processor.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/endpoints/token_calculator.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/imodel.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/manager.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/anthropic_/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/anthropic_/messages.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/groq_/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/groq_/chat_completions.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/openai_/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/openai_/chat_completions.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/openrouter_/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/openrouter_/chat_completions.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/perplexity_/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/providers/perplexity_/chat_completions.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/service/types.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/session/__init__.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/session/session.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/settings.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/lionagi/utils.py +0 -0
- {lionagi-0.7.3 → lionagi-0.7.4}/prompts/doc_style.md +0 -0
{lionagi-0.7.3 → lionagi-0.7.4}/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.7.3
-Summary: An
+Version: 0.7.4
+Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>
 License: Apache License
                                  Version 2.0, January 2004
lionagi-0.7.3/cookbooks/ch1_get_started.md → lionagi-0.7.4/cookbooks/ch01_get_started.md
@@ -30,7 +30,7 @@ source env/bin/activate # On Windows: env\Scripts\activate
 # Install LionAGI and dotenv
 pip install lionagi
 ```
-1.2 API Setup
+### 1.2 API Setup

 ```python
 import os
@@ -45,9 +45,21 @@ load_dotenv()
 os.environ["OPENAI_API_KEY"] = "your-api-key"
 ```

-2. Building a Basic Assistant
+## 2. Building a Basic Assistant

 The Basic Assistant shows how to query GPT-based models with LionAGI. We’ll ask a few questions about AI Safety as an example.
+```python
+from timeit import default_timer as timer
+
+start = timer()
+
+import lionagi
+
+print(f"Imported lionagi in {timer()-start:.3f} seconds")
+print(f"lionagi version: {lionagi.__version__}")
+```
+if this code runs without errors, you have successfully installed LionAGI.
+
 ```python
 from lionagi import Branch, iModel
 from IPython.display import display, Markdown
@@ -84,48 +96,50 @@ responses = []
 for question in questions:
     # Prompt the assistant with context and question
     response = await assistant.chat(f"{context}\nQuestion: {question}")
-
+
     # Display the response in a Jupyter Notebook (if using IPython)
     display(Markdown(response))
-
+
     # Store the response
     responses.append({"question": question, "answer": response})
 ```
-
-
-
-
-
+
+
+Explanation:
+1. iModel configures how we interact with OpenAI. We specify the model name and temperature.
+2. Branch sets up a conversational context (the system prompt).
+3. assistant.chat() sends queries (prompts) to GPT.
+4. We collect results in responses, which you can later print or save.

 3. Building an Advanced Assistant

+## 3. Building an Advanced Assistant
+
 Now let’s expand on the basic approach. The Advanced Assistant adds:
-
-
-
-
+1. Persistent storage for research (JSON files)
+2. Error handling (API key issues, rate limits)
+3. Summaries of research topics
+4. Retrieval of previously saved topics
+
 ```python
 from lionagi import Branch, iModel
 from datetime import datetime
 from pathlib import Path
 import json
-
+

 class ResearchAssistant:
     """Advanced research assistant with persistence."""
+
     def __init__(
         self,
         name: str = "Researcher",
-        model: str = "gpt-4o",
-        save_dir: str = "research"
+        model: str = "gpt-4o-mini",
+        save_dir: str = "research",
     ):
         # 1. Configure the AI model
-        ai_model = iModel(
-
-            model=model,
-            temperature=0.7
-        )
-
+        ai_model = iModel(provider="openai", model=model, temperature=0.7)
+
         # 2. Create the assistant branch
         self.assistant = Branch(
             name=name,
@@ -133,17 +147,17 @@ class ResearchAssistant:
             Provide clear, accurate information.
             Support claims with evidence.
             Ask for clarification if needed.""",
-            chat_model=ai_model
+            chat_model=ai_model,
         )
-
+
         # 3. Setup storage
         self.save_dir = Path(save_dir)
         self.save_dir.mkdir(exist_ok=True)
-
+
         # 4. Track research in memory
         self.topics: dict[str, dict] = {}
         self._load_history()
-
+
     def _load_history(self):
         """
         Loads previous research from JSON files in the save_dir.
@@ -153,11 +167,9 @@ class ResearchAssistant:
             with open(file) as f:
                 research = json.load(f)
                 self.topics[research["topic"]] = research
-
+
     async def research_topic(
-        self,
-        topic: str,
-        questions: list[str]
+        self, topic: str, questions: list[str]
     ) -> dict[str, str]:
         """
         Researches a topic thoroughly by asking multiple questions.
@@ -170,148 +182,154 @@ class ResearchAssistant:
                     f"Regarding {topic}: {question}"
                 )
                 answers[question] = response
-
+
             # Save research to a JSON file
             research = {
                 "topic": topic,
                 "date": datetime.now().isoformat(),
                 "questions": questions,
-                "answers": answers
+                "answers": answers,
             }
-
-            file_path =
+
+            file_path = (
+                self.save_dir / f"{topic.lower().replace(' ', '_')}.json"
+            )
             with open(file_path, "w") as f:
                 json.dump(research, f, indent=2)
-
+
             # Update in-memory tracking
             self.topics[topic] = research
-
+
             return answers
-
+
         except Exception as e:
             # Handle common errors
             if "API key" in str(e):
-                raise ValueError(
+                raise ValueError(
+                    "Invalid API key. Please check your configuration."
+                )
             elif "Rate limit" in str(e):
-                raise ValueError(
+                raise ValueError(
+                    "Rate limit exceeded. Please try again later."
+                )
             else:
                 raise e
-
-    async def get_summary(
-        self,
-        topic: str,
-        style: str = "technical"
-    ) -> str:
+
+    async def get_summary(self, topic: str, style: str = "technical") -> str:
         """
         Generates a summary of the answers for a researched topic in a specific style.
         Returns the summary string, or an error if the topic was not found.
         """
         if topic not in self.topics:
             return f"No research found for: {topic}"
-
+
         research = self.topics[topic]
         questions = research["questions"]
        answers = research["answers"]
-
+
         prompt = f"""
         Summarize research on {topic}.
         Style: {style}
         Questions covered: {', '.join(questions)}
         Key findings: {json.dumps(answers, indent=2)}
         """
-
+
         try:
             return await self.assistant.chat(prompt)
         except Exception as e:
             return f"Error generating summary: {str(e)}"
-
+
     def get_topics(self) -> list[str]:
         """Returns a list of all topics researched so far."""
         return list(self.topics.keys())
-
-    def get_research(
-        self,
-        topic: str
-    ) -> dict | None:
+
+    def get_research(self, topic: str) -> dict | None:
         """Returns the full research details for a given topic, or None if not found."""
         return self.topics.get(topic)
 ```
+
 Usage Example
+:
 ```python
+from IPython.display import display, Markdown
+
 async def research_project():
     """Demonstrates how to use the advanced ResearchAssistant."""
-
+
     # 1. Create an instance of ResearchAssistant
     assistant = ResearchAssistant(
-        name="AI Researcher",
-        model="gpt-4o",
-        save_dir="ai_research"
+        name="AI Researcher", model="gpt-4o", save_dir="ai_research"
     )
-
+
     # 2. Define topics and questions
     topics = {
         "AI Safety": [
             "What are the main concerns?",
             "What solutions exist?",
-            "What are future challenges?"
+            "What are future challenges?",
         ],
         "Machine Learning": [
             "What are key concepts?",
             "What are best practices?",
-            "What are common pitfalls?"
-        ]
+            "What are common pitfalls?",
+        ],
     }
-
+
     # 3. Research each topic
     for topic, questions in topics.items():
         print(f"\nResearching: {topic}")
-
+
         try:
             # Gather answers
             answers = await assistant.research_topic(topic, questions)
-
+
             # Generate and print a summary
             summary = await assistant.get_summary(topic, style="technical")
-
+
             print("\nFindings:")
             for q, a in answers.items():
                 display(Markdown(f"**Q**: {q}"))
                 display(Markdown(f"**A**: {a}"))
-
-
-
+
+            display(Markdown(f"\nSummary:\n{summary}"))
+
         except Exception as e:
             print(f"Error researching {topic}: {str(e)}")
             continue
-
+
     # 4. Show all researched topics
-
+    display(Markdown(f"\nAll Topics:{assistant.get_topics()}"))

 # If you’re running in an environment that supports async,
 # you can execute:
 # await research_project()
+# else you can use:
+# asyncio.run(research_project())
+```
+```python
+# Example call (in an async environment, such as Jupyter Notebook):
+await research_project()
 ```
-
 Explanation
-
-
-
-
-
-
-4. Best Practices
-
-
-
-
-
-
-
-
-
-
-
-
+1. ResearchAssistant Class: Encapsulates functions to query GPT, track and load previous research, and generate summaries.
+2. _load_history(): Loads prior research from JSON files in save_dir.
+3. research_topic(): Prompts GPT with each question, saves answers to a local JSON file, and updates an internal topics dictionary.
+4. get_summary(): Builds a customized summary prompt and returns GPT’s response.
+5. Error Handling: Uses Python exceptions to catch and respond to common issues (invalid key, rate limits).
+
+## 4. Best Practices
+1. Assistant Design
+   - Provide a clear system message (role, instructions, style).
+   - Configure model parameters (model, temperature) carefully.
+   - Gracefully handle common errors (API key problems, rate limits).
+2. Code Structure
+   - Use type hints for clarity (e.g., -> dict[str, str]).
+   - Keep code modular and documented.
+   - Follow PEP 8 style guidelines.
+3. User Experience
+   - Persist research results so users can revisit them.
+   - Offer summaries or highlights.
+   - Provide progress/error notifications to guide the user.

 5. Quick Reference

@@ -341,13 +359,15 @@ except Exception as e:
     print(f"Error: {str(e)}")
 ```

-6. Next Steps
+## 6. Next Steps

 You have now learned:
-
-
-
+1. How to create a Basic AI Assistant
+2. How to research topics, save results, and manage errors
+3. How to retrieve and summarize past research
+
+In Chapter 2, we’ll explore LionAGI’s core concepts and dive deeper into its architecture.

-
+You’ll learn how to handle more complex conversation flows, manipulate prompts dynamically, and use advanced features like multiple branches or streaming responses.

 Happy coding and researching!
lionagi-0.7.4/cookbooks/ch02_concepts.md (new file)
@@ -0,0 +1,223 @@
+# LionAGI Cookbook
+
+## Chapter 2: Building a Customer Service Bot
+
+In [Chapter 1](ch1_get_started.md), we built a **research assistant** primarily using the `branch.chat()` method.
+
+That approach was **single-turn**: each call to `chat()` did **not** add messages to the conversation history. Now, we’ll explore **LionAGI’s** architecture and focus on **multi-turn** usage with `branch.communicate()`, which **does** store messages for continuous dialogue.
+
+---
+
+## 1. Architecture & Design Philosophy
+
+LionAGI is deliberately **modular**. When building a “Customer Service” bot, these key pieces unite:
+
+1. **Branch**
+   - Coordinates a single conversation.
+   - Maintains messages, tools, logs, and references to an **iModel**.
+   - We’ll use `communicate()` for multi-turn conversation memory.
+
+2. **iModel**
+   - Represents a configured LLM endpoint (like GPT-4o, sonnet-3.5, etc.).
+   - Manages concurrency, token usage, and request settings.
+
+3. **Action System** (Optional)
+   - Tools exposed to the LLM for real backend operations (e.g., resetting a password, issuing refunds).
+   - The LLM can “function-call” them if it decides it needs those capabilities.
+
+4. **Messages & Conversation**
+   - `branch.communicate()` automatically **stores** user messages + assistant replies in order, supporting multi-turn memory.
+   - `branch.chat()`, by contrast, is typically a single-turn approach (no automatic message storage).
+
+5. **Logs & Session** (Advanced)
+   - You can attach a `LogManager` or use a `Session` to handle multiple conversation branches and cross-branch communication.
+   - For now, we’ll stick to a single Branch for a single conversation flow.
+
+### High-Level Flow for Customer Service
+1. **User** asks a question or describes a problem.
+2. **Branch** uses an **iModel** to generate a reply, storing both the user inquiry and the assistant response in the conversation.
+3. If a function call is needed (e.g., “reset_password”), the Branch’s tool system handles it.
+4. The conversation continues seamlessly: new user messages have context from previous messages because we used `communicate()`.
+
+---
+
+## 2. Example: Building a Basic Customer Service Branch
+
+Here’s a **LionAGI** approach for multi-turn conversation. Let’s define a simple Tool, an iModel, and a Branch that uses `communicate()`.
+
+
+### 2.1 Configuring an iModel
+
+```python
+from lionagi import iModel
+
+customer_service_model = iModel(
+    provider="openai",
+    model="gpt-4o-mini",
+    temperature=0.7,
+    # concurrency/rate-limits can be set if needed
+)
+```
+
+### 2.2 Creating the Branch
+
+```python
+from lionagi import Branch
+
+service_branch = Branch(
+    name="CustomerService",
+    system="""You are a polite customer service agent.
+- Greet the user.
+- Provide helpful info or next steps.
+- Escalate if the issue is out of scope.""",
+    chat_model=customer_service_model,
+)
+```
+
+Key Points
+- The system string sets the overall tone and instructions (like a system prompt).
+- chat_model is our chosen LLM interface.
+- tools includes our password reset functionality.
+
+### 2.3 Handling Multi-turn Inquiries with communicate()
+
+```python
+async def handle_inquiry(user_id: str, user_message: str) -> str:
+    """
+    Takes the user's message and returns an AI response,
+    preserving conversation history for follow-up questions.
+    """
+    # Provide context if needed (e.g., user_id)
+    response = await service_branch.communicate(
+        instruction=user_message,
+        context={"user_id": user_id}
+    )
+    return response
+```
+Why communicate()?
+- It automatically adds the user message and the assistant’s reply to the Branch’s conversation log.
+- Follow-up calls to communicate() within the same Branch will see the entire conversation so far.
+- This is different from branch.chat(), which does not store messages for future turns.
+
+Demo of Multi-Turn Exchange
+```python
+import asyncio
+
+async def customer_service_demo():
+    # 1) First inquiry
+    resp1 = await handle_inquiry("User123", "Hi, I forgot my password.")
+    print("Assistant says:", resp1)
+
+    # 2) Follow-up inquiry (the user is still locked out)
+    resp2 = await handle_inquiry("User123", "I'm still locked out. Help!")
+    print("Assistant says:", resp2)
+
+    # 3) Another question, context is still remembered
+    resp3 = await handle_inquiry("User123", "Thanks! How do I change my billing address next?")
+    print("Assistant says:", resp3)
+
+# asyncio.run(customer_service_demo())
+```
+Here, each call to handle_inquiry() uses communicate(), building a conversation that retains context across multiple user messages.
+
+## 3. Managing Conversation State & Logs
+
+### 3.1 Automatic Logging
+
+LionAGI automatically log conversation events via a LogManager. You can configure log manager by passing a config at branch creation:
+
+```python
+from lionagi import types
+
+log_config = types.LogManagerConfig(
+    persist_dir="./logs",
+    capacity=10,  # logs are dumped after 10 events (api_call, function_call)
+    extension=".json",
+    auto_save_on_exit=True,
+)
+service_branch = Branch(
+    name="CustomerService",
+    system="""You are a polite customer service agent.
+- Greet the user.
+- Provide helpful info or next steps.
+- Escalate if the issue is out of scope.""",
+    chat_model=customer_service_model,
+    tools=[reset_password],  # directly pass in the function
+    log_config=log_config,
+)
+```
+You can also check the logs as a DataFrame:
+
+```python
+df = service_branch.logs.to_df()
+print(df.head())
+```
+3.2 Viewing Stored Conversation
+
+Because we’re using communicate(), the conversation grows with each user message:
+```python
+# this will produce a formatted dataframe of messages in the conversation
+df = service_branch.to_df()
+
+# whereas below will convert all messages data in the conversation into dataframe
+df1 = service_branch.messages.to_df()
+```
+You’ll see System, User, and Assistant roles in chronological order.
+
+## 4. Best Practices
+
+1. Use communicate() for Multi-turn
+   - If you need a persistent conversation across multiple messages, communicate() automatically appends user & assistant messages.
+   - chat() is simpler but stateless (no built-in memory), ideal for single-turn Q&A or ephemeral prompts.
+2. Leverage Tools
+   - Real customer service might need secure “Check Account,” “Issue Refund,” or “Reset Password.”
+   - Wrap each as a Tool or put them in an ActionManager. The LLM can function-call them as needed.
+3. Log Management
+   - Set capacity-based dumping or manual triggers to avoid large memory usage.
+   - For compliance or analytics, consider storing logs externally (e.g., in a database).
+4. Escalation Strategy
+   - If the LLM says “I can’t handle this” or calls “escalate_ticket,” you can hand off to a manager branch or a real agent.
+   - Use a Session if you want multiple Branches or multi-agent flows.
+5. Conversation Summaries
+   - For longer sessions, you might occasionally summarize prior messages (to keep context concise) or parse them for user satisfaction.
+
+## 5. Quick Reference
+
+Below is a minimal usage pattern focusing on multi-turn communicate(), different from Chapter 1’s single-turn chat():
+```python
+from lionagi import Branch, iModel
+
+# 1) Model configuration
+cs_model = iModel(
+    provider="openai",
+    model="gpt-4o",
+    temperature=0.7
+)
+
+# 2) Branch creation
+cs_branch = Branch(
+    name="CustomerSupport",
+    system="You are a friendly agent with memory of previous messages.",
+    chat_model=cs_model
+)
+
+# 3) Multi-turn conversation
+async def quick_demo():
+    # First user message
+    first_resp = await cs_branch.communicate("Hi, I need help with my account")
+    print("Assistant:", first_resp)
+
+    # Follow-up user message
+    second_resp = await cs_branch.communicate("I'm also locked out of billing.")
+    print("Assistant:", second_resp)
+```
+Notice how each communicate() call references the same Branch, so user context accumulates.
+
+## 6. Summary & Next Steps
+
+In this chapter, you’ve seen:
+- How communicate() differs from chat(): communicate() keeps conversation state for subsequent turns.
+- Integrating Tools for real backend actions in a multi-turn scenario.
+- Logging conversation history for compliance or analytics.
+
+Coming Up: Chapter 3 explores advanced function calling and tool integration—including custom schemas, concurrency handling, and more. This opens the door to sophisticated customer service workflows, from verifying user details to secure refunds or escalations.
lionagi-0.7.4/lionagi/__init__.py (new file)
@@ -0,0 +1,23 @@
+# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from . import _types as types
+from .operations import types as op
+from .operatives import types as ops_types  # deprecated
+from .service.imodel import iModel
+from .session.session import Branch, Session
+from .version import __version__
+
+LiteiModel = iModel
+
+__all__ = (
+    "Session",
+    "Branch",
+    "iModel",
+    "LiteiModel",
+    "types",
+    "ops_types",
+    "op",
+    "__version__",
+)
{lionagi-0.7.3 → lionagi-0.7.4}/lionagi/session/branch.py
@@ -611,6 +611,24 @@ class Branch(Element, Communicatable, Relational):
         # Remove placeholders (UNDEFINED) so we don't incorrectly assign them
         return cls(**{k: v for k, v in params.items() if v is not UNDEFINED})

+    def dump_logs(self, clear: bool = True, persist_path=None):
+        """
+        Dumps the log to a file or clears it.
+
+        Args:
+            clear (bool, optional):
+                If `True`, clears the log after dumping.
+            persist_path (str, optional):
+                The file path to save the log to.
+        """
+        self._log_manager.dump(clear=clear, persist_path=persist_path)
+
+    async def adump_logs(self, clear: bool = True, persist_path=None):
+        """
+        Asynchronously dumps the log to a file or clears it.
+        """
+        await self._log_manager.adump(clear=clear, persist_path=persist_path)
+
     # -------------------------------------------------------------------------
     # Asynchronous Operations (chat, parse, operate, etc.)
     # -------------------------------------------------------------------------
lionagi-0.7.4/lionagi/version.py (new file)
@@ -0,0 +1 @@
+__version__ = "0.7.4"
{lionagi-0.7.3 → lionagi-0.7.4}/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "lionagi"
-version = "0.7.3"
-description = "An
+version = "0.7.4"
+description = "An Intelligence Operating System."
 authors = [
     { name = "HaiyangLi", email = "quantocean.li@gmail.com" },
 ]
@@ -42,6 +42,8 @@ dev = [
     "sphinx-autobuild>=2024.10.3",
     "sphinx>=8.1.3",
     "furo>=2024.8.6",
+    "ipython>=8.31.0",
+    "ipykernel>=6.29.5",
 ]

 [tool.black]