semantic-kernel 0.5.0.dev0__tar.gz → 0.9.0b1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/PKG-INFO +4 -3
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/pyproject.toml +16 -14
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/__init__.py +5 -15
- semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/chat_completion_client_base.py +68 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/embeddings/embedding_generator_base.py +3 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/google_palm/gp_prompt_execution_settings.py +4 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py +41 -37
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/google_palm/services/gp_text_completion.py +11 -22
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/google_palm/services/gp_text_embedding.py +7 -8
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/hugging_face/hf_prompt_execution_settings.py +3 -8
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py +32 -39
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/hugging_face/services/hf_text_embedding.py +6 -10
- semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/ollama/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/ollama/ollama_prompt_execution_settings.py +3 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/ollama/services/ollama_chat_completion.py +17 -22
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/ollama/services/ollama_text_completion.py +3 -6
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/ollama/services/ollama_text_embedding.py +1 -2
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/ollama/utils.py +2 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/contents/azure_streaming_chat_message_content.py +5 -4
- {semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/models/chat → semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/open_ai/contents}/function_call.py +21 -11
- semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/open_ai/contents/open_ai_chat_message_content.py +73 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/contents/open_ai_streaming_chat_message_content.py +8 -7
- {semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/models/chat → semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/open_ai/contents}/tool_calls.py +1 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/exceptions/content_filter_ai_exception.py +12 -43
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py +3 -2
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py +9 -20
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/azure_config_base.py +14 -20
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/azure_text_completion.py +6 -15
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/azure_text_embedding.py +5 -11
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion.py +6 -11
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py +20 -16
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_config_base.py +16 -24
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_handler.py +9 -19
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion.py +7 -7
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion_base.py +4 -5
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding.py +5 -5
- semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/open_ai/utils.py +309 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/prompt_execution_settings.py +1 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/text_completion_client_base.py +5 -7
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/astradb/astra_client.py +3 -2
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/astradb/astradb_memory_store.py +16 -15
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py +14 -21
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cognitive_search/utils.py +6 -15
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_memory_store.py +12 -18
- semantic_kernel-0.9.0b1/semantic_kernel/connectors/memory/azure_cosmosdb/cosmosdb_utils.py +34 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py +3 -3
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py +10 -7
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/milvus/milvus_memory_store.py +22 -21
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/mongodb_atlas/mongodb_atlas_memory_store.py +10 -6
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/pinecone/pinecone_memory_store.py +21 -13
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/postgres/postgres_memory_store.py +18 -13
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/qdrant/qdrant_memory_store.py +5 -4
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/redis/README.md +5 -5
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/redis/redis_memory_store.py +13 -15
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/usearch/usearch_memory_store.py +16 -9
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/weaviate/weaviate_memory_store.py +2 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/openapi/kernel_openapi.py +37 -22
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/search_engine/bing_connector.py +8 -5
- semantic_kernel-0.9.0b1/semantic_kernel/connectors/search_engine/connector.py +14 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/search_engine/google_connector.py +7 -6
- semantic_kernel-0.9.0b1/semantic_kernel/contents/__init__.py +16 -0
- semantic_kernel-0.9.0b1/semantic_kernel/contents/chat_history.py +292 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models → semantic_kernel-0.9.0b1/semantic_kernel}/contents/chat_message_content.py +31 -3
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models → semantic_kernel-0.9.0b1/semantic_kernel}/contents/kernel_content.py +1 -1
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models → semantic_kernel-0.9.0b1/semantic_kernel}/contents/streaming_chat_message_content.py +9 -7
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models → semantic_kernel-0.9.0b1/semantic_kernel}/contents/streaming_text_content.py +5 -4
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models → semantic_kernel-0.9.0b1/semantic_kernel}/contents/text_content.py +1 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/core_plugins/__init__.py +0 -2
- semantic_kernel-0.9.0b1/semantic_kernel/core_plugins/conversation_summary_plugin.py +86 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/core_plugins/http_plugin.py +27 -21
- semantic_kernel-0.9.0b1/semantic_kernel/core_plugins/math_plugin.py +69 -0
- semantic_kernel-0.9.0b1/semantic_kernel/core_plugins/text_memory_plugin.py +97 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/core_plugins/text_plugin.py +26 -26
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/core_plugins/time_plugin.py +4 -3
- semantic_kernel-0.9.0b1/semantic_kernel/core_plugins/wait_plugin.py +38 -0
- semantic_kernel-0.9.0b1/semantic_kernel/core_plugins/web_search_engine_plugin.py +51 -0
- semantic_kernel-0.9.0b1/semantic_kernel/events/function_invoked_event_args.py +45 -0
- semantic_kernel-0.9.0b1/semantic_kernel/events/function_invoking_event_args.py +35 -0
- semantic_kernel-0.9.0b1/semantic_kernel/events/kernel_events_args.py +42 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/__init__.py +8 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/content_exceptions.py +37 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/function_exceptions.py +57 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/kernel_exceptions.py +44 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/planner_exceptions.py +38 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/service_exceptions.py +58 -0
- semantic_kernel-0.9.0b1/semantic_kernel/exceptions/template_engine_exceptions.py +91 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/__init__.py +21 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/function_result.py +63 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_arguments.py +35 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_function.py +207 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_function_decorator.py +119 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_function_from_method.py +168 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_function_from_prompt.py +216 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_function_metadata.py +42 -0
- semantic_kernel-0.9.0b1/semantic_kernel/functions/kernel_parameter_metadata.py +18 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition → semantic_kernel-0.9.0b1/semantic_kernel/functions}/kernel_plugin.py +26 -12
- {semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition → semantic_kernel-0.9.0b1/semantic_kernel/functions}/kernel_plugin_collection.py +54 -49
- semantic_kernel-0.9.0b1/semantic_kernel/functions/prompt_rendering_result.py +23 -0
- semantic_kernel-0.9.0b1/semantic_kernel/kernel.py +756 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/semantic_text_memory_base.py +1 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/volatile_memory_store.py +14 -11
- semantic_kernel-0.9.0b1/semantic_kernel/planners/__init__.py +15 -0
- semantic_kernel-0.9.0b1/semantic_kernel/planners/action_planner/__init__.py +7 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/action_planner/action_planner.py +57 -94
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/action_planner/skprompt.txt +1 -1
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/basic_planner.py +47 -35
- semantic_kernel-0.9.0b1/semantic_kernel/planners/plan.py +377 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/sequential_planner/Plugins/SequentialPlanning/config.json +3 -1
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/sequential_planner/__init__.py +1 -1
- semantic_kernel-0.9.0b1/semantic_kernel/planners/sequential_planner/sequential_planner.py +138 -0
- semantic_kernel-0.9.0b1/semantic_kernel/planners/sequential_planner/sequential_planner_extensions.py +128 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/sequential_planner/sequential_planner_parser.py +22 -29
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/stepwise_planner/Plugins/StepwiseStep/config.json +9 -8
- semantic_kernel-0.9.0b1/semantic_kernel/planners/stepwise_planner/__init__.py +5 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/stepwise_planner/stepwise_planner.py +109 -97
- semantic_kernel-0.9.0b1/semantic_kernel/prompt_template/input_variable.py +13 -0
- semantic_kernel-0.9.0b1/semantic_kernel/prompt_template/kernel_prompt_template.py +165 -0
- semantic_kernel-0.9.0b1/semantic_kernel/prompt_template/prompt_template_base.py +16 -0
- semantic_kernel-0.9.0b1/semantic_kernel/prompt_template/prompt_template_config.py +116 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/reliability/pass_through_without_retry.py +3 -5
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/reliability/retry_mechanism_base.py +2 -4
- {semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai → semantic_kernel-0.9.0b1/semantic_kernel/services}/ai_service_client_base.py +16 -3
- semantic_kernel-0.9.0b1/semantic_kernel/services/ai_service_selector.py +41 -0
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/blocks/block.py +21 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/blocks/block_types.py +1 -0
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/blocks/code_block.py +177 -0
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/blocks/function_id_block.py +65 -0
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/blocks/named_arg_block.py +96 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/blocks/symbols.py +4 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/blocks/text_block.py +16 -13
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/blocks/val_block.py +73 -0
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/blocks/var_block.py +77 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/code_tokenizer.py +44 -56
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/protocols/code_renderer.py +5 -4
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/protocols/text_renderer.py +5 -4
- semantic_kernel-0.9.0b1/semantic_kernel/template_engine/template_tokenizer.py +163 -0
- semantic_kernel-0.9.0b1/semantic_kernel/text/function_extension.py +23 -0
- semantic_kernel-0.9.0b1/semantic_kernel/utils/chat.py +18 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/utils/null_logger.py +2 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/utils/settings.py +1 -1
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/utils/validation.py +19 -12
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/ai_exception.py +0 -60
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/chat_completion_client_base.py +0 -53
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/contents/open_ai_chat_message_content.py +0 -32
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/models/chat/open_ai_chat_message.py +0 -17
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/semantic_functions/open_ai_chat_prompt_template.py +0 -87
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/utils.py +0 -222
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/memory/azure_cosmosdb/cosmosdb_utils.py +0 -12
- semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/search_engine/connector.py +0 -12
- semantic_kernel-0.5.0.dev0/semantic_kernel/core_plugins/conversation_summary_plugin.py +0 -60
- semantic_kernel-0.5.0.dev0/semantic_kernel/core_plugins/file_io_plugin.py +0 -75
- semantic_kernel-0.5.0.dev0/semantic_kernel/core_plugins/math_plugin.py +0 -89
- semantic_kernel-0.5.0.dev0/semantic_kernel/core_plugins/text_memory_plugin.py +0 -135
- semantic_kernel-0.5.0.dev0/semantic_kernel/core_plugins/wait_plugin.py +0 -26
- semantic_kernel-0.5.0.dev0/semantic_kernel/core_plugins/web_search_engine_plugin.py +0 -54
- semantic_kernel-0.5.0.dev0/semantic_kernel/events/function_invoked_event_args.py +0 -16
- semantic_kernel-0.5.0.dev0/semantic_kernel/events/function_invoking_event_args.py +0 -16
- semantic_kernel-0.5.0.dev0/semantic_kernel/events/kernel_events_args.py +0 -31
- semantic_kernel-0.5.0.dev0/semantic_kernel/kernel.py +0 -871
- semantic_kernel-0.5.0.dev0/semantic_kernel/kernel_exception.py +0 -60
- semantic_kernel-0.5.0.dev0/semantic_kernel/models/chat/chat_message.py +0 -39
- semantic_kernel-0.5.0.dev0/semantic_kernel/models/contents/__init__.py +0 -16
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/__init__.py +0 -7
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/context_variables.py +0 -125
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/delegate_handlers.py +0 -169
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/delegate_inference.py +0 -295
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/delegate_types.py +0 -29
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/kernel_context.py +0 -170
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/kernel_function.py +0 -511
- semantic_kernel-0.5.0.dev0/semantic_kernel/orchestration/kernel_function_base.py +0 -142
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/__init__.py +0 -15
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/action_planner/__init__.py +0 -7
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/plan.py +0 -393
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/planning_exception.py +0 -46
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/sequential_planner/sequential_planner.py +0 -146
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/sequential_planner/sequential_planner_extensions.py +0 -183
- semantic_kernel-0.5.0.dev0/semantic_kernel/planning/stepwise_planner/__init__.py +0 -5
- semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition/__init__.py +0 -11
- semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition/function_view.py +0 -57
- semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition/functions_view.py +0 -56
- semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition/kernel_function_context_parameter_decorator.py +0 -34
- semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition/kernel_function_decorator.py +0 -29
- semantic_kernel-0.5.0.dev0/semantic_kernel/plugin_definition/parameter_view.py +0 -23
- semantic_kernel-0.5.0.dev0/semantic_kernel/semantic_functions/chat_prompt_template.py +0 -162
- semantic_kernel-0.5.0.dev0/semantic_kernel/semantic_functions/prompt_template.py +0 -81
- semantic_kernel-0.5.0.dev0/semantic_kernel/semantic_functions/prompt_template_base.py +0 -20
- semantic_kernel-0.5.0.dev0/semantic_kernel/semantic_functions/prompt_template_config.py +0 -83
- semantic_kernel-0.5.0.dev0/semantic_kernel/semantic_functions/semantic_function_config.py +0 -22
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/blocks/block.py +0 -26
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/blocks/code_block.py +0 -137
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/blocks/function_id_block.py +0 -97
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/blocks/val_block.py +0 -66
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/blocks/var_block.py +0 -85
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/prompt_template_engine.py +0 -141
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/protocols/prompt_templating_engine.py +0 -75
- semantic_kernel-0.5.0.dev0/semantic_kernel/template_engine/template_tokenizer.py +0 -167
- semantic_kernel-0.5.0.dev0/semantic_kernel/text/function_extension.py +0 -23
- semantic_kernel-0.5.0.dev0/semantic_kernel/utils/static_property.py +0 -8
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/pip/README.md +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/google_palm/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/hugging_face/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/ollama → semantic_kernel-0.9.0b1/semantic_kernel/connectors/ai/hugging_face/services}/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/const.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/contents/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/contents/azure_chat_message_content.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/azure_chat_prompt_execution_settings.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_model_types.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/ai/open_ai/exceptions → semantic_kernel-0.9.0b1/semantic_kernel/connectors/memory}/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/astradb/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/astradb/utils.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cognitive_search/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cosmosdb/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_store_api.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/chroma/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/chroma/utils.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/milvus/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/mongodb_atlas/README.md +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/mongodb_atlas/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/mongodb_atlas/utils.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/pinecone/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/pinecone/utils.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/postgres/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/qdrant/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/redis/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/redis/utils.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/usearch/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/memory/weaviate/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/openapi/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/search_engine/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/connectors/telemetry.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models/chat → semantic_kernel-0.9.0b1/semantic_kernel/contents}/chat_role.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models/chat → semantic_kernel-0.9.0b1/semantic_kernel/contents}/finish_reason.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/models → semantic_kernel-0.9.0b1/semantic_kernel}/contents/streaming_kernel_content.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/events/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/kernel_pydantic.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/memory_query_result.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/memory_record.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/memory_store_base.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/null_memory.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/memory/semantic_text_memory.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/action_planner/action_planner_config.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/sequential_planner/Plugins/SequentialPlanning/skprompt.txt +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/sequential_planner/sequential_planner_config.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/stepwise_planner/Plugins/StepwiseStep/skprompt.txt +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/stepwise_planner/stepwise_planner_config.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/planning → semantic_kernel-0.9.0b1/semantic_kernel/planners}/stepwise_planner/system_step.py +0 -0
- {semantic_kernel-0.5.0.dev0/semantic_kernel/connectors/memory → semantic_kernel-0.9.0b1/semantic_kernel/services}/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/template_engine/README.md +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/text/__init__.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/text/text_chunker.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/utils/logging.py +0 -0
- {semantic_kernel-0.5.0.dev0 → semantic_kernel-0.9.0b1}/semantic_kernel/utils/naming.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: semantic-kernel
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.9.0b1
|
|
4
4
|
Summary: Semantic Kernel Python SDK
|
|
5
5
|
Author: Microsoft
|
|
6
6
|
Author-email: SK-Support@microsoft.com
|
|
@@ -13,13 +13,14 @@ Classifier: Programming Language :: Python :: 3.11
|
|
|
13
13
|
Classifier: Programming Language :: Python :: 3.12
|
|
14
14
|
Requires-Dist: aiofiles (>=23.1.0,<24.0.0)
|
|
15
15
|
Requires-Dist: aiohttp (>=3.8,<4.0)
|
|
16
|
-
Requires-Dist:
|
|
16
|
+
Requires-Dist: defusedxml (>=0.7.1,<0.8.0)
|
|
17
|
+
Requires-Dist: motor (>=3.3.2,<4.0.0)
|
|
17
18
|
Requires-Dist: numpy (>=1.24.2,<2.0.0)
|
|
18
19
|
Requires-Dist: openai (>=1.0)
|
|
19
20
|
Requires-Dist: openapi_core (>=0.18.0,<0.19.0)
|
|
20
21
|
Requires-Dist: prance (>=23.6.21.0,<24.0.0.0)
|
|
21
22
|
Requires-Dist: pydantic (>2)
|
|
22
|
-
Requires-Dist: python-dotenv (==1.0.
|
|
23
|
+
Requires-Dist: python-dotenv (==1.0.1)
|
|
23
24
|
Requires-Dist: regex (>=2023.6.3,<2024.0.0)
|
|
24
25
|
Description-Content-Type: text/markdown
|
|
25
26
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[tool.poetry]
|
|
2
2
|
name = "semantic-kernel"
|
|
3
|
-
version = "0.
|
|
3
|
+
version = "0.9.0.beta1"
|
|
4
4
|
description = "Semantic Kernel Python SDK"
|
|
5
5
|
authors = ["Microsoft <SK-Support@microsoft.com>"]
|
|
6
6
|
readme = "pip/README.md"
|
|
@@ -12,32 +12,33 @@ aiohttp = "^3.8"
|
|
|
12
12
|
numpy = "^1.24.2"
|
|
13
13
|
openai = ">=1.0"
|
|
14
14
|
aiofiles = "^23.1.0"
|
|
15
|
-
python-dotenv = "1.0.
|
|
15
|
+
python-dotenv = "1.0.1"
|
|
16
16
|
regex = "^2023.6.3"
|
|
17
17
|
openapi_core = "^0.18.0"
|
|
18
18
|
prance = "^23.6.21.0"
|
|
19
19
|
pydantic = ">2"
|
|
20
|
-
motor = "^3.3.
|
|
20
|
+
motor = "^3.3.2"
|
|
21
|
+
defusedxml = "^0.7.1"
|
|
21
22
|
|
|
22
23
|
[tool.poetry.group.dev.dependencies]
|
|
23
24
|
pre-commit = "3.5.0"
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
ruff = "0.1.8"
|
|
25
|
+
black = ">=23.12.0"
|
|
26
|
+
ruff = ">=0.2.2"
|
|
27
27
|
ipykernel = "^6.21.1"
|
|
28
28
|
pytest = "7.4.3"
|
|
29
29
|
pytest-asyncio = "0.23.2"
|
|
30
30
|
snoop = "0.4.3"
|
|
31
|
-
pytest-cov = "4.1.0"
|
|
31
|
+
pytest-cov = ">=4.1.0"
|
|
32
|
+
mypy = ">=1.8.0"
|
|
32
33
|
|
|
33
34
|
[tool.poetry.group.google_palm.dependencies]
|
|
34
|
-
google-generativeai = { version = ">=0.1,<0.
|
|
35
|
+
google-generativeai = { version = ">=0.1,<0.4", markers = "python_version >= '3.9'" }
|
|
35
36
|
grpcio-status = { version = "^1.53.0", markers = "python_version >= '3.9'" }
|
|
36
37
|
|
|
37
38
|
[tool.poetry.group.hugging_face.dependencies]
|
|
38
39
|
transformers = "^4.28.1"
|
|
39
40
|
sentence-transformers = "^2.2.2"
|
|
40
|
-
torch = "2.
|
|
41
|
+
torch = "2.2.0"
|
|
41
42
|
|
|
42
43
|
[tool.poetry.group.qdrant.dependencies]
|
|
43
44
|
qdrant-client = {version = "^1.3.2", python = ">=3.8,<3.12"}
|
|
@@ -50,7 +51,7 @@ pymilvus = "2.2.16"
|
|
|
50
51
|
milvus = "2.2.16"
|
|
51
52
|
|
|
52
53
|
[tool.poetry.group.weaviate.dependencies]
|
|
53
|
-
weaviate-client = "
|
|
54
|
+
weaviate-client = ">=3.18,<5.0"
|
|
54
55
|
|
|
55
56
|
[tool.poetry.group.pinecone.dependencies]
|
|
56
57
|
pinecone-client = "^2.2.2"
|
|
@@ -68,15 +69,16 @@ azure-search-documents = {version = "11.4.0b9", allow-prereleases = true}
|
|
|
68
69
|
azure-core = "^1.28.0"
|
|
69
70
|
azure-identity = "^1.13.0"
|
|
70
71
|
|
|
72
|
+
[tool.poetry.group.tests.dependencies]
|
|
73
|
+
azure-search-documents = {version = "11.4.0b9", allow-prereleases = true}
|
|
74
|
+
azure-core = "^1.28.0"
|
|
75
|
+
|
|
71
76
|
[tool.poetry.group.usearch.dependencies]
|
|
72
77
|
usearch = "1.1.1"
|
|
73
78
|
pyarrow = ">=12.0.1,<15.0.0"
|
|
74
79
|
|
|
75
|
-
[tool.isort]
|
|
76
|
-
profile = "ruff"
|
|
77
|
-
|
|
78
80
|
[tool.ruff]
|
|
79
|
-
select = ["E", "F", "I"]
|
|
81
|
+
lint.select = ["E", "F", "I"]
|
|
80
82
|
line-length = 120
|
|
81
83
|
|
|
82
84
|
[tool.black]
|
|
@@ -1,18 +1,12 @@
|
|
|
1
1
|
# Copyright (c) Microsoft. All rights reserved.
|
|
2
2
|
|
|
3
3
|
from semantic_kernel import core_plugins, memory
|
|
4
|
+
from semantic_kernel.functions.kernel_arguments import KernelArguments
|
|
5
|
+
from semantic_kernel.functions.kernel_function import KernelFunction
|
|
4
6
|
from semantic_kernel.kernel import Kernel
|
|
5
|
-
from semantic_kernel.
|
|
6
|
-
from semantic_kernel.orchestration.kernel_context import KernelContext
|
|
7
|
-
from semantic_kernel.orchestration.kernel_function_base import KernelFunctionBase
|
|
8
|
-
from semantic_kernel.semantic_functions.chat_prompt_template import ChatPromptTemplate
|
|
9
|
-
from semantic_kernel.semantic_functions.prompt_template import PromptTemplate
|
|
10
|
-
from semantic_kernel.semantic_functions.prompt_template_config import (
|
|
7
|
+
from semantic_kernel.prompt_template.prompt_template_config import (
|
|
11
8
|
PromptTemplateConfig,
|
|
12
9
|
)
|
|
13
|
-
from semantic_kernel.semantic_functions.semantic_function_config import (
|
|
14
|
-
SemanticFunctionConfig,
|
|
15
|
-
)
|
|
16
10
|
from semantic_kernel.utils.logging import setup_logging
|
|
17
11
|
from semantic_kernel.utils.null_logger import NullLogger
|
|
18
12
|
from semantic_kernel.utils.settings import (
|
|
@@ -46,12 +40,8 @@ __all__ = [
|
|
|
46
40
|
"google_palm_settings_from_dot_env",
|
|
47
41
|
"redis_settings_from_dot_env",
|
|
48
42
|
"PromptTemplateConfig",
|
|
49
|
-
"
|
|
50
|
-
"
|
|
51
|
-
"SemanticFunctionConfig",
|
|
52
|
-
"ContextVariables",
|
|
53
|
-
"KernelFunctionBase",
|
|
54
|
-
"KernelContext",
|
|
43
|
+
"KernelArguments",
|
|
44
|
+
"KernelFunction",
|
|
55
45
|
"memory",
|
|
56
46
|
"core_plugins",
|
|
57
47
|
"setup_logging",
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
# Copyright (c) Microsoft. All rights reserved.
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from typing import TYPE_CHECKING, AsyncIterable, Dict, List, Optional, Type
|
|
5
|
+
|
|
6
|
+
from semantic_kernel.contents import ChatMessageContent
|
|
7
|
+
from semantic_kernel.services.ai_service_client_base import AIServiceClientBase
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
|
|
11
|
+
from semantic_kernel.contents import StreamingChatMessageContent
|
|
12
|
+
from semantic_kernel.contents.chat_history import ChatHistory
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ChatCompletionClientBase(AIServiceClientBase, ABC):
    """Base class for chat-completion AI service clients."""

    def get_chat_message_content_class(self) -> Type[ChatMessageContent]:
        """Get the chat message content types used by a class, default is ChatMessageContent."""
        return ChatMessageContent

    @abstractmethod
    async def complete_chat(
        self,
        chat_history: "ChatHistory",
        settings: "PromptExecutionSettings",
    ) -> List["ChatMessageContent"]:
        """
        This is the method that is called from the kernel to get a response from a chat-optimized LLM.

        Arguments:
            chat_history {ChatHistory} -- A list of chats in a chat_history object, that can be
                rendered into messages from system, user, assistant and tools.
            settings {PromptExecutionSettings} -- Settings for the request.

        Returns:
            List[ChatMessageContent] -- A list of ChatMessageContent objects representing
                the response(s) from the LLM.
        """
        pass

    @abstractmethod
    async def complete_chat_stream(
        self,
        chat_history: "ChatHistory",
        settings: "PromptExecutionSettings",
    ) -> AsyncIterable[List["StreamingChatMessageContent"]]:
        """
        This is the method that is called from the kernel to get a stream response from a chat-optimized LLM.

        Arguments:
            chat_history {ChatHistory} -- A list of chat chat_history, that can be rendered into a
                set of chat_history, from system, user, assistant and function.
            settings {PromptExecutionSettings} -- Settings for the request.

        Yields:
            A stream representing the response(s) from the LLM.
        """
        pass

    def _prepare_chat_history_for_request(
        self,
        chat_history: "ChatHistory",
    ) -> List[Dict[str, Optional[str]]]:
        """
        Prepare the chat history for a request, allowing customization of the key names for role/author,
        and optionally overriding the role.
        """
        # pydantic v2 model_dump expects `exclude` as a set of field names, not a list.
        return [
            message.model_dump(exclude_none=True, exclude={"metadata", "encoding"})
            for message in chat_history.messages
        ]
|
|
@@ -3,11 +3,13 @@
|
|
|
3
3
|
from abc import ABC, abstractmethod
|
|
4
4
|
from typing import TYPE_CHECKING, List
|
|
5
5
|
|
|
6
|
+
from semantic_kernel.services.ai_service_client_base import AIServiceClientBase
|
|
7
|
+
|
|
6
8
|
if TYPE_CHECKING:
|
|
7
9
|
from numpy import ndarray
|
|
8
10
|
|
|
9
11
|
|
|
10
|
-
class EmbeddingGeneratorBase(ABC):
|
|
12
|
+
class EmbeddingGeneratorBase(AIServiceClientBase, ABC):
    """Base class for embedding-generation AI services."""

    @abstractmethod
    async def generate_embeddings(self, texts: List[str]) -> "ndarray":
        """Generate embeddings for the given texts.

        Arguments:
            texts {List[str]} -- The texts to generate embeddings for.

        Returns:
            ndarray -- The embeddings for the given texts.
        """
        pass
|
|
@@ -3,6 +3,7 @@ from typing import Any, Dict, Iterable, List, Optional, Union
|
|
|
3
3
|
from pydantic import Field, model_validator
|
|
4
4
|
|
|
5
5
|
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
|
|
6
|
+
from semantic_kernel.exceptions import ServiceInvalidExecutionSettingsError
|
|
6
7
|
|
|
7
8
|
# TODO: replace back with google types once pydantic issue is fixed.
|
|
8
9
|
MessagesOptions = List[Dict[str, Any]]
|
|
@@ -38,4 +39,6 @@ class GooglePalmChatPromptExecutionSettings(GooglePalmPromptExecutionSettings):
|
|
|
38
39
|
def validate_input(self):
    """Validate the chat prompt execution settings.

    Raises:
        ServiceInvalidExecutionSettingsError: when `prompt` is supplied together
            with `messages`, `context` or `examples`; they are mutually exclusive.
    """
    # The raise fires when prompt AND message-style fields are both set, so the
    # message must say "with", not "without" (original text contradicted the check).
    if self.prompt is not None and (self.messages or self.context or self.examples):
        raise ServiceInvalidExecutionSettingsError(
            "Prompt cannot be used with messages, context or examples"
        )
|
|
@@ -2,10 +2,11 @@
|
|
|
2
2
|
|
|
3
3
|
import logging
|
|
4
4
|
import sys
|
|
5
|
-
from typing import
|
|
5
|
+
from typing import Dict, List, Optional, Tuple
|
|
6
6
|
|
|
7
|
-
from semantic_kernel.
|
|
8
|
-
from semantic_kernel.
|
|
7
|
+
from semantic_kernel.contents.chat_message_content import ChatMessageContent
|
|
8
|
+
from semantic_kernel.contents.text_content import TextContent
|
|
9
|
+
from semantic_kernel.exceptions import ServiceInvalidRequestError, ServiceResponseException
|
|
9
10
|
|
|
10
11
|
if sys.version_info >= (3, 9):
|
|
11
12
|
from typing import Annotated
|
|
@@ -16,33 +17,31 @@ import google.generativeai as palm
|
|
|
16
17
|
from google.generativeai.types import ChatResponse, MessageDict
|
|
17
18
|
from pydantic import PrivateAttr, StringConstraints
|
|
18
19
|
|
|
19
|
-
from semantic_kernel.connectors.ai.
|
|
20
|
-
from semantic_kernel.connectors.ai.ai_service_client_base import AIServiceClientBase
|
|
21
|
-
from semantic_kernel.connectors.ai.chat_completion_client_base import (
|
|
22
|
-
ChatCompletionClientBase,
|
|
23
|
-
)
|
|
20
|
+
from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
|
|
24
21
|
from semantic_kernel.connectors.ai.google_palm.gp_prompt_execution_settings import (
|
|
25
22
|
GooglePalmChatPromptExecutionSettings,
|
|
26
23
|
GooglePalmPromptExecutionSettings,
|
|
27
24
|
)
|
|
28
25
|
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
|
|
29
|
-
from semantic_kernel.connectors.ai.text_completion_client_base import
|
|
30
|
-
|
|
31
|
-
|
|
26
|
+
from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
|
|
27
|
+
from semantic_kernel.contents.chat_history import ChatHistory
|
|
28
|
+
from semantic_kernel.contents.chat_role import ChatRole
|
|
32
29
|
|
|
33
30
|
logger: logging.Logger = logging.getLogger(__name__)
|
|
34
31
|
|
|
32
|
+
int_to_role = {1: ChatRole.USER, 2: ChatRole.SYSTEM, 3: ChatRole.ASSISTANT, 4: ChatRole.TOOL}
|
|
33
|
+
|
|
35
34
|
|
|
36
|
-
class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBase
|
|
35
|
+
class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBase):
|
|
37
36
|
api_key: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)]
|
|
38
|
-
_message_history: Optional[
|
|
37
|
+
_message_history: Optional[ChatHistory] = PrivateAttr()
|
|
38
|
+
service_id: Optional[str] = None
|
|
39
39
|
|
|
40
40
|
def __init__(
|
|
41
41
|
self,
|
|
42
42
|
ai_model_id: str,
|
|
43
43
|
api_key: str,
|
|
44
|
-
message_history: Optional[
|
|
45
|
-
log: Optional[Any] = None,
|
|
44
|
+
message_history: Optional[ChatHistory] = None,
|
|
46
45
|
):
|
|
47
46
|
"""
|
|
48
47
|
Initializes a new instance of the GooglePalmChatCompletion class.
|
|
@@ -52,20 +51,17 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
52
51
|
https://developers.generativeai.google/models/language
|
|
53
52
|
api_key {str} -- GooglePalm API key, see
|
|
54
53
|
https://developers.generativeai.google/products/palm
|
|
55
|
-
message_history {Optional[
|
|
56
|
-
log {Optional[Any]} -- A logger to use for logging. (Optional)
|
|
54
|
+
message_history {Optional[ChatHistory]} -- The message history to use for context. (Optional)
|
|
57
55
|
"""
|
|
58
56
|
super().__init__(
|
|
59
57
|
ai_model_id=ai_model_id,
|
|
60
58
|
api_key=api_key,
|
|
61
59
|
)
|
|
62
|
-
if log:
|
|
63
|
-
logger.warning("The `log` parameter is deprecated. Please use the `logging` module instead.")
|
|
64
60
|
self._message_history = message_history
|
|
65
61
|
|
|
66
62
|
async def complete_chat(
|
|
67
63
|
self,
|
|
68
|
-
messages:
|
|
64
|
+
messages: ChatHistory,
|
|
69
65
|
settings: GooglePalmPromptExecutionSettings,
|
|
70
66
|
) -> List[ChatMessageContent]:
|
|
71
67
|
"""
|
|
@@ -79,7 +75,7 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
79
75
|
Returns:
|
|
80
76
|
List[ChatMessageContent] -- A list of ChatMessageContent objects representing the response(s) from the LLM.
|
|
81
77
|
"""
|
|
82
|
-
settings.messages =
|
|
78
|
+
settings.messages = self._prepare_chat_history_for_request(messages)
|
|
83
79
|
if not settings.ai_model_id:
|
|
84
80
|
settings.ai_model_id = self.ai_model_id
|
|
85
81
|
response = await self._send_chat_request(settings)
|
|
@@ -99,13 +95,16 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
99
95
|
Returns:
|
|
100
96
|
ChatMessageContent -- The created chat message content.
|
|
101
97
|
"""
|
|
102
|
-
metadata = {
|
|
98
|
+
metadata = {
|
|
99
|
+
"citation_metadata": candidate.get("citation_metadata"),
|
|
100
|
+
"filters": response.filters,
|
|
101
|
+
"choice_index": index,
|
|
102
|
+
}
|
|
103
103
|
return ChatMessageContent(
|
|
104
|
-
choice_index=index,
|
|
105
104
|
inner_content=response,
|
|
106
105
|
ai_model_id=self.ai_model_id,
|
|
107
106
|
metadata=metadata,
|
|
108
|
-
role=candidate.get("author"),
|
|
107
|
+
role=int_to_role[int(candidate.get("author"))], # TODO: why is author coming back as '1'?
|
|
109
108
|
content=candidate.get("content"),
|
|
110
109
|
)
|
|
111
110
|
|
|
@@ -120,7 +119,6 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
120
119
|
self,
|
|
121
120
|
prompt: str,
|
|
122
121
|
settings: GooglePalmPromptExecutionSettings,
|
|
123
|
-
**kwargs,
|
|
124
122
|
) -> List[TextContent]:
|
|
125
123
|
"""
|
|
126
124
|
This is the method that is called from the kernel to get a response from a text-optimized LLM.
|
|
@@ -132,8 +130,6 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
132
130
|
Returns:
|
|
133
131
|
List[TextContent] -- A list of TextContent objects representing the response(s) from the LLM.
|
|
134
132
|
"""
|
|
135
|
-
if kwargs.get("logger"):
|
|
136
|
-
logger.warning("The `logger` parameter is deprecated. Please use the `logging` module instead.")
|
|
137
133
|
settings.messages = [{"author": "user", "content": prompt}]
|
|
138
134
|
if not settings.ai_model_id:
|
|
139
135
|
settings.ai_model_id = self.ai_model_id
|
|
@@ -162,10 +158,7 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
162
158
|
self,
|
|
163
159
|
prompt: str,
|
|
164
160
|
settings: GooglePalmPromptExecutionSettings,
|
|
165
|
-
**kwargs,
|
|
166
161
|
):
|
|
167
|
-
if kwargs.get("logger"):
|
|
168
|
-
logger.warning("The `logger` parameter is deprecated. Please use the `logging` module instead.")
|
|
169
162
|
raise NotImplementedError("Google Palm API does not currently support streaming")
|
|
170
163
|
|
|
171
164
|
async def _send_chat_request(
|
|
@@ -206,10 +199,7 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
206
199
|
raise ValueError("The request settings cannot be `None`")
|
|
207
200
|
|
|
208
201
|
if settings.messages[-1]["author"] != "user":
|
|
209
|
-
raise
|
|
210
|
-
AIException.ErrorCodes.InvalidRequest,
|
|
211
|
-
"The last message must be from the user",
|
|
212
|
-
)
|
|
202
|
+
raise ServiceInvalidRequestError("The last message must be from the user")
|
|
213
203
|
try:
|
|
214
204
|
palm.configure(api_key=self.api_key)
|
|
215
205
|
except Exception as ex:
|
|
@@ -226,13 +216,27 @@ class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBas
|
|
|
226
216
|
)
|
|
227
217
|
self._message_history = response # Store response object for future use
|
|
228
218
|
except Exception as ex:
|
|
229
|
-
raise
|
|
230
|
-
AIException.ErrorCodes.ServiceError,
|
|
219
|
+
raise ServiceResponseException(
|
|
231
220
|
"Google PaLM service failed to complete the prompt",
|
|
232
221
|
ex,
|
|
233
|
-
)
|
|
222
|
+
) from ex
|
|
234
223
|
return response
|
|
235
224
|
|
|
236
225
|
def get_prompt_execution_settings_class(self) -> "PromptExecutionSettings":
    """Return the prompt execution settings class used by this service.

    Note: this returns the class itself (GooglePalmChatPromptExecutionSettings),
    not an instance; the original docstring ("Create a request settings object")
    was misleading.
    """
    return GooglePalmChatPromptExecutionSettings
|
|
228
|
+
|
|
229
|
+
def _prepare_chat_history_for_request(
    self,
    chat_history: ChatHistory,
) -> List[Dict[str, Optional[str]]]:
    """
    Prepare the chat history for a Google PaLM request.

    PaLM uses the key "author" instead of "role", so each dumped message is
    re-keyed accordingly. The PaLM chat API requires the final message to come
    from the user, so the last entry's author is forced to "user".
    """
    standard_out = super()._prepare_chat_history_for_request(chat_history)
    for message in standard_out:
        message["author"] = message.pop("role")
    # Guard against an empty history: indexing [-1] would raise IndexError.
    if standard_out:
        # The last message should always be from the user
        standard_out[-1]["author"] = "user"
    return standard_out
|
|
@@ -2,36 +2,32 @@
|
|
|
2
2
|
|
|
3
3
|
import logging
|
|
4
4
|
import sys
|
|
5
|
-
from typing import
|
|
5
|
+
from typing import List
|
|
6
6
|
|
|
7
|
-
from semantic_kernel.
|
|
7
|
+
from semantic_kernel.contents.text_content import TextContent
|
|
8
|
+
from semantic_kernel.exceptions import ServiceResponseException
|
|
8
9
|
|
|
9
10
|
if sys.version_info >= (3, 9):
|
|
10
11
|
from typing import Annotated
|
|
11
12
|
else:
|
|
12
13
|
from typing_extensions import Annotated
|
|
14
|
+
|
|
13
15
|
import google.generativeai as palm
|
|
14
16
|
from google.generativeai.types import Completion
|
|
15
17
|
from google.generativeai.types.text_types import TextCompletion
|
|
16
18
|
from pydantic import StringConstraints
|
|
17
19
|
|
|
18
|
-
from semantic_kernel.connectors.ai.
|
|
19
|
-
from semantic_kernel.connectors.ai.ai_service_client_base import AIServiceClientBase
|
|
20
|
-
from semantic_kernel.connectors.ai.google_palm.gp_prompt_execution_settings import (
|
|
21
|
-
GooglePalmTextPromptExecutionSettings,
|
|
22
|
-
)
|
|
20
|
+
from semantic_kernel.connectors.ai.google_palm.gp_prompt_execution_settings import GooglePalmTextPromptExecutionSettings
|
|
23
21
|
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
|
|
24
|
-
from semantic_kernel.connectors.ai.text_completion_client_base import
|
|
25
|
-
TextCompletionClientBase,
|
|
26
|
-
)
|
|
22
|
+
from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
|
|
27
23
|
|
|
28
24
|
logger: logging.Logger = logging.getLogger(__name__)
|
|
29
25
|
|
|
30
26
|
|
|
31
|
-
class GooglePalmTextCompletion(TextCompletionClientBase
|
|
27
|
+
class GooglePalmTextCompletion(TextCompletionClientBase):
|
|
32
28
|
api_key: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)]
|
|
33
29
|
|
|
34
|
-
def __init__(self, ai_model_id: str, api_key: str
|
|
30
|
+
def __init__(self, ai_model_id: str, api_key: str):
|
|
35
31
|
"""
|
|
36
32
|
Initializes a new instance of the GooglePalmTextCompletion class.
|
|
37
33
|
|
|
@@ -40,15 +36,10 @@ class GooglePalmTextCompletion(TextCompletionClientBase, AIServiceClientBase):
|
|
|
40
36
|
https://developers.generativeai.google/models/language
|
|
41
37
|
api_key {str} -- GooglePalm API key, see
|
|
42
38
|
https://developers.generativeai.google/products/palm
|
|
43
|
-
log {Optional[Any]} -- The logger instance to use. (Optional) (Deprecated)
|
|
44
39
|
"""
|
|
45
40
|
super().__init__(ai_model_id=ai_model_id, api_key=api_key)
|
|
46
|
-
if log:
|
|
47
|
-
logger.warning("The `log` parameter is deprecated. Please use the `logging` module instead.")
|
|
48
41
|
|
|
49
|
-
async def complete(
|
|
50
|
-
self, prompt: str, settings: GooglePalmTextPromptExecutionSettings, **kwargs
|
|
51
|
-
) -> List[TextContent]:
|
|
42
|
+
async def complete(self, prompt: str, settings: GooglePalmTextPromptExecutionSettings) -> List[TextContent]:
|
|
52
43
|
"""
|
|
53
44
|
This is the method that is called from the kernel to get a response from a text-optimized LLM.
|
|
54
45
|
|
|
@@ -72,11 +63,10 @@ class GooglePalmTextCompletion(TextCompletionClientBase, AIServiceClientBase):
|
|
|
72
63
|
try:
|
|
73
64
|
response = palm.generate_text(**settings.prepare_settings_dict())
|
|
74
65
|
except Exception as ex:
|
|
75
|
-
raise
|
|
76
|
-
AIException.ErrorCodes.ServiceError,
|
|
66
|
+
raise ServiceResponseException(
|
|
77
67
|
"Google PaLM service failed to complete the prompt",
|
|
78
68
|
ex,
|
|
79
|
-
)
|
|
69
|
+
) from ex
|
|
80
70
|
return [self._create_text_content(response, candidate) for candidate in response.candidates]
|
|
81
71
|
|
|
82
72
|
def _create_text_content(self, response: Completion, candidate: TextCompletion) -> TextContent:
|
|
@@ -97,7 +87,6 @@ class GooglePalmTextCompletion(TextCompletionClientBase, AIServiceClientBase):
|
|
|
97
87
|
self,
|
|
98
88
|
prompt: str,
|
|
99
89
|
settings: GooglePalmTextPromptExecutionSettings,
|
|
100
|
-
logger: Optional[Any] = None,
|
|
101
90
|
):
|
|
102
91
|
raise NotImplementedError("Google Palm API does not currently support streaming")
|
|
103
92
|
|
|
@@ -8,18 +8,18 @@ if sys.version_info >= (3, 9):
|
|
|
8
8
|
from typing import Annotated
|
|
9
9
|
else:
|
|
10
10
|
from typing_extensions import Annotated
|
|
11
|
+
|
|
11
12
|
import google.generativeai as palm
|
|
12
13
|
from numpy import array, ndarray
|
|
13
14
|
from pydantic import StringConstraints
|
|
14
15
|
|
|
15
|
-
from semantic_kernel.connectors.ai.ai_exception import AIException
|
|
16
|
-
from semantic_kernel.connectors.ai.ai_service_client_base import AIServiceClientBase
|
|
17
16
|
from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import (
|
|
18
17
|
EmbeddingGeneratorBase,
|
|
19
18
|
)
|
|
19
|
+
from semantic_kernel.exceptions import ServiceInvalidAuthError, ServiceResponseException
|
|
20
20
|
|
|
21
21
|
|
|
22
|
-
class GooglePalmTextEmbedding(EmbeddingGeneratorBase
|
|
22
|
+
class GooglePalmTextEmbedding(EmbeddingGeneratorBase):
|
|
23
23
|
api_key: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)]
|
|
24
24
|
|
|
25
25
|
def __init__(self, ai_model_id: str, api_key: str) -> None:
|
|
@@ -47,10 +47,10 @@ class GooglePalmTextEmbedding(EmbeddingGeneratorBase, AIServiceClientBase):
|
|
|
47
47
|
try:
|
|
48
48
|
palm.configure(api_key=self.api_key)
|
|
49
49
|
except Exception as ex:
|
|
50
|
-
raise
|
|
50
|
+
raise ServiceInvalidAuthError(
|
|
51
51
|
"Google PaLM service failed to configure. Invalid API key provided.",
|
|
52
52
|
ex,
|
|
53
|
-
)
|
|
53
|
+
) from ex
|
|
54
54
|
embeddings = []
|
|
55
55
|
for text in texts:
|
|
56
56
|
try:
|
|
@@ -60,9 +60,8 @@ class GooglePalmTextEmbedding(EmbeddingGeneratorBase, AIServiceClientBase):
|
|
|
60
60
|
)
|
|
61
61
|
embeddings.append(array(response["embedding"]))
|
|
62
62
|
except Exception as ex:
|
|
63
|
-
raise
|
|
64
|
-
AIException.ErrorCodes.ServiceError,
|
|
63
|
+
raise ServiceResponseException(
|
|
65
64
|
"Google PaLM service failed to generate the embedding.",
|
|
66
65
|
ex,
|
|
67
|
-
)
|
|
66
|
+
) from ex
|
|
68
67
|
return array(embeddings)
|
|
@@ -26,15 +26,10 @@ class HuggingFacePromptExecutionSettings(PromptExecutionSettings):
|
|
|
26
26
|
|
|
27
27
|
def prepare_settings_dict(self, **kwargs) -> Dict[str, Any]:
    """Build the keyword arguments for the Hugging Face generation call.

    Starts from the configured generation settings and lets any extra keyword
    arguments supplied by the caller extend or override them.
    """
    base_settings = {
        "generation_config": self.get_generation_config(),
        "num_return_sequences": self.num_return_sequences,
        "do_sample": self.do_sample,
    }
    return {**base_settings, **kwargs}
|