spaik-sdk 0.6.5__tar.gz → 0.6.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/PKG-INFO +5 -1
  2. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/pyproject.toml +5 -1
  3. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/factories/anthropic_factory.py +1 -1
  4. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/factories/base_model_factory.py +12 -0
  5. spaik_sdk-0.6.6/spaik_sdk/models/factories/cohere_factory.py +24 -0
  6. spaik_sdk-0.6.6/spaik_sdk/models/factories/deepseek_factory.py +26 -0
  7. spaik_sdk-0.6.6/spaik_sdk/models/factories/meta_factory.py +26 -0
  8. spaik_sdk-0.6.6/spaik_sdk/models/factories/mistral_factory.py +26 -0
  9. spaik_sdk-0.6.6/spaik_sdk/models/factories/moonshot_factory.py +26 -0
  10. spaik_sdk-0.6.6/spaik_sdk/models/factories/xai_factory.py +26 -0
  11. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/llm_config.py +1 -1
  12. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/llm_families.py +6 -0
  13. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/model_registry.py +93 -0
  14. spaik_sdk-0.6.6/spaik_sdk/models/providers/azure_provider.py +101 -0
  15. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/base_provider.py +16 -0
  16. spaik_sdk-0.6.6/spaik_sdk/models/providers/cohere_provider.py +23 -0
  17. spaik_sdk-0.6.6/spaik_sdk/models/providers/deepseek_provider.py +23 -0
  18. spaik_sdk-0.6.6/spaik_sdk/models/providers/mistral_provider.py +23 -0
  19. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/provider_type.py +17 -0
  20. spaik_sdk-0.6.6/spaik_sdk/models/providers/xai_provider.py +23 -0
  21. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/uv.lock +771 -283
  22. spaik_sdk-0.6.5/spaik_sdk/models/providers/azure_provider.py +0 -55
  23. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/.cursor/rules/global.mdc +0 -0
  24. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/.cursor/rules/post_run.mdc +0 -0
  25. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/.cursor/rules/repo_overview.mdc +0 -0
  26. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/.cursor/rules/testing-structure.mdc +0 -0
  27. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/.gitignore +0 -0
  28. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/Makefile +0 -0
  29. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/README.md +0 -0
  30. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/__init__.py.bak +0 -0
  31. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/env.example +0 -0
  32. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/kill.sh +0 -0
  33. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/py.typed +0 -0
  34. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/setup.sh +0 -0
  35. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/__init__.py +0 -0
  36. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/agent/__init__.py +0 -0
  37. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/agent/base_agent.py +0 -0
  38. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/__init__.py +0 -0
  39. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/builder.py +0 -0
  40. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/file_storage_provider.py +0 -0
  41. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/mime_types.py +0 -0
  42. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/models.py +0 -0
  43. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/provider_support.py +0 -0
  44. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/storage/__init__.py +0 -0
  45. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/storage/base_file_storage.py +0 -0
  46. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/storage/impl/__init__.py +0 -0
  47. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/attachments/storage/impl/local_file_storage.py +0 -0
  48. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/__init__.py +0 -0
  49. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/options.py +0 -0
  50. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/providers/__init__.py +0 -0
  51. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/providers/google_tts.py +0 -0
  52. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/providers/openai_stt.py +0 -0
  53. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/providers/openai_tts.py +0 -0
  54. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/stt.py +0 -0
  55. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/audio/tts.py +0 -0
  56. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/config/credentials_provider.py +0 -0
  57. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/config/env.py +0 -0
  58. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/config/env_credentials_provider.py +0 -0
  59. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/config/get_credentials_provider.py +0 -0
  60. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/image_gen/__init__.py +0 -0
  61. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/image_gen/image_generator.py +0 -0
  62. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/image_gen/options.py +0 -0
  63. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/image_gen/providers/__init__.py +0 -0
  64. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/image_gen/providers/google.py +0 -0
  65. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/image_gen/providers/openai.py +0 -0
  66. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/__init__.py +0 -0
  67. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/cancellation_handle.py +0 -0
  68. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/consumption/__init__.py +0 -0
  69. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/consumption/consumption_estimate.py +0 -0
  70. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/consumption/consumption_estimate_builder.py +0 -0
  71. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/consumption/consumption_extractor.py +0 -0
  72. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/consumption/token_usage.py +0 -0
  73. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/converters.py +0 -0
  74. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/cost/__init__.py +0 -0
  75. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/cost/builtin_cost_provider.py +0 -0
  76. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/cost/cost_estimate.py +0 -0
  77. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/cost/cost_provider.py +0 -0
  78. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/extract_error_message.py +0 -0
  79. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/langchain_loop_manager.py +0 -0
  80. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/langchain_service.py +0 -0
  81. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/message_handler.py +0 -0
  82. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/streaming/__init__.py +0 -0
  83. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/streaming/block_manager.py +0 -0
  84. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/streaming/models.py +0 -0
  85. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/streaming/streaming_content_handler.py +0 -0
  86. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/streaming/streaming_event_handler.py +0 -0
  87. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/llm/streaming/streaming_state_manager.py +0 -0
  88. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/__init__.py +0 -0
  89. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/factories/__init__.py +0 -0
  90. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/factories/google_factory.py +0 -0
  91. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/factories/ollama_factory.py +0 -0
  92. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/factories/openai_factory.py +0 -0
  93. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/llm_model.py +0 -0
  94. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/llm_wrapper.py +0 -0
  95. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/__init__.py +0 -0
  96. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/anthropic_provider.py +0 -0
  97. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/google_provider.py +0 -0
  98. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/ollama_provider.py +0 -0
  99. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/models/providers/openai_provider.py +0 -0
  100. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/orchestration/__init__.py +0 -0
  101. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/orchestration/base_orchestrator.py +0 -0
  102. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/orchestration/checkpoint.py +0 -0
  103. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/orchestration/models.py +0 -0
  104. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/prompt/__init__.py +0 -0
  105. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/prompt/get_prompt_loader.py +0 -0
  106. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/prompt/local_prompt_loader.py +0 -0
  107. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/prompt/prompt_loader.py +0 -0
  108. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/prompt/prompt_loader_mode.py +0 -0
  109. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/py.typed +0 -0
  110. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/__init__.py +0 -0
  111. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/base_playback.py +0 -0
  112. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/base_recorder.py +0 -0
  113. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/conditional_recorder.py +0 -0
  114. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/impl/__init__.py +0 -0
  115. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/impl/local_playback.py +0 -0
  116. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/impl/local_recorder.py +0 -0
  117. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/recording/langchain_serializer.py +0 -0
  118. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/__init__.py +0 -0
  119. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/routers/__init__.py +0 -0
  120. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/routers/api_builder.py +0 -0
  121. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/routers/audio_router_factory.py +0 -0
  122. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/routers/file_router_factory.py +0 -0
  123. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/routers/thread_router_factory.py +0 -0
  124. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/streaming/__init__.py +0 -0
  125. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/streaming/format_sse_event.py +0 -0
  126. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/streaming/negotiate_streaming_response.py +0 -0
  127. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/api/streaming/streaming_negotiator.py +0 -0
  128. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/authorization/__init__.py +0 -0
  129. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/authorization/base_authorizer.py +0 -0
  130. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/authorization/base_user.py +0 -0
  131. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/authorization/dummy_authorizer.py +0 -0
  132. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/job_processor/__init__.py +0 -0
  133. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/job_processor/base_job_processor.py +0 -0
  134. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/job_processor/thread_job_processor.py +0 -0
  135. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/__init__.py +0 -0
  136. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/cancellation_publisher.py +0 -0
  137. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/cancellation_subscriber.py +0 -0
  138. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/event_publisher.py +0 -0
  139. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/impl/__init__.py +0 -0
  140. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/impl/local_cancellation_pubsub.py +0 -0
  141. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/pubsub/impl/signalr_publisher.py +0 -0
  142. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/queue/__init__.py +0 -0
  143. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/queue/agent_job_queue.py +0 -0
  144. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/queue/impl/__init__.py +0 -0
  145. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/queue/impl/azure_queue.py +0 -0
  146. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/response/__init__.py +0 -0
  147. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/response/agent_response_generator.py +0 -0
  148. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/response/response_generator.py +0 -0
  149. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/response/simple_agent_response_generator.py +0 -0
  150. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/services/__init__.py +0 -0
  151. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/services/thread_converters.py +0 -0
  152. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/services/thread_models.py +0 -0
  153. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/services/thread_service.py +0 -0
  154. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/__init__.py +0 -0
  155. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/base_thread_repository.py +0 -0
  156. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/impl/__init__.py +0 -0
  157. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/impl/in_memory_thread_repository.py +0 -0
  158. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/impl/local_file_thread_repository.py +0 -0
  159. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/thread_filter.py +0 -0
  160. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/server/storage/thread_metadata.py +0 -0
  161. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/__init__.py +0 -0
  162. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/__init__.py +0 -0
  163. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/cli/__init__.py +0 -0
  164. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/cli/block_display.py +0 -0
  165. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/cli/display_manager.py +0 -0
  166. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/cli/live_cli.py +0 -0
  167. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/event_adapter.py +0 -0
  168. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/streaming_block_adapter.py +0 -0
  169. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/adapters/sync_adapter.py +0 -0
  170. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/models.py +0 -0
  171. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/thread/thread_container.py +0 -0
  172. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tools/__init__.py +0 -0
  173. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tools/impl/__init__.py +0 -0
  174. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tools/impl/mcp_tool_provider.py +0 -0
  175. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tools/impl/search_tool_provider.py +0 -0
  176. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tools/tool_provider.py +0 -0
  177. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/__init__.py +0 -0
  178. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/agent_trace.py +0 -0
  179. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/get_trace_sink.py +0 -0
  180. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/local_trace_sink.py +0 -0
  181. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/noop_trace_sink.py +0 -0
  182. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/trace_sink.py +0 -0
  183. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/tracing/trace_sink_mode.py +0 -0
  184. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/utils/__init__.py +0 -0
  185. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/spaik_sdk/utils/init_logger.py +0 -0
  186. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/__init__.py +0 -0
  187. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/conftest.py +0 -0
  188. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_consumption_tracking/1.jsonl +0 -0
  189. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_event_stream_basic/1.jsonl +0 -0
  190. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text/1.jsonl +0 -0
  191. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_claude-3-7-sonnet-latest/1.jsonl +0 -0
  192. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_claude-haiku-4-5-20251001/1.jsonl +0 -0
  193. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_claude-opus-4-5-20251101/1.jsonl +0 -0
  194. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_claude-sonnet-4-20250514/1.jsonl +0 -0
  195. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_claude-sonnet-4-5-20250929/1.jsonl +0 -0
  196. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_gemini-2.5-flash/1.jsonl +0 -0
  197. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_gemini-3-flash-preview/1.jsonl +0 -0
  198. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_gemini-3-pro-preview/1.jsonl +0 -0
  199. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_gpt-4.1/1.jsonl +0 -0
  200. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_gpt-5.1/1.jsonl +0 -0
  201. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_gpt-5.2/1.jsonl +0 -0
  202. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_o4-mini/1.jsonl +0 -0
  203. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_text_with_cancellation/1.jsonl +0 -0
  204. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_claude-3-7-sonnet-latest/1.jsonl +0 -0
  205. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_claude-haiku-4-5-20251001/1.jsonl +0 -0
  206. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_claude-opus-4-1-20250805/1.jsonl +0 -0
  207. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_claude-opus-4-5-20251101/1.jsonl +0 -0
  208. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_claude-sonnet-4-20250514/1.jsonl +0 -0
  209. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_claude-sonnet-4-5-20250929/1.jsonl +0 -0
  210. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gemini-2.5-flash/1.jsonl +0 -0
  211. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gemini-3-flash-preview/1.jsonl +0 -0
  212. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gpt-4.1/1.jsonl +0 -0
  213. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gpt-5/1.jsonl +0 -0
  214. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gpt-5.1/1.jsonl +0 -0
  215. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gpt-5.1-codex/1.jsonl +0 -0
  216. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gpt-5.2/1.jsonl +0 -0
  217. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_gpt-5.2-pro/1.jsonl +0 -0
  218. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_response_with_tool_call_o4-mini/1.jsonl +0 -0
  219. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_get_structured_response/1.json +0 -0
  220. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/data/recordings/test_mystery_streaming_issue/1.jsonl +0 -0
  221. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/integration/__init__.py +0 -0
  222. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/integration/test_cost_tracking_integration.py +0 -0
  223. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/integration/test_mcp_tool_provider.py +0 -0
  224. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/manual/__init__.py +0 -0
  225. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/manual/test_search.py +0 -0
  226. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/__init__.py +0 -0
  227. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/agent/test_base_agent.py +0 -0
  228. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/llm/streaming/test_streaming_event_handler.py +0 -0
  229. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/models/__init__.py +0 -0
  230. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/models/factories/__init__.py +0 -0
  231. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/models/factories/test_google_factory.py +0 -0
  232. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/models/factories/test_openai_factory.py +0 -0
  233. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/models/test_model_registry.py +0 -0
  234. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/orchestration/__init__.py +0 -0
  235. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/orchestration/test_base_orchestrator.py +0 -0
  236. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/tools/impl/__init__.py +0 -0
  237. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/tracing/__init__.py +0 -0
  238. {spaik_sdk-0.6.5 → spaik_sdk-0.6.6}/tests/unit/spaik_sdk/tracing/test_tracing.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: spaik-sdk
- Version: 0.6.5
+ Version: 0.6.6
  Summary: Python SDK for building AI agents with multi-LLM support, streaming, and production-ready infrastructure
  Project-URL: Homepage, https://github.com/siilisolutions/spaik-sdk
  Project-URL: Repository, https://github.com/siilisolutions/spaik-sdk
@@ -24,12 +24,16 @@ Requires-Dist: dotenv>=0.9.9
  Requires-Dist: fastapi>=0.115.12
  Requires-Dist: httpx>=0.25.0
  Requires-Dist: langchain-anthropic>=1.3.0
+ Requires-Dist: langchain-cohere>=0.5.0
  Requires-Dist: langchain-core>=1.2.0
+ Requires-Dist: langchain-deepseek>=1.0.0
  Requires-Dist: langchain-google-genai>=4.0.0
  Requires-Dist: langchain-mcp-adapters>=0.2.1
+ Requires-Dist: langchain-mistralai>=0.2.0
  Requires-Dist: langchain-ollama>=0.3.0
  Requires-Dist: langchain-openai>=1.1.0
  Requires-Dist: langchain-tavily>=0.2.15
+ Requires-Dist: langchain-xai>=0.2.0
  Requires-Dist: langchain>=1.2.0
  Requires-Dist: langgraph>=1.0.0
  Requires-Dist: mcp>=1.9.2
@@ -1,6 +1,6 @@
  [project]
  name = "spaik-sdk"
- version = "0.6.5"
+ version = "0.6.6"
  description = "Python SDK for building AI agents with multi-LLM support, streaming, and production-ready infrastructure"
  readme = "README.md"
  license = "MIT"
@@ -28,10 +28,14 @@ dependencies = [
  "cryptography>=41.0.0", # Required for JWT RSA operations
  "langchain>=1.2.0",
  "langchain-anthropic>=1.3.0",
+ "langchain-cohere>=0.5.0",
  "langchain-core>=1.2.0",
+ "langchain-deepseek>=1.0.0",
  "langchain-google-genai>=4.0.0",
+ "langchain-mistralai>=0.2.0",
  "langchain-openai>=1.1.0",
  "langchain-ollama>=0.3.0",
+ "langchain-xai>=0.2.0",
  "langgraph>=1.0.0",
  "pandas>=2.0.3",
  "pandas-stubs",
@@ -21,7 +21,7 @@ class AnthropicModelFactory(BaseModelFactory):
  model_config: Dict[str, Any] = {
  "model_name": config.model.name,
  "streaming": config.streaming,
- "max_tokens": config.max_output_tokens,
+ "max_tokens": config.max_output_tokens if config.max_output_tokens is not None else 8192,
  }

  # Handle thinking mode via model_kwargs for LangChain compatibility
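With max_output_tokens now optional on LLMConfig (see the llm_config.py hunk further down), the Anthropic factory supplies 8192 only when the caller left the field unset. A minimal sketch of that behavior, assuming LLMConfig accepts model as a keyword argument and that get_model_specific_config is the accessor shown for the other factories in this release:

from spaik_sdk.models.factories.anthropic_factory import AnthropicModelFactory
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry

# No explicit cap on the config, so the factory falls back to 8192.
config = LLMConfig(model=ModelRegistry.CLAUDE_4_5_SONNET)
anthropic_kwargs = AnthropicModelFactory().get_model_specific_config(config)
print(anthropic_kwargs["max_tokens"])  # 8192 when max_output_tokens is None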
@@ -54,15 +54,27 @@ class BaseModelFactory(ABC):
  """Factory method to create appropriate factory instance."""

  from spaik_sdk.models.factories.anthropic_factory import AnthropicModelFactory
+ from spaik_sdk.models.factories.cohere_factory import CohereModelFactory
+ from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
  from spaik_sdk.models.factories.google_factory import GoogleModelFactory
+ from spaik_sdk.models.factories.meta_factory import MetaModelFactory
+ from spaik_sdk.models.factories.mistral_factory import MistralModelFactory
+ from spaik_sdk.models.factories.moonshot_factory import MoonshotModelFactory
  from spaik_sdk.models.factories.ollama_factory import OllamaModelFactory
  from spaik_sdk.models.factories.openai_factory import OpenAIModelFactory
+ from spaik_sdk.models.factories.xai_factory import XAIModelFactory

  factories = [
  AnthropicModelFactory(),
  OpenAIModelFactory(),
  GoogleModelFactory(),
  OllamaModelFactory(),
+ DeepSeekModelFactory(),
+ XAIModelFactory(),
+ CohereModelFactory(),
+ MistralModelFactory(),
+ MetaModelFactory(),
+ MoonshotModelFactory(),
  ]
  for factory in factories:
  if factory.supports_model_config(config):
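Factory selection stays a linear scan: the first factory whose supports check accepts the config wins, so the new families slot in without touching call sites. A hedged sketch of the membership test, using only the classes and methods added in this release and assuming LLMConfig takes model as a keyword argument:

from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry

config = LLMConfig(model=ModelRegistry.DEEPSEEK_R1)
factory = DeepSeekModelFactory()
# supports_model checks membership in the family's registry entries.
print(factory.supports_model(config.model))          # True: DeepSeek-R1 is in the DeepSeek family
print(factory.supports_model(ModelRegistry.GROK_4))  # False: grok-4 belongs to the xAI family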
@@ -0,0 +1,24 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class CohereModelFactory(BaseModelFactory):
+ MODELS = ModelRegistry.get_by_family(LLMFamilies.COHERE)
+
+ def supports_model(self, model: LLMModel) -> bool:
+ return model in CohereModelFactory.MODELS
+
+ def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+ return None
+
+ def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+ model_config: Dict[str, Any] = {
+ "model": config.model.name,
+ "temperature": config.temperature,
+ }
+ return model_config
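For Cohere the factory emits only the model name and temperature; no token cap is passed through. A small sketch, assuming LLMConfig accepts model and temperature as keyword arguments:

from spaik_sdk.models.factories.cohere_factory import CohereModelFactory
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry

config = LLMConfig(model=ModelRegistry.COHERE_COMMAND_A, temperature=0.2)
print(CohereModelFactory().get_model_specific_config(config))
# {'model': 'Cohere-command-a', 'temperature': 0.2}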
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class DeepSeekModelFactory(BaseModelFactory):
+ MODELS = ModelRegistry.get_by_family(LLMFamilies.DEEPSEEK)
+
+ def supports_model(self, model: LLMModel) -> bool:
+ return model in DeepSeekModelFactory.MODELS
+
+ def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+ return None
+
+ def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+ model_config: Dict[str, Any] = {
+ "model": config.model.name,
+ "temperature": config.temperature,
+ }
+ if config.max_output_tokens is not None:
+ model_config["max_tokens"] = config.max_output_tokens
+ return model_config
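Unlike the Cohere factory above, DeepSeek (and the Meta, Mistral, Moonshot and xAI factories below) adds max_tokens only when the config sets an explicit cap. A sketch under the same LLMConfig keyword-argument assumption:

from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry

capped = LLMConfig(model=ModelRegistry.DEEPSEEK_V3_2, max_output_tokens=2048)
print(DeepSeekModelFactory().get_model_specific_config(capped))
# {'model': 'DeepSeek-V3.2', 'temperature': 0.1, 'max_tokens': 2048}
# With max_output_tokens left at None, the 'max_tokens' key is omitted entirely.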
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class MetaModelFactory(BaseModelFactory):
+ MODELS = ModelRegistry.get_by_family(LLMFamilies.META)
+
+ def supports_model(self, model: LLMModel) -> bool:
+ return model in MetaModelFactory.MODELS
+
+ def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+ return None
+
+ def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+ model_config: Dict[str, Any] = {
+ "model": config.model.name,
+ "temperature": config.temperature,
+ }
+ if config.max_output_tokens is not None:
+ model_config["max_tokens"] = config.max_output_tokens
+ return model_config
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class MistralModelFactory(BaseModelFactory):
+ MODELS = ModelRegistry.get_by_family(LLMFamilies.MISTRAL)
+
+ def supports_model(self, model: LLMModel) -> bool:
+ return model in MistralModelFactory.MODELS
+
+ def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+ return None
+
+ def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+ model_config: Dict[str, Any] = {
+ "model": config.model.name,
+ "temperature": config.temperature,
+ }
+ if config.max_output_tokens is not None:
+ model_config["max_tokens"] = config.max_output_tokens
+ return model_config
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class MoonshotModelFactory(BaseModelFactory):
+ MODELS = ModelRegistry.get_by_family(LLMFamilies.MOONSHOT)
+
+ def supports_model(self, model: LLMModel) -> bool:
+ return model in MoonshotModelFactory.MODELS
+
+ def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+ return None
+
+ def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+ model_config: Dict[str, Any] = {
+ "model": config.model.name,
+ "temperature": config.temperature,
+ }
+ if config.max_output_tokens is not None:
+ model_config["max_tokens"] = config.max_output_tokens
+ return model_config
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class XAIModelFactory(BaseModelFactory):
+ MODELS = ModelRegistry.get_by_family(LLMFamilies.XAI)
+
+ def supports_model(self, model: LLMModel) -> bool:
+ return model in XAIModelFactory.MODELS
+
+ def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+ return None
+
+ def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+ model_config: Dict[str, Any] = {
+ "model": config.model.name,
+ "temperature": config.temperature,
+ }
+ if config.max_output_tokens is not None:
+ model_config["max_tokens"] = config.max_output_tokens
+ return model_config
@@ -16,7 +16,7 @@ class LLMConfig:
  streaming: bool = True
  reasoning_summary: str = "detailed" # Options: "auto", "concise", "detailed", None
  reasoning_effort: str = "medium" # Options: "low", "medium", "high"
- max_output_tokens: int = 8192
+ max_output_tokens: Optional[int] = None
  reasoning_budget_tokens: int = 4096
  temperature: float = 0.1
  structured_response: bool = False
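Making max_output_tokens optional lets "no explicit cap" be represented directly; each factory then decides its own handling (Anthropic falls back to 8192, the new factories simply omit the key). A sketch, assuming LLMConfig is constructed with keyword arguments matching the fields listed above:

from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry

uncapped = LLMConfig(model=ModelRegistry.GPT_5_1)                        # max_output_tokens stays None
capped = LLMConfig(model=ModelRegistry.GPT_5_1, max_output_tokens=4096)  # explicit cap passed through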
@@ -5,3 +5,9 @@ class LLMFamilies:
  OPENAI = "openai"
  GOOGLE = "google"
  OLLAMA = "ollama"
+ DEEPSEEK = "deepseek"
+ MISTRAL = "mistral"
+ META = "meta"
+ COHERE = "cohere"
+ XAI = "xai"
+ MOONSHOT = "moonshot"
@@ -22,17 +22,32 @@ class ModelRegistry:

  # OpenAI models
  GPT_4_1 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4.1", reasoning=False, prompt_caching=True)
+ GPT_4_1_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4.1-mini", reasoning=False, prompt_caching=True)
+ GPT_4_1_NANO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4.1-nano", reasoning=False, prompt_caching=True)
  GPT_4O = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4o", reasoning=False, prompt_caching=True)
+ GPT_4O_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4o-mini", reasoning=False, prompt_caching=True)
+ O1 = LLMModel(family=LLMFamilies.OPENAI, name="o1")
+ O1_MINI = LLMModel(family=LLMFamilies.OPENAI, name="o1-mini")
+ O3 = LLMModel(family=LLMFamilies.OPENAI, name="o3")
+ O3_MINI = LLMModel(family=LLMFamilies.OPENAI, name="o3-mini")
+ O3_PRO = LLMModel(family=LLMFamilies.OPENAI, name="o3-pro")
  O4_MINI = LLMModel(family=LLMFamilies.OPENAI, name="o4-mini")
  O4_MINI_APRIL_2025 = LLMModel(family=LLMFamilies.OPENAI, name="o4-mini-2025-04-16")
+ CODEX_MINI = LLMModel(family=LLMFamilies.OPENAI, name="codex-mini")
  GPT_5 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5", reasoning=True, reasoning_min_effort="minimal", prompt_caching=True)
  GPT_5_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-mini", reasoning=True, reasoning_min_effort="minimal", prompt_caching=True)
  GPT_5_NANO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-nano", reasoning=True, reasoning_min_effort="minimal", prompt_caching=True)
+ GPT_5_CHAT = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-chat", reasoning=False, prompt_caching=True)
+ GPT_5_CODEX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-codex", reasoning=True, prompt_caching=True)
+ GPT_5_PRO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-pro", reasoning=True, prompt_caching=True)
  GPT_5_1 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1", reasoning=True, prompt_caching=True)
+ GPT_5_1_CHAT = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-chat", reasoning=True, prompt_caching=True)
  GPT_5_1_CODEX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-codex", reasoning=True, prompt_caching=True)
  GPT_5_1_CODEX_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-codex-mini", reasoning=True, prompt_caching=True)
  GPT_5_1_CODEX_MAX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-codex-max", reasoning=True, prompt_caching=True)
  GPT_5_2 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2", reasoning=True, prompt_caching=True)
+ GPT_5_2_CHAT = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2-chat", reasoning=False, prompt_caching=True)
+ GPT_5_2_CODEX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2-codex", reasoning=True, prompt_caching=True)
  GPT_5_2_PRO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2-pro", reasoning=True, prompt_caching=True)

  # Google models
@@ -43,6 +58,35 @@ class ModelRegistry:
  GEMINI_3_FLASH = LLMModel(family=LLMFamilies.GOOGLE, name="gemini-3-flash-preview", prompt_caching=True)
  GEMINI_3_PRO = LLMModel(family=LLMFamilies.GOOGLE, name="gemini-3-pro-preview", prompt_caching=True)

+ # DeepSeek models
+ DEEPSEEK_V3 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3-0324")
+ DEEPSEEK_V3_1 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3.1")
+ DEEPSEEK_V3_2 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3.2")
+ DEEPSEEK_V3_2_SPECIALE = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3.2-Speciale")
+ DEEPSEEK_R1 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-R1")
+ DEEPSEEK_R1_0528 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-R1-0528")
+
+ # Mistral models
+ MISTRAL_LARGE_3 = LLMModel(family=LLMFamilies.MISTRAL, name="Mistral-Large-3", reasoning=False)
+
+ # Meta Llama models
+ LLAMA_4_MAVERICK = LLMModel(family=LLMFamilies.META, name="Llama-4-Maverick-17B-128E-Instruct-FP8", reasoning=False)
+ LLAMA_3_3_70B = LLMModel(family=LLMFamilies.META, name="Llama-3.3-70B-Instruct", reasoning=False)
+
+ # Cohere models
+ COHERE_COMMAND_A = LLMModel(family=LLMFamilies.COHERE, name="Cohere-command-a", reasoning=False)
+
+ # xAI Grok models
+ GROK_3 = LLMModel(family=LLMFamilies.XAI, name="grok-3")
+ GROK_3_MINI = LLMModel(family=LLMFamilies.XAI, name="grok-3-mini")
+ GROK_4 = LLMModel(family=LLMFamilies.XAI, name="grok-4")
+ GROK_4_FAST_REASONING = LLMModel(family=LLMFamilies.XAI, name="grok-4-fast-reasoning")
+ GROK_4_FAST_NON_REASONING = LLMModel(family=LLMFamilies.XAI, name="grok-4-fast-non-reasoning", reasoning=False)
+ GROK_CODE_FAST_1 = LLMModel(family=LLMFamilies.XAI, name="grok-code-fast-1")
+
+ # Moonshot AI models
+ KIMI_K2_THINKING = LLMModel(family=LLMFamilies.MOONSHOT, name="Kimi-K2-Thinking")
+
  # Registry for custom models
  _custom_models: Set[LLMModel] = set()

@@ -82,6 +126,7 @@ class ModelRegistry:
  def _get_aliases(cls) -> Dict[str, LLMModel]:
  """Get aliases mapping."""
  return {
+ # Claude aliases
  "sonnet": cls.CLAUDE_4_SONNET,
  "sonnet 3.7": cls.CLAUDE_3_7_SONNET,
  "sonnet 4.5": cls.CLAUDE_4_5_SONNET,
@@ -100,25 +145,73 @@ class ModelRegistry:
  "claude 4.5 sonnet": cls.CLAUDE_4_5_SONNET,
  "claude 4.5 haiku": cls.CLAUDE_4_5_HAIKU,
  "claude 4 opus": cls.CLAUDE_4_OPUS,
+ # OpenAI aliases
+ "o1": cls.O1,
+ "o1 mini": cls.O1_MINI,
+ "o3": cls.O3,
+ "o3 mini": cls.O3_MINI,
+ "o3 pro": cls.O3_PRO,
  "o4 mini": cls.O4_MINI,
  "o4 mini 2025-04-16": cls.O4_MINI_APRIL_2025,
+ "codex mini": cls.CODEX_MINI,
  "gpt 4.1": cls.GPT_4_1,
+ "gpt 4.1 mini": cls.GPT_4_1_MINI,
+ "gpt 4.1 nano": cls.GPT_4_1_NANO,
  "gpt 4o": cls.GPT_4O,
+ "gpt 4o mini": cls.GPT_4O_MINI,
  "gpt 5": cls.GPT_5,
  "gpt 5 mini": cls.GPT_5_MINI,
  "gpt 5 nano": cls.GPT_5_NANO,
+ "gpt 5 chat": cls.GPT_5_CHAT,
+ "gpt 5 codex": cls.GPT_5_CODEX,
+ "gpt 5 pro": cls.GPT_5_PRO,
  "gpt 5.1": cls.GPT_5_1,
+ "gpt 5.1 chat": cls.GPT_5_1_CHAT,
  "gpt 5.1 codex": cls.GPT_5_1_CODEX,
  "gpt 5.1 codex mini": cls.GPT_5_1_CODEX_MINI,
  "gpt 5.1 codex max": cls.GPT_5_1_CODEX_MAX,
  "gpt 5.2": cls.GPT_5_2,
+ "gpt 5.2 chat": cls.GPT_5_2_CHAT,
+ "gpt 5.2 codex": cls.GPT_5_2_CODEX,
  "gpt 5.2 pro": cls.GPT_5_2_PRO,
+ # Gemini aliases
  "gemini 2.5 flash": cls.GEMINI_2_5_FLASH,
  "gemini 2.5 pro": cls.GEMINI_2_5_PRO,
  "gemini 3 flash": cls.GEMINI_3_FLASH,
  "gemini 3.0 flash": cls.GEMINI_3_FLASH,
  "gemini 3 pro": cls.GEMINI_3_PRO,
  "gemini 3.0 pro": cls.GEMINI_3_PRO,
+ # DeepSeek aliases
+ "deepseek": cls.DEEPSEEK_V3_2,
+ "deepseek v3": cls.DEEPSEEK_V3,
+ "deepseek v3.1": cls.DEEPSEEK_V3_1,
+ "deepseek v3.2": cls.DEEPSEEK_V3_2,
+ "deepseek v3.2 speciale": cls.DEEPSEEK_V3_2_SPECIALE,
+ "deepseek r1": cls.DEEPSEEK_R1,
+ # Mistral aliases
+ "mistral": cls.MISTRAL_LARGE_3,
+ "mistral large": cls.MISTRAL_LARGE_3,
+ "mistral large 3": cls.MISTRAL_LARGE_3,
+ # Meta Llama aliases
+ "llama": cls.LLAMA_3_3_70B,
+ "llama 3.3": cls.LLAMA_3_3_70B,
+ "llama 3.3 70b": cls.LLAMA_3_3_70B,
+ "llama 4": cls.LLAMA_4_MAVERICK,
+ "llama 4 maverick": cls.LLAMA_4_MAVERICK,
+ # Cohere aliases
+ "cohere": cls.COHERE_COMMAND_A,
+ "cohere command": cls.COHERE_COMMAND_A,
+ "command a": cls.COHERE_COMMAND_A,
+ # xAI Grok aliases
+ "grok": cls.GROK_4,
+ "grok 3": cls.GROK_3,
+ "grok 3 mini": cls.GROK_3_MINI,
+ "grok 4": cls.GROK_4,
+ "grok 4 fast": cls.GROK_4_FAST_REASONING,
+ "grok code": cls.GROK_CODE_FAST_1,
+ # Moonshot aliases
+ "kimi": cls.KIMI_K2_THINKING,
+ "kimi k2": cls.KIMI_K2_THINKING,
  }
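These aliases feed ModelRegistry.from_name, the same lookup AzureProvider uses below to filter its deployment map, so both canonical names and the short forms above should resolve to registry entries (assuming from_name consults the alias table, as the existing Claude and GPT aliases suggest). Sketch:

from spaik_sdk.models.model_registry import ModelRegistry

print(ModelRegistry.from_name("grok 4 fast").name)  # grok-4-fast-reasoning
print(ModelRegistry.from_name("kimi k2").name)      # Kimi-K2-Thinking
# Unknown names raise ValueError, which AzureProvider catches when building its supported set.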
@@ -0,0 +1,101 @@
+ import os
+ from typing import Any, Collection, Dict, Set
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_openai import AzureChatOpenAI
+
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+ # Model name -> Environment variable for Azure deployment name
+ AZURE_DEPLOYMENT_ENV_VARS: Dict[str, str] = {
+ # OpenAI models
+ "gpt-4.1": "AZURE_GPT_4_1_DEPLOYMENT",
+ "gpt-4.1-mini": "AZURE_GPT_4_1_MINI_DEPLOYMENT",
+ "gpt-4.1-nano": "AZURE_GPT_4_1_NANO_DEPLOYMENT",
+ "gpt-4o": "AZURE_GPT_4O_DEPLOYMENT",
+ "gpt-4o-mini": "AZURE_GPT_4O_MINI_DEPLOYMENT",
+ "o1": "AZURE_O1_DEPLOYMENT",
+ "o1-mini": "AZURE_O1_MINI_DEPLOYMENT",
+ "o3": "AZURE_O3_DEPLOYMENT",
+ "o3-mini": "AZURE_O3_MINI_DEPLOYMENT",
+ "o3-pro": "AZURE_O3_PRO_DEPLOYMENT",
+ "o4-mini": "AZURE_O4_MINI_DEPLOYMENT",
+ "o4-mini-2025-04-16": "AZURE_O4_MINI_2025_04_16_DEPLOYMENT",
+ "codex-mini": "AZURE_CODEX_MINI_DEPLOYMENT",
+ "gpt-5": "AZURE_GPT_5_DEPLOYMENT",
+ "gpt-5-mini": "AZURE_GPT_5_MINI_DEPLOYMENT",
+ "gpt-5-nano": "AZURE_GPT_5_NANO_DEPLOYMENT",
+ "gpt-5-chat": "AZURE_GPT_5_CHAT_DEPLOYMENT",
+ "gpt-5-codex": "AZURE_GPT_5_CODEX_DEPLOYMENT",
+ "gpt-5-pro": "AZURE_GPT_5_PRO_DEPLOYMENT",
+ "gpt-5.1": "AZURE_GPT_5_1_DEPLOYMENT",
+ "gpt-5.1-chat": "AZURE_GPT_5_1_CHAT_DEPLOYMENT",
+ "gpt-5.1-codex": "AZURE_GPT_5_1_CODEX_DEPLOYMENT",
+ "gpt-5.1-codex-mini": "AZURE_GPT_5_1_CODEX_MINI_DEPLOYMENT",
+ "gpt-5.1-codex-max": "AZURE_GPT_5_1_CODEX_MAX_DEPLOYMENT",
+ "gpt-5.2": "AZURE_GPT_5_2_DEPLOYMENT",
+ "gpt-5.2-chat": "AZURE_GPT_5_2_CHAT_DEPLOYMENT",
+ "gpt-5.2-codex": "AZURE_GPT_5_2_CODEX_DEPLOYMENT",
+ "gpt-5.2-pro": "AZURE_GPT_5_2_PRO_DEPLOYMENT",
+ # DeepSeek models (Azure AI Foundry)
+ "DeepSeek-V3-0324": "AZURE_DEEPSEEK_V3_DEPLOYMENT",
+ "DeepSeek-V3.1": "AZURE_DEEPSEEK_V3_1_DEPLOYMENT",
+ "DeepSeek-V3.2": "AZURE_DEEPSEEK_V3_2_DEPLOYMENT",
+ "DeepSeek-V3.2-Speciale": "AZURE_DEEPSEEK_V3_2_SPECIALE_DEPLOYMENT",
+ "DeepSeek-R1": "AZURE_DEEPSEEK_R1_DEPLOYMENT",
+ "DeepSeek-R1-0528": "AZURE_DEEPSEEK_R1_0528_DEPLOYMENT",
+ # Mistral models (Azure AI Foundry)
+ "Mistral-Large-3": "AZURE_MISTRAL_LARGE_3_DEPLOYMENT",
+ # Meta Llama models (Azure AI Foundry)
+ "Llama-4-Maverick-17B-128E-Instruct-FP8": "AZURE_LLAMA_4_MAVERICK_DEPLOYMENT",
+ "Llama-3.3-70B-Instruct": "AZURE_LLAMA_3_3_70B_DEPLOYMENT",
+ # Cohere models (Azure AI Foundry)
+ "Cohere-command-a": "AZURE_COHERE_COMMAND_A_DEPLOYMENT",
+ # xAI Grok models (Azure AI Foundry)
+ "grok-3": "AZURE_GROK_3_DEPLOYMENT",
+ "grok-3-mini": "AZURE_GROK_3_MINI_DEPLOYMENT",
+ "grok-4": "AZURE_GROK_4_DEPLOYMENT",
+ "grok-4-fast-reasoning": "AZURE_GROK_4_FAST_REASONING_DEPLOYMENT",
+ "grok-4-fast-non-reasoning": "AZURE_GROK_4_FAST_NON_REASONING_DEPLOYMENT",
+ "grok-code-fast-1": "AZURE_GROK_CODE_FAST_1_DEPLOYMENT",
+ # Moonshot AI models (Azure AI Foundry)
+ "Kimi-K2-Thinking": "AZURE_KIMI_K2_THINKING_DEPLOYMENT",
+ }
+
+
+ class AzureProvider(BaseProvider):
+ def get_supported_models(self) -> Collection[LLMModel]:
+ supported: Set[LLMModel] = set()
+ for model_name in AZURE_DEPLOYMENT_ENV_VARS.keys():
+ try:
+ model = ModelRegistry.from_name(model_name)
+ supported.add(model)
+ except ValueError:
+ pass
+ return supported
+
+ def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+ return {
+ "api_key": self._get_required_env("AZURE_API_KEY"),
+ "api_version": self._get_required_env("AZURE_API_VERSION"),
+ "azure_endpoint": self._get_required_env("AZURE_ENDPOINT"),
+ }
+
+ def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+ full_config["deployment_name"] = self._get_deployment_name(config.model.name)
+ return AzureChatOpenAI(**full_config)
+
+ def _get_deployment_name(self, model_name: str) -> str:
+ env_var = AZURE_DEPLOYMENT_ENV_VARS.get(model_name)
+ if not env_var:
+ raise ValueError(f"Model '{model_name}' not supported on Azure. Add it to AZURE_DEPLOYMENT_ENV_VARS.")
+ return os.environ.get(env_var, model_name)
+
+ def _get_required_env(self, key: str) -> str:
+ value = os.environ.get(key)
+ if not value:
+ raise ValueError(f"Environment variable {key} is required but not set")
+ return value
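AzureProvider reads a shared endpoint, key and API version, then maps each model onto a deployment name from the table above, falling back to the model name itself when the env var is unset. A sketch of wiring it up; the endpoint, key, version and deployment values are placeholders, and the factory-level kwargs from get_model_specific_config are not merged in here:

import os
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry
from spaik_sdk.models.providers.azure_provider import AzureProvider

os.environ["AZURE_API_KEY"] = "<key>"
os.environ["AZURE_API_VERSION"] = "<api-version>"
os.environ["AZURE_ENDPOINT"] = "https://<resource>.openai.azure.com"
os.environ["AZURE_DEEPSEEK_R1_DEPLOYMENT"] = "deepseek-r1-prod"  # optional per-model override

provider = AzureProvider()
config = LLMConfig(model=ModelRegistry.DEEPSEEK_R1)  # assumes LLMConfig takes model as a keyword
full_config = provider.get_model_config(config)
chat_model = provider.create_langchain_model(config, full_config)  # AzureChatOpenAI under the hood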
@@ -58,5 +58,21 @@ class BaseProvider(ABC):
  from spaik_sdk.models.providers.ollama_provider import OllamaProvider

  return OllamaProvider()
+ elif provider_type == ProviderType.DEEPSEEK:
+ from spaik_sdk.models.providers.deepseek_provider import DeepSeekProvider
+
+ return DeepSeekProvider()
+ elif provider_type == ProviderType.XAI:
+ from spaik_sdk.models.providers.xai_provider import XAIProvider
+
+ return XAIProvider()
+ elif provider_type == ProviderType.COHERE:
+ from spaik_sdk.models.providers.cohere_provider import CohereProvider
+
+ return CohereProvider()
+ elif provider_type == ProviderType.MISTRAL:
+ from spaik_sdk.models.providers.mistral_provider import MistralProvider
+
+ return MistralProvider()
  else:
  raise ValueError(f"Unsupported provider type: {provider_type}")
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_cohere import ChatCohere
+ from langchain_core.language_models.chat_models import BaseChatModel
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.cohere_factory import CohereModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class CohereProvider(BaseProvider):
+ def get_supported_models(self) -> Collection[LLMModel]:
+ return CohereModelFactory.MODELS
+
+ def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+ return {
+ "cohere_api_key": credentials_provider.get_provider_key("cohere"),
+ }
+
+ def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+ return ChatCohere(**full_config)
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_deepseek import ChatDeepSeek
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class DeepSeekProvider(BaseProvider):
+ def get_supported_models(self) -> Collection[LLMModel]:
+ return DeepSeekModelFactory.MODELS
+
+ def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+ return {
+ "api_key": credentials_provider.get_provider_key("deepseek"),
+ }
+
+ def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+ return ChatDeepSeek(**full_config)
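The direct (non-Azure) providers pull API keys from the SDK's credentials provider rather than reading environment variables themselves. A sketch of standing one up, assuming the credentials provider is configured with a key for "deepseek"; the factory kwargs (model name, temperature) are merged in by hand here purely for illustration:

from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry
from spaik_sdk.models.providers.deepseek_provider import DeepSeekProvider

config = LLMConfig(model=ModelRegistry.DEEPSEEK_V3_2)  # assumes LLMConfig takes model as a keyword
provider = DeepSeekProvider()
full_config = {
    **provider.get_model_config(config),                         # {"api_key": ...}
    **DeepSeekModelFactory().get_model_specific_config(config),  # {"model": ..., "temperature": ...}
}
chat_model = provider.create_langchain_model(config, full_config)  # ChatDeepSeek instance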
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_mistralai import ChatMistralAI
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.mistral_factory import MistralModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class MistralProvider(BaseProvider):
+ def get_supported_models(self) -> Collection[LLMModel]:
+ return MistralModelFactory.MODELS
+
+ def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+ return {
+ "api_key": credentials_provider.get_provider_key("mistral"),
+ }
+
+ def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+ return ChatMistralAI(**full_config)
@@ -9,6 +9,10 @@ class ProviderType(Enum):
  OPENAI_DIRECT = "openai"
  GOOGLE = "google"
  OLLAMA = "ollama"
+ DEEPSEEK = "deepseek"
+ XAI = "xai"
+ COHERE = "cohere"
+ MISTRAL = "mistral"

  @classmethod
  def from_name(cls, name: str) -> "ProviderType":
@@ -39,6 +43,14 @@ class ProviderType(Enum):
  return cls.GOOGLE
  elif family_lower == "ollama":
  return cls.OLLAMA
+ elif family_lower == "deepseek":
+ return cls.DEEPSEEK
+ elif family_lower == "xai":
+ return cls.XAI
+ elif family_lower == "cohere":
+ return cls.COHERE
+ elif family_lower == "mistral":
+ return cls.MISTRAL
  else:
  raise ValueError(f"Unknown model family: {family}")

@@ -51,6 +63,11 @@ PROVIDER_ALIASES = {
  "openai": ProviderType.OPENAI_DIRECT,
  "google": ProviderType.GOOGLE,
  "gemini": ProviderType.GOOGLE,
+ "deepseek": ProviderType.DEEPSEEK,
+ "xai": ProviderType.XAI,
+ "grok": ProviderType.XAI,
+ "cohere": ProviderType.COHERE,
+ "mistral": ProviderType.MISTRAL,
  }

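Provider routing keys off the same lowercase family strings added to LLMFamilies, and PROVIDER_ALIASES lets shorthand like "grok" select the xAI provider. A sketch using only the enum members and alias table shown above:

from spaik_sdk.models.providers.provider_type import PROVIDER_ALIASES, ProviderType

print(PROVIDER_ALIASES["grok"])      # ProviderType.XAI
print(PROVIDER_ALIASES["deepseek"])  # ProviderType.DEEPSEEK
print(ProviderType.MISTRAL.value)    # "mistral"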