abstractcore 2.4.5.tar.gz → 2.4.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. {abstractcore-2.4.5 → abstractcore-2.4.6}/PKG-INFO +59 -9
  2. {abstractcore-2.4.5 → abstractcore-2.4.6}/README.md +58 -8
  3. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/interface.py +7 -0
  4. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/session.py +27 -2
  5. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/anthropic_provider.py +14 -2
  6. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/base.py +24 -0
  7. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/huggingface_provider.py +23 -9
  8. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/lmstudio_provider.py +6 -1
  9. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/mlx_provider.py +20 -7
  10. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/ollama_provider.py +6 -1
  11. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/openai_provider.py +6 -2
  12. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/common_tools.py +651 -1
  13. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/version.py +1 -1
  14. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore.egg-info/PKG-INFO +59 -9
  15. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore.egg-info/SOURCES.txt +2 -0
  16. abstractcore-2.4.6/tests/test_seed_determinism.py +354 -0
  17. abstractcore-2.4.6/tests/test_seed_temperature_basic.py +189 -0
  18. {abstractcore-2.4.5 → abstractcore-2.4.6}/LICENSE +0 -0
  19. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/__init__.py +0 -0
  20. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/apps/__init__.py +0 -0
  21. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/apps/__main__.py +0 -0
  22. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/apps/app_config_utils.py +0 -0
  23. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/apps/extractor.py +0 -0
  24. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/apps/judge.py +0 -0
  25. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/apps/summarizer.py +0 -0
  26. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/architectures/__init__.py +0 -0
  27. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/architectures/detection.py +0 -0
  28. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/architectures/enums.py +0 -0
  29. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/assets/architecture_formats.json +0 -0
  30. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/assets/model_capabilities.json +0 -0
  31. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/assets/session_schema.json +0 -0
  32. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/cli/__init__.py +0 -0
  33. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/cli/main.py +0 -0
  34. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/cli/vision_config.py +0 -0
  35. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/__init__.py +0 -0
  36. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/enums.py +0 -0
  37. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/factory.py +0 -0
  38. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/retry.py +0 -0
  39. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/types.py +0 -0
  40. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/embeddings/__init__.py +0 -0
  41. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/embeddings/manager.py +0 -0
  42. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/embeddings/models.py +0 -0
  43. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/events/__init__.py +0 -0
  44. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/exceptions/__init__.py +0 -0
  45. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/__init__.py +0 -0
  46. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/auto_handler.py +0 -0
  47. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/base.py +0 -0
  48. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/capabilities.py +0 -0
  49. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/handlers/__init__.py +0 -0
  50. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/handlers/anthropic_handler.py +0 -0
  51. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/handlers/local_handler.py +0 -0
  52. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/handlers/openai_handler.py +0 -0
  53. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/processors/__init__.py +0 -0
  54. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/processors/image_processor.py +0 -0
  55. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/processors/office_processor.py +0 -0
  56. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/processors/pdf_processor.py +0 -0
  57. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/processors/text_processor.py +0 -0
  58. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/types.py +0 -0
  59. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/utils/__init__.py +0 -0
  60. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/utils/image_scaler.py +0 -0
  61. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/media/vision_fallback.py +0 -0
  62. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/processing/__init__.py +0 -0
  63. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/processing/basic_extractor.py +0 -0
  64. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/processing/basic_judge.py +0 -0
  65. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/processing/basic_summarizer.py +0 -0
  66. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/__init__.py +0 -0
  67. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/mock_provider.py +0 -0
  68. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/registry.py +0 -0
  69. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/streaming.py +0 -0
  70. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/server/__init__.py +0 -0
  71. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/server/app.py +0 -0
  72. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/structured/__init__.py +0 -0
  73. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/structured/handler.py +0 -0
  74. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/structured/retry.py +0 -0
  75. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/__init__.py +0 -0
  76. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/core.py +0 -0
  77. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/handler.py +0 -0
  78. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/parser.py +0 -0
  79. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/registry.py +0 -0
  80. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/syntax_rewriter.py +0 -0
  81. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/tools/tag_rewriter.py +0 -0
  82. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/__init__.py +0 -0
  83. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/cli.py +0 -0
  84. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/message_preprocessor.py +0 -0
  85. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/self_fixes.py +0 -0
  86. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/structured_logging.py +0 -0
  87. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/utils/token_utils.py +0 -0
  88. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore.egg-info/dependency_links.txt +0 -0
  89. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore.egg-info/entry_points.txt +0 -0
  90. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore.egg-info/requires.txt +0 -0
  91. {abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore.egg-info/top_level.txt +0 -0
  92. {abstractcore-2.4.5 → abstractcore-2.4.6}/pyproject.toml +0 -0
  93. {abstractcore-2.4.5 → abstractcore-2.4.6}/setup.cfg +0 -0
  94. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_agentic_cli_compatibility.py +0 -0
  95. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_all_specified_providers.py +0 -0
  96. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_basic_session.py +0 -0
  97. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_basic_summarizer.py +0 -0
  98. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_cli_media.py +0 -0
  99. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_complete_integration.py +0 -0
  100. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_comprehensive_events.py +0 -0
  101. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_consistency.py +0 -0
  102. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_core_components.py +0 -0
  103. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_critical_streaming_tool_fix.py +0 -0
  104. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_debug_server.py +0 -0
  105. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_direct_vs_server.py +0 -0
  106. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings.py +0 -0
  107. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_integration.py +0 -0
  108. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_llm_integration.py +0 -0
  109. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_matrix_operations.py +0 -0
  110. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_no_mock.py +0 -0
  111. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_real.py +0 -0
  112. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_semantic_validation.py +0 -0
  113. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_embeddings_simple.py +0 -0
  114. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_enhanced_prompt.py +0 -0
  115. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_environment_variable_tool_call_tags.py +0 -0
  116. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_factory.py +0 -0
  117. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_final_accuracy.py +0 -0
  118. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_final_comprehensive.py +0 -0
  119. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_final_graceful_errors.py +0 -0
  120. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_fixed_media.py +0 -0
  121. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_fixed_prompt.py +0 -0
  122. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_graceful_fallback.py +0 -0
  123. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_import_debug.py +0 -0
  124. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_integrated_functionality.py +0 -0
  125. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_lmstudio_context.py +0 -0
  126. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_media_import.py +0 -0
  127. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_media_server.py +0 -0
  128. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_ollama_tool_role_fix.py +0 -0
  129. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_openai_conversion_manual.py +0 -0
  130. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_openai_format_bug.py +0 -0
  131. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_openai_format_conversion.py +0 -0
  132. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_openai_media_integration.py +0 -0
  133. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_progressive_complexity.py +0 -0
  134. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_provider_basic_session.py +0 -0
  135. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_provider_connectivity.py +0 -0
  136. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_provider_simple_generation.py +0 -0
  137. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_provider_streaming.py +0 -0
  138. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_provider_token_translation.py +0 -0
  139. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_provider_tool_detection.py +0 -0
  140. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_providers.py +0 -0
  141. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_providers_comprehensive.py +0 -0
  142. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_providers_simple.py +0 -0
  143. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_real_models_comprehensive.py +0 -0
  144. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_retry_observability.py +0 -0
  145. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_retry_strategy.py +0 -0
  146. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_sensory_prompting.py +0 -0
  147. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_server_debug.py +0 -0
  148. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_server_embeddings_real.py +0 -0
  149. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_server_integration.py +0 -0
  150. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_stream_tool_calling.py +0 -0
  151. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_streaming_enhancements.py +0 -0
  152. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_streaming_tag_rewriting.py +0 -0
  153. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_structured_integration.py +0 -0
  154. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_structured_output.py +0 -0
  155. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_syntax_rewriter.py +0 -0
  156. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_text_only_model_experience.py +0 -0
  157. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_tool_calling.py +0 -0
  158. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_tool_execution_separation.py +0 -0
  159. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_unified_streaming.py +0 -0
  160. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_unload_memory.py +0 -0
  161. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_user_scenario_validation.py +0 -0
  162. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_vision_accuracy.py +0 -0
  163. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_vision_comprehensive.py +0 -0
  164. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_vision_fallback_improvement.py +0 -0
  165. {abstractcore-2.4.5 → abstractcore-2.4.6}/tests/test_wrong_model_fallback.py +0 -0
{abstractcore-2.4.5 → abstractcore-2.4.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.4.5
+Version: 2.4.6
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -122,6 +122,21 @@ response = llm.generate("What is the capital of France?")
 print(response.content)
 ```
 
+### Deterministic Generation
+
+```python
+from abstractcore import create_llm
+
+# Deterministic outputs with seed + temperature=0
+llm = create_llm("openai", model="gpt-3.5-turbo", seed=42, temperature=0.0)
+
+# These will produce identical outputs
+response1 = llm.generate("Write exactly 3 words about coding")
+response2 = llm.generate("Write exactly 3 words about coding")
+print(f"Response 1: {response1.content}")  # "Innovative, challenging, rewarding."
+print(f"Response 2: {response2.content}")  # "Innovative, challenging, rewarding."
+```
+
 ### Tool Calling
 
 ```python
@@ -140,6 +155,39 @@ response = llm.generate(
 print(response.content)
 ```
 
+### Built-in Tools
+
+AbstractCore includes a comprehensive set of ready-to-use tools for common tasks:
+
+```python
+from abstractcore.tools.common_tools import fetch_url, search_files, read_file
+
+# Intelligent web content fetching with automatic parsing
+result = fetch_url("https://api.github.com/repos/python/cpython")
+# Automatically detects JSON, HTML, images, PDFs, etc. and provides structured analysis
+
+# File system operations
+files = search_files("def.*fetch", ".", file_pattern="*.py")  # Find function definitions
+content = read_file("config.json")  # Read file contents
+
+# Use with any LLM
+llm = create_llm("anthropic", model="claude-3-5-haiku-latest")
+response = llm.generate(
+    "Analyze this API response and summarize the key information",
+    tools=[fetch_url]
+)
+```
+
+**Available Tools:**
+- `fetch_url` - Intelligent web content fetching with automatic content type detection and parsing
+- `search_files` - Search for text patterns inside files using regex
+- `list_files` - Find and list files by names/paths using glob patterns
+- `read_file` - Read file contents with optional line range selection
+- `write_file` - Write content to files with directory creation
+- `edit_file` - Edit files using pattern matching and replacement
+- `web_search` - Search the web using DuckDuckGo
+- `execute_command` - Execute shell commands safely with security controls
+
 ### Session Management
 
 ```python
@@ -228,14 +276,16 @@ response = llm.generate(
 
 ## Supported Providers
 
-| Provider | Status | Setup |
-|----------|--------|-------|
-| **OpenAI** | Full | [Get API key](docs/prerequisites.md#openai-setup) |
-| **Anthropic** | Full | [Get API key](docs/prerequisites.md#anthropic-setup) |
-| **Ollama** | Full | [Install guide](docs/prerequisites.md#ollama-setup) |
-| **LMStudio** | Full | [Install guide](docs/prerequisites.md#lmstudio-setup) |
-| **MLX** | Full | [Setup guide](docs/prerequisites.md#mlx-setup) |
-| **HuggingFace** | Full | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+| Provider | Status | Seed Support | Setup |
+|----------|--------|--------------|-------|
+| **OpenAI** | Full | ✅ Native | [Get API key](docs/prerequisites.md#openai-setup) |
+| **Anthropic** | Full | ⚠️ Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
+| **Ollama** | Full | ✅ Native | [Install guide](docs/prerequisites.md#ollama-setup) |
+| **LMStudio** | Full | ✅ Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
+| **MLX** | Full | ✅ Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
+| **HuggingFace** | Full | ✅ Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+
+*Anthropic's API doesn't support a seed parameter; a warning is issued when one is provided. Use `temperature=0.0` for more consistent outputs.
 
 ## Server Mode (Optional HTTP REST API)
 
{abstractcore-2.4.5 → abstractcore-2.4.6}/README.md

@@ -28,6 +28,21 @@ response = llm.generate("What is the capital of France?")
 print(response.content)
 ```
 
+### Deterministic Generation
+
+```python
+from abstractcore import create_llm
+
+# Deterministic outputs with seed + temperature=0
+llm = create_llm("openai", model="gpt-3.5-turbo", seed=42, temperature=0.0)
+
+# These will produce identical outputs
+response1 = llm.generate("Write exactly 3 words about coding")
+response2 = llm.generate("Write exactly 3 words about coding")
+print(f"Response 1: {response1.content}")  # "Innovative, challenging, rewarding."
+print(f"Response 2: {response2.content}")  # "Innovative, challenging, rewarding."
+```
+
 ### Tool Calling
 
 ```python
@@ -46,6 +61,39 @@ response = llm.generate(
 print(response.content)
 ```
 
+### Built-in Tools
+
+AbstractCore includes a comprehensive set of ready-to-use tools for common tasks:
+
+```python
+from abstractcore.tools.common_tools import fetch_url, search_files, read_file
+
+# Intelligent web content fetching with automatic parsing
+result = fetch_url("https://api.github.com/repos/python/cpython")
+# Automatically detects JSON, HTML, images, PDFs, etc. and provides structured analysis
+
+# File system operations
+files = search_files("def.*fetch", ".", file_pattern="*.py")  # Find function definitions
+content = read_file("config.json")  # Read file contents
+
+# Use with any LLM
+llm = create_llm("anthropic", model="claude-3-5-haiku-latest")
+response = llm.generate(
+    "Analyze this API response and summarize the key information",
+    tools=[fetch_url]
+)
+```
+
+**Available Tools:**
+- `fetch_url` - Intelligent web content fetching with automatic content type detection and parsing
+- `search_files` - Search for text patterns inside files using regex
+- `list_files` - Find and list files by names/paths using glob patterns
+- `read_file` - Read file contents with optional line range selection
+- `write_file` - Write content to files with directory creation
+- `edit_file` - Edit files using pattern matching and replacement
+- `web_search` - Search the web using DuckDuckGo
+- `execute_command` - Execute shell commands safely with security controls
+
 ### Session Management
 
 ```python
@@ -134,14 +182,16 @@ response = llm.generate(
 
 ## Supported Providers
 
-| Provider | Status | Setup |
-|----------|--------|-------|
-| **OpenAI** | Full | [Get API key](docs/prerequisites.md#openai-setup) |
-| **Anthropic** | Full | [Get API key](docs/prerequisites.md#anthropic-setup) |
-| **Ollama** | Full | [Install guide](docs/prerequisites.md#ollama-setup) |
-| **LMStudio** | Full | [Install guide](docs/prerequisites.md#lmstudio-setup) |
-| **MLX** | Full | [Setup guide](docs/prerequisites.md#mlx-setup) |
-| **HuggingFace** | Full | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+| Provider | Status | Seed Support | Setup |
+|----------|--------|--------------|-------|
+| **OpenAI** | Full | ✅ Native | [Get API key](docs/prerequisites.md#openai-setup) |
+| **Anthropic** | Full | ⚠️ Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
+| **Ollama** | Full | ✅ Native | [Install guide](docs/prerequisites.md#ollama-setup) |
+| **LMStudio** | Full | ✅ Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
+| **MLX** | Full | ✅ Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
+| **HuggingFace** | Full | ✅ Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+
+*Anthropic's API doesn't support a seed parameter; a warning is issued when one is provided. Use `temperature=0.0` for more consistent outputs.
 
 ## Server Mode (Optional HTTP REST API)
 
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/interface.py

@@ -70,6 +70,8 @@ class AbstractCoreInterface(ABC):
                  max_tokens: Optional[int] = None,
                  max_input_tokens: Optional[int] = None,
                  max_output_tokens: int = 2048,
+                 temperature: float = 0.7,
+                 seed: Optional[int] = None,
                  debug: bool = False,
                  **kwargs):
         self.model = model
@@ -79,6 +81,11 @@ class AbstractCoreInterface(ABC):
         self.max_tokens = max_tokens
         self.max_input_tokens = max_input_tokens
         self.max_output_tokens = max_output_tokens
+
+        # Unified generation parameters
+        self.temperature = temperature
+        self.seed = seed
+
         self.debug = debug
 
         # Validate token parameters
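
The two new constructor parameters above become instance-wide defaults. A minimal sketch of how they surface, assuming the factory forwards them to `AbstractCoreInterface.__init__` as shown and that the bundled mock provider (`abstractcore/providers/mock_provider.py` in the file list) is registered under the key `"mock"`:

```python
from abstractcore import create_llm

# temperature/seed land on the instance and act as defaults for every call
llm = create_llm("mock", model="test-model", temperature=0.2, seed=123)
print(llm.temperature)  # 0.2 - used whenever a call passes no temperature
print(llm.seed)         # 123 - used whenever a call passes no seed
```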
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/core/session.py

@@ -32,8 +32,23 @@ class BasicSession:
                  tool_timeout: Optional[float] = None,
                  recovery_timeout: Optional[float] = None,
                  auto_compact: bool = False,
-                 auto_compact_threshold: int = 6000):
-        """Initialize basic session"""
+                 auto_compact_threshold: int = 6000,
+                 temperature: Optional[float] = None,
+                 seed: Optional[int] = None):
+        """Initialize basic session
+
+        Args:
+            provider: LLM provider instance
+            system_prompt: System prompt for the session
+            tools: List of available tools
+            timeout: HTTP request timeout
+            tool_timeout: Tool execution timeout
+            recovery_timeout: Circuit breaker recovery timeout
+            auto_compact: Enable automatic conversation compaction
+            auto_compact_threshold: Token threshold for auto-compaction
+            temperature: Default temperature for generation (0.0-1.0)
+            seed: Default seed for deterministic generation
+        """
 
         self.provider = provider
         self.id = str(uuid.uuid4())
@@ -45,6 +60,10 @@ class BasicSession:
         self.auto_compact_threshold = auto_compact_threshold
         self._original_session = None  # Track if this is a compacted session
 
+        # Store session-level generation parameters
+        self.temperature = temperature
+        self.seed = seed
+
         # Optional analytics fields
         self.summary = None
         self.assessment = None
@@ -189,6 +208,12 @@ class BasicSession:
         # Extract media parameter explicitly (fix for media parameter passing)
         media = kwargs.pop('media', None)
 
+        # Add session-level parameters if not overridden in kwargs
+        if 'temperature' not in kwargs and self.temperature is not None:
+            kwargs['temperature'] = self.temperature
+        if 'seed' not in kwargs and self.seed is not None:
+            kwargs['seed'] = self.seed
+
         # Call provider
         response = self.provider.generate(
             prompt=prompt,
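
A hedged sketch of the session-level defaults introduced above; the `kwargs` injection means an explicit per-call value always wins. The import path follows the file list, while the `generate()` call shape and the provider/model names are assumptions for illustration:

```python
from abstractcore import create_llm
from abstractcore.core.session import BasicSession

llm = create_llm("ollama", model="llama3")  # illustrative; requires a local Ollama
session = BasicSession(provider=llm, temperature=0.0, seed=7)

# Session defaults (temperature=0.0, seed=7) are injected into kwargs
session.generate("Summarize our discussion so far")

# An explicit kwarg is left untouched and overrides the session default
session.generate("Brainstorm alternatives", temperature=0.9)
```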
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/anthropic_provider.py

@@ -47,8 +47,7 @@ class AnthropicProvider(BaseProvider):
         # Initialize tool handler
         self.tool_handler = UniversalToolHandler(model)
 
-        # Store configuration (remove duplicate max_tokens)
-        self.temperature = kwargs.get("temperature", 0.7)
+        # Store provider-specific configuration
         self.top_p = kwargs.get("top_p", 1.0)
         self.top_k = kwargs.get("top_k", None)
 
@@ -132,6 +131,19 @@ class AnthropicProvider(BaseProvider):
         if kwargs.get("top_k") or self.top_k:
             call_params["top_k"] = kwargs.get("top_k", self.top_k)
 
+        # Handle seed parameter (Anthropic doesn't support seed natively)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            import warnings
+            warnings.warn(
+                f"Seed parameter ({seed_value}) is not supported by Anthropic Claude API. "
+                f"For deterministic outputs, use temperature=0.0 which may provide more consistent results, "
+                f"though true determinism is not guaranteed.",
+                UserWarning,
+                stacklevel=3
+            )
+            self.logger.warning(f"Seed {seed_value} requested but not supported by Anthropic API")
+
         # Handle structured output using the "tool trick"
         structured_tool_name = None
         if response_model and PYDANTIC_AVAILABLE:
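
Since Anthropic has no native seed support, the provider warns rather than fails. A minimal sketch of observing that behavior, assuming valid credentials; the model name is illustrative:

```python
import warnings

from abstractcore import create_llm

llm = create_llm("anthropic", model="claude-3-5-haiku-latest", seed=42)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm.generate("Say hello")  # seed is dropped; a UserWarning is emitted instead
    assert any(issubclass(w.category, UserWarning) for w in caught)
```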
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/base.py

@@ -570,8 +570,32 @@ class BaseProvider(AbstractCoreInterface, ABC):
         result_kwargs = kwargs.copy()
         result_kwargs["max_output_tokens"] = effective_max_output
 
+        # Add unified generation parameters with fallback hierarchy: kwargs → instance → defaults
+        result_kwargs["temperature"] = result_kwargs.get("temperature", self.temperature)
+        if self.seed is not None:
+            result_kwargs["seed"] = result_kwargs.get("seed", self.seed)
+
         return result_kwargs
 
+    def _extract_generation_params(self, **kwargs) -> Dict[str, Any]:
+        """
+        Extract generation parameters with consistent fallback hierarchy.
+
+        Returns:
+            Dict containing temperature, seed, and other generation parameters
+        """
+        params = {}
+
+        # Temperature (always present)
+        params["temperature"] = kwargs.get("temperature", self.temperature)
+
+        # Seed (only if not None)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            params["seed"] = seed_value
+
+        return params
+
     def _get_provider_max_tokens_param(self, kwargs: Dict[str, Any]) -> int:
         """
         Extract the appropriate max tokens parameter for this provider.
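
The fallback hierarchy (per-call kwargs → instance attribute → hard default) is the core of this change. A standalone, simplified re-implementation that mirrors `_extract_generation_params` above and runs without AbstractCore installed:

```python
from typing import Any, Dict, Optional


class ProviderSketch:
    """Simplified stand-in for BaseProvider's parameter resolution."""

    def __init__(self, temperature: float = 0.7, seed: Optional[int] = None):
        self.temperature = temperature  # instance-level default
        self.seed = seed

    def _extract_generation_params(self, **kwargs) -> Dict[str, Any]:
        params: Dict[str, Any] = {
            "temperature": kwargs.get("temperature", self.temperature)
        }
        seed_value = kwargs.get("seed", self.seed)
        if seed_value is not None:  # seed is forwarded only when set
            params["seed"] = seed_value
        return params


p = ProviderSketch(temperature=0.3, seed=42)
print(p._extract_generation_params())                 # {'temperature': 0.3, 'seed': 42}
print(p._extract_generation_params(temperature=0.9))  # kwargs win; seed kept
print(ProviderSketch()._extract_generation_params())  # {'temperature': 0.7}, no seed
```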
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/huggingface_provider.py

@@ -68,6 +68,7 @@ class HuggingFaceProvider(BaseProvider):
         # Initialize tool handler
         self.tool_handler = UniversalToolHandler(model)
 
+        # Store provider-specific configuration
         self.n_gpu_layers = n_gpu_layers
         self.model_type = None  # Will be "transformers" or "gguf"
         self.device = device
@@ -537,14 +538,15 @@ class HuggingFaceProvider(BaseProvider):
         # Generation parameters using unified system
         generation_kwargs = self._prepare_generation_kwargs(**kwargs)
         max_new_tokens = self._get_provider_max_tokens_param(generation_kwargs)
-        temperature = kwargs.get("temperature", 0.7)
+        temperature = kwargs.get("temperature", self.temperature)
         top_p = kwargs.get("top_p", 0.9)
+        seed_value = kwargs.get("seed", self.seed)
 
         try:
             if stream:
-                return self._stream_generate_transformers_with_tools(input_text, max_new_tokens, temperature, top_p, tools, kwargs.get('tool_call_tags'))
+                return self._stream_generate_transformers_with_tools(input_text, max_new_tokens, temperature, top_p, tools, kwargs.get('tool_call_tags'), seed_value)
             else:
-                response = self._single_generate_transformers(input_text, max_new_tokens, temperature, top_p)
+                response = self._single_generate_transformers(input_text, max_new_tokens, temperature, top_p, seed_value)
 
                 # Handle tool execution for prompted models
                 if tools and self.tool_handler.supports_prompted and response.content:
@@ -651,11 +653,16 @@ class HuggingFaceProvider(BaseProvider):
         generation_kwargs = {
             "messages": chat_messages,
             "max_tokens": max_output_tokens,  # This is max_output_tokens for llama-cpp
-            "temperature": kwargs.get("temperature", 0.7),
+            "temperature": kwargs.get("temperature", self.temperature),
             "top_p": kwargs.get("top_p", 0.9),
             "stream": stream
         }
 
+        # Add seed if provided (GGUF/llama-cpp supports seed)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            generation_kwargs["seed"] = seed_value
+
         # Handle tools - both native and prompted support
         has_native_tools = False
         if tools:
@@ -846,9 +853,16 @@ class HuggingFaceProvider(BaseProvider):
         )
 
     def _single_generate_transformers(self, input_text: str, max_new_tokens: int,
-                                      temperature: float, top_p: float) -> GenerateResponse:
+                                      temperature: float, top_p: float, seed: Optional[int] = None) -> GenerateResponse:
         """Generate single response using transformers (original implementation)"""
         try:
+            # Set seed for deterministic generation if provided
+            if seed is not None:
+                import torch
+                torch.manual_seed(seed)
+                if torch.cuda.is_available():
+                    torch.cuda.manual_seed_all(seed)
+
             outputs = self.pipeline(
                 input_text,
                 max_new_tokens=max_new_tokens,
@@ -902,11 +916,11 @@ class HuggingFaceProvider(BaseProvider):
         }
 
     def _stream_generate_transformers(self, input_text: str, max_new_tokens: int,
-                                      temperature: float, top_p: float, tool_call_tags: Optional[str] = None) -> Iterator[GenerateResponse]:
+                                      temperature: float, top_p: float, tool_call_tags: Optional[str] = None, seed: Optional[int] = None) -> Iterator[GenerateResponse]:
         """Stream response using transformers (simulated, original implementation) with tool tag rewriting support"""
         try:
             # HuggingFace doesn't have native streaming, so we simulate it
-            full_response = self._single_generate_transformers(input_text, max_new_tokens, temperature, top_p)
+            full_response = self._single_generate_transformers(input_text, max_new_tokens, temperature, top_p, seed)
 
             if full_response.content:
                 # Apply tool tag rewriting if enabled
@@ -1039,12 +1053,12 @@ class HuggingFaceProvider(BaseProvider):
     def _stream_generate_transformers_with_tools(self, input_text: str, max_new_tokens: int,
                                                  temperature: float, top_p: float,
                                                  tools: Optional[List[Dict[str, Any]]] = None,
-                                                 tool_call_tags: Optional[str] = None) -> Iterator[GenerateResponse]:
+                                                 tool_call_tags: Optional[str] = None, seed: Optional[int] = None) -> Iterator[GenerateResponse]:
         """Stream generate with tool execution at the end"""
         collected_content = ""
 
         # Stream the response content
-        for chunk in self._stream_generate_transformers(input_text, max_new_tokens, temperature, top_p, tool_call_tags):
+        for chunk in self._stream_generate_transformers(input_text, max_new_tokens, temperature, top_p, tool_call_tags, seed):
            collected_content += chunk.content
            yield chunk
 
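
For the transformers path, determinism comes from re-seeding torch's global RNG before each generation, as `_single_generate_transformers` now does. A standalone sketch of why that makes repeated sampling reproducible (requires torch; not AbstractCore-specific):

```python
import torch


def sample(seed: int) -> torch.Tensor:
    # Same calls the provider makes before invoking the pipeline
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    return torch.multinomial(torch.ones(50), num_samples=5, replacement=True)


assert torch.equal(sample(42), sample(42))  # identical draws for the same seed
```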
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/lmstudio_provider.py

@@ -196,11 +196,16 @@ class LMStudioProvider(BaseProvider):
             "model": self.model,
             "messages": chat_messages,
             "stream": stream,
-            "temperature": kwargs.get("temperature", 0.7),
+            "temperature": kwargs.get("temperature", self.temperature),
             "max_tokens": max_output_tokens,  # LMStudio uses max_tokens for output tokens
             "top_p": kwargs.get("top_p", 0.9),
         }
 
+        # Add seed if provided (LMStudio supports seed via OpenAI-compatible API)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            payload["seed"] = seed_value
+
         if stream:
             # Return streaming response - BaseProvider will handle tag rewriting via UnifiedStreamProcessor
             return self._stream_generate(payload)
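
Because LMStudio speaks the OpenAI-compatible chat API, the seed travels as a plain top-level field in the request body. A hedged sketch of the resulting HTTP call; the default `localhost:1234` endpoint follows LMStudio's documentation, and the model name is an assumption:

```python
import requests

payload = {
    "model": "qwen2.5-7b-instruct",  # illustrative local model
    "messages": [{"role": "user", "content": "Write exactly 3 words about coding"}],
    "stream": False,
    "temperature": 0.0,
    "max_tokens": 64,
    "top_p": 0.9,
    "seed": 42,                      # present only when a seed was provided
}
r = requests.post("http://localhost:1234/v1/chat/completions", json=payload, timeout=60)
print(r.json()["choices"][0]["message"]["content"])
```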
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/mlx_provider.py

@@ -189,14 +189,15 @@ class MLXProvider(BaseProvider):
         # MLX generation parameters using unified system
         generation_kwargs = self._prepare_generation_kwargs(**kwargs)
         max_tokens = self._get_provider_max_tokens_param(generation_kwargs)
-        temperature = kwargs.get("temperature", 0.7)
+        temperature = kwargs.get("temperature", self.temperature)
         top_p = kwargs.get("top_p", 0.9)
+        seed_value = kwargs.get("seed", self.seed)
 
         try:
             if stream:
-                return self._stream_generate_with_tools(full_prompt, max_tokens, temperature, top_p, tools, kwargs.get('tool_call_tags'))
+                return self._stream_generate_with_tools(full_prompt, max_tokens, temperature, top_p, tools, kwargs.get('tool_call_tags'), seed_value)
             else:
-                response = self._single_generate(full_prompt, max_tokens, temperature, top_p)
+                response = self._single_generate(full_prompt, max_tokens, temperature, top_p, seed_value)
 
                 # Handle tool execution for prompted models
                 if tools and self.tool_handler.supports_prompted and response.content:
@@ -256,9 +257,15 @@ class MLXProvider(BaseProvider):
 
         return full_prompt
 
-    def _single_generate(self, prompt: str, max_tokens: int, temperature: float, top_p: float) -> GenerateResponse:
+    def _single_generate(self, prompt: str, max_tokens: int, temperature: float, top_p: float, seed: Optional[int] = None) -> GenerateResponse:
         """Generate single response"""
 
+        # Handle seed parameter (MLX supports seed via mx.random.seed)
+        if seed is not None:
+            import mlx.core as mx
+            mx.random.seed(seed)
+            self.logger.debug(f"Set MLX random seed to {seed} for deterministic generation")
+
         # Try different MLX API signatures
         try:
             # Try new mlx-lm API
@@ -305,9 +312,15 @@ class MLXProvider(BaseProvider):
             "total_tokens": total_tokens
         }
 
-    def _stream_generate(self, prompt: str, max_tokens: int, temperature: float, top_p: float, tool_call_tags: Optional[str] = None) -> Iterator[GenerateResponse]:
+    def _stream_generate(self, prompt: str, max_tokens: int, temperature: float, top_p: float, tool_call_tags: Optional[str] = None, seed: Optional[int] = None) -> Iterator[GenerateResponse]:
         """Generate real streaming response using MLX stream_generate with tool tag rewriting support"""
         try:
+            # Handle seed parameter (MLX supports seed via mx.random.seed)
+            if seed is not None:
+                import mlx.core as mx
+                mx.random.seed(seed)
+                self.logger.debug(f"Set MLX random seed to {seed} for deterministic streaming generation")
+
             # Initialize tool tag rewriter if needed
             rewriter = None
             buffer = ""
@@ -366,12 +379,12 @@ class MLXProvider(BaseProvider):
     def _stream_generate_with_tools(self, full_prompt: str, max_tokens: int,
                                     temperature: float, top_p: float,
                                     tools: Optional[List[Dict[str, Any]]] = None,
-                                    tool_call_tags: Optional[str] = None) -> Iterator[GenerateResponse]:
+                                    tool_call_tags: Optional[str] = None, seed: Optional[int] = None) -> Iterator[GenerateResponse]:
         """Stream generate with tool execution at the end"""
         collected_content = ""
 
         # Stream the response content
-        for chunk in self._stream_generate(full_prompt, max_tokens, temperature, top_p, tool_call_tags):
+        for chunk in self._stream_generate(full_prompt, max_tokens, temperature, top_p, tool_call_tags, seed):
             collected_content += chunk.content
             yield chunk
 
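
MLX has no per-call seed argument, so the provider seeds the framework's global RNG via `mx.random.seed` before generating. A standalone sketch of that semantics (requires mlx):

```python
import mlx.core as mx

mx.random.seed(42)
a = mx.random.uniform(shape=(3,))
mx.random.seed(42)
b = mx.random.uniform(shape=(3,))
print(mx.array_equal(a, b))  # True - same seed, same draws
```

Seeding global state is also why the provider re-seeds on every call rather than once at construction.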
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/ollama_provider.py

@@ -132,11 +132,16 @@ class OllamaProvider(BaseProvider):
             "model": self.model,
             "stream": stream,
             "options": {
-                "temperature": kwargs.get("temperature", 0.7),
+                "temperature": kwargs.get("temperature", self.temperature),
                 "num_predict": max_output_tokens,  # Ollama uses num_predict for max output tokens
             }
         }
 
+        # Add seed if provided (Ollama supports seed for deterministic outputs)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            payload["options"]["seed"] = seed_value
+
         # Add structured output support (Ollama native JSON schema)
         if response_model and PYDANTIC_AVAILABLE:
             json_schema = response_model.model_json_schema()
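
Ollama accepts the seed inside the `options` object. A hedged sketch of the request body shape the provider builds, posted to Ollama's documented `/api/chat` endpoint; the model name is illustrative and the `messages` field is assembled elsewhere in the provider:

```python
import requests

payload = {
    "model": "llama3",
    "stream": False,
    "messages": [{"role": "user", "content": "Write exactly 3 words about coding"}],
    "options": {
        "temperature": 0.0,
        "num_predict": 64,  # Ollama's name for max output tokens
        "seed": 42,         # added only when a seed was provided
    },
}
r = requests.post("http://localhost:11434/api/chat", json=payload, timeout=60)
print(r.json()["message"]["content"])
```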
{abstractcore-2.4.5 → abstractcore-2.4.6}/abstractcore/providers/openai_provider.py

@@ -50,8 +50,7 @@ class OpenAIProvider(BaseProvider):
         # Preflight check: validate model exists
         self._validate_model_exists()
 
-        # Store configuration (remove duplicate max_tokens)
-        self.temperature = kwargs.get("temperature", 0.7)
+        # Store provider-specific configuration
         self.top_p = kwargs.get("top_p", 1.0)
         self.frequency_penalty = kwargs.get("frequency_penalty", 0.0)
         self.presence_penalty = kwargs.get("presence_penalty", 0.0)
@@ -125,6 +124,11 @@ class OpenAIProvider(BaseProvider):
         call_params["top_p"] = kwargs.get("top_p", self.top_p)
         call_params["frequency_penalty"] = kwargs.get("frequency_penalty", self.frequency_penalty)
         call_params["presence_penalty"] = kwargs.get("presence_penalty", self.presence_penalty)
+
+        # Add seed if provided (OpenAI supports seed for deterministic outputs)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            call_params["seed"] = seed_value
 
         # Handle different token parameter names for different model families
         if self._uses_max_completion_tokens():
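
OpenAI supports the seed natively, so the provider simply forwards it. A minimal end-to-end sketch of the instance-default vs per-call precedence, assuming an `OPENAI_API_KEY` in the environment; the model name comes from the README example above:

```python
from abstractcore import create_llm

llm = create_llm("openai", model="gpt-3.5-turbo", temperature=0.0)

# No instance seed: the "seed" key is omitted from the API call entirely
r1 = llm.generate("Write exactly 3 words about coding")

# Per-call kwargs take precedence, and "seed" is forwarded into call_params
r2 = llm.generate("Write exactly 3 words about coding", seed=42)
print(r1.content, "|", r2.content)
```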