content-core 1.6.0__tar.gz → 1.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of content-core might be problematic.

Files changed (99)
  1. {content_core-1.6.0 → content_core-1.7.0}/CHANGELOG.md +5 -0
  2. {content_core-1.6.0 → content_core-1.7.0}/PKG-INFO +9 -1
  3. {content_core-1.6.0 → content_core-1.7.0}/README.md +8 -0
  4. {content_core-1.6.0 → content_core-1.7.0}/docs/usage.md +197 -0
  5. {content_core-1.6.0 → content_core-1.7.0}/pyproject.toml +1 -1
  6. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/common/state.py +10 -0
  7. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/audio.py +32 -1
  8. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/templated_message.py +0 -1
  9. {content_core-1.6.0 → content_core-1.7.0}/tests/integration/conftest.py +0 -1
  10. content_core-1.7.0/tests/unit/test_audio_model_override.py +451 -0
  11. {content_core-1.6.0 → content_core-1.7.0}/uv.lock +21 -21
  12. content_core-1.6.0/notebooks/extraction.ipynb +0 -194
  13. {content_core-1.6.0 → content_core-1.7.0}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
  14. {content_core-1.6.0 → content_core-1.7.0}/.github/workflows/claude-code-review.yml +0 -0
  15. {content_core-1.6.0 → content_core-1.7.0}/.github/workflows/claude.yml +0 -0
  16. {content_core-1.6.0 → content_core-1.7.0}/.github/workflows/create-tag.yml +0 -0
  17. {content_core-1.6.0 → content_core-1.7.0}/.github/workflows/publish.yml +0 -0
  18. {content_core-1.6.0 → content_core-1.7.0}/.gitignore +0 -0
  19. {content_core-1.6.0 → content_core-1.7.0}/.python-version +0 -0
  20. {content_core-1.6.0 → content_core-1.7.0}/CONTRIBUTING.md +0 -0
  21. {content_core-1.6.0 → content_core-1.7.0}/LICENSE +0 -0
  22. {content_core-1.6.0 → content_core-1.7.0}/Makefile +0 -0
  23. {content_core-1.6.0 → content_core-1.7.0}/docs/macos.md +0 -0
  24. {content_core-1.6.0 → content_core-1.7.0}/docs/mcp.md +0 -0
  25. {content_core-1.6.0 → content_core-1.7.0}/docs/processors.md +0 -0
  26. {content_core-1.6.0 → content_core-1.7.0}/docs/raycast.md +0 -0
  27. {content_core-1.6.0 → content_core-1.7.0}/examples/main.py +0 -0
  28. {content_core-1.6.0 → content_core-1.7.0}/prompts/content/cleanup.jinja +0 -0
  29. {content_core-1.6.0 → content_core-1.7.0}/prompts/content/summarize.jinja +0 -0
  30. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/.eslintrc.json +0 -0
  31. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/CHANGELOG.md +0 -0
  32. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/README.md +0 -0
  33. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/assets/command-icon.png +0 -0
  34. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/package-lock.json +0 -0
  35. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/package.json +0 -0
  36. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/raycast-env.d.ts +0 -0
  37. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/src/extract-content.tsx +0 -0
  38. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/src/quick-extract.tsx +0 -0
  39. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/src/summarize-content.tsx +0 -0
  40. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/src/utils/content-core.ts +0 -0
  41. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/src/utils/types.ts +0 -0
  42. {content_core-1.6.0 → content_core-1.7.0}/raycast-content-core/tsconfig.json +0 -0
  43. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/__init__.py +0 -0
  44. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/cc_config.yaml +0 -0
  45. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/common/__init__.py +0 -0
  46. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/common/exceptions.py +0 -0
  47. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/common/types.py +0 -0
  48. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/common/utils.py +0 -0
  49. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/config.py +0 -0
  50. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/__init__.py +0 -0
  51. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/cleanup/__init__.py +0 -0
  52. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/cleanup/core.py +0 -0
  53. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/extraction/__init__.py +0 -0
  54. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/extraction/graph.py +0 -0
  55. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/identification/__init__.py +0 -0
  56. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/identification/file_detector.py +0 -0
  57. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/summary/__init__.py +0 -0
  58. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/content/summary/core.py +0 -0
  59. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/logging.py +0 -0
  60. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/mcp/__init__.py +0 -0
  61. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/mcp/server.py +0 -0
  62. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/models.py +0 -0
  63. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/models_config.yaml +0 -0
  64. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/notebooks/run.ipynb +0 -0
  65. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/notebooks/urls.ipynb +0 -0
  66. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/docling.py +0 -0
  67. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/office.py +0 -0
  68. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/pdf.py +0 -0
  69. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/text.py +0 -0
  70. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/url.py +0 -0
  71. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/video.py +0 -0
  72. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/processors/youtube.py +0 -0
  73. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/py.typed +0 -0
  74. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/tools/__init__.py +0 -0
  75. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/tools/cleanup.py +0 -0
  76. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/tools/extract.py +0 -0
  77. {content_core-1.6.0 → content_core-1.7.0}/src/content_core/tools/summarize.py +0 -0
  78. {content_core-1.6.0 → content_core-1.7.0}/test_coverage_branch_report.md +0 -0
  79. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.docx +0 -0
  80. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.epub +0 -0
  81. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.md +0 -0
  82. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.mp3 +0 -0
  83. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.mp4 +0 -0
  84. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.pdf +0 -0
  85. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.pptx +0 -0
  86. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.txt +0 -0
  87. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file.xlsx +0 -0
  88. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/file_audio.mp3 +0 -0
  89. {content_core-1.6.0 → content_core-1.7.0}/tests/input_content/new_pdf.pdf +0 -0
  90. {content_core-1.6.0 → content_core-1.7.0}/tests/integration/test_cli.py +0 -0
  91. {content_core-1.6.0 → content_core-1.7.0}/tests/integration/test_extraction.py +0 -0
  92. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_audio_concurrency.py +0 -0
  93. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_config.py +0 -0
  94. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_docling.py +0 -0
  95. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_file_detector.py +0 -0
  96. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_file_detector_critical.py +0 -0
  97. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_file_detector_performance.py +0 -0
  98. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_mcp_server.py +0 -0
  99. {content_core-1.6.0 → content_core-1.7.0}/tests/unit/test_pymupdf_ocr.py +0 -0
@@ -11,6 +11,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  - Pure Python file type detection via the new `FileDetector` class
  - Comprehensive file signature detection for 25+ file formats
  - Smart detection for ZIP-based formats (DOCX, XLSX, PPTX, EPUB)
+ - Custom audio model configuration - override speech-to-text provider and model at runtime
+ - Pass `audio_provider` and `audio_model` parameters through `extract_content()` API
+ - Supports any provider/model combination available through Esperanto library
+ - Maintains full backward compatibility - existing code works unchanged
+ - Includes validation with helpful warnings and error messages
 
  ### Changed
  - File type detection now uses pure Python implementation instead of libmagic
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: content-core
- Version: 1.6.0
+ Version: 1.7.0
  Summary: Extract what matters from any media source. Available as Python Library, macOS Service, CLI and MCP Server
  Author-email: LUIS NOVO <lfnovo@gmail.com>
  License-File: LICENSE
@@ -263,6 +263,14 @@ cleaned_text = await cc.clean("...messy text with [brackets] and extra spaces...
 
  # Summarize content with optional context
  summary = await cc.summarize_content("long article text", context="explain to a child")
+
+ # Extract audio with custom speech-to-text model
+ from content_core.common import ProcessSourceInput
+ result = await cc.extract(ProcessSourceInput(
+     file_path="interview.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
  ```
 
  ## Documentation
@@ -228,6 +228,14 @@ cleaned_text = await cc.clean("...messy text with [brackets] and extra spaces...
 
  # Summarize content with optional context
  summary = await cc.summarize_content("long article text", context="explain to a child")
+
+ # Extract audio with custom speech-to-text model
+ from content_core.common import ProcessSourceInput
+ result = await cc.extract(ProcessSourceInput(
+     file_path="interview.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
  ```
 
  ## Documentation
@@ -362,6 +362,203 @@ result = await cc.extract({"file_path": "conference_talk.mp4"})
  print(result.content) # Full transcript
  ```
 
+ ## Custom Audio Model Configuration
+
+ Content Core allows you to override the default speech-to-text model at runtime, enabling you to choose different AI providers and models based on your specific needs (language support, cost, accuracy, etc.).
+
+ ### Overview
+
+ By default, audio and video files are transcribed using the model configured in `models_config.yaml` (typically OpenAI Whisper-1). You can override this on a per-call basis by specifying both `audio_provider` and `audio_model` parameters.
+
+ **Key Features:**
+ - ✅ **Runtime flexibility**: Choose different models for different use cases
+ - ✅ **Backward compatible**: Existing code works unchanged
+ - ✅ **Multiple providers**: Support for any provider supported by Esperanto
+ - ✅ **Automatic fallback**: Graceful handling of invalid configurations
+
+ ### Basic Usage
+
+ ```python
+ from content_core.common import ProcessSourceInput
+ import content_core as cc
+
+ # Use custom audio model for transcription
+ result = await cc.extract(ProcessSourceInput(
+     file_path="interview.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
+
+ print(result.content) # Transcribed text using specified model
+ ```
+
+ ### Supported Providers
+
+ Content Core uses the Esperanto library for AI model abstraction, which supports multiple providers:
+
+ - **OpenAI**: `provider="openai"`, models: `whisper-1`
+ - **Google**: `provider="google"`, models: `chirp` (if available)
+ - **Other providers**: Any provider supported by Esperanto
+
+ Check the [Esperanto documentation](https://github.com/yourusername/esperanto) for the full list of supported providers and models.
+
+ ### Use Cases
+
+ **Multilingual Transcription:**
+ ```python
+ from content_core.common import ProcessSourceInput
+ import content_core as cc
+
+ # Use a model optimized for a specific language
+ result = await cc.extract(ProcessSourceInput(
+     file_path="spanish_interview.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1" # Whisper supports 99 languages
+ ))
+ ```
+
+ **Cost Optimization:**
+ ```python
+ from content_core.common import ProcessSourceInput
+ import content_core as cc
+
+ # Use different models based on quality requirements
+ # For high-value content, use premium model
+ premium_result = await cc.extract(ProcessSourceInput(
+     file_path="important_meeting.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
+
+ # For casual content, use default or cost-effective model
+ casual_result = await cc.extract(ProcessSourceInput(
+     file_path="casual_recording.mp3"
+     # No custom params = uses default configured model
+ ))
+ ```
+
+ **Video Transcription with Custom Model:**
+ ```python
+ from content_core.common import ProcessSourceInput
+ import content_core as cc
+
+ # Custom model works for video files too (audio is extracted automatically)
+ result = await cc.extract(ProcessSourceInput(
+     file_path="conference_presentation.mp4",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
+ ```
+
+ ### Parameter Requirements
+
+ Both `audio_provider` and `audio_model` must be specified together:
+
+ ```python
+ # ✅ CORRECT: Both parameters provided
+ result = await cc.extract(ProcessSourceInput(
+     file_path="audio.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
+
+ # ✅ CORRECT: Neither parameter (uses default)
+ result = await cc.extract(ProcessSourceInput(
+     file_path="audio.mp3"
+ ))
+
+ # ⚠️ WARNING: Only one parameter (logs warning, uses default)
+ result = await cc.extract(ProcessSourceInput(
+     file_path="audio.mp3",
+     audio_provider="openai" # Missing audio_model
+ ))
+ # Logs: "audio_provider provided without audio_model. Both must be specified together. Falling back to default model."
+ ```
+
+ ### Error Handling
+
+ Content Core gracefully handles invalid model configurations:
+
+ **Invalid Provider:**
+ ```python
+ result = await cc.extract(ProcessSourceInput(
+     file_path="audio.mp3",
+     audio_provider="invalid_provider",
+     audio_model="whisper-1"
+ ))
+ # Logs error and falls back to default model
+ # Transcription continues successfully
+ ```
+
+ **Invalid Model Name:**
+ ```python
+ result = await cc.extract(ProcessSourceInput(
+     file_path="audio.mp3",
+     audio_provider="openai",
+     audio_model="nonexistent-model"
+ ))
+ # Logs error and falls back to default model
+ # Transcription continues successfully
+ ```
+
+ **Error Message Example:**
+ ```
+ ERROR: Failed to create custom audio model 'invalid_provider/whisper-1': Unsupported provider.
+ Check that the provider and model are supported by Esperanto. Falling back to default model.
+ ```
+
+ ### Concurrency Control
+
+ Custom audio models respect the same concurrency limits as the default model (configured via `CCORE_AUDIO_CONCURRENCY` or `set_audio_concurrency()`). This ensures consistent API rate limit handling regardless of which model you use.
+
+ ```python
+ from content_core.config import set_audio_concurrency
+ from content_core.common import ProcessSourceInput
+ import content_core as cc
+
+ # Set concurrency for all transcriptions (default and custom models)
+ set_audio_concurrency(5)
+
+ # Both use the same concurrency limit
+ default_result = await cc.extract(ProcessSourceInput(file_path="audio1.mp3"))
+ custom_result = await cc.extract(ProcessSourceInput(
+     file_path="audio2.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
+ ```
+
+ ### Backward Compatibility
+
+ All existing code continues to work without any changes:
+
+ ```python
+ import content_core as cc
+
+ # Old code (no custom params) - still works perfectly
+ result = await cc.extract("audio.mp3")
+ result = await cc.extract({"file_path": "audio.mp3"})
+
+ # New capability (optional custom params)
+ from content_core.common import ProcessSourceInput
+ result = await cc.extract(ProcessSourceInput(
+     file_path="audio.mp3",
+     audio_provider="openai",
+     audio_model="whisper-1"
+ ))
+ ```
+
+ ### Troubleshooting
+
+ **Issue**: "Both audio_provider and audio_model must be specified together"
+ - **Solution**: Provide both parameters or neither. Don't specify just one.
+
+ **Issue**: "Failed to create custom audio model"
+ - **Solution**: Verify the provider and model are supported by Esperanto. Check your API keys are configured correctly.
+
+ **Issue**: Custom model seems to be ignored
+ - **Solution**: Ensure you're using `ProcessSourceInput` class (not plain dict) when passing custom parameters.
+
  ## File Type Detection
 
  Content Core uses a pure Python implementation for file type detection, eliminating the need for system dependencies like libmagic. This ensures consistent behavior across all platforms (Windows, macOS, Linux).
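The troubleshooting note in the documentation above says custom parameters are only picked up when the input is a `ProcessSourceInput`, not a plain dict. A minimal end-to-end sketch of the two call styles, using only the `cc.extract` / `ProcessSourceInput` API and field names shown in this diff (the file name is a placeholder):

```python
import asyncio

import content_core as cc
from content_core.common import ProcessSourceInput


async def main():
    # A plain dict still works, but transcribes with the default configured model.
    default_result = await cc.extract({"file_path": "audio.mp3"})

    # To override the speech-to-text model, pass a ProcessSourceInput instance
    # with both audio_provider and audio_model set.
    custom_result = await cc.extract(
        ProcessSourceInput(
            file_path="audio.mp3",
            audio_provider="openai",
            audio_model="whisper-1",
        )
    )

    print(default_result.content)
    print(custom_result.content)


asyncio.run(main())
```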
@@ -1,6 +1,6 @@
  [project]
  name = "content-core"
- version = "1.6.0"
+ version = "1.7.0"
  description = "Extract what matters from any media source. Available as Python Library, macOS Service, CLI and MCP Server"
  readme = "README.md"
  homepage = "https://github.com/lfnovo/content-core"
@@ -27,6 +27,14 @@ class ProcessSourceState(BaseModel):
          default=None,
          description="Override Docling output format: 'markdown', 'html', or 'json'",
      )
+     audio_provider: Optional[str] = Field(
+         default=None,
+         description="Override speech-to-text provider (e.g., 'openai', 'google')",
+     )
+     audio_model: Optional[str] = Field(
+         default=None,
+         description="Override speech-to-text model name (e.g., 'whisper-1', 'chirp')",
+     )
 
 
  class ProcessSourceInput(BaseModel):
@@ -36,6 +44,8 @@ class ProcessSourceInput(BaseModel):
      document_engine: Optional[str] = None
      url_engine: Optional[str] = None
      output_format: Optional[str] = None
+     audio_provider: Optional[str] = None
+     audio_model: Optional[str] = None
 
 
  class ProcessSourceOutput(BaseModel):
@@ -190,8 +190,39 @@ async def extract_audio_data(data: ProcessSourceState):
 
      # Transcribe audio files in parallel with concurrency limit
      from content_core.models import ModelFactory
+     from esperanto import AIFactory
+
+     # Determine which model to use based on state parameters
+     if data.audio_provider and data.audio_model:
+         # Custom model provided - create new instance
+         try:
+             logger.info(
+                 f"Using custom audio model: {data.audio_provider}/{data.audio_model}"
+             )
+             speech_to_text_model = AIFactory.create_speech_to_text(
+                 data.audio_provider, data.audio_model
+             )
+         except Exception as e:
+             logger.error(
+                 f"Failed to create custom audio model '{data.audio_provider}/{data.audio_model}': {e}. "
+                 f"Check that the provider and model are supported by Esperanto. "
+                 f"Falling back to default model."
+             )
+             speech_to_text_model = ModelFactory.get_model("speech_to_text")
+     elif data.audio_provider or data.audio_model:
+         # Only one parameter provided - log warning and use default
+         missing = "audio_model" if data.audio_provider else "audio_provider"
+         provided = "audio_provider" if data.audio_provider else "audio_model"
+         logger.warning(
+             f"{provided} provided without {missing}. "
+             f"Both audio_provider and audio_model must be specified together. "
+             f"Falling back to default model."
+         )
+         speech_to_text_model = ModelFactory.get_model("speech_to_text")
+     else:
+         # No custom parameters - use default (backward compatible)
+         speech_to_text_model = ModelFactory.get_model("speech_to_text")
 
-     speech_to_text_model = ModelFactory.get_model("speech_to_text")
      concurrency = get_audio_concurrency()
      semaphore = asyncio.Semaphore(concurrency)
 
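Condensed, the branching added to `extract_audio_data` above is a three-way selection. A simplified sketch of that logic, where `AIFactory.create_speech_to_text` and `ModelFactory.get_model("speech_to_text")` are the calls used in the hunk itself, while the standalone helper and the standard-library logger are illustrative only:

```python
# Illustrative distillation of the selection logic added above; not the package's API.
import logging
from typing import Optional

from esperanto import AIFactory

from content_core.models import ModelFactory

logger = logging.getLogger(__name__)


def select_speech_to_text_model(audio_provider: Optional[str], audio_model: Optional[str]):
    """Pick a custom STT model only when both overrides are present, else fall back."""
    if audio_provider and audio_model:
        try:
            return AIFactory.create_speech_to_text(audio_provider, audio_model)
        except Exception as exc:
            logger.error(
                "Failed to create custom audio model '%s/%s': %s. Falling back to default.",
                audio_provider, audio_model, exc,
            )
    elif audio_provider or audio_model:
        logger.warning(
            "audio_provider and audio_model must be specified together; using default model."
        )
    # Default path: no overrides, an incomplete pair, or a failed custom model.
    return ModelFactory.get_model("speech_to_text")
```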
@@ -2,7 +2,6 @@ from typing import Dict, Optional, Union
 
  from ai_prompter import Prompter
  from esperanto import LanguageModel
- from esperanto.common_types import Message
  from pydantic import BaseModel, Field
 
  from content_core.models import ModelFactory
@@ -1,7 +1,6 @@
  """Pytest configuration for integration tests."""
  import asyncio
  import gc
- import warnings
 
  import pytest