ebk-0.4.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. ebk/__init__.py +35 -0
  2. ebk/ai/__init__.py +23 -0
  3. ebk/ai/knowledge_graph.py +450 -0
  4. ebk/ai/llm_providers/__init__.py +26 -0
  5. ebk/ai/llm_providers/anthropic.py +209 -0
  6. ebk/ai/llm_providers/base.py +295 -0
  7. ebk/ai/llm_providers/gemini.py +285 -0
  8. ebk/ai/llm_providers/ollama.py +294 -0
  9. ebk/ai/metadata_enrichment.py +394 -0
  10. ebk/ai/question_generator.py +328 -0
  11. ebk/ai/reading_companion.py +224 -0
  12. ebk/ai/semantic_search.py +433 -0
  13. ebk/ai/text_extractor.py +393 -0
  14. ebk/calibre_import.py +66 -0
  15. ebk/cli.py +6433 -0
  16. ebk/config.py +230 -0
  17. ebk/db/__init__.py +37 -0
  18. ebk/db/migrations.py +507 -0
  19. ebk/db/models.py +725 -0
  20. ebk/db/session.py +144 -0
  21. ebk/decorators.py +1 -0
  22. ebk/exports/__init__.py +0 -0
  23. ebk/exports/base_exporter.py +218 -0
  24. ebk/exports/echo_export.py +279 -0
  25. ebk/exports/html_library.py +1743 -0
  26. ebk/exports/html_utils.py +87 -0
  27. ebk/exports/hugo.py +59 -0
  28. ebk/exports/jinja_export.py +286 -0
  29. ebk/exports/multi_facet_export.py +159 -0
  30. ebk/exports/opds_export.py +232 -0
  31. ebk/exports/symlink_dag.py +479 -0
  32. ebk/exports/zip.py +25 -0
  33. ebk/extract_metadata.py +341 -0
  34. ebk/ident.py +89 -0
  35. ebk/library_db.py +1440 -0
  36. ebk/opds.py +748 -0
  37. ebk/plugins/__init__.py +42 -0
  38. ebk/plugins/base.py +502 -0
  39. ebk/plugins/hooks.py +442 -0
  40. ebk/plugins/registry.py +499 -0
  41. ebk/repl/__init__.py +9 -0
  42. ebk/repl/find.py +126 -0
  43. ebk/repl/grep.py +173 -0
  44. ebk/repl/shell.py +1677 -0
  45. ebk/repl/text_utils.py +320 -0
  46. ebk/search_parser.py +413 -0
  47. ebk/server.py +3608 -0
  48. ebk/services/__init__.py +28 -0
  49. ebk/services/annotation_extraction.py +351 -0
  50. ebk/services/annotation_service.py +380 -0
  51. ebk/services/export_service.py +577 -0
  52. ebk/services/import_service.py +447 -0
  53. ebk/services/personal_metadata_service.py +347 -0
  54. ebk/services/queue_service.py +253 -0
  55. ebk/services/tag_service.py +281 -0
  56. ebk/services/text_extraction.py +317 -0
  57. ebk/services/view_service.py +12 -0
  58. ebk/similarity/__init__.py +77 -0
  59. ebk/similarity/base.py +154 -0
  60. ebk/similarity/core.py +471 -0
  61. ebk/similarity/extractors.py +168 -0
  62. ebk/similarity/metrics.py +376 -0
  63. ebk/skills/SKILL.md +182 -0
  64. ebk/skills/__init__.py +1 -0
  65. ebk/vfs/__init__.py +101 -0
  66. ebk/vfs/base.py +298 -0
  67. ebk/vfs/library_vfs.py +122 -0
  68. ebk/vfs/nodes/__init__.py +54 -0
  69. ebk/vfs/nodes/authors.py +196 -0
  70. ebk/vfs/nodes/books.py +480 -0
  71. ebk/vfs/nodes/files.py +155 -0
  72. ebk/vfs/nodes/metadata.py +385 -0
  73. ebk/vfs/nodes/root.py +100 -0
  74. ebk/vfs/nodes/similar.py +165 -0
  75. ebk/vfs/nodes/subjects.py +184 -0
  76. ebk/vfs/nodes/tags.py +371 -0
  77. ebk/vfs/resolver.py +228 -0
  78. ebk/vfs_router.py +275 -0
  79. ebk/views/__init__.py +32 -0
  80. ebk/views/dsl.py +668 -0
  81. ebk/views/service.py +619 -0
  82. ebk-0.4.4.dist-info/METADATA +755 -0
  83. ebk-0.4.4.dist-info/RECORD +87 -0
  84. ebk-0.4.4.dist-info/WHEEL +5 -0
  85. ebk-0.4.4.dist-info/entry_points.txt +2 -0
  86. ebk-0.4.4.dist-info/licenses/LICENSE +21 -0
  87. ebk-0.4.4.dist-info/top_level.txt +1 -0
ebk/__init__.py ADDED
@@ -0,0 +1,35 @@
+ """
+ ebk - A powerful eBook metadata management tool with SQLAlchemy + SQLite backend.
+
+ Main API:
+     from ebk.library_db import Library
+     from pathlib import Path
+
+     # Open or create a library
+     lib = Library.open(Path("/path/to/library"))
+
+     # Add a book
+     book = lib.add_book(
+         Path("book.pdf"),
+         metadata={"title": "My Book", "creators": ["Author"]},
+         extract_text=True
+     )
+
+     # Search with full-text search
+     results = lib.search("python programming", limit=50)
+
+     # Query with fluent API
+     results = (lib.query()
+                .filter_by_language("en")
+                .filter_by_author("Knuth")
+                .limit(20)
+                .all())
+
+     # Always close when done
+     lib.close()
+ """
+
+ from .library_db import Library
+
+ __version__ = "0.4.4"
+ __all__ = ["Library"]
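The module docstring doubles as the package's quick-start. Below is a minimal end-to-end sketch built only from the calls the docstring shows; the library path is a placeholder, and the keyword signatures are taken from the docstring rather than verified against library_db.py:

    from pathlib import Path
    from ebk import Library  # re-exported by ebk/__init__.py

    lib = Library.open(Path("/tmp/demo-library"))  # opens or creates the library
    try:
        # extract_text=True enables full-text search over the book body
        book = lib.add_book(
            Path("book.pdf"),
            metadata={"title": "My Book", "creators": ["Author"]},
            extract_text=True,
        )
        hits = lib.search("python programming", limit=50)
        print(len(hits), "matches")
    finally:
        lib.close()  # the docstring stresses closing when done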
ebk/ai/__init__.py ADDED
@@ -0,0 +1,23 @@
+ """
+ AI-powered features for ebk: Knowledge graphs, semantic search, and intelligent reading assistance.
+ """
+
+ from .knowledge_graph import KnowledgeGraph, ConceptNode, ConceptRelation
+ from .text_extractor import TextExtractor, ChapterExtractor
+ from .semantic_search import SemanticSearch, EmbeddingStore
+ from .reading_companion import ReadingCompanion, ReadingSession
+ from .question_generator import QuestionGenerator, QuizBuilder
+
+ __all__ = [
+     'KnowledgeGraph',
+     'ConceptNode',
+     'ConceptRelation',
+     'TextExtractor',
+     'ChapterExtractor',
+     'SemanticSearch',
+     'EmbeddingStore',
+     'ReadingCompanion',
+     'ReadingSession',
+     'QuestionGenerator',
+     'QuizBuilder'
+ ]
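Because the subpackage re-exports everything in `__all__`, consumers can import the AI classes directly from `ebk.ai` instead of from the defining modules; for example (library path is a placeholder):

    from pathlib import Path
    from ebk.ai import KnowledgeGraph  # same class as ebk.ai.knowledge_graph.KnowledgeGraph

    kg = KnowledgeGraph(Path("/tmp/demo-library"))  # constructor defined in knowledge_graph.py below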
ebk/ai/knowledge_graph.py ADDED
@@ -0,0 +1,450 @@
+ """
+ Knowledge Graph implementation for connecting concepts across books.
+ """
+
+ import json
+ import hashlib
+ import re
+ from pathlib import Path
+ from typing import Dict, List, Optional, Set, Tuple, Any
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ import networkx as nx
+ from collections import defaultdict
+
+
+ @dataclass
+ class ConceptNode:
+     """Represents a concept/idea extracted from books."""
+     id: str
+     name: str
+     description: str
+     source_books: List[str] = field(default_factory=list)
+     contexts: List[Dict[str, Any]] = field(default_factory=list)
+     keywords: List[str] = field(default_factory=list)
+     importance_score: float = 0.0
+     created_at: datetime = field(default_factory=datetime.now)
+
+     def add_context(self, book_id: str, page: Optional[int], quote: str, chapter: Optional[str] = None):
+         """Add a context where this concept appears."""
+         self.contexts.append({
+             'book_id': book_id,
+             'page': page,
+             'quote': quote,
+             'chapter': chapter,
+             'timestamp': datetime.now().isoformat()
+         })
+         if book_id not in self.source_books:
+             self.source_books.append(book_id)
+
+     def to_dict(self) -> Dict:
+         """Convert to a dictionary for serialization."""
+         return {
+             'id': self.id,
+             'name': self.name,
+             'description': self.description,
+             'source_books': self.source_books,
+             'contexts': self.contexts,
+             'keywords': self.keywords,
+             'importance_score': self.importance_score,
+             'created_at': self.created_at.isoformat()
+         }
+
+
+ @dataclass
+ class ConceptRelation:
+     """Represents a relationship between two concepts."""
+     source_id: str
+     target_id: str
+     relation_type: str  # 'supports', 'contradicts', 'extends', 'examples', 'causes', etc.
+     strength: float = 1.0
+     evidence: List[Dict[str, Any]] = field(default_factory=list)
+
+     def add_evidence(self, book_id: str, description: str):
+         """Add evidence for this relationship."""
+         self.evidence.append({
+             'book_id': book_id,
+             'description': description,
+             'timestamp': datetime.now().isoformat()
+         })
+
+
+ class KnowledgeGraph:
+     """
+     A knowledge graph that connects concepts across multiple books.
+     Uses NetworkX for graph operations and provides rich querying capabilities.
+     """
+
+     def __init__(self, library_path: Path):
+         self.library_path = Path(library_path)
+         self.graph_path = self.library_path / '.knowledge_graph'
+         self.graph_path.mkdir(exist_ok=True)
+
+         self.graph = nx.DiGraph()
+         self.concepts: Dict[str, ConceptNode] = {}
+         self.concept_index: Dict[str, List[str]] = defaultdict(list)  # keyword -> concept_ids
+         self.book_concepts: Dict[str, Set[str]] = defaultdict(set)  # book_id -> concept_ids
+
+         self.load_graph()
+
+     def generate_concept_id(self, name: str, context: str = "") -> str:
+         """Generate a unique ID for a concept."""
+         content = f"{name.lower()}:{context}"
+         return hashlib.md5(content.encode()).hexdigest()[:12]
+
+     def add_concept(self, name: str, description: str,
+                     book_id: Optional[str] = None, page: Optional[int] = None,
+                     quote: Optional[str] = None, keywords: Optional[List[str]] = None) -> ConceptNode:
+         """Add a new concept or update an existing one."""
+         concept_id = self.generate_concept_id(name, description[:50])
+
+         if concept_id in self.concepts:
+             concept = self.concepts[concept_id]
+             if book_id and quote:
+                 concept.add_context(book_id, page, quote)
+         else:
+             concept = ConceptNode(
+                 id=concept_id,
+                 name=name,
+                 description=description,
+                 keywords=keywords or self._extract_keywords(name, description)
+             )
+             if book_id and quote:
+                 concept.add_context(book_id, page, quote)
+
+         self.concepts[concept_id] = concept
+         self.graph.add_node(concept_id, **concept.to_dict())
+
+         # Update indices
+         for keyword in concept.keywords:
+             self.concept_index[keyword.lower()].append(concept_id)
+         if book_id:
+             self.book_concepts[book_id].add(concept_id)
+
+         return concept
+
+     def add_relation(self, source_name: str, target_name: str,
+                      relation_type: str, strength: float = 1.0,
+                      book_id: Optional[str] = None, evidence: Optional[str] = None) -> ConceptRelation:
+         """Add a relationship between two concepts."""
+         source_id = self._find_concept_id_by_name(source_name)
+         target_id = self._find_concept_id_by_name(target_name)
+
+         # Ensure both concepts exist
+         if source_id not in self.concepts or target_id not in self.concepts:
+             raise ValueError("Both concepts must exist before creating a relation")
+
+         relation = ConceptRelation(
+             source_id=source_id,
+             target_id=target_id,
+             relation_type=relation_type,
+             strength=strength
+         )
+
+         if book_id and evidence:
+             relation.add_evidence(book_id, evidence)
+
+         self.graph.add_edge(
+             source_id, target_id,
+             type=relation_type,
+             strength=strength,
+             evidence=relation.evidence
+         )
+
+         return relation
+
+     def find_concept_path(self, start_concept: str, end_concept: str) -> List[str]:
+         """Find the shortest path between two concepts, looked up by name."""
+         # Concept IDs are derived from name *and* description, so resolve by
+         # name rather than recomputing generate_concept_id(name, "").
+         start_id = self._find_concept_id_by_name(start_concept)
+         end_id = self._find_concept_id_by_name(end_concept)
+
+         if start_id not in self.graph or end_id not in self.graph:
+             return []
+
+         try:
+             path = nx.shortest_path(self.graph, start_id, end_id)
+             return [self.concepts[node_id].name for node_id in path]
+         except nx.NetworkXNoPath:
+             return []
+
+     def find_related_concepts(self, concept_name: str,
+                               max_distance: int = 2,
+                               min_strength: float = 0.5) -> List[Tuple[str, float]]:
+         """Find concepts related to a given concept within a certain distance."""
+         concept_id = self._find_concept_id_by_name(concept_name)
+
+         if concept_id is None or concept_id not in self.graph:
+             # Fall back to fuzzy matching
+             concept_id = self._fuzzy_find_concept(concept_name)
+             if not concept_id:
+                 return []
+
+         related = []
+         visited = set()
+
+         # BFS with distance tracking and multiplicative strength decay
+         queue = [(concept_id, 0, 1.0)]
+
+         while queue:
+             current_id, distance, accumulated_strength = queue.pop(0)
+
+             if current_id in visited or distance > max_distance:
+                 continue
+
+             visited.add(current_id)
+
+             if current_id != concept_id and accumulated_strength >= min_strength:
+                 concept = self.concepts[current_id]
+                 related.append((concept.name, accumulated_strength))
+
+             # Explore neighbors
+             for neighbor in self.graph.neighbors(current_id):
+                 edge_data = self.graph[current_id][neighbor]
+                 new_strength = accumulated_strength * edge_data.get('strength', 1.0)
+                 queue.append((neighbor, distance + 1, new_strength))
+
+         # Sort by relevance (accumulated strength)
+         related.sort(key=lambda x: x[1], reverse=True)
+         return related
+
+     def get_concept_connections(self, book_id: str) -> Dict[str, List[Dict[str, Any]]]:
+         """Get all concept connections for a specific book."""
+         book_concept_ids = self.book_concepts.get(book_id, set())
+         connections = {}
+
+         for concept_id in book_concept_ids:
+             concept = self.concepts[concept_id]
+             neighbors = []
+
+             for neighbor_id in self.graph.neighbors(concept_id):
+                 neighbor = self.concepts[neighbor_id]
+                 edge_data = self.graph[concept_id][neighbor_id]
+                 neighbors.append({
+                     'name': neighbor.name,
+                     'relation': edge_data.get('type', 'related'),
+                     'strength': edge_data.get('strength', 1.0)
+                 })
+
+             if neighbors:
+                 connections[concept.name] = neighbors
+
+         return connections
+
+     def generate_reading_path(self, start_topic: str,
+                               end_topic: str,
+                               available_books: List[str]) -> List[Dict[str, Any]]:
+         """
+         Generate a reading path from one topic to another using available books.
+         Returns a sequence of books and the concepts they'll teach.
+         """
+         start_concepts = self._find_concepts_by_topic(start_topic)
+         end_concepts = self._find_concepts_by_topic(end_topic)
+
+         if not start_concepts or not end_concepts:
+             return []
+
+         # Find paths between all concept pairs
+         all_paths = []
+         for start_id in start_concepts:
+             for end_id in end_concepts:
+                 try:
+                     path = nx.shortest_path(self.graph, start_id, end_id)
+                     all_paths.append(path)
+                 except nx.NetworkXNoPath:
+                     continue
+
+         if not all_paths:
+             return []
+
+         # Select the shortest of the candidate paths
+         best_path = min(all_paths, key=len)
+
+         # Map concepts to books
+         reading_sequence = []
+         covered_concepts = set()
+
+         for concept_id in best_path:
+             if concept_id in covered_concepts:
+                 continue
+
+             concept = self.concepts[concept_id]
+             # Find which available book best covers this concept
+             best_book = None
+             max_coverage = 0
+
+             for book_id in concept.source_books:
+                 if book_id in available_books:
+                     coverage = len([c for c in concept.contexts if c['book_id'] == book_id])
+                     if coverage > max_coverage:
+                         max_coverage = coverage
+                         best_book = book_id
+
+             if best_book:
+                 reading_sequence.append({
+                     'book_id': best_book,
+                     'concept': concept.name,
+                     'description': concept.description,
+                     'why': f"Bridges understanding from {start_topic} towards {end_topic}"
+                 })
+                 covered_concepts.add(concept_id)
+
+         return reading_sequence
+
+     def calculate_concept_importance(self) -> Dict[str, float]:
+         """
+         Calculate importance scores for all concepts using a PageRank-style algorithm.
+         """
+         if not self.graph.nodes():
+             return {}
+
+         # Calculate PageRank, weighting edges by relation strength
+         pagerank_scores = nx.pagerank(self.graph, weight='strength')
+
+         # Update concept importance scores
+         for concept_id, score in pagerank_scores.items():
+             if concept_id in self.concepts:
+                 self.concepts[concept_id].importance_score = score
+
+         return pagerank_scores
+
+     def get_key_concepts(self, top_n: int = 10) -> List[ConceptNode]:
+         """Get the most important concepts in the knowledge graph."""
+         self.calculate_concept_importance()
+
+         sorted_concepts = sorted(
+             self.concepts.values(),
+             key=lambda c: c.importance_score,
+             reverse=True
+         )
+
+         return sorted_concepts[:top_n]
+
+     def export_for_visualization(self) -> Dict[str, Any]:
+         """Export graph data for visualization tools."""
+         nodes = []
+         edges = []
+
+         for concept_id, concept in self.concepts.items():
+             nodes.append({
+                 'id': concept_id,
+                 'label': concept.name,
+                 'title': concept.description,
+                 'value': concept.importance_score * 100,
+                 'group': len(concept.source_books)  # Group by number of source books
+             })
+
+         for source, target, data in self.graph.edges(data=True):
+             edges.append({
+                 'from': source,
+                 'to': target,
+                 'label': data.get('type', 'related'),
+                 'value': data.get('strength', 1.0)
+             })
+
+         return {
+             'nodes': nodes,
+             'edges': edges,
+             'metadata': {
+                 'total_concepts': len(self.concepts),
+                 'total_relations': self.graph.number_of_edges(),
+                 'books_indexed': len(self.book_concepts)
+             }
+         }
+
+     def save_graph(self):
+         """Persist the knowledge graph to disk."""
+         # Save concepts
+         concepts_data = {
+             cid: concept.to_dict()
+             for cid, concept in self.concepts.items()
+         }
+         with open(self.graph_path / 'concepts.json', 'w') as f:
+             json.dump(concepts_data, f, indent=2)
+
+         # Save graph structure
+         graph_data = nx.node_link_data(self.graph)
+         with open(self.graph_path / 'graph.json', 'w') as f:
+             json.dump(graph_data, f, indent=2)
+
+         # Save indices
+         indices = {
+             'concept_index': dict(self.concept_index),
+             'book_concepts': {k: list(v) for k, v in self.book_concepts.items()}
+         }
+         with open(self.graph_path / 'indices.json', 'w') as f:
+             json.dump(indices, f, indent=2)
+
+     def load_graph(self):
+         """Load the knowledge graph from disk."""
+         concepts_file = self.graph_path / 'concepts.json'
+         graph_file = self.graph_path / 'graph.json'
+         indices_file = self.graph_path / 'indices.json'
+
+         if concepts_file.exists():
+             with open(concepts_file, 'r') as f:
+                 concepts_data = json.load(f)
+             for cid, cdata in concepts_data.items():
+                 # Reconstruct ConceptNode, keeping only known dataclass fields
+                 cdata['created_at'] = datetime.fromisoformat(cdata['created_at'])
+                 self.concepts[cid] = ConceptNode(**{
+                     k: v for k, v in cdata.items()
+                     if k in ConceptNode.__dataclass_fields__
+                 })
+
+         if graph_file.exists():
+             with open(graph_file, 'r') as f:
+                 graph_data = json.load(f)
+             self.graph = nx.node_link_graph(graph_data)
+
+         if indices_file.exists():
+             with open(indices_file, 'r') as f:
+                 indices = json.load(f)
+             self.concept_index = defaultdict(list, indices.get('concept_index', {}))
+             self.book_concepts = defaultdict(
+                 set,
+                 {k: set(v) for k, v in indices.get('book_concepts', {}).items()}
+             )
+
+     def _find_concept_id_by_name(self, name: str) -> Optional[str]:
+         """Find a concept ID by exact (case-insensitive) name match."""
+         name_lower = name.lower()
+         for concept_id, concept in self.concepts.items():
+             if concept.name.lower() == name_lower:
+                 return concept_id
+         return None
+
+     def _extract_keywords(self, name: str, description: str) -> List[str]:
+         """Extract keywords from the concept name and description."""
+         # Simple keyword extraction - can be enhanced with NLP
+         text = f"{name} {description}".lower()
+         words = re.findall(r'\b[a-z]+\b', text)
+         # Filter common words and return unique keywords
+         stopwords = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'as', 'is', 'was', 'are', 'been'}
+         keywords = list(set(w for w in words if w not in stopwords and len(w) > 3))
+         return keywords[:10]  # Limit to 10 keywords
+
+     def _fuzzy_find_concept(self, name: str) -> Optional[str]:
+         """Find a concept by fuzzy-matching the name."""
+         name_lower = name.lower()
+         for concept_id, concept in self.concepts.items():
+             if name_lower in concept.name.lower() or concept.name.lower() in name_lower:
+                 return concept_id
+         return None
+
+     def _find_concepts_by_topic(self, topic: str) -> List[str]:
+         """Find all concepts related to a topic."""
+         topic_lower = topic.lower()
+         related_concepts = []
+
+         # Search in concept names and descriptions
+         for concept_id, concept in self.concepts.items():
+             if (topic_lower in concept.name.lower() or
+                     topic_lower in concept.description.lower() or
+                     any(topic_lower in kw.lower() for kw in concept.keywords)):
+                 related_concepts.append(concept_id)
+
+         # Search in the keyword index
+         for keyword in topic_lower.split():
+             related_concepts.extend(self.concept_index.get(keyword, []))
+
+         return list(set(related_concepts))
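A hedged usage sketch of the class above; the book ID, quotes, and paths are placeholders, while every method and keyword mirrors the definitions in this file:

    from pathlib import Path
    from ebk.ai.knowledge_graph import KnowledgeGraph

    kg = KnowledgeGraph(Path("/tmp/demo-library"))  # state persists under .knowledge_graph/

    # Concepts are deduplicated by md5(name + first 50 chars of description)
    kg.add_concept("Dynamic Programming",
                   "Solving problems by combining solutions to overlapping subproblems",
                   book_id="book-123", page=42, quote="placeholder quote")
    kg.add_concept("Memoization",
                   "Caching function results to avoid recomputation",
                   book_id="book-123", page=57, quote="placeholder quote")

    # Relations are resolved by concept name and stored as weighted directed edges
    kg.add_relation("Memoization", "Dynamic Programming", "supports", strength=0.9,
                    book_id="book-123", evidence="derives DP from memoized recursion")

    # BFS with multiplicative strength decay; returns [(name, accumulated_strength), ...]
    print(kg.find_related_concepts("Memoization", max_distance=2, min_strength=0.5))

    kg.save_graph()  # writes concepts.json, graph.json, and indices.json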
ebk/ai/llm_providers/__init__.py ADDED
@@ -0,0 +1,26 @@
+ """
+ LLM Provider Abstractions for EBK.
+
+ Provides a unified interface for various LLM providers including:
+ - Ollama (local and remote)
+ - OpenAI (via compatible API)
+ - Anthropic Claude
+ - Google Gemini
+
+ Future: MCP client support for tool calling and web search.
+ """
+
+ from .base import BaseLLMProvider, LLMConfig, LLMResponse, ModelCapability
+ from .ollama import OllamaProvider
+ from .anthropic import AnthropicProvider
+ from .gemini import GeminiProvider
+
+ __all__ = [
+     'BaseLLMProvider',
+     'LLMConfig',
+     'LLMResponse',
+     'ModelCapability',
+     'OllamaProvider',
+     'AnthropicProvider',
+     'GeminiProvider',
+ ]
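Only the exported names are visible here; the actual interface lives in base.py (+295 lines, not shown above). As a loudly hypothetical sketch of how a provider might be driven, assuming LLMConfig fields and a completion method whose real names this diff does not confirm:

    from ebk.ai.llm_providers import LLMConfig, OllamaProvider

    # Hypothetical field names; LLMConfig's real shape is defined in base.py
    config = LLMConfig(model="llama3", base_url="http://localhost:11434")
    provider = OllamaProvider(config)

    # Hypothetical method; BaseLLMProvider's real surface is not shown in this diff
    response = provider.complete("Summarize this chapter in three bullets.")
    print(response.content)  # an LLMResponse presumably carries the generated text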