forktex-intelligence 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,97 @@
1
+ # Copyright (C) 2026 FORKTEX S.R.L.
2
+ #
3
+ # SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-ForkTex-Commercial
4
+ #
5
+ # This file is part of forktex-intelligence.
6
+ #
7
+ # For commercial licensing -- including use in proprietary products, SaaS
8
+ # deployments, or any context where AGPL obligations cannot be met -- you
9
+ # MUST obtain a commercial license from FORKTEX S.R.L. (info@forktex.com).
10
+ #
11
+ # This program is free software: you can redistribute it and/or modify
12
+ # it under the terms of the GNU Affero General Public License as published by
13
+ # the Free Software Foundation, either version 3 of the License, or
14
+ # (at your option) any later version.
15
+ #
16
+ # This program is distributed in the hope that it will be useful,
17
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
18
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
+ # GNU Affero General Public License for more details.
20
+ #
21
+ # You should have received a copy of the GNU Affero General Public License
22
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
23
+
24
+ """forktex_intelligence — Standalone Python SDK for the ForkTex Intelligence API.
25
+
26
+ Three pillars:
27
+ 1. Smart API — LLM chat/structured output, proxied to best providers
28
+ 2. Content Extraction — Upload any file, get structured text + chunks
29
+ 3. Vector Space — Managed vector storage with semantic search
30
+
31
+ Usage::
32
+
33
+ from forktex_intelligence import Intelligence
34
+
35
+ async with Intelligence(org_id="my-org-uuid") as ai:
36
+ # Chat
37
+ response = await ai.chat("Explain Python decorators")
38
+
39
+ # Extract text from a file
40
+ result = await ai.extract_file(pdf_bytes, "paper.pdf")
41
+
42
+ # Search across collections
43
+ results = await ai.search("collection-id", "quantum computing")
44
+ """
45
+
46
+ __version__ = "0.2.3"
47
+
48
+ from forktex_intelligence.api import (
49
+ AvailableModel,
50
+ Intelligence,
51
+ Response,
52
+ StructuredResponse,
53
+ StreamChunks,
54
+ )
55
+ from forktex_intelligence.config import IntelligenceSettings
56
+ from forktex_intelligence.client.client import (
57
+ ForktexIntelligenceClient,
58
+ IntelligenceAPIError,
59
+ )
60
+ from forktex_intelligence.client.generated import (
61
+ SPEC_HASH,
62
+ SPEC_VERSION,
63
+ ChatMessage,
64
+ ChatResponse,
65
+ HealthResponse,
66
+ StructuredChatResponse,
67
+ ToolCallInfo,
68
+ UsageInfo,
69
+ )
70
+ from forktex_intelligence.streams import SSEEvent, SSEEventType
71
+
72
+ __all__ = [
73
+ # High-level API
74
+ "AvailableModel",
75
+ "Intelligence",
76
+ "Response",
77
+ "StructuredResponse",
78
+ "StreamChunks",
79
+ # Configuration
80
+ "IntelligenceSettings",
81
+ # Low-level client (advanced)
82
+ "ForktexIntelligenceClient",
83
+ "IntelligenceAPIError",
84
+ # Codegen contract (wire-compatibility markers)
85
+ "SPEC_VERSION",
86
+ "SPEC_HASH",
87
+ # Wire-level models (advanced — prefer high-level API)
88
+ "ChatMessage",
89
+ "ChatResponse",
90
+ "HealthResponse",
91
+ "StructuredChatResponse",
92
+ "ToolCallInfo",
93
+ "UsageInfo",
94
+ # Streaming
95
+ "SSEEvent",
96
+ "SSEEventType",
97
+ ]
@@ -0,0 +1,544 @@
1
+ # Copyright (C) 2026 FORKTEX S.R.L.
2
+ #
3
+ # SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-ForkTex-Commercial
4
+ #
5
+ # This file is part of forktex-intelligence.
6
+ #
7
+ # For commercial licensing -- including use in proprietary products, SaaS
8
+ # deployments, or any context where AGPL obligations cannot be met -- you
9
+ # MUST obtain a commercial license from FORKTEX S.R.L. (info@forktex.com).
10
+ #
11
+ # This program is free software: you can redistribute it and/or modify
12
+ # it under the terms of the GNU Affero General Public License as published by
13
+ # the Free Software Foundation, either version 3 of the License, or
14
+ # (at your option) any later version.
15
+ #
16
+ # This program is distributed in the hope that it will be useful,
17
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
18
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
+ # GNU Affero General Public License for more details.
20
+ #
21
+ # You should have received a copy of the GNU Affero General Public License
22
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
23
+
24
+ """High-level wrapping API for the ForkTex Intelligence API.
25
+
26
+ Provides a user-friendly interface for the Intelligence platform:
27
+ - Smart API (chat, streaming, structured output)
28
+ - Content Extraction (upload file → text + chunks)
29
+ - Vector Space (collections, documents, search)
30
+
31
+ Usage::
32
+
33
+ from forktex_intelligence import Intelligence
34
+
35
+ async with Intelligence() as ai:
36
+ # Simple chat
37
+ response = await ai.chat("What is Python?")
38
+ print(response.text)
39
+
40
+ # Extract text from a file
41
+ result = await ai.extract_file(pdf_bytes, "paper.pdf")
42
+ print(result["text"])
43
+
44
+ # Search across collections
45
+ results = await ai.search("my-collection-id", "quantum computing")
46
+ """
47
+
48
+ from __future__ import annotations
49
+
50
+ import json
51
+ from typing import Any, AsyncIterator, Dict, List, Optional
52
+ from uuid import UUID
53
+
54
+ from pydantic import BaseModel, Field
55
+
56
+ from forktex_intelligence.config import IntelligenceSettings
57
+ from forktex_intelligence.client.client import ForktexIntelligenceClient
58
+ from forktex_intelligence.client.generated import (
59
+ ChatResponse,
60
+ CollectionCreateRequest,
61
+ CollectionListResponse,
62
+ CollectionResponse,
63
+ CrossCollectionSearchRequest,
64
+ DocumentListResponse,
65
+ DocumentResponse,
66
+ EmbedRequest,
67
+ HealthResponse,
68
+ OrgResponse,
69
+ RerankRequest,
70
+ SearchRequest,
71
+ SearchResponse,
72
+ StructuredChatResponse,
73
+ UsageAggregation,
74
+ UsageLogResponse,
75
+ )
76
+ from forktex_intelligence.streams import SSEEventType, parse_sse_stream
77
+
78
+
79
class AvailableModel(BaseModel):
    """Descriptor for one model exposed through the Intelligence API."""

    # Provider-qualified model identifier, e.g. used in chat(model=...).
    id: str
    # Optional human-readable blurb; empty when the backend supplies none.
    description: str = ""

    def __repr__(self) -> str:
        return f"AvailableModel({self.id!r})"

    def __str__(self) -> str:
        # Printing the model yields just its id, so it can be passed around
        # wherever a plain model-name string is expected.
        return self.id
90
+
91
+
92
class Response(BaseModel):
    """Result of a (non-streaming) chat completion.

    ``str(response)`` yields the generated text; the wire-level payload is
    preserved verbatim in ``raw`` for advanced inspection.
    """

    text: str
    model: str = ""
    input_tokens: int = 0
    output_tokens: int = 0
    tool_calls: list[dict[str, Any]] = Field(default_factory=list)
    raw: Dict[str, Any] = Field(default_factory=dict)

    @property
    def total_tokens(self) -> int:
        """Combined prompt + completion token count."""
        return self.input_tokens + self.output_tokens

    @property
    def has_tool_calls(self) -> bool:
        """True when the model requested at least one tool invocation."""
        return bool(self.tool_calls)

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return f"Response(text={self.text!r}, model={self.model!r})"

    @classmethod
    def from_chat_response(cls, resp: ChatResponse) -> Response:
        """Build a Response from the wire-level ChatResponse model."""
        usage = resp.usage
        calls = resp.tool_calls or []
        return cls(
            text=resp.content,
            model=resp.model,
            input_tokens=usage.input_tokens if usage else 0,
            output_tokens=usage.output_tokens if usage else 0,
            tool_calls=[call.model_dump() for call in calls],
            raw=resp.model_dump(),
        )
126
+
127
+
128
class StructuredResponse(BaseModel):
    """Response from a structured (schema-constrained) chat completion.

    ``data`` is always a dict: string payloads are JSON-decoded when
    possible, and any non-dict result is wrapped under a ``"value"`` key.
    Supports dict-style access via ``resp["key"]`` and ``resp.get("key")``.
    """

    data: Dict[str, Any]
    model: str = ""
    input_tokens: int = 0
    output_tokens: int = 0

    @property
    def total_tokens(self) -> int:
        """Combined prompt + completion token count."""
        return self.input_tokens + self.output_tokens

    def __getitem__(self, key: str) -> Any:
        """Index into the structured payload; raises KeyError if absent."""
        return self.data[key]

    def get(self, key: str, default: Any = None) -> Any:
        """Look up *key* in the structured payload, with a default."""
        return self.data.get(key, default)

    @classmethod
    def from_structured_response(cls, resp: StructuredChatResponse) -> StructuredResponse:
        """Build a StructuredResponse from the wire-level model.

        ``resp.content`` may arrive either as a JSON string or as an
        already-decoded object.
        """
        raw = resp.content
        if isinstance(raw, str):
            try:
                raw = json.loads(raw)
            except ValueError:
                # Not valid JSON — keep the raw string; it is wrapped into a
                # dict below. (json.JSONDecodeError subclasses ValueError;
                # the previous bare `except Exception` silently swallowed
                # unrelated bugs, not just parse failures.)
                pass
        if not isinstance(raw, dict):
            raw = {"value": raw}
        return cls(
            data=raw,
            model=resp.model,
            input_tokens=resp.usage.input_tokens if resp.usage else 0,
            output_tokens=resp.usage.output_tokens if resp.usage else 0,
        )
162
+
163
+
164
class StreamChunks:
    """Async iterator over the text deltas of an SSE chat stream.

    Iterating yields each text chunk as it arrives. Tool-call and usage
    events are accumulated on the side and exposed via ``has_tool_calls``
    and :meth:`collect`.
    """

    def __init__(self, raw_stream: AsyncIterator[bytes]) -> None:
        self._raw = raw_stream
        # SSE event iterator, created lazily on first iteration.
        self._iterator: Optional[AsyncIterator[str]] = None
        self._tool_calls: list[dict[str, Any]] = []
        self._text_parts: list[str] = []
        self._usage: Dict[str, int] = {}
        # NOTE(review): nothing in this class ever assigns _model, so
        # collect() always reports model="" — confirm whether the SSE
        # stream carries a model field that should be captured here.
        self._model: str = ""

    def _events(self) -> AsyncIterator[str]:
        """Return the SSE parser, creating it exactly once.

        The previous implementation re-wrapped ``self._raw`` on every
        ``__aiter__`` call, so a second iteration pass (e.g. ``collect()``
        after partial manual iteration) would start a fresh parser over a
        partially consumed byte stream and could mis-parse mid-event.
        """
        if self._iterator is None:
            self._iterator = parse_sse_stream(self._raw)
        return self._iterator

    def __aiter__(self) -> StreamChunks:
        self._events()
        return self

    async def __anext__(self) -> str:
        # Lazy init (instead of the old `assert`, which is stripped under -O)
        # also makes direct `anext()` calls work without an explicit aiter.
        events = self._events()
        async for event in events:
            if event.event == SSEEventType.DELTA:
                chunk = event.delta_text
                self._text_parts.append(chunk)
                return chunk
            if event.event == SSEEventType.TOOL_CALL:
                self._tool_calls.append(event.data)
            elif event.event == SSEEventType.USAGE:
                self._usage["input_tokens"] = event.input_tokens
                self._usage["output_tokens"] = event.output_tokens
            elif event.event == SSEEventType.DONE:
                raise StopAsyncIteration
        raise StopAsyncIteration

    @property
    def has_tool_calls(self) -> bool:
        """True once at least one tool-call event has been seen."""
        return len(self._tool_calls) > 0

    async def collect(self) -> Response:
        """Drain the remaining stream and return the assembled Response."""
        async for _ in self:
            pass
        return Response(
            text="".join(self._text_parts),
            model=self._model,
            input_tokens=self._usage.get("input_tokens", 0),
            output_tokens=self._usage.get("output_tokens", 0),
            tool_calls=self._tool_calls,
        )
209
+
210
+
211
class Intelligence:
    """High-level client for the ForkTex Intelligence API.

    Supports org-scoped operations and dual auth (JWT or API key). Wraps
    ``ForktexIntelligenceClient`` (which itself inherits the generated
    ``_GeneratedOperations`` surface from ``openapi.json``) with an
    ergonomic facade that injects the active org_id into every org-scoped
    call.

    Usage::

        async with Intelligence(org_id="my-org-uuid") as ai:
            response = await ai.chat("Hello!")
            results = await ai.search("coll-id", "semantic query")
    """

    def __init__(
        self,
        *,
        endpoint: Optional[str] = None,
        api_key: Optional[str] = None,
        jwt_token: Optional[str] = None,
        org_id: Optional[str | UUID] = None,
        timeout: float = 120.0,
        settings: Optional[IntelligenceSettings] = None,
        transport: Any = None,
    ) -> None:
        """Initialize the client.

        Args:
            endpoint: API base URL. Ignored when ``settings`` is given.
            api_key: API key. Ignored when ``settings`` is given.
            jwt_token: Optional JWT to use for authentication.
            org_id: Org to scope requests to; may also be resolved from the
                API key on first use, or set later via :meth:`set_org`.
            timeout: Per-request timeout in seconds.
            settings: Pre-built settings; takes precedence over
                ``endpoint``/``api_key``.
            transport: Optional HTTP transport override (e.g. for tests).

        Raises:
            RuntimeError: If no usable endpoint/api_key configuration results.
        """
        if settings is not None:
            self._settings = settings
        else:
            # Fall back to the declared field default so IntelligenceSettings'
            # own defaulting still applies when endpoint is omitted.
            self._settings = IntelligenceSettings(
                endpoint=endpoint or IntelligenceSettings.model_fields["endpoint"].default,
                api_key=api_key or "",
            )

        if not self._settings.is_configured:
            raise RuntimeError(
                "Intelligence API not configured. Provide endpoint and api_key "
                "via constructor arguments or an IntelligenceSettings object."
            )

        self._client = ForktexIntelligenceClient(
            self._settings.endpoint,
            self._settings.api_key,
            jwt_token=jwt_token or "",
            org_id=org_id,
            timeout=timeout,
            transport=transport,
        )
        # The org may be implied by the API key; resolved lazily in _ensure_org.
        self._needs_org_resolve = not org_id and not self._client.org_id

    @property
    def client(self) -> ForktexIntelligenceClient:
        """The underlying low-level client (advanced use)."""
        return self._client

    def set_org(self, org_id: str | UUID) -> None:
        """Set the org for org-scoped requests."""
        self._client.set_org(org_id)

    async def close(self) -> None:
        """Close the underlying HTTP client."""
        await self._client.close()

    @property
    def _org(self) -> UUID:
        """Return active org_id as a UUID; raise if not set."""
        if not self._client.org_id:
            raise RuntimeError("No org_id set. Call set_org(org_id) first.")
        return UUID(self._client.org_id)

    async def _ensure_org(self) -> None:
        """Auto-resolve org_id from API key if not set.

        NOTE(review): relies on whoami() populating the client's org_id as a
        side effect — confirm against ForktexIntelligenceClient.
        """
        if self._needs_org_resolve and self._client.api_key:
            await self._client.whoami()
            self._needs_org_resolve = False

    async def __aenter__(self) -> Intelligence:
        await self._ensure_org()
        return self

    async def __aexit__(self, *args: Any) -> None:
        await self.close()

    # ── Auth ─────────────────────────────────────────────────────────

    async def register(self, email: str, password: str) -> Dict[str, Any]:
        """Register a new user. Auto-sets JWT for subsequent requests."""
        return await self._client.register(email, password)

    async def login(self, email: str, password: str) -> Dict[str, Any]:
        """Login and get JWT. Auto-sets JWT for subsequent requests."""
        return await self._client.login(email, password)

    async def me(self) -> Any:
        """Get current user info and orgs."""
        return await self._client.me()

    async def create_org(self, name: str, slug: str) -> OrgResponse:
        """Create an org and auto-set it as current."""
        return await self._client.create_org(name, slug)

    # ── Smart API (Pillar 1) ─────────────────────────────────────────

    @staticmethod
    def _compose_messages(
        prompt: str,
        messages: Optional[List[Dict[str, str]]],
    ) -> List[Dict[str, str]]:
        """Return a fresh message list: copied caller history + user prompt.

        Copying fixes the previous behavior of appending into the caller's
        own ``messages`` list on every call (visible mutation of an input).
        """
        msgs: List[Dict[str, str]] = list(messages) if messages else []
        msgs.append({"role": "user", "content": prompt})
        return msgs

    async def chat(
        self,
        prompt: str,
        *,
        model: Optional[str] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        messages: Optional[List[Dict[str, str]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Response:
        """Send a chat message and get the complete response.

        The system prompt is delivered exactly once, via the client's
        ``system`` parameter (consistent with :meth:`extract_structured`).
        Previously it was sent twice — inlined into ``messages`` AND passed
        as the parameter — because the guard ``messages != []`` was always
        true after the user prompt had been appended.
        """
        resp = await self._client.chat(
            self._org,
            self._compose_messages(prompt, messages),
            model=model,
            system=system,
            temperature=temperature,
            max_tokens=max_tokens,
            tools=tools,
        )
        return Response.from_chat_response(resp)

    def stream(
        self,
        prompt: str,
        *,
        model: Optional[str] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        messages: Optional[List[Dict[str, str]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> StreamChunks:
        """Send a chat message and stream the response.

        Same message/system handling as :meth:`chat`: the caller's
        ``messages`` list is never mutated, and ``system`` is sent once.
        """
        raw = self._client.chat_stream(
            self._org,
            self._compose_messages(prompt, messages),
            model=model,
            system=system,
            temperature=temperature,
            max_tokens=max_tokens,
            tools=tools,
        )
        return StreamChunks(raw)

    async def extract_structured(
        self,
        prompt: str,
        *,
        schema: Dict[str, Any],
        model: Optional[str] = None,
        system: Optional[str] = None,
        messages: Optional[List[Dict[str, str]]] = None,
    ) -> StructuredResponse:
        """Get a structured JSON response matching ``schema``.

        NOTE: when an explicit ``messages`` list is supplied, ``prompt`` is
        ignored (pre-existing behavior, kept for compatibility).
        """
        if messages is None:
            messages = [{"role": "user", "content": prompt}]

        resp = await self._client.chat_structured(
            self._org,
            messages,
            model=model,
            system=system,
            response_schema=schema,
        )
        return StructuredResponse.from_structured_response(resp)

    # ── Content Extraction (Pillar 2) ────────────────────────────────

    async def extract_file(
        self,
        file_data: bytes,
        filename: str,
        *,
        content_type: str = "application/octet-stream",
        chunk_size: int = 256,
        chunk_overlap: int = 32,
    ) -> Dict[str, Any]:
        """Upload a file and get extracted text + chunks back (no storage)."""
        return await self._client.extract_file(
            self._org,
            file_data,
            filename,
            content_type=content_type,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
        )

    # ── Vector Space (Pillar 3) ──────────────────────────────────────

    async def list_collections(self) -> CollectionListResponse:
        """List all collections in the active org."""
        return await self._client.list_colls(self._org)

    async def create_collection(
        self,
        name: str,
        *,
        embedding_model: str = "all-MiniLM-L6-v2",
        distance_metric: str = "cosine",
    ) -> CollectionResponse:
        """Create a new vector collection."""
        body = CollectionCreateRequest(
            name=name,
            embedding_model=embedding_model,
            distance_metric=distance_metric,
        )
        return await self._client.create_coll(self._org, body=body)

    async def get_collection(self, collection_id: str | UUID) -> CollectionResponse:
        """Fetch a single collection by id."""
        return await self._client.get_coll(self._org, UUID(str(collection_id)))

    async def delete_collection(self, collection_id: str | UUID) -> None:
        """Delete a collection and its documents."""
        await self._client.delete_coll(self._org, UUID(str(collection_id)))

    async def upload_document(
        self,
        collection_id: str | UUID,
        file_data: bytes,
        filename: str,
        *,
        content_type: str = "application/octet-stream",
    ) -> Dict[str, Any]:
        """Upload a document into a collection for vector indexing."""
        return await self._client.upload_document(
            self._org,
            # Coerce to UUID like the sibling document/collection methods do
            # (previously this was the only call site passing the raw value).
            UUID(str(collection_id)),
            file_data,
            filename,
            content_type=content_type,
        )

    async def list_documents(self, collection_id: str | UUID) -> DocumentListResponse:
        """List documents in a collection."""
        return await self._client.list_docs(self._org, UUID(str(collection_id)))

    async def get_document(self, collection_id: str | UUID, document_id: str | UUID) -> DocumentResponse:
        """Fetch a single document by id."""
        return await self._client.get_doc(self._org, UUID(str(collection_id)), UUID(str(document_id)))

    async def delete_document(self, collection_id: str | UUID, document_id: str | UUID) -> None:
        """Delete a document from a collection."""
        await self._client.delete_doc(self._org, UUID(str(collection_id)), UUID(str(document_id)))

    async def search(
        self,
        collection_id: str | UUID,
        query: str,
        *,
        top_k: int = 10,
        rerank: bool = False,
        rerank_top_k: int = 5,
    ) -> SearchResponse:
        """Search a single collection."""
        body = SearchRequest(query=query, top_k=top_k, rerank=rerank, rerank_top_k=rerank_top_k)
        return await self._client.search_single(self._org, UUID(str(collection_id)), body=body)

    async def search_cross(
        self,
        query: str,
        *,
        top_k: int = 10,
        collection_ids: Optional[List[str]] = None,
        rerank: bool = False,
        rerank_top_k: int = 5,
    ) -> SearchResponse:
        """Search across multiple (or, when ``collection_ids`` is None/empty,
        all) org collections."""
        body = CrossCollectionSearchRequest(
            query=query,
            top_k=top_k,
            # None signals "all collections" to the backend.
            collection_ids=[UUID(c) for c in collection_ids] if collection_ids else None,
            rerank=rerank,
            rerank_top_k=rerank_top_k,
        )
        return await self._client.search_cross(self._org, body=body)

    # ── Embeddings ───────────────────────────────────────────────────

    async def embed(
        self,
        texts: List[str],
        *,
        model: str = "all-MiniLM-L6-v2",
        normalize: bool = True,
    ) -> List[List[float]]:
        """Generate embeddings for a list of texts."""
        body = EmbedRequest(input=texts, model=model, normalize=normalize)
        result = await self._client.embed(self._org, body=body)
        return [item.embedding for item in result.data]

    async def rerank(
        self,
        query: str,
        documents: List[str],
        *,
        model: str = "all-MiniLM-L6-v2",
        top_k: Optional[int] = None,
    ) -> List[Dict[str, Any]]:
        """Rerank documents by relevance to a query."""
        body = RerankRequest(query=query, documents=documents, model=model, top_k=top_k)
        result = await self._client.rerank(self._org, body=body)
        return [item.model_dump() for item in result.data]

    # ── Platform ─────────────────────────────────────────────────────

    async def get_usage(self, **params: Any) -> UsageAggregation:
        """Get aggregated usage statistics for the active org."""
        return await self._client.usage_stats(self._org, **params)

    async def get_usage_log(self, **params: Any) -> UsageLogResponse:
        """Get the raw usage log for the active org."""
        return await self._client.usage_log(self._org, **params)

    async def models(self) -> List[AvailableModel]:
        """List the models available through the API."""
        resp = await self._client.list_models()
        return [AvailableModel(id=m.id, description=m.description) for m in resp.models]

    async def health(self) -> HealthResponse:
        """Query the service health endpoint."""
        return await self._client.health()

    async def is_healthy(self) -> bool:
        """Best-effort health probe: True iff the health call succeeds.

        Deliberately broad catch — any failure (network, auth, parse) means
        "not healthy" rather than an exception for the caller.
        """
        try:
            await self._client.health()
            return True
        except Exception:
            return False
@@ -0,0 +1,56 @@
1
+ # Copyright (C) 2026 FORKTEX S.R.L.
2
+ #
3
+ # SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-ForkTex-Commercial
4
+ #
5
+ # This file is part of forktex-intelligence.
6
+ #
7
+ # For commercial licensing -- including use in proprietary products, SaaS
8
+ # deployments, or any context where AGPL obligations cannot be met -- you
9
+ # MUST obtain a commercial license from FORKTEX S.R.L. (info@forktex.com).
10
+ #
11
+ # This program is free software: you can redistribute it and/or modify
12
+ # it under the terms of the GNU Affero General Public License as published by
13
+ # the Free Software Foundation, either version 3 of the License, or
14
+ # (at your option) any later version.
15
+ #
16
+ # This program is distributed in the hope that it will be useful,
17
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
18
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
+ # GNU Affero General Public License for more details.
20
+ #
21
+ # You should have received a copy of the GNU Affero General Public License
22
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
23
+
24
+ """forktex_intelligence.client — HTTP client and generated models for the Intelligence API."""
25
+
26
+ from forktex_intelligence.client.client import (
27
+ ForktexIntelligenceClient,
28
+ IntelligenceAPIError,
29
+ )
30
+ from forktex_intelligence.client.generated import (
31
+ ChatMessage,
32
+ ChatRequest,
33
+ ChatResponse,
34
+ HealthResponse,
35
+ ModelInfo,
36
+ ModelsResponse,
37
+ StructuredChatRequest,
38
+ StructuredChatResponse,
39
+ ToolCallInfo,
40
+ UsageInfo,
41
+ )
42
+
43
+ __all__ = [
44
+ "ForktexIntelligenceClient",
45
+ "IntelligenceAPIError",
46
+ "ChatMessage",
47
+ "ChatRequest",
48
+ "ChatResponse",
49
+ "HealthResponse",
50
+ "ModelInfo",
51
+ "ModelsResponse",
52
+ "StructuredChatRequest",
53
+ "StructuredChatResponse",
54
+ "ToolCallInfo",
55
+ "UsageInfo",
56
+ ]