nous-genai 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. nous/__init__.py +3 -0
  2. nous/genai/__init__.py +56 -0
  3. nous/genai/__main__.py +3 -0
  4. nous/genai/_internal/__init__.py +1 -0
  5. nous/genai/_internal/capability_rules.py +476 -0
  6. nous/genai/_internal/config.py +102 -0
  7. nous/genai/_internal/errors.py +63 -0
  8. nous/genai/_internal/http.py +951 -0
  9. nous/genai/_internal/json_schema.py +54 -0
  10. nous/genai/cli.py +1316 -0
  11. nous/genai/client.py +719 -0
  12. nous/genai/mcp_cli.py +275 -0
  13. nous/genai/mcp_server.py +1080 -0
  14. nous/genai/providers/__init__.py +15 -0
  15. nous/genai/providers/aliyun.py +535 -0
  16. nous/genai/providers/anthropic.py +483 -0
  17. nous/genai/providers/gemini.py +1606 -0
  18. nous/genai/providers/openai.py +1909 -0
  19. nous/genai/providers/tuzi.py +1158 -0
  20. nous/genai/providers/volcengine.py +273 -0
  21. nous/genai/reference/__init__.py +17 -0
  22. nous/genai/reference/catalog.py +206 -0
  23. nous/genai/reference/mappings.py +467 -0
  24. nous/genai/reference/mode_overrides.py +26 -0
  25. nous/genai/reference/model_catalog.py +82 -0
  26. nous/genai/reference/model_catalog_data/__init__.py +1 -0
  27. nous/genai/reference/model_catalog_data/aliyun.py +98 -0
  28. nous/genai/reference/model_catalog_data/anthropic.py +10 -0
  29. nous/genai/reference/model_catalog_data/google.py +45 -0
  30. nous/genai/reference/model_catalog_data/openai.py +44 -0
  31. nous/genai/reference/model_catalog_data/tuzi_anthropic.py +21 -0
  32. nous/genai/reference/model_catalog_data/tuzi_google.py +19 -0
  33. nous/genai/reference/model_catalog_data/tuzi_openai.py +75 -0
  34. nous/genai/reference/model_catalog_data/tuzi_web.py +136 -0
  35. nous/genai/reference/model_catalog_data/volcengine.py +107 -0
  36. nous/genai/tools/__init__.py +13 -0
  37. nous/genai/tools/output_parser.py +119 -0
  38. nous/genai/types.py +416 -0
  39. nous/py.typed +1 -0
  40. nous_genai-0.1.0.dist-info/METADATA +200 -0
  41. nous_genai-0.1.0.dist-info/RECORD +45 -0
  42. nous_genai-0.1.0.dist-info/WHEEL +5 -0
  43. nous_genai-0.1.0.dist-info/entry_points.txt +4 -0
  44. nous_genai-0.1.0.dist-info/licenses/LICENSE +190 -0
  45. nous_genai-0.1.0.dist-info/top_level.txt +1 -0
nous/genai/types.py ADDED
@@ -0,0 +1,416 @@
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import os
5
+ from dataclasses import dataclass, field
6
+ from pathlib import Path
7
+ from typing import Any, Literal, cast
8
+
9
+ from ._internal.errors import ErrorInfo, invalid_request_error, not_supported_error
10
+
11
+ Role = Literal["system", "user", "assistant", "tool"]
12
+ PartType = Literal[
13
+ "text",
14
+ "image",
15
+ "audio",
16
+ "video",
17
+ "embedding",
18
+ "file",
19
+ "tool_call",
20
+ "tool_result",
21
+ ]
22
+
23
+ Modality = Literal["text", "image", "audio", "video", "embedding"]
24
+ Status = Literal["completed", "running", "failed", "canceled"]
25
+ ReasoningEffort = Literal["none", "minimal", "low", "medium", "high", "xhigh"]
26
+ ToolChoiceMode = Literal["none", "auto", "required", "tool"]
27
+
28
+
29
@dataclass(frozen=True, slots=True)
class ReasoningSpec:
    """Optional reasoning configuration carried on GenerateParams."""

    # None means "use the provider's default effort".
    effort: ReasoningEffort | None = None
32
+
33
+
34
# Runtime mirror of the ReasoningEffort Literal, for membership checks.
_REASONING_EFFORT_VALUES = {"none", "minimal", "low", "medium", "high", "xhigh"}
35
+
36
+
37
def normalize_reasoning_effort(value: object) -> ReasoningEffort:
    """Coerce a user-supplied effort value to its canonical lowercase form.

    Accepts any-cased, whitespace-padded strings. Raises
    invalid_request_error for non-strings, blank strings, and unknown levels.
    """
    canonical = value.strip().lower() if isinstance(value, str) else ""
    if not canonical:
        raise invalid_request_error("reasoning.effort must be a non-empty string")
    if canonical in _REASONING_EFFORT_VALUES:
        return cast(ReasoningEffort, canonical)
    raise invalid_request_error(f"unknown reasoning.effort: {canonical}")
44
+
45
+
46
+ @dataclass(frozen=True, slots=True)
47
+ class PartSourceBytes:
48
+ kind: Literal["bytes"] = "bytes"
49
+ data: bytes | str = b""
50
+ encoding: Literal["base64"] | None = None
51
+
52
+ def __post_init__(self) -> None:
53
+ if self.encoding is None:
54
+ if isinstance(self.data, bytearray):
55
+ object.__setattr__(self, "data", bytes(self.data))
56
+ return
57
+ if not isinstance(self.data, bytes):
58
+ raise ValueError(
59
+ "PartSourceBytes.data must be bytes when encoding is None"
60
+ )
61
+ return
62
+
63
+ if self.encoding != "base64":
64
+ raise ValueError(f"unknown PartSourceBytes.encoding: {self.encoding}")
65
+ if not isinstance(self.data, str):
66
+ raise ValueError(
67
+ "PartSourceBytes.data must be str when encoding is 'base64'"
68
+ )
69
+
70
+
71
@dataclass(frozen=True, slots=True)
class PartSourcePath:
    """Binary payload referenced by a local filesystem path."""

    kind: Literal["path"] = "path"
    path: str = ""
75
+
76
+
77
@dataclass(frozen=True, slots=True)
class PartSourceUrl:
    """Binary payload referenced by a URL (fetched by the client/provider)."""

    kind: Literal["url"] = "url"
    url: str = ""
81
+
82
+
83
@dataclass(frozen=True, slots=True)
class PartSourceRef:
    """Binary payload referenced by a provider-managed file/object id."""

    kind: Literal["ref"] = "ref"
    # Provider key the id belongs to (e.g. "openai") — NOTE(review): presumed
    # to match the provider prefix used in GenerateRequest.model; confirm.
    provider: str = ""
    id: str = ""
88
+
89
+
90
# Tagged union of the supported binary source kinds (discriminated by `kind`).
PartSource = PartSourceBytes | PartSourcePath | PartSourceUrl | PartSourceRef
91
+
92
+
93
@dataclass(frozen=True, slots=True)
class Part:
    """One piece of Message content, discriminated by `type`.

    Exactly one payload style is legal per type (enforced in __post_init__):

    - "text": `text` only.
    - "embedding": `embedding` (a list of numbers) only.
    - "image"/"audio"/"video"/"file": `source` only; for the three media
      types a supplied mime_type must start with "<type>/".
    - "tool_call"/"tool_result": payload carried entirely in `meta`.
    """

    type: PartType
    mime_type: str | None = None
    source: PartSource | None = None
    text: str | None = None
    embedding: list[float] | None = None
    # Open metadata; tool parts store name/arguments/result/tool_call_id here.
    meta: dict[str, Any] = field(default_factory=dict)

    def __post_init__(self) -> None:
        """Validate the per-type invariants; raises ValueError when violated."""
        # Type-independent sanity checks.
        if not isinstance(self.meta, dict):
            raise ValueError("Part.meta must be an object")
        if self.mime_type is not None and not isinstance(self.mime_type, str):
            raise ValueError("Part.mime_type must be a string")

        if self.type == "text":
            if not isinstance(self.text, str):
                raise ValueError("text Part requires text")
            if self.source is not None:
                raise ValueError("text Part cannot have source")
            if self.embedding is not None:
                raise ValueError("text Part cannot have embedding")
            return

        if self.type == "embedding":
            # A vector must be a list whose elements are all int or float.
            if not isinstance(self.embedding, list) or not all(
                isinstance(x, (int, float)) for x in self.embedding
            ):
                raise ValueError("embedding Part requires embedding: list[number]")
            if self.source is not None:
                raise ValueError("embedding Part cannot have source")
            if self.text is not None:
                raise ValueError("embedding Part cannot have text")
            return

        if self.type in {"image", "audio", "video", "file"}:
            if self.source is None:
                raise ValueError(f"{self.type} Part requires source")
            if not isinstance(
                self.source,
                (PartSourceBytes, PartSourcePath, PartSourceUrl, PartSourceRef),
            ):
                raise ValueError("Part.source must be a PartSource object")
            if self.text is not None:
                raise ValueError(f"{self.type} Part cannot have text")
            if self.embedding is not None:
                raise ValueError(f"{self.type} Part cannot have embedding")
            # Media parts (not "file") must carry a matching mime_type prefix,
            # but only when a mime_type was supplied at all.
            if self.mime_type and self.type in {"image", "audio", "video"}:
                prefix = f"{self.type}/"
                if not self.mime_type.startswith(prefix):
                    raise ValueError(
                        f"{self.type} Part mime_type must start with {prefix!r}"
                    )
            return

        if self.type in {"tool_call", "tool_result"}:
            # Tool payloads live in `meta`; the other payload fields stay unset.
            if self.source is not None:
                raise ValueError(f"{self.type} Part cannot have source")
            if self.text is not None:
                raise ValueError(f"{self.type} Part cannot have text")
            if self.embedding is not None:
                raise ValueError(f"{self.type} Part cannot have embedding")
            return

        raise ValueError(f"unknown Part.type: {self.type}")

    @staticmethod
    def from_text(text: str) -> "Part":
        """Build a text Part."""
        return Part(type="text", text=text)

    @staticmethod
    def tool_call(
        *, name: str, arguments: Any, tool_call_id: str | None = None
    ) -> "Part":
        """Build a tool_call Part; arguments are stored verbatim in meta."""
        meta: dict[str, Any] = {"name": name, "arguments": arguments}
        if tool_call_id is not None:
            meta["tool_call_id"] = tool_call_id
        return Part(type="tool_call", meta=meta)

    @staticmethod
    def tool_result(
        *,
        name: str,
        result: Any,
        tool_call_id: str | None = None,
        is_error: bool | None = None,
    ) -> "Part":
        """Build a tool_result Part; is_error is coerced to bool when given."""
        meta: dict[str, Any] = {"name": name, "result": result}
        if tool_call_id is not None:
            meta["tool_call_id"] = tool_call_id
        if is_error is not None:
            meta["is_error"] = bool(is_error)
        return Part(type="tool_result", meta=meta)

    @staticmethod
    def embedding_part(vector: list[float]) -> "Part":
        """Build an embedding Part from a numeric vector."""
        return Part(type="embedding", embedding=vector)

    def require_text(self) -> str:
        """Return `text`, raising invalid_request_error for non-text parts."""
        if self.type != "text" or self.text is None:
            raise invalid_request_error("Part is not text")
        return self.text

    def require_source(self) -> PartSource:
        """Return `source`, raising invalid_request_error when absent."""
        if self.source is None:
            raise invalid_request_error("Part has no source")
        return self.source
200
+
201
+
202
@dataclass(frozen=True, slots=True)
class Message:
    """A single chat message: a role plus an ordered list of Parts."""

    role: Role
    content: list[Part]
206
+
207
+
208
@dataclass(frozen=True, slots=True)
class OutputTextSpec:
    """Text-output settings."""

    format: Literal["text", "json"] = "text"
    # JSON Schema describing the expected structured output — presumably
    # only consulted when format == "json"; confirm in provider adapters.
    json_schema: Any | None = None
    max_output_tokens: int | None = None
213
+
214
+
215
@dataclass(frozen=True, slots=True)
class OutputImageSpec:
    """Image-output settings; None leaves the provider default."""

    # Number of images to generate.
    n: int | None = None
    # Provider-specific size string.
    size: str | None = None
    # Provider-specific output format identifier.
    format: str | None = None
220
+
221
+
222
@dataclass(frozen=True, slots=True)
class OutputAudioSpec:
    """Audio-output settings; None leaves the provider default."""

    voice: str | None = None
    format: str | None = None
    language: str | None = None
227
+
228
+
229
@dataclass(frozen=True, slots=True)
class OutputVideoSpec:
    """Video-output settings; None leaves the provider default."""

    duration_sec: int | None = None
    # e.g. "16:9" — NOTE(review): exact accepted values are provider-specific.
    aspect_ratio: str | None = None
    fps: int | None = None
    format: str | None = None
235
+
236
+
237
@dataclass(frozen=True, slots=True)
class OutputEmbeddingSpec:
    """Embedding-output settings."""

    # Requested vector dimensionality; None leaves the model default.
    dimensions: int | None = None
240
+
241
+
242
@dataclass(frozen=True, slots=True)
class Tool:
    """
    Minimal function tool declaration (provider-agnostic).

    - `parameters` is a JSON Schema object describing the function arguments.
    - Providers may support only a subset of JSON Schema/OpenAPI Schema.
    """

    # Name the model uses to invoke this tool.
    name: str
    description: str | None = None
    parameters: dict[str, Any] | None = None
    # Request strict schema adherence where the provider supports it.
    strict: bool | None = None
255
+
256
+
257
@dataclass(frozen=True, slots=True)
class ToolChoice:
    """Tool-selection policy; mode "tool" pins a single named tool."""

    mode: ToolChoiceMode = "auto"
    name: str | None = None

    def normalized(self) -> "ToolChoice":
        """Return a validated copy: mode lowercased, name stripped.

        Raises invalid_request_error for unknown modes, for a missing name
        when mode is "tool", and for a name given with any other mode.
        """
        clean_mode = self.mode.strip().lower()
        if clean_mode not in {"none", "auto", "required", "tool"}:
            raise invalid_request_error(f"unknown tool_choice.mode: {self.mode}")

        clean_name: str | None = None
        if isinstance(self.name, str):
            clean_name = self.name.strip()

        if clean_mode == "tool":
            if not clean_name:
                raise invalid_request_error("tool_choice.name required when mode='tool'")
        elif clean_name is not None:
            raise invalid_request_error(
                "tool_choice.name only allowed when mode='tool'"
            )
        return ToolChoice(mode=cast(ToolChoiceMode, clean_mode), name=clean_name)
274
+
275
+
276
@dataclass(frozen=True, slots=True)
class OutputSpec:
    """Requested output modalities plus optional per-modality settings."""

    modalities: list[Modality]
    text: OutputTextSpec | None = None
    image: OutputImageSpec | None = None
    audio: OutputAudioSpec | None = None
    video: OutputVideoSpec | None = None
    embedding: OutputEmbeddingSpec | None = None
284
+
285
+
286
@dataclass(frozen=True, slots=True)
class GenerateParams:
    """Common sampling/transport knobs; None leaves the provider default."""

    temperature: float | None = None
    top_p: float | None = None
    seed: int | None = None
    max_output_tokens: int | None = None
    # Stop sequences that end generation early.
    stop: list[str] | None = None
    timeout_ms: int | None = None
    # Key for safe retries of the same logical request.
    idempotency_key: str | None = None
    reasoning: ReasoningSpec | None = None
296
+
297
+
298
@dataclass(frozen=True, slots=True)
class GenerateRequest:
    """A single provider-agnostic generation request."""

    model: str = field(
        metadata={
            "description": 'Model string in the form "{provider}:{model_id}" (e.g. "openai:gpt-4o-mini")',
            "pattern": r"^[^\s:]+:[^\s]+$",
            "examples": ["openai:gpt-4o-mini"],
        }
    )
    input: list[Message]
    output: OutputSpec
    params: GenerateParams = field(default_factory=GenerateParams)
    # When False, providers that support jobs return a JobInfo handle instead
    # of blocking for the result.
    wait: bool = True
    tools: list[Tool] | None = None
    tool_choice: ToolChoice | None = None
    provider_options: dict[str, Any] = field(default_factory=dict)

    def provider(self) -> str:
        """Return the provider prefix of `model` (text before the first ':')."""
        head, sep, _ = self.model.partition(":")
        if not sep:
            raise invalid_request_error('model must be "{provider}:{model_id}"')
        return head

    def model_id(self) -> str:
        """Return the provider-specific id of `model` (text after the first ':')."""
        _, sep, tail = self.model.partition(":")
        if not sep:
            raise invalid_request_error('model must be "{provider}:{model_id}"')
        return tail
324
+
325
+
326
@dataclass(frozen=True, slots=True)
class Usage:
    """Provider-reported usage/accounting; unreported fields stay None."""

    input_tokens: int | None = None
    output_tokens: int | None = None
    total_tokens: int | None = None
    # Billed seconds — NOTE(review): unit semantics are provider-specific.
    seconds: float | None = None
    image_count: int | None = None
    video_seconds: float | None = None
    # Best-effort cost estimate; currency presumably USD — confirm per provider.
    cost_estimate: float | None = None
335
+
336
+
337
@dataclass(frozen=True, slots=True)
class JobInfo:
    """Handle for an asynchronous generation job."""

    job_id: str
    # Suggested client poll interval in milliseconds.
    poll_after_ms: int = 1_000
    # Expiry timestamp string; format is provider-specific — TODO confirm.
    expires_at: str | None = None
342
+
343
+
344
@dataclass(frozen=True, slots=True)
class GenerateResponse:
    """Result of a generate call: identity, status, and payload."""

    id: str
    provider: str
    model: str
    status: Status
    # Assistant output messages; presumably empty while a job is running.
    output: list[Message] = field(default_factory=list)
    usage: Usage | None = None
    # Present when the request was dispatched as an async job.
    job: JobInfo | None = None
    # Error details — presumably set when status == "failed"; confirm in client.
    error: ErrorInfo | None = None
354
+
355
+
356
@dataclass(frozen=True, slots=True)
class GenerateEvent:
    """A single streaming event: a type tag plus an open-ended payload."""

    # Event kind identifier, e.g. "output.text.delta".
    type: str
    data: dict[str, Any] = field(default_factory=dict)
360
+
361
+
362
@dataclass(frozen=True, slots=True)
class Capability:
    """Static description of what a provider/model combination supports."""

    input_modalities: set[Modality]
    output_modalities: set[Modality]
    supports_stream: bool
    supports_job: bool
    supports_tools: bool = False
    supports_json_schema: bool = False
370
+
371
+
372
def detect_mime_type(path: str) -> str | None:
    """Guess a MIME type from the file extension; None when unrecognized.

    Case-insensitive; covers the small set of image/audio/video formats
    the SDK handles inline.
    """
    known = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".webp": "image/webp",
        ".wav": "audio/wav",
        ".mp3": "audio/mpeg",
        ".m4a": "audio/mp4",
        ".mp4": "video/mp4",
        ".mov": "video/quicktime",
    }
    return known.get(Path(path).suffix.lower())
391
+
392
+
393
def file_to_bytes(path: str, max_bytes: int) -> bytes:
    """Read a local file fully into memory, enforcing a size ceiling.

    Fix: the original called `os.stat(path)` and then separately opened the
    path, so the size check and the read could observe different files
    (stat/open TOCTOU race). We now open once and size the open descriptor
    with `os.fstat`, guaranteeing both operations see the same file.

    Raises:
        not_supported_error: when the file exceeds `max_bytes`.
        OSError: when the file cannot be opened or read.
    """
    with open(path, "rb") as f:
        size = os.fstat(f.fileno()).st_size
        if size > max_bytes:
            raise not_supported_error(
                f"file too large for inline bytes ({size} > {max_bytes}); use url/ref instead"
            )
        return f.read()
401
+
402
+
403
def bytes_to_base64(data: bytes) -> str:
    """Encode raw bytes as an ASCII base64 string."""
    encoded = base64.b64encode(data)
    return encoded.decode("ascii")
405
+
406
+
407
def sniff_image_mime_type(data: bytes) -> str | None:
    """Identify common image payloads (png/jpeg/webp/gif) by magic bytes.

    Returns None when no known signature matches; short inputs simply
    fail every comparison.
    """
    prefix_signatures = (
        (b"\x89PNG\r\n\x1a\n", "image/png"),
        (b"\xff\xd8\xff", "image/jpeg"),
    )
    for magic, mime in prefix_signatures:
        if data[: len(magic)] == magic:
            return mime
    # WEBP: RIFF container with the "WEBP" fourcc at offset 8.
    if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
        return "image/webp"
    # GIF: two historical version headers.
    if data[:6] in (b"GIF87a", b"GIF89a"):
        return "image/gif"
    return None
nous/py.typed ADDED
@@ -0,0 +1 @@
1
+
@@ -0,0 +1,200 @@
1
+ Metadata-Version: 2.4
2
+ Name: nous-genai
3
+ Version: 0.1.0
4
+ Summary: Single-endpoint GenAI SDK (multi-provider, multimodal)
5
+ License-Expression: Apache-2.0
6
+ Project-URL: Homepage, https://github.com/gravtice/nous-genai
7
+ Project-URL: Issues, https://github.com/gravtice/nous-genai/issues
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: Programming Language :: Python :: 3.12
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.12
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+ Requires-Dist: mcp>=1.25.0
15
+ Requires-Dist: uvicorn>=0.30.0
16
+ Dynamic: license-file
17
+
18
+ # nous-genai
19
+
20
+ ![CI](https://github.com/gravtice/nous-genai/actions/workflows/ci.yml/badge.svg)
21
+ ![Python](https://img.shields.io/badge/python-≥3.12-blue)
22
+ ![License](https://img.shields.io/badge/license-Apache--2.0-green)
23
+
24
+ 中文文档:`readme_zh.md`
25
+
26
+ One interface for calling multimodal models; four ways to use: Skill, MCP, CLI, SDK.
27
+
28
+ ## Features
29
+
30
+ - **Multi-provider**: OpenAI, Google (Gemini), Anthropic (Claude), Aliyun (DashScope/Bailian), Volcengine (Doubao/Ark), Tuzi
31
+ - **Multimodal**: text/image/audio/video input and output (model-dependent)
32
+ - **Unified API**: a single `Client.generate()` for all providers
33
+ - **Streaming**: `generate_stream()` for incremental output
34
+ - **Tool calling**: function tools (model/provider-dependent)
35
+ - **JSON Schema output**: structured output (model/provider-dependent)
36
+ - **MCP Server**: Streamable HTTP and SSE transport
37
+ - **Security**: SSRF protection, DNS pinning, download limits, Bearer token auth (MCP)
38
+
39
+ ## Installation
40
+
41
+ ```bash
42
+ pip install nous-genai
43
+ ```
44
+
45
+ For development:
46
+
47
+ ```bash
48
+ pip install -e .
49
+ # or (recommended)
50
+ uv sync
51
+ ```
52
+
53
+ ## Configuration (Zero-parameter)
54
+
55
+ SDK/CLI/MCP loads env files automatically with priority (high → low):
56
+
57
+ `.env.local > .env.production > .env.development > .env.test`
58
+
59
+ Process env vars override `.env.*` (the loader uses `os.environ.setdefault()`).
60
+
61
+ Minimal `.env.local` (OpenAI only):
62
+
63
+ ```bash
64
+ NOUS_GENAI_OPENAI_API_KEY=...
65
+ NOUS_GENAI_TIMEOUT_MS=120000
66
+ ```
67
+
68
+ See `docs/CONFIGURATION.md` or copy `.env.example` to `.env.local`.
69
+
70
+ ## Quickstart
71
+
72
+ ### Text generation
73
+
74
+ ```python
75
+ from nous.genai import Client, GenerateRequest, Message, OutputSpec, Part
76
+
77
+ client = Client()
78
+ resp = client.generate(
79
+ GenerateRequest(
80
+ model="openai:gpt-4o-mini",
81
+ input=[Message(role="user", content=[Part.from_text("Hello!")])],
82
+ output=OutputSpec(modalities=["text"]),
83
+ )
84
+ )
85
+ print(resp.output[0].content[0].text)
86
+ ```
87
+
88
+ ### Streaming
89
+
90
+ ```python
91
+ import sys
92
+ from nous.genai import Client, GenerateRequest, Message, OutputSpec, Part
93
+
94
+ client = Client()
95
+ req = GenerateRequest(
96
+ model="openai:gpt-4o-mini",
97
+ input=[Message(role="user", content=[Part.from_text("Tell me a joke")])],
98
+ output=OutputSpec(modalities=["text"]),
99
+ )
100
+ for ev in client.generate_stream(req):
101
+ if ev.type == "output.text.delta":
102
+ sys.stdout.write(str(ev.data.get("delta", "")))
103
+ sys.stdout.flush()
104
+ print()
105
+ ```
106
+
107
+ ### Image understanding
108
+
109
+ ```python
110
+ from nous.genai import Client, GenerateRequest, Message, OutputSpec, Part, PartSourcePath
111
+ from nous.genai.types import detect_mime_type
112
+
113
+ path = "./cat.png"
114
+ mime = detect_mime_type(path) or "application/octet-stream"
115
+
116
+ client = Client()
117
+ resp = client.generate(
118
+ GenerateRequest(
119
+ model="openai:gpt-4o-mini",
120
+ input=[
121
+ Message(
122
+ role="user",
123
+ content=[
124
+ Part.from_text("Describe this image"),
125
+ Part(type="image", mime_type=mime, source=PartSourcePath(path=path)),
126
+ ],
127
+ )
128
+ ],
129
+ output=OutputSpec(modalities=["text"]),
130
+ )
131
+ )
132
+ print(resp.output[0].content[0].text)
133
+ ```
134
+
135
+ ### List available models
136
+
137
+ ```python
138
+ from nous.genai import Client
139
+
140
+ client = Client()
141
+ print(client.list_all_available_models())
142
+ ```
143
+
144
+ ## Providers
145
+
146
+ | Provider | Notes |
147
+ |----------|------|
148
+ | `openai` | GPT-4, DALL·E, Whisper, TTS |
149
+ | `google` | Gemini, Imagen, Veo |
150
+ | `anthropic` | Claude |
151
+ | `aliyun` | DashScope / Bailian (OpenAI-compatible + AIGC) |
152
+ | `volcengine` | Ark / Doubao (OpenAI-compatible) |
153
+ | `tuzi-web` / `tuzi-openai` / `tuzi-google` / `tuzi-anthropic` | Tuzi adapters |
154
+
155
+ ## Binary output
156
+
157
+ Binary `Part.source` is a tagged union:
158
+
159
+ - **Input**: `bytes/path/base64/url/ref` (MCP forbids `bytes/path`)
160
+ - **Output**: `url/base64/ref` (SDK does not auto-download to disk)
161
+
162
+ If you need to write to file, see `examples/demo.py` (`_write_binary()`), or reuse `Client.download_to_file()` for the built-in safe downloader.
163
+
164
+ ## CLI & MCP Server
165
+
166
+ ```bash
167
+ # CLI
168
+ uv run genai --model openai:gpt-4o-mini --prompt "Hello"
169
+ uv run genai model available --all
170
+
171
+ # MCP Server
172
+ uv run genai-mcp-server # Streamable HTTP: /mcp, SSE: /sse
173
+ uv run genai-mcp-cli tools # Debug CLI
174
+ ```
175
+
176
+ ## Security
177
+
178
+ - **SSRF protection**: rejects private/loopback URLs by default (`NOUS_GENAI_ALLOW_PRIVATE_URLS=1` to allow)
179
+ - **DNS pinning**: mitigates DNS rebinding
180
+ - **Download limit**: 128MiB per URL by default (`NOUS_GENAI_URL_DOWNLOAD_MAX_BYTES`)
181
+ - **Bearer token auth**: for MCP server
182
+ - **Token rules**: fine-grained access control
183
+
184
+ ## Testing
185
+
186
+ ```bash
187
+ uv run pytest tests/ -v
188
+ ```
189
+
190
+ ## Docs
191
+
192
+ - [Configuration](docs/CONFIGURATION.md)
193
+ - [Architecture](docs/ARCHITECTURE_DESIGN.md)
194
+ - [MCP Server Security Review](docs/MCP_SERVER_CLI_SECURITY_REVIEW.md)
195
+ - [Contributing](CONTRIBUTING.md)
196
+ - [Changelog](CHANGELOG.md)
197
+
198
+ ## License
199
+
200
+ [Apache-2.0](LICENSE)
@@ -0,0 +1,45 @@
1
+ nous/__init__.py,sha256=Hh6QnLL0rRIVOTu33-Yt8GxHSNGfckYHTIsNP5prvEE,32
2
+ nous/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
3
+ nous/genai/__init__.py,sha256=VY43n2cxXUP8ozW5vkW3P_WyEkGWYQY17NVHJHdRY24,1037
4
+ nous/genai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
5
+ nous/genai/cli.py,sha256=f_e1Ss1ehVw-lfVaF9wEuquP5e0Fib2FfSnr0zmnTrQ,42733
6
+ nous/genai/client.py,sha256=uKwttxMSkXCa1w8i7g3Mqs0L6ZCZZvMk7HdTXPfzoV0,26249
7
+ nous/genai/mcp_cli.py,sha256=bhAhL4r9x3Xkzb_7NypfUELAHqdIiC23LQbSAGZIPAE,10356
8
+ nous/genai/mcp_server.py,sha256=1NlJnAIV4LhMUDJ4BmH60KU7Jrx-bfivgIbcIaofPVU,40507
9
+ nous/genai/types.py,sha256=g589VUakFeMEIN3-AwHHKP6PCcMDlD6svKZcrBeu0Ts,13011
10
+ nous/genai/_internal/__init__.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
11
+ nous/genai/_internal/capability_rules.py,sha256=uCYeBDbJZx-S4Bib66oi_v3nZXS7dG84X0eEDifMjbY,13299
12
+ nous/genai/_internal/config.py,sha256=JfT8uhU_QSNfPEKPU614pB_jRJbMBnVkbHnOm7UeYoc,3243
13
+ nous/genai/_internal/errors.py,sha256=JZ2GIdFjae1I9ar7BCXmy01VenTrgvn4WfhbFNVF4i4,1639
14
+ nous/genai/_internal/http.py,sha256=XcO13YOs28N-Tq8WKi0a4REIHgWQxXBgzPxDM_MBSo0,30236
15
+ nous/genai/_internal/json_schema.py,sha256=y2978JFfUS-COLgemff7qyCmbTqLV-Tg_LhhLwB4QTU,1591
16
+ nous/genai/providers/__init__.py,sha256=MDbuoAPxnULdSH-jg7L7rjjQlDM-DL8bFK4615wZQEY,360
17
+ nous/genai/providers/aliyun.py,sha256=FOAhQOlHmJC3Nm5cagPbf6JQ94XtM-TKFVCH51PPSLM,18977
18
+ nous/genai/providers/anthropic.py,sha256=MShEm1lAw3dpbCPcsRz9eqWHt_x20EBswe4cJ4-ZgHo,17920
19
+ nous/genai/providers/gemini.py,sha256=Kb-oe7zVYlm--EGla1yZ0BavUx11GkAO1d0dtyLSC3A,58320
20
+ nous/genai/providers/openai.py,sha256=HxhR3pABZMLPtECAq4rUwuDSsBM4bb7AF66AVYLjUIU,75303
21
+ nous/genai/providers/tuzi.py,sha256=arkU5UBtcwb4bNOEEMcNW9hww8NNzXxc1-d7TAv_2Hg,44761
22
+ nous/genai/providers/volcengine.py,sha256=hZVO9jIBLqKSqwZsysOoO_FtrLGGUvJVbfLDKzXITQc,9962
23
+ nous/genai/reference/__init__.py,sha256=nfMsgNMUF1CHw3qslPA5Zniy8rBds9eISBpkCshbbJE,409
24
+ nous/genai/reference/catalog.py,sha256=FeF3iabPNgNdWFR40ffAYTPHXfuEY4zT8G9asrlBD14,6633
25
+ nous/genai/reference/mappings.py,sha256=SH0u1vYiwAKvuCzdAX6D5LSbL3JMjotu8IE63BBqmv8,14384
26
+ nous/genai/reference/mode_overrides.py,sha256=xNzbzWwe9Z0WW2J6DO3rKDmcsy2Y3vNJ5PQHAbK40kU,760
27
+ nous/genai/reference/model_catalog.py,sha256=87wGuPF-pNmMiJ3ZzrJup98VTZSsBJTgheg2hU3-lIg,2240
28
+ nous/genai/reference/model_catalog_data/__init__.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
29
+ nous/genai/reference/model_catalog_data/aliyun.py,sha256=z8UwxdgUnpFUWRwrICwM_qLJCOWFgd7gXrlNBhxUiGw,2455
30
+ nous/genai/reference/model_catalog_data/anthropic.py,sha256=nG3el5OkDVeCFXaRHLj1VsGIRhjvvNGoNUolyLz3ZJc,198
31
+ nous/genai/reference/model_catalog_data/google.py,sha256=7LefJB9W7N0DvQIUZmAE4w3IEIlbSE0hkzhYRD1SpFc,1267
32
+ nous/genai/reference/model_catalog_data/openai.py,sha256=siwx-htP7kPtlcDPzl6ommol3Jpg6zwZfta3_MSgaUI,846
33
+ nous/genai/reference/model_catalog_data/tuzi_anthropic.py,sha256=8jLckkoU0zwjoaTsAcdK03yY0u6iikK9p32HnL55mOk,519
34
+ nous/genai/reference/model_catalog_data/tuzi_google.py,sha256=3S4mJfN1VtsNkTxbb42AeKc-oF84KRPTILA39QOpCew,540
35
+ nous/genai/reference/model_catalog_data/tuzi_openai.py,sha256=K_w802dimRfQmmlgc4P2eI6ohvqAsQt2GoWb0K57BX0,1632
36
+ nous/genai/reference/model_catalog_data/tuzi_web.py,sha256=wKYo9yndsi5yQA44HB2omQKz6esHvOqZKjMizOvFdbc,3081
37
+ nous/genai/reference/model_catalog_data/volcengine.py,sha256=GxDELF9z8sbYyOEu-SpYluCal0tTAfFFLkS9-vXt1hg,3657
38
+ nous/genai/tools/__init__.py,sha256=YWNhBKZ4t6diXI5SlCHA5OAIhLLOOA3vwQ6jClJsO2o,292
39
+ nous/genai/tools/output_parser.py,sha256=epHW-fLBatrjMTl2nMvh_vFWFEmP8Xj2pZ5fosaZxTc,3968
40
+ nous_genai-0.1.0.dist-info/licenses/LICENSE,sha256=yMLznCFyvxXAx7UUyAtq1gYFyzRsqGngYiaRF9R33Lg,10757
41
+ nous_genai-0.1.0.dist-info/METADATA,sha256=UmxjLMmPE1GA4FpMFbo2HrjdThAB_BDMg4fMnhr1AT4,5474
42
+ nous_genai-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
43
+ nous_genai-0.1.0.dist-info/entry_points.txt,sha256=j_cgAhxjPu7wtlo_zcdFLmz58dSxvnmSFM4f0A-ozEc,132
44
+ nous_genai-0.1.0.dist-info/top_level.txt,sha256=yUcst4OAspsyKhX0y5ENzFkJKzR_gislA5MykV1pVbk,5
45
+ nous_genai-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,4 @@
1
+ [console_scripts]
2
+ genai = nous.genai.cli:main
3
+ genai-mcp-cli = nous.genai.mcp_cli:main
4
+ genai-mcp-server = nous.genai.mcp_server:main