nous_genai-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nous/__init__.py +3 -0
- nous/genai/__init__.py +56 -0
- nous/genai/__main__.py +3 -0
- nous/genai/_internal/__init__.py +1 -0
- nous/genai/_internal/capability_rules.py +476 -0
- nous/genai/_internal/config.py +102 -0
- nous/genai/_internal/errors.py +63 -0
- nous/genai/_internal/http.py +951 -0
- nous/genai/_internal/json_schema.py +54 -0
- nous/genai/cli.py +1316 -0
- nous/genai/client.py +719 -0
- nous/genai/mcp_cli.py +275 -0
- nous/genai/mcp_server.py +1080 -0
- nous/genai/providers/__init__.py +15 -0
- nous/genai/providers/aliyun.py +535 -0
- nous/genai/providers/anthropic.py +483 -0
- nous/genai/providers/gemini.py +1606 -0
- nous/genai/providers/openai.py +1909 -0
- nous/genai/providers/tuzi.py +1158 -0
- nous/genai/providers/volcengine.py +273 -0
- nous/genai/reference/__init__.py +17 -0
- nous/genai/reference/catalog.py +206 -0
- nous/genai/reference/mappings.py +467 -0
- nous/genai/reference/mode_overrides.py +26 -0
- nous/genai/reference/model_catalog.py +82 -0
- nous/genai/reference/model_catalog_data/__init__.py +1 -0
- nous/genai/reference/model_catalog_data/aliyun.py +98 -0
- nous/genai/reference/model_catalog_data/anthropic.py +10 -0
- nous/genai/reference/model_catalog_data/google.py +45 -0
- nous/genai/reference/model_catalog_data/openai.py +44 -0
- nous/genai/reference/model_catalog_data/tuzi_anthropic.py +21 -0
- nous/genai/reference/model_catalog_data/tuzi_google.py +19 -0
- nous/genai/reference/model_catalog_data/tuzi_openai.py +75 -0
- nous/genai/reference/model_catalog_data/tuzi_web.py +136 -0
- nous/genai/reference/model_catalog_data/volcengine.py +107 -0
- nous/genai/tools/__init__.py +13 -0
- nous/genai/tools/output_parser.py +119 -0
- nous/genai/types.py +416 -0
- nous/py.typed +1 -0
- nous_genai-0.1.0.dist-info/METADATA +200 -0
- nous_genai-0.1.0.dist-info/RECORD +45 -0
- nous_genai-0.1.0.dist-info/WHEEL +5 -0
- nous_genai-0.1.0.dist-info/entry_points.txt +4 -0
- nous_genai-0.1.0.dist-info/licenses/LICENSE +190 -0
- nous_genai-0.1.0.dist-info/top_level.txt +1 -0
nous/genai/mcp_cli.py
ADDED
@@ -0,0 +1,275 @@
+from __future__ import annotations
+
+import argparse
+import asyncio
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Any
+
+from ._internal.config import load_env_files
+
+
+def _env(name: str) -> str | None:
+    return os.environ.get(name)
+
+
+def _env_int(name: str, default: int) -> int:
+    raw = _env(name)
+    if raw is None:
+        return default
+    try:
+        return int(raw)
+    except ValueError:
+        return default
+
+
+def _mcp_url() -> str:
+    base = (_env("NOUS_GENAI_MCP_URL") or "").strip()
+    if base:
+        return _ensure_mcp_path(base)
+
+    base = (_env("NOUS_GENAI_MCP_BASE_URL") or "").strip()
+    if not base:
+        base = (_env("NOUS_GENAI_MCP_PUBLIC_BASE_URL") or "").strip()
+    if base:
+        return _ensure_mcp_path(base)
+
+    host = (_env("NOUS_GENAI_MCP_HOST") or "").strip() or "127.0.0.1"
+    port = _env_int("NOUS_GENAI_MCP_PORT", 6001)
+    if host in {"0.0.0.0", "::"}:
+        host = "127.0.0.1"
+    return f"http://{host}:{port}/mcp"
+
+
+def _ensure_mcp_path(base: str) -> str:
+    stripped = base.rstrip("/")
+    if stripped.endswith("/mcp"):
+        return stripped
+    return f"{stripped}/mcp"
+
+
+def _print_json(obj: Any, *, indent: int | None = 2) -> None:
+    print(json.dumps(obj, ensure_ascii=False, indent=indent))
+
+
+def _write_json(obj: Any, path: str, *, indent: int | None = 2) -> None:
+    text = json.dumps(obj, ensure_ascii=False, indent=indent) + "\n"
+    Path(path).write_text(text, encoding="utf-8")
+
+
+def _summarize_schema(schema: dict[str, Any] | None) -> str:
+    if not isinstance(schema, dict) or not schema:
+        return "none"
+    required = schema.get("required")
+    props = schema.get("properties")
+    parts: list[str] = []
+    if isinstance(required, list):
+        parts.append(f"required={len(required)}")
+    if isinstance(props, dict):
+        parts.append(f"properties={len(props)}")
+    title = schema.get("title")
+    if isinstance(title, str) and title:
+        parts.append(f"title={title}")
+    return ", ".join(parts) if parts else "ok"
+
+
+async def _run(args: argparse.Namespace) -> int:
+    loaded_envs = load_env_files()
+    url = _mcp_url()
+    bearer = (args.bearer_token or _env("NOUS_GENAI_MCP_BEARER_TOKEN") or "").strip()
+
+    from mcp import ClientSession
+    from mcp.client.streamable_http import streamable_http_client
+    from mcp.shared._httpx_utils import create_mcp_http_client
+
+    if args.cmd == "env":
+        _print_json(
+            {
+                "cwd": str(Path.cwd()),
+                "loaded_env_files": [str(p) for p in loaded_envs],
+                "mcp_url": url,
+                "bearer_auth": bool(bearer),
+            }
+        )
+        return 0
+
+    headers = {"Authorization": f"Bearer {bearer}"} if bearer else None
+    async with create_mcp_http_client(headers=headers) as http_client:
+        async with streamable_http_client(url, http_client=http_client) as (
+            read_stream,
+            write_stream,
+            _,
+        ):
+            async with ClientSession(read_stream, write_stream) as session:
+                await session.initialize()
+
+                if args.cmd == "tools":
+                    tools_result = await session.list_tools()
+                    if args.json:
+                        indent = None if args.compact else 2
+                        if args.name:
+                            tool = next(
+                                (t for t in tools_result.tools if t.name == args.name),
+                                None,
+                            )
+                            if tool is None:
+                                raise ValueError(f"tool not found: {args.name}")
+                            data = tool.model_dump(mode="json", exclude_none=True)
+                        else:
+                            data = tools_result.model_dump(
+                                mode="json", exclude_none=True
+                            )
+                        if args.out:
+                            _write_json(data, args.out, indent=indent)
+                        else:
+                            _print_json(data, indent=indent)
+                        return 0
+
+                    if args.out:
+                        raise ValueError("--out requires --json")
+                    if args.compact:
+                        raise ValueError("--compact requires --json")
+
+                    tools = tools_result.tools
+                    if args.name:
+                        tool = next((t for t in tools if t.name == args.name), None)
+                        if tool is None:
+                            raise ValueError(f"tool not found: {args.name}")
+                        tools = [tool]
+
+                    for tool in tools:
+                        print(f"- {tool.name}")
+                        desc = getattr(tool, "description", None)
+                        if isinstance(desc, str):
+                            desc = " ".join(desc.split())
+                        if desc:
+                            print(f" description: {desc}")
+                        print(f" inputSchema: {_summarize_schema(tool.inputSchema)}")
+                        print(f" outputSchema: {_summarize_schema(tool.outputSchema)}")
+                        if args.full:
+                            print(" inputSchema JSON:")
+                            _print_json(tool.inputSchema or {})
+                            print(" outputSchema JSON:")
+                            _print_json(tool.outputSchema or {})
+                    return 0
+
+                if args.cmd == "templates":
+                    tmpl_result = await session.list_resource_templates()
+                    for t in tmpl_result.resourceTemplates:
+                        print(f"- {t.uriTemplate} ({t.mimeType or 'unknown'})")
+                    return 0
+
+                if args.cmd == "resources":
+                    res_result = await session.list_resources()
+                    for r in res_result.resources:
+                        print(f"- {r.uri} ({r.mimeType or 'unknown'})")
+                    return 0
+
+                if args.cmd == "read":
+                    read_result = await session.read_resource(args.uri)
+                    for item in read_result.contents:
+                        text = getattr(item, "text", None)
+                        if isinstance(text, str):
+                            if args.max_chars > 0 and len(text) > args.max_chars:
+                                total = len(text)
+                                text = text[: args.max_chars] + f"... ({total} chars)"
+                            print(text)
+                            continue
+                        blob = getattr(item, "blob", None)
+                        if isinstance(blob, str):
+                            print(f"<blob base64: {len(blob)} chars>")
+                            continue
+                        print(f"<resource: {item.uri}>")
+                    return 0
+
+                if args.cmd == "call":
+                    tool_args: dict[str, Any] | None = None
+                    if args.args_file:
+                        tool_args = json.loads(
+                            Path(args.args_file).read_text(encoding="utf-8")
+                        )
+                    elif args.args:
+                        tool_args = json.loads(args.args)
+
+                    result = await session.call_tool(args.name, arguments=tool_args)
+                    if result.structuredContent is not None:
+                        _print_json(
+                            result.structuredContent, indent=None if args.compact else 2
+                        )
+                    else:
+                        _print_json(
+                            result.model_dump(mode="json", exclude_none=True),
+                            indent=None if args.compact else 2,
+                        )
+
+                    return 1 if result.isError else 0
+
+                raise ValueError(f"unknown cmd: {args.cmd}")
+
+
+def main(argv: list[str] | None = None) -> int:
+    parser = argparse.ArgumentParser(
+        prog="genai-mcp-cli",
+        description="MCP client for debugging nous-genai MCP server (Streamable HTTP)",
+    )
+    parser.add_argument(
+        "--bearer-token",
+        dest="bearer_token",
+        help="HTTP Authorization Bearer token (or set NOUS_GENAI_MCP_BEARER_TOKEN).",
+    )
+    sub = parser.add_subparsers(dest="cmd")
+
+    sub.add_parser("env", help="Print resolved env + MCP URL")
+
+    p_tools = sub.add_parser("tools", help="List tools (or dump JSON with --json)")
+    p_tools.add_argument("name", nargs="?", help="Tool name (optional, e.g. generate)")
+    p_tools.add_argument("--full", action="store_true", help="Print full JSON schemas")
+    p_tools.add_argument(
+        "--json", action="store_true", help="Dump JSON output (list_tools or tool)"
+    )
+    p_tools.add_argument(
+        "--out", help="Write JSON to file instead of stdout (requires --json)"
+    )
+    p_tools.add_argument(
+        "--compact", action="store_true", help="Compact JSON output (requires --json)"
+    )
+
+    sub.add_parser("templates", help="List resource templates")
+    sub.add_parser("resources", help="List concrete resources")
+
+    p_read = sub.add_parser("read", help="Read resource by URI")
+    p_read.add_argument("uri", help='URI like "genai://artifact/{id}"')
+    p_read.add_argument(
+        "--max-chars",
+        type=int,
+        default=2000,
+        help="Max chars to print for text resources",
+    )
+
+    p_call = sub.add_parser("call", help="Call a tool with JSON arguments")
+    p_call.add_argument(
+        "name", help="Tool name (e.g. list_providers, list_available_models, generate)"
+    )
+    p_call.add_argument("--args", help='JSON string (e.g. \'{"provider":"openai"}\')')
+    p_call.add_argument("--args-file", help="Path to JSON file with tool arguments")
+    p_call.add_argument("--compact", action="store_true", help="Compact JSON output")
+
+    args = parser.parse_args(argv)
+    if not args.cmd:
+        args.cmd = "tools"
+
+    try:
+        return asyncio.run(_run(args))
+    except BrokenPipeError:
+        return 0
+    except KeyboardInterrupt:
+        return 130
+    except Exception as e:
+        print(f"error: {e}", file=sys.stderr)
+        return 1


if __name__ == "__main__":  # pragma: no cover
    raise SystemExit(main())
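
For orientation, a minimal usage sketch of this CLI follows. It is illustrative only: it assumes the package and its `mcp` client dependency are installed and that a nous-genai MCP server is reachable at the resolved URL (the code above falls back to http://127.0.0.1:6001/mcp when no NOUS_GENAI_MCP_* variables are set). The console-script name is defined in entry_points.txt, which this diff does not show, so the sketch calls the module's main() directly; "example-token" is a placeholder.

# Illustrative sketch: drives the CLI above through its main() entry point.
# Assumes a reachable nous-genai MCP server; replace "example-token" with a
# real value or set NOUS_GENAI_MCP_BEARER_TOKEN instead.
import sys

from nous.genai.mcp_cli import main

# Print the resolved env files and MCP URL (this branch does not connect).
main(["env"])

# List the server's tools as compact JSON on stdout.
main(["tools", "--json", "--compact"])

# Call a tool; "list_providers" is one of the names mentioned in the help text above.
exit_code = main(["--bearer-token", "example-token", "call", "list_providers"])
sys.exit(exit_code)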