simajilord 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. simajilord-0.1.0/PKG-INFO +138 -0
  2. simajilord-0.1.0/PYPI.md +119 -0
  3. simajilord-0.1.0/README.md +56 -0
  4. simajilord-0.1.0/pyproject.toml +64 -0
  5. simajilord-0.1.0/setup.cfg +4 -0
  6. simajilord-0.1.0/src/cappuccino/__init__.py +3 -0
  7. simajilord-0.1.0/src/cappuccino/adaptive_reasoning.py +15 -0
  8. simajilord-0.1.0/src/cappuccino/auto_reasoning.py +640 -0
  9. simajilord-0.1.0/src/cappuccino/benchmarking.py +540 -0
  10. simajilord-0.1.0/src/cappuccino/cli.py +1348 -0
  11. simajilord-0.1.0/src/cappuccino/context_windows.py +124 -0
  12. simajilord-0.1.0/src/cappuccino/corpus_preparation.py +5406 -0
  13. simajilord-0.1.0/src/cappuccino/dataset_manifests.py +83 -0
  14. simajilord-0.1.0/src/cappuccino/dataset_preparation.py +1393 -0
  15. simajilord-0.1.0/src/cappuccino/image_preprocessing.py +118 -0
  16. simajilord-0.1.0/src/cappuccino/model_inspector.py +260 -0
  17. simajilord-0.1.0/src/cappuccino/openai_compat.py +249 -0
  18. simajilord-0.1.0/src/cappuccino/openai_contract.py +187 -0
  19. simajilord-0.1.0/src/cappuccino/preference_loop.py +536 -0
  20. simajilord-0.1.0/src/cappuccino/quantization.py +88 -0
  21. simajilord-0.1.0/src/cappuccino/reasoning.py +505 -0
  22. simajilord-0.1.0/src/cappuccino/responses_runtime.py +904 -0
  23. simajilord-0.1.0/src/cappuccino/runtime_compat.py +654 -0
  24. simajilord-0.1.0/src/cappuccino/server.py +1646 -0
  25. simajilord-0.1.0/src/cappuccino/skill_registry.py +295 -0
  26. simajilord-0.1.0/src/cappuccino/tool_runtime.py +206 -0
  27. simajilord-0.1.0/src/cappuccino/tool_search.py +340 -0
  28. simajilord-0.1.0/src/cappuccino/trainer.py +4121 -0
  29. simajilord-0.1.0/src/cappuccino/training_schemas.py +249 -0
  30. simajilord-0.1.0/src/cappuccino/wikipedia_tool.py +158 -0
  31. simajilord-0.1.0/src/cappuccino/workload_lock.py +76 -0
  32. simajilord-0.1.0/src/simajilord/__init__.py +3 -0
  33. simajilord-0.1.0/src/simajilord/__main__.py +5 -0
  34. simajilord-0.1.0/src/simajilord/cli.py +3 -0
  35. simajilord-0.1.0/src/simajilord/server.py +3 -0
  36. simajilord-0.1.0/src/simajilord.egg-info/PKG-INFO +138 -0
  37. simajilord-0.1.0/src/simajilord.egg-info/SOURCES.txt +60 -0
  38. simajilord-0.1.0/src/simajilord.egg-info/dependency_links.txt +1 -0
  39. simajilord-0.1.0/src/simajilord.egg-info/entry_points.txt +3 -0
  40. simajilord-0.1.0/src/simajilord.egg-info/requires.txt +13 -0
  41. simajilord-0.1.0/src/simajilord.egg-info/top_level.txt +2 -0
  42. simajilord-0.1.0/tests/test_adaptive_reasoning.py +279 -0
  43. simajilord-0.1.0/tests/test_benchmarking.py +63 -0
  44. simajilord-0.1.0/tests/test_branding_audit.py +62 -0
  45. simajilord-0.1.0/tests/test_cli.py +1209 -0
  46. simajilord-0.1.0/tests/test_context_windows.py +57 -0
  47. simajilord-0.1.0/tests/test_corpus_preparation.py +1447 -0
  48. simajilord-0.1.0/tests/test_dataset_preparation.py +684 -0
  49. simajilord-0.1.0/tests/test_image_preprocessing.py +66 -0
  50. simajilord-0.1.0/tests/test_model_inspector.py +80 -0
  51. simajilord-0.1.0/tests/test_openai_contract.py +43 -0
  52. simajilord-0.1.0/tests/test_openai_sdk_compat.py +276 -0
  53. simajilord-0.1.0/tests/test_preference_loop.py +138 -0
  54. simajilord-0.1.0/tests/test_reasoning.py +191 -0
  55. simajilord-0.1.0/tests/test_runtime_compat.py +225 -0
  56. simajilord-0.1.0/tests/test_server.py +969 -0
  57. simajilord-0.1.0/tests/test_simajilord_namespace.py +12 -0
  58. simajilord-0.1.0/tests/test_skill_registry.py +107 -0
  59. simajilord-0.1.0/tests/test_tool_runtime.py +114 -0
  60. simajilord-0.1.0/tests/test_tool_search.py +109 -0
  61. simajilord-0.1.0/tests/test_trainer.py +960 -0
  62. simajilord-0.1.0/tests/test_training_schemas.py +74 -0
@@ -0,0 +1,138 @@
1
+ Metadata-Version: 2.4
2
+ Name: simajilord
3
+ Version: 0.1.0
4
+ Summary: Local OpenAI API runtime and tool-search foundation for multimodal Cappuccino-class models on Apple Silicon.
5
+ Requires-Python: >=3.12
6
+ Description-Content-Type: text/markdown
7
+ Requires-Dist: fastapi>=0.135.1
8
+ Requires-Dist: httpx>=0.28.1
9
+ Requires-Dist: mlx>=0.31.1
10
+ Requires-Dist: mlx-lm>=0.31.1
11
+ Requires-Dist: mlx-vlm>=0.4.0
12
+ Requires-Dist: pdfplumber>=0.11.9
13
+ Requires-Dist: pydantic>=2.12.5
14
+ Requires-Dist: pypdf>=6.9.2
15
+ Requires-Dist: uvicorn>=0.42.0
16
+ Provides-Extra: dev
17
+ Requires-Dist: mypy>=1.18.2; extra == "dev"
18
+ Requires-Dist: pytest>=9.0.2; extra == "dev"
19
+
20
+ # Cappuccino
21
+
22
+ `simajilord` is the package and CLI that exposes a local OpenAI-compatible runtime for Cappuccino-class models on Apple Silicon.
23
+
24
+ ## Install
25
+
26
+ The model name stays `cappuccino`, but the package name is:
27
+
28
+ ```bash
29
+ pip install simajilord openai
30
+ ```
31
+
32
+ Public import and CLI:
33
+
34
+ ```bash
35
+ python -c "import simajilord; print(simajilord.__file__)"
36
+ simajilord --help
37
+ ```
38
+
39
+ ## Serve a local OpenAI-compatible endpoint
40
+
41
+ ```bash
42
+ simajilord serve \
43
+ --default-model-path /path/to/Cappuccino-27B \
44
+ --public-model-id cappuccino \
45
+ --host 127.0.0.1 \
46
+ --port 8020
47
+ ```
48
+
49
+ ## Quickstart with the OpenAI Python SDK
50
+
51
+ ```python
52
+ from openai import OpenAI
53
+
54
+ client = OpenAI(
55
+ base_url="http://127.0.0.1:8020/v1",
56
+ api_key="dummy",
57
+ )
58
+
59
+ response = client.chat.completions.create(
60
+ model="cappuccino",
61
+ messages=[
62
+ {"role": "user", "content": "日本語で一文だけ自己紹介して。"},
63
+ ],
64
+ )
65
+
66
+ print(response.choices[0].message.content)
67
+ ```
68
+
69
+ ## Quickstart with skill selection
70
+
71
+ Skill selection is a Cappuccino extension. The request stays OpenAI-compatible because skill metadata is converted into ordinary upstream instructions before the model call.
72
+
73
+ ```python
74
+ from openai import OpenAI
75
+
76
+ client = OpenAI(
77
+ base_url="http://127.0.0.1:8020/v1",
78
+ api_key="dummy",
79
+ )
80
+
81
+ response = client.responses.create(
82
+ model="cappuccino",
83
+ input="OpenAI API の公式情報だけで確認して要点をまとめて。",
84
+ extra_body={
85
+ "skill_choice": "auto",
86
+ },
87
+ )
88
+
89
+ print(response.output_text)
90
+ ```
91
+
92
+ Explicit skill selection:
93
+
94
+ ```python
95
+ from openai import OpenAI
96
+
97
+ client = OpenAI(
98
+ base_url="http://127.0.0.1:8020/v1",
99
+ api_key="dummy",
100
+ )
101
+
102
+ response = client.responses.create(
103
+ model="cappuccino",
104
+ input="最新の API 仕様を確認して。",
105
+ extra_body={
106
+ "skills": ["openai-docs"],
107
+ },
108
+ )
109
+
110
+ print(response.output_text)
111
+ ```
112
+
113
+ ## Skill registry layout
114
+
115
+ Each skill lives in its own directory and uses `SKILL.md` frontmatter:
116
+
117
+ ```md
118
+ ---
119
+ name: openai-docs
120
+ description: Use when a task needs current OpenAI API documentation and official references.
121
+ ---
122
+
123
+ Read the official docs first.
124
+ ```
125
+
126
+ Load the registry at serve time:
127
+
128
+ ```bash
129
+ simajilord serve \
130
+ --default-model-path /path/to/Cappuccino-27B \
131
+ --skill-registry /path/to/skills
132
+ ```
133
+
134
+ Compatibility CLI alias:
135
+
136
+ ```bash
137
+ cappuccino --help
138
+ ```
@@ -0,0 +1,119 @@
1
+ # Cappuccino
2
+
3
+ `simajilord` is the package and CLI that exposes a local OpenAI-compatible runtime for Cappuccino-class models on Apple Silicon.
4
+
5
+ ## Install
6
+
7
+ The model name stays `cappuccino`, but the package name is:
8
+
9
+ ```bash
10
+ pip install simajilord openai
11
+ ```
12
+
13
+ Public import and CLI:
14
+
15
+ ```bash
16
+ python -c "import simajilord; print(simajilord.__file__)"
17
+ simajilord --help
18
+ ```
19
+
20
+ ## Serve a local OpenAI-compatible endpoint
21
+
22
+ ```bash
23
+ simajilord serve \
24
+ --default-model-path /path/to/Cappuccino-27B \
25
+ --public-model-id cappuccino \
26
+ --host 127.0.0.1 \
27
+ --port 8020
28
+ ```
29
+
30
+ ## Quickstart with the OpenAI Python SDK
31
+
32
+ ```python
33
+ from openai import OpenAI
34
+
35
+ client = OpenAI(
36
+ base_url="http://127.0.0.1:8020/v1",
37
+ api_key="dummy",
38
+ )
39
+
40
+ response = client.chat.completions.create(
41
+ model="cappuccino",
42
+ messages=[
43
+ {"role": "user", "content": "日本語で一文だけ自己紹介して。"},
44
+ ],
45
+ )
46
+
47
+ print(response.choices[0].message.content)
48
+ ```
49
+
50
+ ## Quickstart with skill selection
51
+
52
+ Skill selection is a Cappuccino extension. The request stays OpenAI-compatible because skill metadata is converted into ordinary upstream instructions before the model call.
53
+
54
+ ```python
55
+ from openai import OpenAI
56
+
57
+ client = OpenAI(
58
+ base_url="http://127.0.0.1:8020/v1",
59
+ api_key="dummy",
60
+ )
61
+
62
+ response = client.responses.create(
63
+ model="cappuccino",
64
+ input="OpenAI API の公式情報だけで確認して要点をまとめて。",
65
+ extra_body={
66
+ "skill_choice": "auto",
67
+ },
68
+ )
69
+
70
+ print(response.output_text)
71
+ ```
72
+
73
+ Explicit skill selection:
74
+
75
+ ```python
76
+ from openai import OpenAI
77
+
78
+ client = OpenAI(
79
+ base_url="http://127.0.0.1:8020/v1",
80
+ api_key="dummy",
81
+ )
82
+
83
+ response = client.responses.create(
84
+ model="cappuccino",
85
+ input="最新の API 仕様を確認して。",
86
+ extra_body={
87
+ "skills": ["openai-docs"],
88
+ },
89
+ )
90
+
91
+ print(response.output_text)
92
+ ```
93
+
94
+ ## Skill registry layout
95
+
96
+ Each skill lives in its own directory and uses `SKILL.md` frontmatter:
97
+
98
+ ```md
99
+ ---
100
+ name: openai-docs
101
+ description: Use when a task needs current OpenAI API documentation and official references.
102
+ ---
103
+
104
+ Read the official docs first.
105
+ ```
106
+
107
+ Load the registry at serve time:
108
+
109
+ ```bash
110
+ simajilord serve \
111
+ --default-model-path /path/to/Cappuccino-27B \
112
+ --skill-registry /path/to/skills
113
+ ```
114
+
115
+ Compatibility CLI alias:
116
+
117
+ ```bash
118
+ cappuccino --help
119
+ ```
@@ -0,0 +1,56 @@
1
+ ---
2
+ license: apache-2.0
3
+ base_model:
4
+ - cappuccino-27b-base
5
+ pipeline_tag: text-generation
6
+ tags:
7
+ - cappuccino
8
+ - multimodal
9
+ - tool-use
10
+ - thinking
11
+ - local-runtime
12
+ library_name: transformers
13
+ ---
14
+
15
+ # Cappuccino-27B
16
+
17
+ Cappuccino-27B is the local Cappuccino foundation checkpoint used by this repository for runtime serving, continual pretraining, and supervised fine-tuning.
18
+
19
+ ## Identity
20
+
21
+ - Public model name: `Cappuccino-27B`
22
+ - Public base-model identity: `cappuccino-27b-base`
23
+ - Repository root stores the original-precision MLX weights.
24
+ - `artifacts/q6` is a separate 6-bit inference copy for serving and benchmarking.
25
+ - Model and processor identifiers in the model directory are branded as `Cappuccino`.
26
+
27
+ ## Architecture
28
+
29
+ - Type: causal language model with a vision encoder
30
+ - Scale: 27B-class multimodal model
31
+ - Decoder layers: 64
32
+ - Native context window: `262144`
33
+ - Extended context: extensible up to 1,010,000 tokens beyond the native window.
34
+ - Training and runtime surface: text, image, video placeholders, tool calls, tool responses, and thinking traces
35
+
36
+ ## Runtime
37
+
38
+ Cappuccino exposes the OpenAI API through the local wrapper, with the Responses API as the default interface for tools and multimodal turns, and can be inspected, trained, quantized, and served with the `cappuccino` CLI.
39
+
40
+ ```bash
41
+ ./.venv/bin/cappuccino inspect-model --model-path /path/to/cappuccino
42
+ ```
43
+
44
+ ```bash
45
+ ./.venv/bin/cappuccino train \
46
+     --model-path /path/to/cappuccino \
47
+ --dataset-path /path/to/train.jsonl \
48
+ --output-dir /path/to/out \
49
+ --dry-run
50
+ ```
51
+
52
+ ## Storage
53
+
54
+ - `inspect-model` reports `weight_bytes` for the base checkpoint already stored in the model directory.
55
+ - `train --dry-run` reports `saved_trainable_tensor_bytes` separately so the saved trainable tensors can be accounted for without conflating them with the base checkpoint.
56
+ - If adapters are merged back into a full checkpoint later, the merged checkpoint size stays close to the base model size because the architecture does not grow.
@@ -0,0 +1,64 @@
1
+ [build-system]
2
+ requires = ["setuptools>=68", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "simajilord"
7
+ version = "0.1.0"
8
+ description = "Local OpenAI API runtime and tool-search foundation for multimodal Cappuccino-class models on Apple Silicon."
9
+ readme = "PYPI.md"
10
+ requires-python = ">=3.12"
11
+ dependencies = [
12
+ "fastapi>=0.135.1",
13
+ "httpx>=0.28.1",
14
+ "mlx>=0.31.1",
15
+ "mlx-lm>=0.31.1",
16
+ "mlx-vlm>=0.4.0",
17
+ "pdfplumber>=0.11.9",
18
+ "pydantic>=2.12.5",
19
+ "pypdf>=6.9.2",
20
+ "uvicorn>=0.42.0",
21
+ ]
22
+
23
+ [project.optional-dependencies]
24
+ dev = [
25
+ "mypy>=1.18.2",
26
+ "pytest>=9.0.2",
27
+ ]
28
+
29
+ [project.scripts]
30
+ simajilord = "simajilord.cli:main"
31
+ cappuccino = "cappuccino.cli:main"
32
+
33
+ [tool.pytest.ini_options]
34
+ pythonpath = ["src"]
35
+ testpaths = ["tests"]
36
+
37
+ [tool.mypy]
38
+ python_version = "3.12"
39
+ files = ["src", "tests"]
40
+ check_untyped_defs = true
41
+ disallow_incomplete_defs = true
42
+ disallow_untyped_defs = true
43
+ no_implicit_optional = true
44
+ warn_redundant_casts = true
45
+ warn_return_any = true
46
+ warn_unreachable = true
47
+ warn_unused_ignores = true
48
+
49
+ [[tool.mypy.overrides]]
50
+ module = [
51
+ "mlx.*",
52
+ "mlx_lm.*",
53
+ "mlx_vlm.*",
54
+ "pdfplumber.*",
55
+ "pypdf.*",
56
+ "uvicorn.*",
57
+ ]
58
+ ignore_missing_imports = true
59
+
60
+ [tool.setuptools]
61
+ package-dir = {"" = "src"}
62
+
63
+ [tool.setuptools.packages.find]
64
+ where = ["src"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,3 @@
1
+ __all__ = ["__version__"]
2
+
3
+ __version__ = "0.1.0"
@@ -0,0 +1,15 @@
1
+ from __future__ import annotations
2
+
3
+ from .auto_reasoning import (
4
+ run_adaptive_chat_completion,
5
+ run_auto_chat_completion,
6
+ run_on_chat_completion,
7
+ run_structured_reasoning_chat_completion,
8
+ )
9
+
10
+ __all__ = [
11
+ "run_adaptive_chat_completion",
12
+ "run_auto_chat_completion",
13
+ "run_on_chat_completion",
14
+ "run_structured_reasoning_chat_completion",
15
+ ]