objectiveai-cocoindex 2.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,37 @@
1
+ """ObjectiveAI integration for `cocoindex <https://github.com/cocoindex-io/cocoindex>`_.
2
+
3
+ Exposes a single ``Function`` class that runs ObjectiveAI function executions
4
+ as memoized cocoindex processing components. Memo key combines the bound
5
+ ``(function, profile, strategy)`` triple with the per-call ``input``.
6
+
7
+ The ``client`` is intentionally excluded from the memo key — two
8
+ ``Function`` instances over the same triple with different clients share
9
+ cache entries. This makes the library safe to drop into pipelines without
10
+ worrying about client identity.
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ from objectiveai_cocoindex._client import set_default_client
16
+ from objectiveai_cocoindex._errors import ObjectiveAIExecutionError
17
+ from objectiveai_cocoindex._function import Function
18
+ from objectiveai_cocoindex._sources import (
19
+ FunctionSource,
20
+ InlineFunction,
21
+ InlineProfile,
22
+ ProfileSource,
23
+ RemoteFunction,
24
+ RemoteProfile,
25
+ )
26
+
27
# Public API surface of objectiveai_cocoindex; keep in sync with the
# imports above.
__all__ = [
    "Function",
    "set_default_client",
    "FunctionSource",
    "ProfileSource",
    "RemoteFunction",
    "InlineFunction",
    "RemoteProfile",
    "InlineProfile",
    "ObjectiveAIExecutionError",
]
@@ -0,0 +1,33 @@
1
+ """Module-default ObjectiveAI client.
2
+
3
+ Used by `Function` instances constructed without an explicit ``client``.
4
+ Constructed lazily so importing this module doesn't require auth env vars.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from objectiveai_sdk.client import ObjectiveAI
10
+
11
+ _default_instance: ObjectiveAI | None = None
12
+
13
+
14
def set_default_client(client: ObjectiveAI | None) -> None:
    """Install ``client`` as the process-wide default, or clear it with ``None``.

    ``Function`` instances created without an explicit ``client`` fall back
    to this default. When no default has been set, a fresh ``ObjectiveAI()``
    is built lazily on first use, picking up ``OBJECTIVEAI_AUTHORIZATION``
    and related settings from the environment via ``ObjectiveAI()``'s own
    constructor defaults.
    """
    global _default_instance
    _default_instance = client
25
+
26
+
27
def resolve_client() -> ObjectiveAI:
    """Return the current default client.

    Lazily constructs an ``ObjectiveAI()`` from the environment on first
    use when no default has been installed via :func:`set_default_client`.
    """
    global _default_instance
    client = _default_instance
    if client is None:
        client = ObjectiveAI()
        _default_instance = client
    return client
@@ -0,0 +1,11 @@
1
+ """Errors raised by objectiveai-cocoindex wrappers."""
2
+
3
+ from __future__ import annotations
4
+
5
+
6
class ObjectiveAIExecutionError(RuntimeError):
    """Raised when a function execution returns an error output (TaskOutputErr)."""

    def __init__(self, payload: object) -> None:
        # Keep the raw error payload available for programmatic inspection.
        self.payload = payload
        super().__init__(f"ObjectiveAI execution error: {payload!r}")
@@ -0,0 +1,91 @@
1
+ """Single-class wrapper that runs an ObjectiveAI function execution as a
2
+ memoized cocoindex processing component.
3
+
4
+ Bind the (function, profile, strategy) triple at construction; call with
5
+ per-execution ``input``. Memo key combines all four. The optional
6
+ ``client`` is intentionally excluded from the memo key — two ``Function``
7
+ instances over the same triple but with different clients share cache
8
+ entries.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from typing import Any
14
+
15
+ import cocoindex as coco
16
+ from objectiveai_sdk.client import ObjectiveAI
17
+ from objectiveai_sdk.functions.executions.http import create_function_execution
18
+ from objectiveai_sdk.functions.executions.request import FunctionExecutionCreateParams
19
+ from objectiveai_sdk.functions.executions.request.strategy import Strategy
20
+ from objectiveai_sdk.functions.executions.response.unary import FunctionExecution
21
+
22
+ from objectiveai_cocoindex._client import resolve_client
23
+ from objectiveai_cocoindex._sources import FunctionSource, ProfileSource
24
+
25
+
26
class Function:
    """An ObjectiveAI function bound to a ``(function, profile, strategy)``
    triple, callable as a memoized cocoindex processing component.

    The memo key combines the three constructor arguments with the
    per-call ``input``. The optional ``client`` is deliberately left out
    of the key — two ``Function`` instances over the same triple but with
    different clients share cache entries.

    Example::

        import objectiveai_cocoindex as oai_coco

        scorer = oai_coco.Function(
            function=oai_coco.RemoteFunction.github(
                owner="ObjectiveAI", repository="example-quality", commit="abc"
            ),
            profile=oai_coco.RemoteProfile.github(
                owner="ObjectiveAI", repository="example-quality", commit="abc"
            ),
        )

        execution = await scorer({"text": "hello"})
        out = execution.output.output.root
        # out is one of TaskOutputScalar | TaskOutputVector |
        # TaskOutputVectors | TaskOutputErr
    """

    __slots__ = ("_function", "_profile", "_strategy", "_client")

    def __init__(
        self,
        function: FunctionSource,
        profile: ProfileSource,
        strategy: Strategy | None = None,
        *,
        client: ObjectiveAI | None = None,
    ) -> None:
        self._function = function
        self._profile = profile
        self._strategy = strategy
        self._client = client

    def __coco_memo_key__(self) -> object:
        # self._client is deliberately omitted: swapping clients must not
        # invalidate existing cache entries.
        strategy_key = None if self._strategy is None else self._strategy.model_dump()
        return (
            "objectiveai_cocoindex.Function",
            self._function.__coco_memo_key__(),
            self._profile.__coco_memo_key__(),
            strategy_key,
        )

    @coco.fn(memo=True, logic_tracking="self")
    async def __call__(self, input: Any) -> FunctionExecution:
        """Run one execution of the bound function against ``input``."""
        active_client = resolve_client() if self._client is None else self._client
        request = FunctionExecutionCreateParams(
            function=self._function.to_function_field(),
            profile=self._profile.to_profile_field(),
            strategy=self._strategy,
            input=input,
        )
        response = await create_function_execution(active_client, request)
        if isinstance(response, FunctionExecution):
            return response
        # NOTE(review): create_function_execution can apparently return a
        # non-model value here; coerced defensively — confirm against the SDK.
        return FunctionExecution.model_validate(response)
@@ -0,0 +1,243 @@
1
+ """ADTs for ObjectiveAI function and profile sources.
2
+
3
+ A ``Function`` (the executor) takes one ``FunctionSource`` and one
4
+ ``ProfileSource`` at construction. Each source knows how to:
5
+
6
+ - translate itself into the request-field shape expected by
7
+ ``FunctionExecutionCreateParams``;
8
+ - return a stable ``__coco_memo_key__`` so cocoindex's memoization
9
+ fingerprint changes whenever the underlying ref/body changes.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ from typing import Protocol, runtime_checkable
15
+
16
+ from objectiveai_sdk.functions.full_inline_function import FullInlineFunction
17
+ from objectiveai_sdk.functions.full_inline_function_or_remote_commit_optional import (
18
+ FullInlineFunctionOrRemoteCommitOptional,
19
+ FullInlineFunctionOrRemoteCommitOptionalInline,
20
+ FullInlineFunctionOrRemoteCommitOptionalRemote,
21
+ )
22
+ from objectiveai_sdk.functions.inline_profile import InlineProfile as InlineProfileBody
23
+ from objectiveai_sdk.functions.inline_profile_or_remote_commit_optional import (
24
+ InlineProfileOrRemoteCommitOptional,
25
+ InlineProfileOrRemoteCommitOptionalInline,
26
+ InlineProfileOrRemoteCommitOptionalRemote,
27
+ )
28
+ from objectiveai_sdk.remote_path_commit_optional import (
29
+ RemotePathCommitOptional,
30
+ RemotePathCommitOptionalFilesystem,
31
+ RemotePathCommitOptionalGithub,
32
+ RemotePathCommitOptionalMock,
33
+ )
34
+
35
+
36
@runtime_checkable
class FunctionSource(Protocol):
    """Either an inline function body or a remote function reference."""

    # Translate this source into the ``function`` request-field shape
    # expected by ``FunctionExecutionCreateParams``.
    def to_function_field(self) -> FullInlineFunctionOrRemoteCommitOptional: ...
    # Stable fingerprint for cocoindex memoization; must change whenever
    # the underlying function body/reference changes.
    def __coco_memo_key__(self) -> object: ...
42
+
43
+
44
@runtime_checkable
class ProfileSource(Protocol):
    """Either an inline profile body or a remote profile reference."""

    # Translate this source into the ``profile`` request-field shape
    # expected by ``FunctionExecutionCreateParams``.
    def to_profile_field(self) -> InlineProfileOrRemoteCommitOptional: ...
    # Stable fingerprint for cocoindex memoization; must change whenever
    # the underlying profile body/reference changes.
    def __coco_memo_key__(self) -> object: ...
50
+
51
+
52
+ # ---------------------------------------------------------------------------
53
+ # Remote sources
54
+ # ---------------------------------------------------------------------------
55
+
56
+
57
class RemoteFunction:
    """Reference to a remotely-hosted function (GitHub, filesystem, mock).

    Construct via the ``github`` / ``filesystem`` / ``mock`` classmethods
    rather than the bare constructor.
    """

    __slots__ = ("_ref",)

    def __init__(self, ref: RemotePathCommitOptional) -> None:
        self._ref = ref

    @classmethod
    def github(
        cls,
        *,
        owner: str,
        repository: str,
        commit: str | None = None,
    ) -> RemoteFunction:
        """Reference ``owner/repository`` on GitHub, optionally pinned to ``commit``."""
        root = RemotePathCommitOptionalGithub(
            remote="github", owner=owner, repository=repository, commit=commit,
        )
        return cls(RemotePathCommitOptional(root=root))

    @classmethod
    def filesystem(
        cls,
        *,
        owner: str,
        repository: str,
        commit: str | None = None,
    ) -> RemoteFunction:
        """Reference ``owner/repository`` on the filesystem remote, optionally pinned."""
        root = RemotePathCommitOptionalFilesystem(
            remote="filesystem", owner=owner, repository=repository, commit=commit,
        )
        return cls(RemotePathCommitOptional(root=root))

    @classmethod
    def mock(cls, *, name: str) -> RemoteFunction:
        """Reference the mock remote by ``name``."""
        root = RemotePathCommitOptionalMock(remote="mock", name=name)
        return cls(RemotePathCommitOptional(root=root))

    @property
    def ref(self) -> RemotePathCommitOptional:
        """The wrapped remote reference."""
        return self._ref

    def to_function_field(self) -> FullInlineFunctionOrRemoteCommitOptional:
        remote = FullInlineFunctionOrRemoteCommitOptionalRemote(root=self._ref)
        return FullInlineFunctionOrRemoteCommitOptional(root=remote)

    def __coco_memo_key__(self) -> object:
        # model_dump() keys on reference *content*, not object identity.
        return ("objectiveai_cocoindex.RemoteFunction", self._ref.model_dump())

    def __repr__(self) -> str:
        return f"RemoteFunction({self._ref.root!r})"
123
+
124
+
125
class RemoteProfile:
    """Reference to a remotely-hosted profile (GitHub, filesystem, mock).

    Construct via the ``github`` / ``filesystem`` / ``mock`` classmethods
    rather than the bare constructor.
    """

    __slots__ = ("_ref",)

    def __init__(self, ref: RemotePathCommitOptional) -> None:
        self._ref = ref

    @classmethod
    def github(
        cls,
        *,
        owner: str,
        repository: str,
        commit: str | None = None,
    ) -> RemoteProfile:
        """Reference ``owner/repository`` on GitHub, optionally pinned to ``commit``."""
        root = RemotePathCommitOptionalGithub(
            remote="github", owner=owner, repository=repository, commit=commit,
        )
        return cls(RemotePathCommitOptional(root=root))

    @classmethod
    def filesystem(
        cls,
        *,
        owner: str,
        repository: str,
        commit: str | None = None,
    ) -> RemoteProfile:
        """Reference ``owner/repository`` on the filesystem remote, optionally pinned."""
        root = RemotePathCommitOptionalFilesystem(
            remote="filesystem", owner=owner, repository=repository, commit=commit,
        )
        return cls(RemotePathCommitOptional(root=root))

    @classmethod
    def mock(cls, *, name: str) -> RemoteProfile:
        """Reference the mock remote by ``name``."""
        root = RemotePathCommitOptionalMock(remote="mock", name=name)
        return cls(RemotePathCommitOptional(root=root))

    @property
    def ref(self) -> RemotePathCommitOptional:
        """The wrapped remote reference."""
        return self._ref

    def to_profile_field(self) -> InlineProfileOrRemoteCommitOptional:
        remote = InlineProfileOrRemoteCommitOptionalRemote(root=self._ref)
        return InlineProfileOrRemoteCommitOptional(root=remote)

    def __coco_memo_key__(self) -> object:
        # model_dump() keys on reference *content*, not object identity.
        return ("objectiveai_cocoindex.RemoteProfile", self._ref.model_dump())

    def __repr__(self) -> str:
        return f"RemoteProfile({self._ref.root!r})"
187
+
188
+
189
+ # ---------------------------------------------------------------------------
190
+ # Inline sources
191
+ # ---------------------------------------------------------------------------
192
+
193
+
194
class InlineFunction:
    """Inline function definition.

    Wraps a ``FullInlineFunction`` body constructed from
    ``objectiveai.functions``.
    """

    __slots__ = ("_body",)

    def __init__(self, body: FullInlineFunction) -> None:
        self._body = body

    @property
    def body(self) -> FullInlineFunction:
        """The wrapped inline function body."""
        return self._body

    def to_function_field(self) -> FullInlineFunctionOrRemoteCommitOptional:
        inline = FullInlineFunctionOrRemoteCommitOptionalInline(root=self._body)
        return FullInlineFunctionOrRemoteCommitOptional(root=inline)

    def __coco_memo_key__(self) -> object:
        # Keyed on body content so edits to the inline definition invalidate
        # the memoization cache.
        return ("objectiveai_cocoindex.InlineFunction", self._body.model_dump())

    def __repr__(self) -> str:
        return f"InlineFunction({self._body!r})"
218
+
219
+
220
class InlineProfile:
    """Inline profile definition.

    Wraps an ``InlineProfile`` body constructed from
    ``objectiveai.functions``.
    """

    __slots__ = ("_body",)

    def __init__(self, body: InlineProfileBody) -> None:
        self._body = body

    @property
    def body(self) -> InlineProfileBody:
        """The wrapped inline profile body."""
        return self._body

    def to_profile_field(self) -> InlineProfileOrRemoteCommitOptional:
        inline = InlineProfileOrRemoteCommitOptionalInline(root=self._body)
        return InlineProfileOrRemoteCommitOptional(root=inline)

    def __coco_memo_key__(self) -> object:
        # Keyed on body content so edits to the inline definition invalidate
        # the memoization cache.
        return ("objectiveai_cocoindex.InlineProfile", self._body.model_dump())

    def __repr__(self) -> str:
        return f"InlineProfile({self._body!r})"
@@ -0,0 +1,236 @@
1
+ Metadata-Version: 2.4
2
+ Name: objectiveai-cocoindex
3
+ Version: 2.0.5
4
+ Summary: ObjectiveAI integration for CocoIndex
5
+ Project-URL: Homepage, https://objectiveai.dev
6
+ Project-URL: Repository, https://github.com/ObjectiveAI/objectiveai
7
+ Project-URL: Issues, https://github.com/ObjectiveAI/objectiveai/issues
8
+ Author-email: ObjectiveAI <admin@objectiveai.dev>
9
+ License-Expression: MIT
10
+ License-File: LICENSE
11
+ Keywords: ai,cocoindex,indexing,llm,objectiveai
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
21
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
22
+ Requires-Python: >=3.10
23
+ Requires-Dist: cocoindex
24
+ Requires-Dist: objectiveai-sdk==2.0.5
25
+ Description-Content-Type: text/markdown
26
+
27
+ # {ai} | ObjectiveAI
28
+
29
+ [![npm version](https://img.shields.io/npm/v/objectiveai-sdk.svg)](https://www.npmjs.com/package/objectiveai-sdk)
30
+ [![Crates.io](https://img.shields.io/crates/v/objectiveai-sdk.svg)](https://crates.io/crates/objectiveai-sdk)
31
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
32
+
33
+ ## The agentic collective judgment harness.
34
+
35
+ Your agent doesn't have to decide alone. ObjectiveAI lets any agent call out to a **swarm** of models for collective judgment — routing decisions through recursive scoring trees that map arbitrary input into a vector of scores across every option. Functions are invented by agents, trained on data, and deployed as reusable decision infrastructure that gets better over time.
36
+
37
+ [Website](https://objectiveai.dev) | [API](https://api.objectiveai.dev) | [Discord](https://discord.gg/gbNFHensby) | [npm](https://www.npmjs.com/package/objectiveai-sdk) | [crates.io](https://crates.io/crates/objectiveai-sdk) | [Built with ObjectiveAI](examples/README.md)
38
+
39
+ ## What this is
40
+
41
+ An agent faces a choice. Instead of relying on a single model's judgment, it calls an ObjectiveAI Function — a recursive decision tree hosted as a JSON file. The Function fans out to a swarm of models, each with its own perspective. They vote. Their votes combine with learned weights. Out comes a vector of scores that sums to 1, one per option. The agent takes the highest-scoring option and moves on.
42
+
43
+ The scoring pipeline itself is a composition of tasks — vector completions, nested functions, map operations — that can be arbitrarily deep. A Function can call other Functions. An agent can *invent* new Functions. The whole system is content-addressed, version-tracked, and trainable.
44
+
45
+ ```
46
+ Agent has a decision
47
+ -> Calls ObjectiveAI Function
48
+ -> Function fans out to swarm of models
49
+ -> Each model votes across all options
50
+ -> Votes combine with learned weights
51
+ -> Returns scores: [0.42, 0.31, 0.18, 0.09]
52
+ -> Agent takes the best option
53
+ ```
54
+
55
+ ## Install
56
+
57
+ ### CLI
58
+
59
+ Install the pre-built binary with one command:
60
+
61
+ ```bash
62
+ curl -fsSL https://raw.githubusercontent.com/ObjectiveAI/objectiveai/main/install.sh | bash
63
+ . "$HOME/.objectiveai/env"
64
+ ```
65
+
66
+ Leaner, no-viewer build:
67
+
68
+ ```bash
69
+ curl -fsSL https://raw.githubusercontent.com/ObjectiveAI/objectiveai/main/install.sh | bash -s -- --no-viewer
70
+ . "$HOME/.objectiveai/env"
71
+ ```
72
+
73
+ Sourcing `~/.objectiveai/env` puts `objectiveai` on `PATH` for the current shell. New shells pick it up automatically (the installer wires `~/.bashrc` / `~/.zshrc` to source the same file).
74
+
75
+ Supported platforms: Linux x86_64, Linux aarch64 (Raspberry Pi 4+, Graviton), macOS x86_64, macOS aarch64 (Apple Silicon), Windows x86_64. The installer drops the binary at `~/.objectiveai/objectiveai`; the CLI self-updates on startup from [GitHub Releases](https://github.com/ObjectiveAI/objectiveai/releases).
76
+
77
+ ### SDK
78
+
79
+ ```bash
80
+ npm install objectiveai-sdk
81
+ ```
82
+
83
+ ```toml
84
+ [dependencies]
85
+ objectiveai-sdk = "2.0.5"
86
+ ```
87
+
88
+ ## Core primitives
89
+
90
+ ### Agents
91
+
92
+ An **Agent** is a fully-specified configuration of a single upstream model — model identity, prompt structure, decoding parameters, output mode, provider preferences. Content-addressed via XXHash3-128, so the same configuration always produces the same ID.
93
+
94
+ Agents are stored as `agent.json` in Git repositories. Reference them by `owner/repo@commit` or define them inline.
95
+
96
+ ### Swarms
97
+
98
+ A **Swarm** is a collection of Agents used together for collective judgment. Each agent can have its own personality, temperature, output mode, and count. Weights control each agent's influence on the final score.
99
+
100
+ ```json
101
+ {
102
+ "description": "Balanced judgment panel",
103
+ "agents": [
104
+ {
105
+ "upstream": "openrouter",
106
+ "model": "openai/gpt-4o",
107
+ "output_mode": "json_schema",
108
+ "prefix_messages": [
109
+ { "role": "system", "content": "You are a rational skeptic. Ground every choice in logic." }
110
+ ],
111
+ "count": 2
112
+ },
113
+ {
114
+ "upstream": "openrouter",
115
+ "model": "anthropic/claude-sonnet-4-20250514",
116
+ "output_mode": "tool_call",
117
+ "suffix_messages": [
118
+ { "role": "system", "content": "You are an intuitive thinker. Trust your instincts." }
119
+ ],
120
+ "count": 1
121
+ }
122
+ ],
123
+ "weights": [0.6, 0.4]
124
+ }
125
+ ```
126
+
127
+ Swarms are stored as `swarm.json` in Git repositories — shareable, version-tracked, and reusable across Functions.
128
+
129
+ ### Vector Completions
130
+
131
+ The core primitive. Give a swarm a prompt and a set of possible responses. Each agent votes for what it thinks is the best response. Votes combine with weights to produce a score vector that sums to 1.
132
+
133
+ ```
134
+ Prompt: "Which approach best handles edge cases?"
135
+ Responses: ["defensive coding", "fuzzing", "formal verification", "property testing"]
136
+
137
+ -> Scores: [0.12, 0.28, 0.19, 0.41]
138
+ ```
139
+
140
+ #### Probabilistic voting via logprobs
141
+
142
+ LLMs are inherently probabilistic — the sampler makes the final discrete choice, destroying information. ObjectiveAI bypasses the sampler entirely using **logprobs** to capture each model's full preference distribution.
143
+
144
+ If a model is 70% confident in option A and 30% in option B, we capture both signals rather than losing one to sampling. For large response sets exceeding logprobs limits, a prefix tree captures preferences in stages — the tree width matches the logprobs count (typically 20), enabling voting over hundreds of options while preserving probability information at each level.
145
+
146
+ ```
147
+ Traditional: Model outputs "A" (loses the 30% signal for B)
148
+ ObjectiveAI: Model vote = [0.70, 0.30, 0.00, 0.00] (full distribution preserved)
149
+ ```
150
+
151
+ ### Functions
152
+
153
+ **Functions** are composable scoring pipelines. Data in, scores out. They're recursive decision trees that can contain vector completions, nested function calls, and map operations — arbitrarily composed.
154
+
155
+ ```
156
+ Input -> [Task 1: Vector Completion] -> Score
157
+ [Task 2: Nested Function] -> Score
158
+ [Task 3: Mapped Function] -> Score
159
+ -> Weighted average -> Final score
160
+ ```
161
+
162
+ Functions are hosted as `function.json` in Git repositories. Reference them by `owner/repo`:
163
+
164
+ ```
165
+ objectiveai/sentiment-scorer
166
+ ```
167
+
168
+ Functions produce either a **scalar** (single score in [0, 1]) or a **vector** (array of scores summing to 1). A scalar function that calls a vector function that calls another scalar function — all valid, all composable.
169
+
170
+ ### Profiles
171
+
172
+ ObjectiveAI doesn't fine-tune models. It learns **weights**.
173
+
174
+ Give it a dataset of inputs with expected outputs. ObjectiveAI executes repeatedly, computes loss, and adjusts the weights across your swarm to match. The result is a **Profile** — a learned weight configuration stored as `profile.json` that makes your Function's judgments converge on ground truth.
175
+
176
+ ### The resource graph
177
+
178
+ Resources reference each other inline or remote:
179
+
180
+ ```
181
+ agent.json <- swarm.json <- profile.json function.json
182
+ (agents) (swarms+weights) (tasks + input_schema)
183
+
184
+ At execution: function.json + profile.json -> scores
185
+ ```
186
+
187
+ Function and profile are independent files — execution takes both and combines them. All remote references use `(owner, repository, commit)` triples. Pin a commit SHA for reproducibility, or omit it to resolve to latest. The retrieval system resolves the entire graph, caching and deduplicating fetches along the way.
188
+
189
+ ## Function invention
190
+
191
+ Agents can **invent** new Functions. The invention system takes a description of what you want to score, generates the input schema, designs the task tree, and produces a complete `function.json` — ready to deploy, train, and use. Recursive invention builds multi-level decision trees where each node is itself an invented Function.
192
+
193
+ An agent that can invent its own judgment criteria, train them on data, and deploy them for future use. That's the loop.
194
+
195
+ ## Concepts
196
+
197
+ | Concept | What it is |
198
+ |---------|-----------|
199
+ | **Agent** | A configured model with prompt, params, output mode. Content-addressed. `agent.json`. |
200
+ | **Swarm** | A collection of Agents with weights. `swarm.json`. |
201
+ | **Vector Completion** | Prompt + responses -> score vector that sums to 1. |
202
+ | **Function** | Recursive scoring pipeline. Data in, scores out. `function.json`. |
203
+ | **Profile** | Learned weights for a Function. Trained on data. `profile.json`. |
204
+ | **Invention** | Agent-driven creation of new Functions. |
205
+
206
+ ## Repo structure
207
+
208
+ ```
209
+ objectiveai/
210
+ ├── objectiveai-rs/ # Rust SDK (core crate: types, validation, compilation)
211
+ ├── objectiveai-api/ # API server (self-hostable)
212
+ ├── objectiveai-cli/ # CLI tool
213
+ ├── objectiveai-mcp-cli/ # MCP server exposing the CLI as an MCP tool
214
+ ├── objectiveai-mcp-filesystem/ # MCP filesystem server (Docker-injected into lab executions)
215
+ ├── objectiveai-mcp-proxy/ # MCP multiplexing proxy (sidecar of objectiveai-api)
216
+ ├── objectiveai-viewer/ # Local Tauri viewer (optional, embedded in CLI)
217
+ ├── objectiveai-web/ # Web interface (production)
218
+ ├── objectiveai-js/ # TypeScript SDK (npm: objectiveai)
219
+ ├── objectiveai-rs-wasm-js/ # WASM bindings for browser/Node.js
220
+ ├── objectiveai-py/ # Python package
221
+ ├── objectiveai-rs-pyo3/ # Rust crate behind objectiveai-py
222
+ ├── objectiveai-go/ # Go SDK
223
+ ├── objectiveai-dotnet/ # .NET SDK (in progress)
224
+ ├── objectiveai-rs-cffi/ # C FFI bindings (foundation for other langs)
225
+ └── objectiveai-json-schema/ # Generated JSON Schema files
226
+ ```
227
+
228
+ ## Related
229
+
230
+ ### [ObjectiveAI-claude-code-1](https://github.com/ObjectiveAI-claude-code-1)
231
+
232
+ An autonomous Claude Code agent that invents and publishes ObjectiveAI Functions without human intervention. Uses the Agent SDK to create, test, and deploy new scoring pipelines.
233
+
234
+ ## License
235
+
236
+ MIT
@@ -0,0 +1,9 @@
1
+ objectiveai_cocoindex/__init__.py,sha256=i0cMHEtxDWBfKn_1E1w6DChvPOF2WyKXJFqrY96TIoE,1164
2
+ objectiveai_cocoindex/_client.py,sha256=Ss86qNAN1ttaOlgLUCLxROnfK68DLIrhpIlGeQ_R5Ik,1094
3
+ objectiveai_cocoindex/_errors.py,sha256=y4dEfpHIk99KPHUH5GnYs9o18K9Rw6jUrEIwaQrbk_k,375
4
+ objectiveai_cocoindex/_function.py,sha256=O2ujm5CmK3NzCJYyGRFIRceP5aGfZ80yAwFF1J1DEyY,3442
5
+ objectiveai_cocoindex/_sources.py,sha256=D9Lo5HcBmU7pp5Q2SvA2LnfMriklrr0i47YK7a1ydZg,7374
6
+ objectiveai_cocoindex-2.0.5.dist-info/METADATA,sha256=a1P-1IRvotAOcsSlu9pjebf_2UKPfYNhFXzSDp4F58k,11007
7
+ objectiveai_cocoindex-2.0.5.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
8
+ objectiveai_cocoindex-2.0.5.dist-info/licenses/LICENSE,sha256=QZu5oSTN6sZoulddS3v0oGpjXSpK24axI_lkmiOXR3A,1101
9
+ objectiveai_cocoindex-2.0.5.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.29.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025-2026 Objective Artificial Intelligence, Inc.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.