@rcrsr/rill-ext-openai 0.16.0 → 0.18.1

This diff shows the changes between package versions as published to a supported public registry. The information is provided for informational purposes only and reflects the package contents as they appear in that registry.
package/README.md CHANGED
@@ -1,8 +1,6 @@
1
1
  # @rcrsr/rill-ext-openai
2
2
 
3
- [rill](https://rill.run) extension for [OpenAI](https://platform.openai.com/docs) API integration. Provides `message`, `messages`, `embed`, `embed_batch`, `tool_loop`, and `generate` host functions. Compatible with any OpenAI-compatible server (LM Studio, Ollama, vLLM).
4
-
5
- > **Experimental.** Breaking changes will occur before stabilization.
3
+ [rill](https://rill.run) extension for [OpenAI](https://platform.openai.com/docs) API integration. Provides `message`, `messages`, `embed`, `embed_batch`, `tool_loop`, and `generate` host functions. Compatible with OpenAI-compatible servers (LM Studio, Ollama, vLLM).
6
4
 
7
5
  ## Install
8
6
 
@@ -10,42 +8,54 @@
10
8
  npm install @rcrsr/rill-ext-openai
11
9
  ```
12
10
 
13
- **Peer dependencies:** `@rcrsr/rill`
14
-
15
11
  ## Quick Start
16
12
 
17
- ```typescript
18
- import { parse, execute, createRuntimeContext, prefixFunctions } from '@rcrsr/rill';
19
- import { createOpenAIExtension } from '@rcrsr/rill-ext-openai';
13
+ **rill-config.json**
14
+
15
+ ```json
16
+ {
17
+ "main": "hello.rill",
18
+ "extensions": {
19
+ "mounts": {
20
+ "llm": "@rcrsr/rill-ext-openai"
21
+ },
22
+ "config": {
23
+ "llm": {
24
+ "api_key": "${OPENAI_API_KEY}",
25
+ "model": "gpt-4o"
26
+ }
27
+ }
28
+ }
29
+ }
30
+ ```
31
+
32
+ **hello.rill**
33
+
34
+ ```rill
35
+ use<ext:llm> => $llm
20
36
 
21
- const ext = createOpenAIExtension({
22
- api_key: process.env.OPENAI_API_KEY!,
23
- model: 'gpt-4o',
24
- });
25
- const prefixed = prefixFunctions('openai', ext);
26
- const { dispose, ...functions } = prefixed;
37
+ $llm.message("Explain TCP handshakes") -> each { log }
38
+ ```
27
39
 
28
- const ctx = createRuntimeContext({
29
- functions,
30
- callbacks: { onLog: (v) => console.log(v) },
31
- });
40
+ ```bash
41
+ rill-run
42
+ ```
32
43
 
33
- const script = `openai::message("Explain TCP handshakes")`;
34
- const result = await execute(parse(script), ctx);
44
+ For local models, set `base_url` to point at the compatible server:
35
45
 
36
- dispose?.();
46
+ ```json
47
+ {
48
+ "llm": {
49
+ "base_url": "http://localhost:1234/v1",
50
+ "model": "llama3"
51
+ }
52
+ }
37
53
  ```
38
54
 
39
55
  ## Documentation
40
56
 
41
57
  See [full documentation](docs/extension-llm-openai.md) for configuration, functions, error handling, events, and examples.
42
58
 
43
- ## Related
44
-
45
- - [rill](https://github.com/rcrsr/rill) — Core language runtime
46
- - [Extensions Guide](https://github.com/rcrsr/rill/blob/main/docs/integration-extensions.md) — Extension contract and patterns
47
- - [Host API Reference](https://github.com/rcrsr/rill/blob/main/docs/ref-host-api.md) — Runtime context and host functions
48
-
49
59
  ## License
50
60
 
51
61
  MIT
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  // Generated by dts-bundle-generator v9.5.1
2
2
 
3
- import { ExtensionConfigSchema, ExtensionManifest, ExtensionResult } from '@rcrsr/rill';
3
+ import { ExtensionConfigSchema, ExtensionFactoryResult, ExtensionManifest } from '@rcrsr/rill';
4
4
 
5
5
  /**
6
6
  * Base configuration for LLM extensions
@@ -72,7 +72,7 @@ export type OpenAIExtensionConfig = LLMProviderConfig;
72
72
  * await ext.dispose();
73
73
  * ```
74
74
  */
75
- export declare function createOpenAIExtension(config: OpenAIExtensionConfig): ExtensionResult;
75
+ export declare function createOpenAIExtension(config: OpenAIExtensionConfig): ExtensionFactoryResult;
76
76
  export declare const VERSION: string;
77
77
  export declare const configSchema: ExtensionConfigSchema;
78
78
  export declare const extensionManifest: ExtensionManifest;