@semiont/inference 0.2.30 → 0.2.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +171 -142
- package/dist/index.d.ts +50 -190
- package/dist/index.js +113 -818
- package/dist/index.js.map +1 -1
- package/package.json +4 -2
package/README.md
CHANGED
@@ -1,14 +1,27 @@
 # @semiont/inference
 
-[](https://www.npmjs.com/package/@semiont/inference)
 [](https://github.com/The-AI-Alliance/semiont/actions/workflows/package-tests.yml?query=branch%3Amain+is%3Asuccess+job%3A%22Test+inference%22)
+[](https://codecov.io/gh/The-AI-Alliance/semiont?flag=inference)
+[](https://www.npmjs.com/package/@semiont/inference)
+[](https://www.npmjs.com/package/@semiont/inference)
+[](https://github.com/The-AI-Alliance/semiont/blob/main/LICENSE)
 
-AI
+**AI primitives for text generation and client management.**
+
+This package provides the **core AI primitives** for the Semiont platform:
+- Anthropic client singleton management
+- Simple text generation interface
+- Environment variable expansion for API keys
+- Provider abstraction for future extensibility
+
+For **application-specific AI logic** (entity extraction, resource generation, motivation prompts/parsers), see [@semiont/make-meaning](../make-meaning/).
 
 ## Philosophy
 
 This package is named `inference` rather than `ai-inference` to align with Semiont's core tenet: humans and AI agents have equal opportunity to work behind similar interfaces. The abstraction remains open for future human-agent parity.
 
+**Package Responsibility**: AI primitives only. No application logic, no prompt engineering, no response parsing. Those belong in `@semiont/make-meaning`.
+
 ## Installation
 
 ```bash
@@ -18,7 +31,7 @@ npm install @semiont/inference
 ## Quick Start
 
 ```typescript
-import {
+import { generateText, getInferenceClient, getInferenceModel } from '@semiont/inference';
 import type { EnvironmentConfig } from '@semiont/core';
 
 const config: EnvironmentConfig = {
@@ -31,83 +44,24 @@ const config: EnvironmentConfig = {
   }
 };
 
-//
-const entities = await extractEntities(
-  'Paris is the capital of France.',
-  ['Location'],
-  config
-);
-
-// Generate text
+// Generate text using the primitive
 const text = await generateText(
   'Explain quantum computing in simple terms',
-  config
+  config,
+  500, // maxTokens
+  0.7  // temperature
 );
-```
-
-## API Reference
-
-From [src/index.ts](src/index.ts):
-
-### Entity Extraction
-
-**`extractEntities(text, entityTypes, config, includeDescriptiveReferences?)`**
 
-
-
-**Parameters:**
-- `text: string` - Text to analyze
-- `entityTypes: string[] | { type: string; examples?: string[] }[]` - Entity types to detect
-- `config: EnvironmentConfig` - Configuration
-- `includeDescriptiveReferences?: boolean` - Include anaphoric/cataphoric references (default: false)
-
-**Returns:** `Promise<ExtractedEntity[]>`
-
-```typescript
-interface ExtractedEntity {
-  exact: string;       // Actual text span from input
-  entityType: string;  // Detected entity type
-  startOffset: number; // Character position where entity starts (0-indexed)
-  endOffset: number;   // Character position where entity ends
-  prefix?: string;     // Up to 32 chars before entity (for disambiguation)
-  suffix?: string;     // Up to 32 chars after entity (for disambiguation)
-}
+console.log(text);
 ```
 
-
-
-From [src/entity-extractor.ts:101-102](src/entity-extractor.ts):
-- Uses 4000 max_tokens to handle many entities without truncation
-- Uses temperature 0.3 for consistent extraction
-
-From [src/entity-extractor.ts:131-135](src/entity-extractor.ts):
-- Throws error if response is truncated (stop_reason === 'max_tokens')
-- Validates all character offsets after AI response
-
-From [src/entity-extractor.ts:147-199](src/entity-extractor.ts):
-- Corrects misaligned offsets using prefix/suffix context matching
-- Filters invalid entities (negative offsets, out-of-bounds, mismatches)
-
-**Anaphoric/Cataphoric Reference Support:**
-
-From [src/entity-extractor.ts:48-75](src/entity-extractor.ts):
-
-When `includeDescriptiveReferences` is true, includes:
-- Direct mentions (names, proper nouns)
-- Definite descriptions: "the Nobel laureate", "the tech giant"
-- Role-based references: "the CEO", "the physicist"
-- Epithets with context: "the Cupertino-based company"
-
-Excludes:
-- Simple pronouns: he, she, it, they
-- Generic determiners: this, that, these, those
-- Possessives without substance: his, her, their
+## API Reference
 
-###
+### Core Primitives
 
-**`generateText(prompt, config, maxTokens?, temperature?)
+**`generateText(prompt, config, maxTokens?, temperature?): Promise<string>`**
 
-Simple text generation
+Simple text generation primitive.
 
 **Parameters:**
 - `prompt: string` - The prompt
@@ -115,95 +69,65 @@ Simple text generation with configurable parameters.
 - `maxTokens?: number` - Maximum tokens (default: 500)
 - `temperature?: number` - Sampling temperature (default: 0.7)
 
-**Returns:** `Promise<string>`
+**Returns:** `Promise<string>` - Generated text
 
-
+**Implementation** ([src/factory.ts:68-102](src/factory.ts#L68-L102)):
 - Uses Anthropic Messages API
 - Extracts text content from first text block in response
 - Throws error if no text content in response
 
-
-
-
-
-
-
-
-
-
-- `locale?: string` - Language locale (e.g., 'es', 'fr')
-- `context?: GenerationContext` - Source document context
-- `temperature?: number` - Sampling temperature (default: 0.7)
-- `maxTokens?: number` - Maximum tokens (default: 500)
-
-**Returns:** `Promise<{ title: string; content: string }>`
-
-From [src/factory.ts:186-189](src/factory.ts):
-- Returns topic as title (not extracted from generated content)
-- Returns generated markdown as content
-
-From [src/factory.ts:136-138](src/factory.ts):
-- Supports non-English languages using locale parameter
-- Converts locale to language name (e.g., 'es' → 'Spanish')
-
-From [src/factory.ts:166-182](src/factory.ts):
-- Automatically strips markdown code fences from response if present
-- Handles ```markdown, ```md, and ``` formats
-
-**`generateResourceSummary(resourceName, content, entityTypes, config)`**
-
-Generate a 2-3 sentence summary of a resource.
-
-**Parameters:**
-- `resourceName: string` - Name of the resource
-- `content: string` - Content to summarize (truncated to 2000 chars)
-- `entityTypes: string[]` - Entity types mentioned
-- `config: EnvironmentConfig` - Configuration
-
-**Returns:** `Promise<string>`
-
-From [src/factory.ts:216-219](src/factory.ts):
-- Truncates content to first 2000 characters to stay within limits
-- Uses temperature 0.7, max_tokens 150
+**Example:**
+```typescript
+const result = await generateText(
+  'Write a haiku about programming',
+  config,
+  100,
+  0.8
+);
+```
 
-**`
+**`getInferenceClient(config): Promise<Anthropic>`**
 
-
+Get the singleton Anthropic client instance.
 
 **Parameters:**
-- `referenceTitle: string` - Title of the reference
 - `config: EnvironmentConfig` - Configuration
-- `entityType?: string` - Optional entity type
-- `currentContent?: string` - Optional current content for context
-
-**Returns:** `Promise<string[] | null>`
-
-From [src/factory.ts:246-249](src/factory.ts):
-- Returns array of 3 suggestions or null on parse error
-- Uses temperature 0.8 for creative suggestions
-
-### Client Factory
 
-
+**Returns:** `Promise<Anthropic>` - Anthropic client
 
-
-
-**Returns:** `Promise<Anthropic>`
-
-From [src/factory.ts:10-51](src/factory.ts):
+**Implementation** ([src/factory.ts:17-52](src/factory.ts#L17-L52)):
 - Singleton pattern - creates client once, caches for reuse
 - Supports environment variable expansion in API keys (e.g., '${ANTHROPIC_API_KEY}')
 - Configurable baseURL with fallback to https://api.anthropic.com
 
-
+**Example:**
+```typescript
+const client = await getInferenceClient(config);
+const response = await client.messages.create({
+  model: 'claude-3-5-sonnet-20241022',
+  max_tokens: 100,
+  messages: [{ role: 'user', content: 'Hello' }]
+});
+```
+
+**`getInferenceModel(config): string`**
 
 Get the configured model name.
 
-**
+**Parameters:**
+- `config: EnvironmentConfig` - Configuration
+
+**Returns:** `string` - Model name (e.g., 'claude-3-5-sonnet-20241022')
+
+**Example:**
+```typescript
+const model = getInferenceModel(config);
+console.log(`Using model: ${model}`);
+```
 
 ## Configuration
 
-From [src/factory.ts:22-48](src/factory.ts):
+From [src/factory.ts:22-48](src/factory.ts#L22-L48):
 
 ```typescript
 config.services.inference = {
@@ -217,7 +141,7 @@ config.services.inference = {
 
 ### Environment Variable Expansion
 
-From [src/factory.ts:27-36](src/factory.ts):
+From [src/factory.ts:27-36](src/factory.ts#L27-L36):
 
 API keys support ${VAR_NAME} syntax:
 
@@ -227,17 +151,122 @@ config.services.inference = {
 }
 ```
 
-Pattern
-Throws error if environment variable is not set
+**Pattern:** starts with '${' and ends with '}'
+**Behavior:** Throws error if environment variable is not set
+
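A minimal sketch of that expansion rule as documented above ("starts with '${' and ends with '}'", error when unset); `expandEnvVar` is a hypothetical name for illustration, not the package's internal helper:

```typescript
// Hypothetical helper illustrating the documented behavior: a value like
// '${ANTHROPIC_API_KEY}' resolves to process.env.ANTHROPIC_API_KEY, and a
// missing variable throws rather than passing undefined along.
function expandEnvVar(value: string): string {
  if (value.startsWith('${') && value.endsWith('}')) {
    const name = value.slice(2, -1);
    const resolved = process.env[name];
    if (resolved === undefined) {
      throw new Error(`Environment variable ${name} is not set`);
    }
    return resolved;
  }
  return value; // literal API keys pass through unchanged
}
```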
+## Application-Specific AI Logic
+
+This package provides **primitives only**. For application-specific features, use [@semiont/make-meaning](../make-meaning/):
+
+**Entity Extraction:**
+```typescript
+import { extractEntities } from '@semiont/make-meaning';
+
+const entities = await extractEntities(
+  'Marie Curie worked at the University of Paris.',
+  ['Person', 'Organization'],
+  config
+);
+```
+
+**Resource Generation:**
+```typescript
+import { generateResourceFromTopic } from '@semiont/make-meaning';
+
+const { title, content } = await generateResourceFromTopic(
+  'Quantum Computing',
+  ['Technology', 'Physics'],
+  config
+);
+```
+
+**Motivation Prompts & Parsers:**
+```typescript
+import { MotivationPrompts, MotivationParsers } from '@semiont/make-meaning';
+
+// Build prompt for comment detection
+const prompt = MotivationPrompts.buildCommentPrompt(content, instructions);
+
+// Call generateText from @semiont/inference
+const response = await generateText(prompt, config);
+
+// Parse response
+const comments = MotivationParsers.parseComments(response, content);
+```
+
+**Orchestrated Detection:**
+```typescript
+import { AnnotationDetection } from '@semiont/make-meaning';
+
+const comments = await AnnotationDetection.detectComments(resourceId, config);
+const highlights = await AnnotationDetection.detectHighlights(resourceId, config);
+```
+
+## Architecture
+
+```
+┌─────────────────────────────────────────────┐
+│           @semiont/make-meaning             │
+│      (Application-specific AI logic)        │
+│  - Entity extraction with validation        │
+│  - Resource generation with templates       │
+│  - Motivation prompts (comment/highlight)   │
+│  - Response parsers with offset correction  │
+│  - Orchestrated detection pipelines         │
+└──────────────────┬──────────────────────────┘
+                   │ uses
+┌──────────────────▼──────────────────────────┐
+│            @semiont/inference               │
+│          (AI primitives only)               │
+│  - getInferenceClient()                     │
+│  - getInferenceModel()                      │
+│  - generateText()                           │
+└──────────────────┬──────────────────────────┘
+                   │ uses
+┌──────────────────▼──────────────────────────┐
+│            @anthropic-ai/sdk                │
+│         (Anthropic Messages API)            │
+└─────────────────────────────────────────────┘
+```
+
+**Key Principles:**
+- **@semiont/inference**: Provider abstraction, client management, core text generation
+- **@semiont/make-meaning**: Semantic processing, prompt engineering, response parsing
+- **Clean separation**: Adding OpenAI support only affects @semiont/inference
+
+## Provider Extensibility
+
+The package is designed for future provider support:
+
+1. Update `getInferenceClient()` to support `config.services.inference.type`
+2. Add provider-specific client initialization
+3. Update `generateText()` to handle different API formats
+4. Application code in `@semiont/make-meaning` remains unchanged
+
+**Current Support:** Anthropic (Claude) via `@anthropic-ai/sdk`
+**Future:** OpenAI, Google Vertex AI, local models, etc.
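A sketch of steps 1-2 against the `createInferenceClient`/`InferenceClientConfig` surface declared in `dist/index.d.ts` below; the `'openai'` branch is hypothetical and shown only to mark where a second provider would slot in:

```typescript
import { createInferenceClient, type InferenceClientConfig } from '@semiont/inference';

// 'anthropic' is the only InferenceClientType today; a future provider
// would extend the union and add a branch like the commented-out one.
function clientFor(config: InferenceClientConfig) {
  switch (config.type) {
    case 'anthropic':
      return createInferenceClient(config);
    // case 'openai':  // hypothetical future provider
    //   return createOpenAIInferenceClient(config);
    default:
      throw new Error(`Unsupported inference provider: ${config.type}`);
  }
}
```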
 
 ## Dependencies
 
 From [package.json](package.json):
 
 - `@anthropic-ai/sdk` ^0.63.0 - Anthropic API client
-- `@semiont/api-client` * - Types and utilities
 - `@semiont/core` * - Environment configuration
 
+**Note:** No dependency on `@semiont/api-client` - primitives have minimal dependencies
+
+## Testing
+
+```bash
+npm test              # Run tests
+npm run test:watch    # Watch mode
+npm run test:coverage # Coverage report
+```
+
+## Examples
+
+See [examples/basic.ts](examples/basic.ts) for usage examples.
+
 ## License
 
 Apache-2.0
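The README's `config.services.inference` block itself is elided from this diff; a plausible shape, inferred from the `InferenceClientConfig` interface in `dist/index.d.ts` below (values are placeholders, and the full `EnvironmentConfig` schema is defined by `@semiont/core`):

```typescript
// Sketch only: field names follow InferenceClientConfig in dist/index.d.ts.
const config = {
  services: {
    inference: {
      type: 'anthropic' as const,
      model: 'claude-3-5-sonnet-20241022',
      apiKey: '${ANTHROPIC_API_KEY}',       // expanded from the environment
      baseURL: 'https://api.anthropic.com', // optional override
    },
  },
};
```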
package/dist/index.d.ts
CHANGED
@@ -1,205 +1,65 @@
-import Anthropic from '@anthropic-ai/sdk';
-import { GenerationContext } from '@semiont/api-client';
 import { EnvironmentConfig } from '@semiont/core';
 
-
-
-
- */
-declare function getInferenceClient(config: EnvironmentConfig): Promise<Anthropic>;
-/**
- * Get the configured model name
- */
-declare function getInferenceModel(config: EnvironmentConfig): string;
-/**
- * Helper function to make a simple inference call
- */
-declare function generateText(prompt: string, config: EnvironmentConfig, maxTokens?: number, temperature?: number): Promise<string>;
-/**
- * Generate resource content using inference
- */
-declare function generateResourceFromTopic(topic: string, entityTypes: string[], config: EnvironmentConfig, userPrompt?: string, locale?: string, context?: GenerationContext, temperature?: number, maxTokens?: number): Promise<{
-    title: string;
-    content: string;
-}>;
-/**
- * Generate an intelligent summary for a resource
- */
-declare function generateResourceSummary(resourceName: string, content: string, entityTypes: string[], config: EnvironmentConfig): Promise<string>;
-/**
- * Generate smart suggestions for a reference
- */
-declare function generateReferenceSuggestions(referenceTitle: string, config: EnvironmentConfig, entityType?: string, currentContent?: string): Promise<string[] | null>;
-
-/**
- * Entity reference extracted from text
- */
-interface ExtractedEntity {
-    exact: string;
-    entityType: string;
-    startOffset: number;
-    endOffset: number;
-    prefix?: string;
-    suffix?: string;
+interface InferenceResponse {
+    text: string;
+    stopReason: 'end_turn' | 'max_tokens' | 'stop_sequence' | string;
 }
-
- * Extract entity references from text using AI
- *
- * @param text - The text to analyze
- * @param entityTypes - Array of entity types to detect (optionally with examples)
- * @param config - Application configuration
- * @param includeDescriptiveReferences - Include anaphoric/cataphoric references (default: false)
- * @returns Array of extracted entities with their character offsets
- */
-declare function extractEntities(exact: string, entityTypes: string[] | {
-    type: string;
-    examples?: string[];
-}[], config: EnvironmentConfig, includeDescriptiveReferences?: boolean): Promise<ExtractedEntity[]>;
-
-/**
- * Prompt builders for annotation detection motivations
- *
- * Provides static methods to build AI prompts for each Web Annotation motivation type.
- * Extracted from worker implementations to centralize prompt logic.
- */
-declare class MotivationPrompts {
-    /**
-     * Build a prompt for detecting comment-worthy passages
-     *
-     * @param content - The text content to analyze (will be truncated to 8000 chars)
-     * @param instructions - Optional user-provided instructions
-     * @param tone - Optional tone guidance (e.g., "academic", "conversational")
-     * @param density - Optional target number of comments per 2000 words
-     * @returns Formatted prompt string
-     */
-    static buildCommentPrompt(content: string, instructions?: string, tone?: string, density?: number): string;
-    /**
-     * Build a prompt for detecting highlight-worthy passages
-     *
-     * @param content - The text content to analyze (will be truncated to 8000 chars)
-     * @param instructions - Optional user-provided instructions
-     * @param density - Optional target number of highlights per 2000 words
-     * @returns Formatted prompt string
-     */
-    static buildHighlightPrompt(content: string, instructions?: string, density?: number): string;
+interface InferenceClient {
     /**
-     *
-     *
-     * @param
-     * @param
-     * @
-     * @param density - Optional target number of assessments per 2000 words
-     * @returns Formatted prompt string
+     * Generate text from a prompt (simple interface)
+     * @param prompt - The input prompt
+     * @param maxTokens - Maximum tokens to generate
+     * @param temperature - Sampling temperature (0-1)
+     * @returns Generated text
      */
-
+    generateText(prompt: string, maxTokens: number, temperature: number): Promise<string>;
     /**
-     *
-     *
-     * @param
-     * @param
-     * @
-     * @param schemaDescription - Schema description
-     * @param schemaDomain - Schema domain
-     * @param categoryDescription - Category description
-     * @param categoryExamples - Example questions/guidance for this category
-     * @returns Formatted prompt string
+     * Generate text with detailed response information
+     * @param prompt - The input prompt
+     * @param maxTokens - Maximum tokens to generate
+     * @param temperature - Sampling temperature (0-1)
+     * @returns Response with text and metadata
      */
-
+    generateTextWithMetadata(prompt: string, maxTokens: number, temperature: number): Promise<InferenceResponse>;
 }
 
-
-
-
-
-
-
-
-/**
- * Represents a detected comment with validated position
- */
-interface CommentMatch {
-    exact: string;
-    start: number;
-    end: number;
-    prefix?: string;
-    suffix?: string;
-    comment: string;
+type InferenceClientType = 'anthropic';
+interface InferenceClientConfig {
+    type: InferenceClientType;
+    apiKey?: string;
+    model: string;
+    endpoint?: string;
+    baseURL?: string;
 }
+declare function createInferenceClient(config: InferenceClientConfig): InferenceClient;
+declare function getInferenceClient(config: EnvironmentConfig): Promise<InferenceClient>;
 /**
- *
- */
-interface HighlightMatch {
-    exact: string;
-    start: number;
-    end: number;
-    prefix?: string;
-    suffix?: string;
-}
-/**
- * Represents a detected assessment with validated position
- */
-interface AssessmentMatch {
-    exact: string;
-    start: number;
-    end: number;
-    prefix?: string;
-    suffix?: string;
-    assessment: string;
-}
-/**
- * Represents a detected tag with validated position
+ * Get the configured model name
  */
-
-
-
-
-
-
-
+declare function getInferenceModel(config: EnvironmentConfig): string;
+
+declare class AnthropicInferenceClient implements InferenceClient {
+    private client;
+    private model;
+    constructor(apiKey: string, model: string, baseURL?: string);
+    generateText(prompt: string, maxTokens: number, temperature: number): Promise<string>;
+    generateTextWithMetadata(prompt: string, maxTokens: number, temperature: number): Promise<InferenceResponse>;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- */
-    static parseHighlights(response: string, content: string): HighlightMatch[];
-    /**
-     * Parse and validate AI response for assessment detection
-     *
-     * @param response - Raw AI response string (may include markdown code fences)
-     * @param content - Original content to validate offsets against
-     * @returns Array of validated assessment matches
-     */
-    static parseAssessments(response: string, content: string): AssessmentMatch[];
-    /**
-     * Parse and validate AI response for tag detection
-     * Note: Does NOT validate offsets - caller must do that with content
-     *
-     * @param response - Raw AI response string (may include markdown code fences)
-     * @returns Array of tag matches (offsets not yet validated)
-     */
-    static parseTags(response: string): Omit<TagMatch, 'category'>[];
-    /**
-     * Validate tag offsets against content and add category
-     * Helper for tag detection after initial parsing
-     *
-     * @param tags - Parsed tags without validated offsets
-     * @param content - Original content to validate against
-     * @param category - Category to assign to validated tags
-     * @returns Array of validated tag matches
-     */
-    static validateTagOffsets(tags: Omit<TagMatch, 'category'>[], content: string, category: string): TagMatch[];
+
+declare class MockInferenceClient implements InferenceClient {
+    private responses;
+    private responseIndex;
+    private stopReasons;
+    calls: Array<{
+        prompt: string;
+        maxTokens: number;
+        temperature: number;
+    }>;
+    constructor(responses?: string[], stopReasons?: string[]);
+    generateText(prompt: string, maxTokens: number, temperature: number): Promise<string>;
+    generateTextWithMetadata(prompt: string, maxTokens: number, temperature: number): Promise<InferenceResponse>;
+    reset(): void;
+    setResponses(responses: string[], stopReasons?: string[]): void;
 }
 
-export {
+export { AnthropicInferenceClient, type InferenceClient, type InferenceClientConfig, type InferenceClientType, type InferenceResponse, MockInferenceClient, createInferenceClient, getInferenceClient, getInferenceModel };
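The new `MockInferenceClient` makes code built on `InferenceClient` testable without network access. A usage sketch assembled only from the declarations above (the canned responses are placeholders, and in-order consumption of `responses` is an assumption based on the private `responseIndex` field):

```typescript
import { MockInferenceClient } from '@semiont/inference';

// Queue two canned replies; the second simulates a truncated response.
const mock = new MockInferenceClient(
  ['First canned reply', 'Truncated repl'],
  ['end_turn', 'max_tokens']
);

const text = await mock.generateText('Say hello', 100, 0.7);
console.log(text); // expected: 'First canned reply'

// generateTextWithMetadata exposes stopReason for truncation checks.
const { text: second, stopReason } = await mock.generateTextWithMetadata('Again', 100, 0.7);
if (stopReason === 'max_tokens') {
  console.warn('Response was truncated:', second);
}

// Every call is recorded for assertions, then the mock can be reset.
console.log(mock.calls.length); // expected: 2
mock.reset();
```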