@almadar/llm 2.0.1 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -4,6 +4,7 @@ export { R as RateLimiter, a as RateLimiterOptions, T as TokenTracker, b as Toke
  export { autoCloseJson, extractJsonFromText, isValidJson, parseJsonResponse, safeParseJson } from './json-parser.js';
  import { z } from 'zod';
  export { JsonSchema, STRUCTURED_OUTPUT_MODELS, StructuredGenerationOptions, StructuredGenerationResult, StructuredOutputClient, StructuredOutputOptions, getStructuredOutputClient, isStructuredOutputAvailable, resetStructuredOutputClient } from './structured-output.js';
+ import { ServiceContract } from '@almadar/core';
  import '@langchain/openai';
  import '@langchain/anthropic';
 
@@ -64,4 +65,82 @@ declare function salvagePartialResponse<T>(rawResponse: string): T | null;
  declare function callWithContinuation<T>(options: ContinuationOptions<T>): Promise<ContinuationResult<T>>;
  declare function buildGenericContinuationPrompt(context: string, partialResponse: string, attempt: number, maxAttempts?: number): string;
 
- export { type ContinuationOptions, type ContinuationResult, LLMClient, LLMFinishReason, type TruncationReason, type TruncationResult, buildGenericContinuationPrompt, callWithContinuation, detectTruncation, findLastCompleteElement, isLikelyTruncated, mergeResponses, salvagePartialResponse };
+ /**
+  * LLM Service Contract
+  *
+  * Defines the call-service-compatible actions for the LLM service.
+  * These actions can be invoked from `.orb` schemas via
+  * `["call-service", "llm", "<action>", {...}]`.
+  *
+  * @packageDocumentation
+  */
+
+ /**
+  * All call-service actions exposed by the LLM service.
+  */
+ type LLMServiceActions = {
+   /** Generate text from a prompt. */
+   generate: {
+     params: {
+       systemPrompt?: string;
+       userPrompt: string;
+       model?: string;
+       temperature?: number;
+       maxTokens?: number;
+     };
+     result: {
+       content: string;
+       usage: {
+         promptTokens: number;
+         completionTokens: number;
+         totalTokens: number;
+       };
+     };
+   };
+   /** Classify text into one of the given categories. */
+   classify: {
+     params: {
+       text: string;
+       categories: string[];
+       model?: string;
+     };
+     result: {
+       category: string;
+       confidence: number;
+       reasoning: string;
+     };
+   };
+   /** Extract structured data from text according to a JSON schema. */
+   extract: {
+     params: {
+       text: string;
+       schema: Record<string, unknown>;
+       model?: string;
+     };
+     result: {
+       data: Record<string, unknown>;
+       confidence: number;
+     };
+   };
+   /** Summarize text with optional length and style constraints. */
+   summarize: {
+     params: {
+       text: string;
+       maxLength?: number;
+       style?: string;
+       model?: string;
+     };
+     result: {
+       summary: string;
+       keyPoints: string[];
+     };
+   };
+ };
+ /**
+  * The full service contract for the LLM service.
+  * Implementations must provide an `execute(action, params)` method
+  * that dispatches to the correct action handler.
+  */
+ type LLMServiceContract = ServiceContract<LLMServiceActions>;
+
+ export { type ContinuationOptions, type ContinuationResult, LLMClient, LLMFinishReason, type LLMServiceActions, type LLMServiceContract, type TruncationReason, type TruncationResult, buildGenericContinuationPrompt, callWithContinuation, detectTruncation, findLastCompleteElement, isLikelyTruncated, mergeResponses, salvagePartialResponse };
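Note: the following is a minimal, hypothetical consumer-side sketch of the contract added above. It assumes only what the JSDoc states — that ServiceContract<LLMServiceActions> boils down to an execute(action, params) method — since the actual ServiceContract type from @almadar/core is not shown in this diff. The callLLMService helper and the ExecutableLLMService interface are illustrative names, not part of the package.

import type { LLMServiceActions } from '@almadar/llm';

// Derive per-action param/result types from the contract's action map.
type ActionName = keyof LLMServiceActions;
type ParamsOf<A extends ActionName> = LLMServiceActions[A]['params'];
type ResultOf<A extends ActionName> = LLMServiceActions[A]['result'];

// Assumed shape of a service implementation, per the contract's JSDoc.
interface ExecutableLLMService {
  execute<A extends ActionName>(action: A, params: ParamsOf<A>): Promise<ResultOf<A>>;
}

// Keeps the action name and its params/result pair type-checked at the call site.
async function callLLMService<A extends ActionName>(
  service: ExecutableLLMService,
  action: A,
  params: ParamsOf<A>,
): Promise<ResultOf<A>> {
  return service.execute(action, params);
}

// Usage: a typed "generate" call, mirroring ["call-service", "llm", "generate", {...}] in an .orb schema.
// const { content, usage } = await callLLMService(llmService, 'generate', {
//   userPrompt: 'Summarize the release notes',
//   maxTokens: 256,
// });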
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@almadar/llm",
-   "version": "2.0.1",
+   "version": "2.0.3",
    "description": "Multi-provider LLM client with rate limiting, token tracking, structured outputs, and continuation handling",
    "type": "module",
    "main": "./dist/index.js",
@@ -33,7 +33,7 @@
      "@langchain/openai": "^1.2.6",
      "openai": "^6.18.0",
      "zod": "^3.22.0",
-     "@langchain/core": "^0.3.0"
+     "@langchain/core": "^1.1.30"
    },
    "peerDependencies": {
      "@almadar/core": ">=2.0.0"
@@ -44,9 +44,12 @@
      }
    },
    "devDependencies": {
+     "@almadar/eslint-plugin": ">=2.3.0",
+     "@typescript-eslint/parser": "8.56.0",
+     "@types/node": "^22.0.0",
+     "eslint": "10.0.0",
      "tsup": "^8.0.0",
-     "typescript": "^5.3.0",
-     "@types/node": "^22.0.0"
+     "typescript": "^5.3.0"
    },
    "repository": {
      "type": "git",
package/src/contracts.ts ADDED
@@ -0,0 +1,83 @@
+ /**
+  * LLM Service Contract
+  *
+  * Defines the call-service-compatible actions for the LLM service.
+  * These actions can be invoked from `.orb` schemas via
+  * `["call-service", "llm", "<action>", {...}]`.
+  *
+  * @packageDocumentation
+  */
+
+ import type { ServiceContract } from "@almadar/core";
+
+ /**
+  * All call-service actions exposed by the LLM service.
+  */
+ export type LLMServiceActions = {
+   /** Generate text from a prompt. */
+   generate: {
+     params: {
+       systemPrompt?: string;
+       userPrompt: string;
+       model?: string;
+       temperature?: number;
+       maxTokens?: number;
+     };
+     result: {
+       content: string;
+       usage: {
+         promptTokens: number;
+         completionTokens: number;
+         totalTokens: number;
+       };
+     };
+   };
+
+   /** Classify text into one of the given categories. */
+   classify: {
+     params: {
+       text: string;
+       categories: string[];
+       model?: string;
+     };
+     result: {
+       category: string;
+       confidence: number;
+       reasoning: string;
+     };
+   };
+
+   /** Extract structured data from text according to a JSON schema. */
+   extract: {
+     params: {
+       text: string;
+       schema: Record<string, unknown>;
+       model?: string;
+     };
+     result: {
+       data: Record<string, unknown>;
+       confidence: number;
+     };
+   };
+
+   /** Summarize text with optional length and style constraints. */
+   summarize: {
+     params: {
+       text: string;
+       maxLength?: number;
+       style?: string;
+       model?: string;
+     };
+     result: {
+       summary: string;
+       keyPoints: string[];
+     };
+   };
+ };
+
+ /**
+  * The full service contract for the LLM service.
+  * Implementations must provide an `execute(action, params)` method
+  * that dispatches to the correct action handler.
+  */
+ export type LLMServiceContract = ServiceContract<LLMServiceActions>;
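Note: a rough sketch of how the actions declared above could be dispatched, assuming only the execute(action, params) shape stated in the contract's JSDoc. The handlers map, the placeholder return values, and the execute function below are hypothetical illustrations, not code from @almadar/llm or @almadar/core.

import type { LLMServiceActions } from '@almadar/llm';

type Actions = LLMServiceActions;

// One placeholder handler per action; the mapped type keeps each
// action's params/result pair aligned with the contract.
const handlers: {
  [A in keyof Actions]: (params: Actions[A]['params']) => Promise<Actions[A]['result']>;
} = {
  async generate(params) {
    return {
      content: `echo: ${params.userPrompt}`,
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
    };
  },
  async classify(params) {
    return {
      category: params.categories[0] ?? 'unknown',
      confidence: 0,
      reasoning: 'stubbed',
    };
  },
  async extract(params) {
    return { data: { source: params.text.slice(0, 32) }, confidence: 0 };
  },
  async summarize(params) {
    return { summary: params.text.slice(0, params.maxLength ?? 100), keyPoints: [] };
  },
};

// Hypothetical dispatcher matching the execute(action, params) method described above.
async function execute<A extends keyof Actions>(
  action: A,
  params: Actions[A]['params'],
): Promise<Actions[A]['result']> {
  return handlers[action](params);
}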
package/src/index.ts CHANGED
@@ -87,3 +87,8 @@ export {
    type StructuredGenerationResult,
    type JsonSchema,
  } from './structured-output.js';
+
+ export {
+   type LLMServiceActions,
+   type LLMServiceContract,
+ } from './contracts.js';