modality-ai 0.0.0

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -0,0 +1,43 @@
+ import { type StreamableHTTPClientTransportOptions } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
+ import { type StdioServerParameters } from "@modelcontextprotocol/sdk/client/stdio.js";
+ import { type SSEClientTransportOptions } from "@modelcontextprotocol/sdk/client/sse.js";
+ export type TransportType = "http" | "stdio" | "sse";
+ export interface HttpTransportConfig {
+ type: "http";
+ url: string;
+ options?: StreamableHTTPClientTransportOptions;
+ }
+ export interface StdioTransportConfig {
+ type: "stdio";
+ serverParams: StdioServerParameters;
+ }
+ export interface SSETransportConfig {
+ type: "sse";
+ url: string;
+ options?: SSEClientTransportOptions;
+ }
+ export type TransportConfig = HttpTransportConfig | StdioTransportConfig | SSETransportConfig;
+ declare class ModalityClientImpl {
+ private client;
+ private transportConfig;
+ private timeout;
+ constructor(config: TransportConfig, timeout?: number);
+ private createTransport;
+ private getTransportIdentifier;
+ call(method: string, params?: any, autoParse?: boolean): Promise<any>;
+ callOnce(method: string, params?: any, autoParse?: boolean): Promise<any>;
+ callStream(method: string, params?: any, callback?: (p: any) => void): ReadableStream;
+ close(): void;
+ listTools(): Promise<any>;
+ parseContent(toolResult: any): any;
+ }
+ declare function http(url: string, timeout?: number, options?: StreamableHTTPClientTransportOptions): ModalityClientImpl;
+ declare function stdio(serverParams: StdioServerParameters, timeout?: number): ModalityClientImpl;
+ declare function sse(url: string, timeout?: number, options?: SSEClientTransportOptions): ModalityClientImpl;
+ export declare const ModalityClient: {
+ http: typeof http;
+ stdio: typeof stdio;
+ sse: typeof sse;
+ };
+ export type ModalityClientInstance = ModalityClientImpl;
+ export {};
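These declarations describe a factory-style client: ModalityClient.http, ModalityClient.stdio, and ModalityClient.sse each return an instance whose call, listTools, and close methods drive an MCP server. A minimal usage sketch inferred from the declared signatures only; the URL, timeout, and tool name below are hypothetical:

import { ModalityClient } from "modality-ai";

// http(url, timeout?, options?) per the declaration above; values are placeholders.
const client = ModalityClient.http("http://localhost:3000/mcp", 30000);

const tools = await client.listTools();
console.log(tools);

// call(method, params?, autoParse?); "echo" is a hypothetical tool name.
const result = await client.call("echo", { text: "hello" }, true);
console.log(result);

client.close();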
@@ -0,0 +1,19 @@
+ import type { Transport } from "@modelcontextprotocol/sdk/shared/transport.js";
+ import type { JSONRPCMessage } from "@modelcontextprotocol/sdk/types.js";
+ /**
+ * Custom transport wrapper that captures FastMCP streaming content
+ * This intercepts HTTP streaming data sent via streamContent() calls using SSE
+ */
+ export declare class StreamingMCPTransportWrapper implements Transport {
+ private transport;
+ private onStreamingContent?;
+ onclose?: () => void;
+ onerror?: (error: Error) => void;
+ onmessage?: (message: JSONRPCMessage) => void;
+ constructor(url: string, onStreamingContent?: (content: string) => void);
+ private interceptStreamingMessage;
+ start(): Promise<void>;
+ close(): Promise<void>;
+ send(message: JSONRPCMessage | JSONRPCMessage[]): Promise<void>;
+ get sessionId(): string | undefined;
+ }
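Per its constructor, the wrapper takes a server URL plus an optional callback that receives each streamed content chunk. A sketch under the assumption that the class is reachable by consumers; the package index shown later re-exports only a subset, so the import path here is hypothetical:

import { StreamingMCPTransportWrapper } from "modality-ai"; // hypothetical export path

// The URL is a placeholder; the callback fires for each intercepted streaming chunk.
const transport = new StreamingMCPTransportWrapper(
  "http://localhost:3000/mcp",
  (chunk) => process.stdout.write(chunk),
);
await transport.start();
// ...hand the transport to an MCP Client, then:
await transport.close();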
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,5 @@
+ /**
+ * Unit tests for AI Model Tool Support functionality
+ * Tests the new tool calling capabilities added to util_ai_model.ts
+ */
+ export {};
@@ -0,0 +1,5 @@
+ /**
+ * Unit Tests for AI Response Consistency
+ * Tests that ensure both Ollama and Gemini providers handle new features consistently
+ */
+ export {};
@@ -0,0 +1,5 @@
+ /**
+ * Unit Tests for Message ID Extraction Feature
+ * Tests the new message ID extraction logic from AI SDK responses
+ */
+ export {};
@@ -0,0 +1,3 @@
+ export { createAIChat, mergeToolCallsAndResults, OllamaProvider, } from "./util_ai_model";
+ export { ModalityClient } from "./ModalityClient";
+ export type { ModalityClientInstance } from "./ModalityClient";
@@ -0,0 +1,81 @@
+ import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2FinishReason, LanguageModelV2StreamPart, LanguageModelV2Content, LanguageModelV2Usage, LanguageModelV2CallWarning, LanguageModelV2ResponseMetadata, SharedV2ProviderMetadata } from "@ai-sdk/provider";
+ /**
+ * Provider configuration options specific to our provider
+ */
+ export interface VsCodeLmProviderOptions {
+ model?: string;
+ apiUrl?: string;
+ timeout?: number;
+ }
+ /**
+ * Result interface for doGenerate method
+ */
+ export interface VsCodeGenerateResult {
+ content: LanguageModelV2Content[];
+ finishReason: LanguageModelV2FinishReason;
+ usage: LanguageModelV2Usage;
+ warnings: LanguageModelV2CallWarning[];
+ response: LanguageModelV2ResponseMetadata;
+ providerMetadata?: SharedV2ProviderMetadata;
+ }
+ /**
+ * Result interface for doStream method
+ */
+ export interface VsCodeStreamResult {
+ stream: ReadableStream<LanguageModelV2StreamPart>;
+ request?: {
+ body?: unknown;
+ };
+ response?: {
+ headers?: Record<string, string>;
+ };
+ }
+ /**
+ * VS Code Language Model Provider using ModalityClient for simplified architecture
+ */
+ export declare class VsCodeLmProvider implements LanguageModelV2 {
+ readonly specificationVersion: "v2";
+ readonly provider: "vscode-lm";
+ readonly modelId: string;
+ readonly supportedUrls: Record<string, RegExp[]>;
+ private modalityClient;
+ constructor(options: VsCodeLmProviderOptions);
+ /**
+ * Dispose resources (simplified - ModalityClient handles its own cleanup)
+ */
+ dispose(): Promise<void>;
+ /**
+ * Extract and validate prompt from options
+ */
+ private getPrompt;
+ /**
+ * Parse tool calls with ultra-smart hybrid approach
+ */
+ private parseToolCalls;
+ /**
+ * ULTRATHINK MODE: Clean tool call markup with hybrid AI-powered system
+ * Uses tokenizer + parser + regex + JSON validation for 100% bulletproof cleaning
+ */
+ private cleanToolCalls;
+ /**
+ * Prepare request with tools
+ */
+ private prepareRequestWithTools;
+ /**
+ * Simple token estimation based on text length
+ */
+ private estimateTokens;
+ /**
+ * Create simplified Vercel AI SDK compatible stream with tool call support
+ */
+ private createVercelStream;
+ private returnError;
+ /**
+ * Main method to handle language model calls (non-streaming)
+ */
+ doGenerate(options: LanguageModelV2CallOptions): Promise<VsCodeGenerateResult>;
+ /**
+ * Handle streaming language model calls
+ */
+ doStream(options: LanguageModelV2CallOptions): Promise<VsCodeStreamResult>;
+ }
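Since VsCodeLmProvider declares implements LanguageModelV2, it should be usable wherever AI SDK v5-style helpers such as generateText expect a model. A sketch under that assumption, with placeholder option values; the class is not re-exported from the package index shown earlier, so the import path is hypothetical:

import { generateText } from "ai";
import { VsCodeLmProvider } from "modality-ai"; // hypothetical export path

const model = new VsCodeLmProvider({
  model: "copilot-gpt-4",            // placeholder model id
  apiUrl: "http://localhost:3001",   // placeholder bridge URL
});

const { text } = await generateText({ model, prompt: "Summarize this diff." });
console.log(text);
await model.dispose();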
@@ -0,0 +1,31 @@
+ /**
+ * Ollama V2 Adapter
+ *
+ * Clean V2-only adapter for ollama-ai-provider that converts V1 models to V2 interfaces.
+ * This adapter ensures compatibility with AI SDK 5.0 by providing V2-compliant models.
+ */
+ import type { LanguageModelV2, EmbeddingModelV2 } from '@ai-sdk/provider';
+ /**
+ * V2-compliant Ollama Provider
+ */
+ export interface OllamaV2ProviderOptions {
+ baseURL?: string;
+ headers?: Record<string, string>;
+ fetch?: typeof fetch;
+ }
+ export declare class OllamaV2Provider {
+ private provider;
+ constructor(options?: OllamaV2ProviderOptions);
+ /**
+ * Get V2-compliant language model
+ */
+ languageModel(modelId: string): LanguageModelV2;
+ /**
+ * Get V2-compliant embedding model
+ */
+ embeddingModel<VALUE = string>(modelId: string): EmbeddingModelV2<VALUE>;
+ }
+ /**
+ * Create a V2-compliant Ollama provider
+ */
+ export declare function createOllamaV2(options?: OllamaV2ProviderOptions): OllamaV2Provider;
@@ -0,0 +1,109 @@
+ import { z } from "zod";
+ export declare const DEFAULT_AI_PROVIDER = "gemini";
+ export declare const OLLAMA_URL: string;
+ export declare const chatMessageSchema: z.ZodObject<{
+ role: z.ZodEnum<{
+ user: "user";
+ assistant: "assistant";
+ system: "system";
+ }>;
+ content: z.ZodString;
+ }, z.core.$strip>;
+ export declare const DEFAULT_OLLAMA_CONFIG: {
+ baseURL: string;
+ model: string;
+ };
+ export declare const DEFAULT_VSCODE_CONFIG: {
+ baseURL: string;
+ model: string;
+ };
+ export declare const DEFAULT_GEMINI_CONFIG: {
+ apiKey: string;
+ model: string;
+ };
+ export declare const vscodeConfigSchema: z.ZodObject<{
+ baseURL: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ model: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ }, z.core.$strip>;
+ export declare const ollamaConfigSchema: z.ZodObject<{
+ baseURL: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ model: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ }, z.core.$strip>;
+ export declare const geminiConfigSchema: z.ZodObject<{
+ apiKey: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ model: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ }, z.core.$strip>;
+ declare const chatOptionsSchema: z.ZodObject<{
+ temperature: z.ZodOptional<z.ZodNumber>;
+ maxOutputTokens: z.ZodOptional<z.ZodNumber>;
+ topP: z.ZodOptional<z.ZodNumber>;
+ toolChoice: z.ZodOptional<z.ZodEnum<{
+ auto: "auto";
+ none: "none";
+ required: "required";
+ }>>;
+ maxSteps: z.ZodOptional<z.ZodNumber>;
+ }, z.core.$strip>;
+ declare const memoryOptionsSchema: z.ZodObject<{
+ maxMessages: z.ZodDefault<z.ZodOptional<z.ZodNumber>>;
+ showContext: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
+ }, z.core.$strip>;
+ export declare const simpleModalityAnswerSchema: z.ZodObject<{
+ question: z.ZodString;
+ context: z.ZodOptional<z.ZodString>;
+ useMemory: z.ZodDefault<z.ZodBoolean>;
+ }, z.core.$strip>;
+ declare const providerEnum: z.ZodOptional<z.ZodEnum<{
+ ollama: "ollama";
+ gemini: "gemini";
+ vscode: "vscode";
+ }>>;
+ export declare const modalityAnswerSchema: z.ZodObject<{
+ question: z.ZodString;
+ context: z.ZodOptional<z.ZodString>;
+ useMemory: z.ZodDefault<z.ZodBoolean>;
+ provider: z.ZodOptional<z.ZodEnum<{
+ ollama: "ollama";
+ gemini: "gemini";
+ vscode: "vscode";
+ }>>;
+ ollama: z.ZodDefault<z.ZodOptional<z.ZodObject<{
+ baseURL: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ model: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ }, z.core.$strip>>>;
+ gemini: z.ZodDefault<z.ZodOptional<z.ZodObject<{
+ apiKey: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ model: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ }, z.core.$strip>>>;
+ vscode: z.ZodDefault<z.ZodOptional<z.ZodObject<{
+ baseURL: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ model: z.ZodDefault<z.ZodOptional<z.ZodString>>;
+ }, z.core.$strip>>>;
+ chatOptions: z.ZodDefault<z.ZodOptional<z.ZodObject<{
+ temperature: z.ZodOptional<z.ZodNumber>;
+ maxOutputTokens: z.ZodOptional<z.ZodNumber>;
+ topP: z.ZodOptional<z.ZodNumber>;
+ toolChoice: z.ZodOptional<z.ZodEnum<{
+ auto: "auto";
+ none: "none";
+ required: "required";
+ }>>;
+ maxSteps: z.ZodOptional<z.ZodNumber>;
+ }, z.core.$strip>>>;
+ memoryOptions: z.ZodDefault<z.ZodOptional<z.ZodObject<{
+ maxMessages: z.ZodDefault<z.ZodOptional<z.ZodNumber>>;
+ showContext: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
+ }, z.core.$strip>>>;
+ }, z.core.$strip>;
+ export declare const conversationResetSchema: z.ZodObject<{
+ sessionId: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
+ export type ModalityAnswerSchema = z.input<typeof modalityAnswerSchema>;
+ export type ChatMessageSchema = z.infer<typeof chatMessageSchema>;
+ export type ChatOptionsSchema = z.infer<typeof chatOptionsSchema>;
+ export type MemoryOptionsSchema = z.infer<typeof memoryOptionsSchema>;
+ export type OllamaConfig = z.input<typeof ollamaConfigSchema>;
+ export type VSCodeConfig = z.input<typeof vscodeConfigSchema>;
+ export type GeminiConfig = z.input<typeof geminiConfigSchema>;
+ export type ProviderType = z.input<typeof providerEnum>;
+ export {};
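From the declared shapes, a modalityAnswerSchema input could look like the following. The payload values are illustrative, not taken from the package, and the import path is an assumption:

import { modalityAnswerSchema } from "modality-ai"; // hypothetical export path

const parsed = modalityAnswerSchema.parse({
  question: "What does this package export?",
  provider: "ollama", // one of "ollama" | "gemini" | "vscode"
  ollama: { baseURL: "http://localhost:11434/api", model: "llama3.1" }, // placeholders
  chatOptions: { temperature: 0.2, maxSteps: 3 },
});
// ZodDefault wrappers fill useMemory, memoryOptions, and the per-provider configs.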
@@ -0,0 +1,149 @@
+ /**
+ * AI Chat and Embedding Abstraction
+ * Supports both Gemini and Ollama providers for chat and embedding generation
+ */
+ /**
+ * Type Imports
+ */
+ import type { ModelMessage } from "ai";
+ import type { AITools } from "modality-mcp-kit";
+ import type { OllamaConfig, GeminiConfig, VSCodeConfig } from "./schemas/schemas_modality";
+ export type AIProvider = "gemini" | "ollama" | "vscode";
+ export interface AIConfig {
+ provider: AIProvider;
+ ollama?: OllamaConfig;
+ gemini?: GeminiConfig;
+ vscode?: VSCodeConfig;
+ }
+ export interface ChatOptions {
+ temperature?: number;
+ maxOutputTokens?: number;
+ topP?: number;
+ tools?: AITools;
+ toolChoice?: "auto" | "none" | "required" | {
+ type: "tool";
+ toolName: string;
+ };
+ rateLimitDelay?: number;
+ enableRateLimit?: boolean;
+ maxSteps?: number;
+ }
+ export interface ChatResponse {
+ content: string;
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ toolCalls?: any[];
+ toolResults?: any[];
+ steps?: any[];
+ messageId?: string;
+ }
+ export interface AIProviderInterface {
+ generateEmbedding(text: string): Promise<number[]>;
+ chat(messages: ModelMessage[], options?: ChatOptions): Promise<ChatResponse>;
+ evict(): void;
+ getModel(): string;
+ }
+ /**
+ * Ollama AI Provider Implementation
+ */
+ export declare class OllamaProvider implements AIProviderInterface {
+ private config;
+ constructor(config: OllamaConfig);
+ evict(): void;
+ getModel(): string;
+ /**
+ * Generate embeddings using Ollama
+ */
+ generateEmbedding(text: string): Promise<number[]>;
+ /**
+ * Generate chat response using Ollama
+ */
+ chat(messages: ModelMessage[], options?: ChatOptions): Promise<ChatResponse>;
+ }
+ export declare class GeminiProvider implements AIProviderInterface {
+ private config;
+ private model;
+ constructor(config: GeminiConfig);
+ evict(): void;
+ getModel(): string;
+ /**
+ * Generate embeddings using Gemini
+ */
+ generateEmbedding(text: string): Promise<number[]>;
+ /**
+ * Generate chat response using Gemini
+ */
+ chat(messages: ModelMessage[], options?: ChatOptions): Promise<ChatResponse>;
+ }
+ /**
+ * VS Code AI Provider Implementation
+ */
+ export declare class VsCodeProvider implements AIProviderInterface {
+ private config;
+ private provider;
+ constructor(config: VSCodeConfig);
+ evict(): void;
+ getModel(): string;
+ /**
+ * Generate embeddings using VS Code provider
+ * Note: VS Code provider doesn't support embeddings, so we'll throw an error
+ */
+ generateEmbedding(_text: string): Promise<number[]>;
+ /**
+ * Generate chat response using VS Code provider
+ * Uses direct callGenerateText approach - SIMPLICITY FIRST!
+ */
+ chat(messages: ModelMessage[], options?: ChatOptions): Promise<ChatResponse>;
+ }
+ /**
+ * AI Chat Abstraction - Main class for managing different AI providers
+ */
+ export declare class AIChat {
+ private provider;
+ private lastRequestTime;
+ constructor(config: AIConfig);
+ evict(): void;
+ getModel(): string;
+ /**
+ * Generate embeddings using the configured provider
+ */
+ generateEmbedding(text: string): Promise<number[]>;
+ /**
+ * Generate chat response using the configured provider
+ */
+ chat(messages: ModelMessage[], options?: ChatOptions): Promise<ChatResponse>;
+ }
+ /**
+ * Create a standalone function for generating embeddings (backwards compatibility)
+ */
+ export declare function generateEmbedding(text: string, config: OllamaConfig): Promise<number[]>;
+ /**
+ * Factory function to create AI chat instances
+ */
+ export declare function createAIChat(config: AIConfig): AIChat;
+ /**
+ * Convenience function to create Ollama chat instance
+ */
+ export declare function createOllamaChat(config: OllamaConfig): AIChat;
+ /**
+ * Convenience function to create Gemini chat instance
+ */
+ export declare function createGeminiChat(config: GeminiConfig): AIChat;
+ /**
+ * Convenience function to create VS Code chat instance
+ */
+ export declare function createVsCodeChat(config: VSCodeConfig): AIChat;
+ export declare function createTextMessage(role: "user" | "assistant" | "system", content: string): ModelMessage;
+ export declare function createAssistantMessageWithToolCalls(toolCalls: Array<{
+ toolCallId: string;
+ toolName: string;
+ args: Record<string, any>;
+ }>): ModelMessage;
+ export declare function buildMessagesWithToolResults(baseMessages: ModelMessage[], response: any): ModelMessage[];
+ export declare function mergeToolCallsAndResults(response: any): {
+ call: any;
+ result: any;
+ }[];
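createAIChat is one of the root exports (see the index hunk above), so this sketch only assumes placeholder config values:

import { createAIChat } from "modality-ai";

const chat = createAIChat({
  provider: "ollama",
  ollama: { baseURL: "http://localhost:11434/api", model: "llama3.1" }, // placeholders
});

const reply = await chat.chat(
  [{ role: "user", content: "Hello" }],
  { temperature: 0.2 },
);
console.log(reply.content, reply.usage?.totalTokens);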
@@ -0,0 +1,40 @@
+ /**
+ * Shared mock utilities for AI Model testing
+ * Centralizes mock class definitions to reduce duplication across test files
+ */
+ export declare const mockOllamaError: Error;
+ export declare const mockGeminiError: Error;
+ export declare class MockOllamaProvider {
+ constructor(_config: any);
+ chat(_messages: any[], _options?: any): Promise<any>;
+ }
+ export declare class MockGeminiProvider {
+ constructor(_config: any);
+ chat(_messages: any[], _options?: any): Promise<any>;
+ }
+ export declare class MockAIChat {
+ private config;
+ constructor(config: any);
+ generateEmbedding(_text: string): Promise<any>;
+ chat(_messages: any[], _options?: any): Promise<any>;
+ }
+ export declare const createMockOllamaChat: (_config: any) => {
+ chat: () => Promise<never>;
+ };
+ export declare const createMockGeminiChat: (_config: any) => {
+ chat: () => Promise<never>;
+ };
+ export declare const createAiModelMockModule: () => {
+ OllamaProvider: typeof MockOllamaProvider;
+ GeminiProvider: typeof MockGeminiProvider;
+ AIChat: typeof MockAIChat;
+ createOllamaChat: (_config: any) => {
+ chat: () => Promise<never>;
+ };
+ createGeminiChat: (_config: any) => {
+ chat: () => Promise<never>;
+ };
+ AIConfig: any;
+ ChatOptions: any;
+ ChatResponse: any;
+ };
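createAiModelMockModule returns an object shaped like the real module, which pairs naturally with Bun's mock.module. A sketch with hypothetical file paths; the rejection assertion presumes the Promise<never>-returning chat mocks reject with the declared mock errors:

import { mock, test, expect } from "bun:test";
import { createAiModelMockModule } from "./util_ai_model_mocks"; // hypothetical path

// Replace the real module with the centralized mocks for this test file.
mock.module("./util_ai_model", () => createAiModelMockModule());

test("mocked Ollama chat rejects", async () => {
  const { createOllamaChat } = await import("./util_ai_model");
  await expect(createOllamaChat({}).chat()).rejects.toThrow();
});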
@@ -0,0 +1,101 @@
+ import { mock } from "bun:test";
+ /**
+ * Centralized mock utilities for ModalityClient testing
+ * Provides reusable mocks with easy configuration and cleanup
+ */
+ export interface MockModalityClient {
+ call: ReturnType<typeof mock>;
+ callOnce: ReturnType<typeof mock>;
+ callStream: ReturnType<typeof mock>;
+ listTools: ReturnType<typeof mock>;
+ close: ReturnType<typeof mock>;
+ parseContent?: ReturnType<typeof mock>;
+ }
+ export interface MockClientConfig {
+ shouldThrow?: boolean;
+ throwMessage?: string;
+ defaultResponses?: {
+ call?: any;
+ callOnce?: any;
+ callStream?: any;
+ listTools?: any;
+ };
+ }
+ /**
+ * Creates a mock ModalityClient with configurable behavior
+ */
+ export declare function createMockModalityClient(config?: MockClientConfig): MockModalityClient;
+ /**
+ * Creates mock factory functions for the ModalityClient namespace
+ */
+ export declare function createMockModalityClientFactories(httpConfig?: MockClientConfig, stdioConfig?: MockClientConfig, sseConfig?: MockClientConfig): {
+ http: import("bun:test").Mock<() => MockModalityClient>;
+ stdio: import("bun:test").Mock<() => MockModalityClient>;
+ sse: import("bun:test").Mock<() => MockModalityClient>;
+ };
+ /**
+ * Utility to reset all mocks in a MockModalityClient
+ */
+ export declare function resetMockModalityClient(mockClient: MockModalityClient): void;
+ /**
+ * Utility to reset all factory mocks
+ */
+ export declare function resetMockFactories(factories: ReturnType<typeof createMockModalityClientFactories>): void;
+ /**
+ * Creates a complete mock module for ModalityClient
+ */
+ export declare function createModalityClientMockModule(config?: {
+ http?: MockClientConfig;
+ stdio?: MockClientConfig;
+ sse?: MockClientConfig;
+ }): {
+ ModalityClient: {
+ http: import("bun:test").Mock<() => MockModalityClient>;
+ stdio: import("bun:test").Mock<() => MockModalityClient>;
+ sse: import("bun:test").Mock<() => MockModalityClient>;
+ };
+ __testFactories: {
+ http: import("bun:test").Mock<() => MockModalityClient>;
+ stdio: import("bun:test").Mock<() => MockModalityClient>;
+ sse: import("bun:test").Mock<() => MockModalityClient>;
+ };
+ };
+ /**
+ * Default successful responses for common test scenarios
+ */
+ export declare const DEFAULT_RESPONSES: {
+ readonly SUCCESS: {
+ readonly call: {
+ readonly content: {
+ readonly message: "success";
+ };
+ };
+ readonly callOnce: {
+ readonly content: {
+ readonly message: "success";
+ };
+ };
+ readonly callStream: ReadableStream<any>;
+ };
+ readonly ERROR: {
+ readonly shouldThrow: true;
+ readonly throwMessage: "Mock client error";
+ };
+ };
+ /**
+ * Pre-configured mock scenarios for common test cases
+ */
+ export declare const MOCK_SCENARIOS: {
+ /**
+ * Normal working client that returns successful responses
+ */
+ readonly WORKING_CLIENT: () => MockModalityClient;
+ /**
+ * Client that throws errors on all operations
+ */
+ readonly ERROR_CLIENT: () => MockModalityClient;
+ /**
+ * Client with custom responses
+ */
+ readonly CUSTOM_CLIENT: (responses: MockClientConfig["defaultResponses"]) => MockModalityClient;
+ };
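The DEFAULT_RESPONSES.SUCCESS declaration pins down the success payload shape, so a test can assert against it directly. A sketch assuming WORKING_CLIENT wires those defaults into its mocks; the file path is hypothetical:

import { test, expect } from "bun:test";
import { MOCK_SCENARIOS } from "./ModalityClient_mocks"; // hypothetical path

test("working client resolves with the default success payload", async () => {
  const client = MOCK_SCENARIOS.WORKING_CLIENT();
  const result = await client.call("any_method"); // method name is arbitrary
  expect(result).toEqual({ content: { message: "success" } });
});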
package/package.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "version": "0.0.0",
+ "name": "modality-ai",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/react-atomic/modality.git"
+ },
+ "homepage": "https://github.com/react-atomic/modality/tree/main/modality-ai",
+ "description": "",
+ "keywords": [],
+ "author": "Hill <hill@kimo.com>",
+ "license": "ISC",
+ "peerDependencies": {
+ "@modelcontextprotocol/sdk": "^1.25.1"
+ },
+ "dependencies": {
+ "@ai-sdk/google": "^3.0.2",
+ "ai": "^6.0.5",
+ "modality-mcp-kit": "^0.4.0",
+ "ollama-ai-provider": "^1.2.0"
+ },
+ "devDependencies": {
+ "@types/bun": "^1.3.5",
+ "modality-bun-kit": "^0.0.2",
+ "typescript": "^5.9.3"
+ },
+ "exports": {
+ "types": "./dist/types/index.d.ts",
+ "require": "./dist/index.js",
+ "import": "./dist/index.js"
+ },
+ "types": "./dist/types/index.d.ts",
+ "main": "./dist/index.js",
+ "module": "./dist/index.js",
+ "scripts": {
+ "build:clean": "find ./dist -name '*.*' | xargs rm -rf",
+ "build:types": "bun tsc -p ./",
+ "build:src": "bun build src/index.ts --outdir dist --external @modelcontextprotocol/sdk",
+ "build": "bun run build:clean && bun run build:types && bun run build:src",
+ "dev": "bunx concurrently 'bun --watch tsc -p ./' 'bun build:src -- --watch --sourcemap=inline'",
+ "test": "npm run build && bun test",
+ "prepublishOnly": "npm t"
+ },
+ "files": ["package.json", "README.md", "dist"]
+ }