@wox-launcher/wox-plugin 0.0.107 → 0.0.111

package/README.md CHANGED
@@ -1 +1,208 @@
- Nodejs Type Definition for Wox Plugin
+ # Wox Plugin SDK for TypeScript/JavaScript
+
+ TypeScript type definitions and SDK for developing Wox plugins in TypeScript/JavaScript.
+
+ ## Quick Start
+
+ ### Installation
+
+ ```bash
+ npm install @wox-launcher/wox-plugin
+ ```
+
+ ### Basic Plugin
+
+ ```typescript
+ import { Plugin, Context, Query, Result, PublicAPI, PluginInitParams } from "@wox-launcher/wox-plugin"
+
+ class MyPlugin implements Plugin {
+   private api!: PublicAPI
+
+   async init(ctx: Context, initParams: PluginInitParams): Promise<void> {
+     this.api = initParams.API
+     await this.api.Log(ctx, "Info", "MyPlugin initialized")
+   }
+
+   async query(ctx: Context, query: Query): Promise<Result[]> {
+     const results: Result[] = []
+
+     for (const item of this.getItems(query.Search)) {
+       results.push({
+         Title: item.name,
+         SubTitle: item.description,
+         Icon: { ImageType: "emoji", ImageData: "🔍" },
+         Score: 100,
+         Actions: [
+           {
+             Name: "Open",
+             Icon: { ImageType: "emoji", ImageData: "🔗" },
+             IsDefault: true,
+             Action: async (ctx, actionCtx) => {
+               await this.openItem(item)
+             }
+           }
+         ]
+       })
+     }
+
+     return results
+   }
+ }
+ ```
+
+ ## Key Components
+
+ ### Plugin Interface
+
+ Every plugin must implement the `Plugin` interface:
+
+ ```typescript
+ interface Plugin {
+   init: (ctx: Context, initParams: PluginInitParams) => Promise<void>
+   query: (ctx: Context, query: Query) => Promise<Result[]>
+ }
+ ```
+
+ ### Query Models
+
+ - **Query**: User query with search text, type, selection, environment (see the sketch below)
+ - **QueryType**: `INPUT` (typing) or `SELECTION` (selected content)
+ - **Selection**: Text or file paths selected by user
+ - **QueryEnv**: Environment context (active window, browser URL)
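+
+ A minimal sketch of branching on the query type. This is not from the package docs: the `"selection"` literal and the `Selection.Text` / `Selection.FilePaths` field names are assumptions based on the models listed above.
+
+ ```typescript
+ import { Context, Query, Result } from "@wox-launcher/wox-plugin"
+
+ // Hypothetical handler: branch on whether the user typed text or selected content.
+ async function handleQuery(ctx: Context, query: Query): Promise<Result[]> {
+   if (query.Type === "selection") {
+     // Selected text or files (exact field names are assumptions)
+     const selected = query.Selection?.Text ?? query.Selection?.FilePaths?.join(", ") ?? ""
+     return [{ Title: `Selected: ${selected}`, Icon: { ImageType: "emoji", ImageData: "📄" } }]
+   }
+   // Plain typed input
+   return [{ Title: `You typed: ${query.Search}`, Icon: { ImageType: "emoji", ImageData: "⌨️" } }]
+ }
+ ```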
+
+ ### Result Models
+
+ - **Result**: Search result with title, icon, preview, actions (example below)
+ - **ResultAction**: User action on a result
+ - **ResultActionType**: `EXECUTE` (immediate) or `FORM` (show form)
+ - **ResultTail**: Additional visual elements (text or image)
+ - **UpdatableResult**: Result that can be updated in UI
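+
+ For illustration, a result carrying a preview and a tail badge. The `Preview` and `Tails` field shapes below are assumptions based on the models listed above, not confirmed API:
+
+ ```typescript
+ import { Result } from "@wox-launcher/wox-plugin"
+
+ // Hypothetical result with a markdown preview and a small text tail.
+ const result: Result = {
+   Title: "README.md",
+   SubTitle: "Project readme",
+   Icon: { ImageType: "emoji", ImageData: "📄" },
+   Preview: { PreviewType: "markdown", PreviewData: "# README", PreviewProperties: {} }, // shape assumed
+   Tails: [{ Type: "text", Text: "2 KB" }] // shape assumed
+ }
+ ```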
+
+ ### Image Types
+
+ Supported image types:
+
+ - `absolute`: Absolute file path
+ - `relative`: Path relative to plugin directory
+ - `base64`: Base64 encoded image with data URI prefix (`data:image/png;base64,...`)
+ - `svg`: SVG string content
+ - `url`: HTTP/HTTPS URL
+ - `emoji`: Emoji character
+ - `lottie`: Lottie animation JSON
+
+ ```typescript
+ // Emoji icon
+ { ImageType: "emoji", ImageData: "🔍" }
+
+ // Base64 image
+ { ImageType: "base64", ImageData: "data:image/png;base64,iVBORw0..." }
+
+ // Relative path
+ { ImageType: "relative", ImageData: "./icons/icon.png" }
+ ```
+
+ ### Public API
+
+ Methods for interacting with Wox (usage sketch after the list):
+
+ - **UI Control**: `ShowApp()`, `HideApp()`, `IsVisible()`, `Notify()`
+ - **Query**: `ChangeQuery()`, `RefreshQuery()`, `PushResults()`
+ - **Settings**: `GetSetting()`, `SaveSetting()`, `OnSettingChanged()`
+ - **Logging**: `Log()`
+ - **i18n**: `GetTranslation()`
+ - **Results**: `GetUpdatableResult()`, `UpdateResult()`
+ - **AI**: `LLMStream()`
+ - **MRU**: `OnMruRestore()`
+ - **Callbacks**: `OnUnload()`, `OnDeepLink()`
+ - **Commands**: `RegisterQueryCommands()`
+ - **Clipboard**: `Copy()`
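+
+ A short usage sketch. Method names follow the PascalCase style used by the examples in this README (`api.Log`, `api.LLMStream`); the exact signatures of `Notify` and `GetSetting` here are assumptions:
+
+ ```typescript
+ // Inside a plugin method, after storing initParams.API as this.api in init():
+ await this.api.Log(ctx, "Info", "query started")
+ await this.api.Notify(ctx, "Copied to clipboard")       // signature assumed
+ const apiKey = await this.api.GetSetting(ctx, "apiKey") // Promise<string>, assumed
+ ```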
+
+ ## Actions
+
+ Actions are operations users can perform on results:
+
+ ```typescript
+ const copyAction: ResultAction = {
+   Name: "Copy",
+   Icon: { ImageType: "emoji", ImageData: "📋" },
+   IsDefault: true,
+   Hotkey: "Ctrl+C",
+   Action: async (ctx, actionCtx) => {
+     await this.copyToClipboard(actionCtx.ContextData)
+   }
+ }
+ ```
+
+ ## Settings
+
+ Define settings for your plugin:
+
+ ```typescript
+ const settings: PluginSettingDefinitionItem[] = [
+   createTextboxSetting({
+     key: "apiKey",
+     label: "API Key",
+     tooltip: "Enter your API key",
+     defaultValue: ""
+   }),
+   createCheckboxSetting({
+     key: "enabled",
+     label: "Enable Feature",
+     defaultValue: "true"
+   })
+ ]
+ ```
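+
+ At runtime, settings can be read and written through the Public API. A minimal sketch; the `GetSetting`/`SaveSetting` signatures are assumptions consistent with the API list above:
+
+ ```typescript
+ // Read a setting value (settings are stored as strings)
+ const apiKey = await this.api.GetSetting(ctx, "apiKey")
+
+ // Persist a new value; the trailing boolean (platform-specific or not) is an assumption
+ await this.api.SaveSetting(ctx, "enabled", "false", false)
+ ```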
+
+ ## AI/LLM Integration
+
+ Stream responses from AI models:
+
+ ```typescript
+ const conversations: AI.Conversation[] = [
+   { Role: "system", Text: "You are a helpful assistant.", Timestamp: Date.now() },
+   { Role: "user", Text: "Hello!", Timestamp: Date.now() }
+ ]
+
+ await api.LLMStream(ctx, conversations, (data: AI.ChatStreamData) => {
+   if (data.Status === "streaming") {
+     console.log("Chunk:", data.Data)
+   } else if (data.Status === "finished") {
+     console.log("Complete:", data.Data)
+   }
+ })
+ ```
+
+ ## Plugin Metadata
+
+ Plugins must declare metadata in a `plugin.json` file:
+
+ ```json
+ {
+   "ID": "com.myplugin.example",
+   "Name": "My Plugin",
+   "Author": "Your Name",
+   "Version": "1.0.0",
+   "MinWoxVersion": "2.0.0",
+   "Runtime": "nodejs",
+   "Entry": "main.js",
+   "TriggerKeywords": ["my"],
+   "Description": "My awesome Wox plugin",
+   "Website": "https://github.com/user/myplugin",
+   "Icon": "https://example.com/icon.png"
+ }
+ ```
+
+ ## Query Flow
+
+ 1. User triggers Wox and types the trigger keyword (e.g., "my query")
+ 2. Wox calls `plugin.query()` with (see the sketch below):
+    - `query.TriggerKeyword = "my"`
+    - `query.Command = ""`
+    - `query.Search = "query"`
+ 3. Plugin returns `Result[]`
+ 4. Wox displays results sorted by score
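+
+ A minimal sketch of reading these fields inside `query()` (field casing follows `query.Search` as used in the Basic Plugin example above):
+
+ ```typescript
+ async query(ctx: Context, query: Query): Promise<Result[]> {
+   // For "my query": TriggerKeyword === "my", Command === "", Search === "query"
+   await this.api.Log(ctx, "Info", `keyword=${query.TriggerKeyword} search=${query.Search}`)
+   return []
+ }
+ ```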
+
+ ## For More Information
+
+ - Wox Documentation: https://github.com/Wox-launcher/Wox
+ - Plugin Examples: https://github.com/Wox-launcher/Wox.Plugin.Nodejs
package/dist/context.js CHANGED
@@ -1,3 +1,17 @@
+ /**
+  * Create a new context with auto-generated trace ID.
+  *
+  * The context is used throughout the plugin API for request tracking
+  * and passing custom data between function calls.
+  *
+  * @returns A new Context instance with a UUID in the "traceId" key
+  *
+  * @example
+  * ```typescript
+  * const ctx = NewContext()
+  * console.log(ctx.Get("traceId")) // e.g., "550e8400-e29b-41d4-a716-446655440000"
+  * ```
+  */
  export function NewContext() {
    return {
      Values: {
@@ -14,6 +28,23 @@ export function NewContext() {
    }
  };
}
+ /**
+  * Create a new context with an initial key-value pair.
+  *
+  * In addition to the auto-generated trace ID, this function
+  * initializes the context with a custom key-value pair.
+  *
+  * @param key - The key to set
+  * @param value - The value to store
+  * @returns A new Context instance with the trace ID and custom value
+  *
+  * @example
+  * ```typescript
+  * const ctx = NewContextWithValue("userId", "12345")
+  * console.log(ctx.Get("userId")) // "12345"
+  * console.log(ctx.Get("traceId")) // auto-generated UUID
+  * ```
+  */
  export function NewContextWithValue(key, value) {
    const ctx = NewContext();
    ctx.Set(key, value);
package/dist/image.js CHANGED
@@ -1,3 +1,24 @@
+ /**
+  * Create a base64 WoxImage from image data.
+  *
+  * This helper function creates a WoxImage with type "base64".
+  * The image data must include the data URI prefix.
+  *
+  * @param imageData - Base64 image data with data URI prefix (e.g., "data:image/png;base64,...")
+  * @returns A WoxImage with type "base64"
+  *
+  * @example
+  * ```typescript
+  * const pngData = "data:image/png;base64,iVBORw0KGgoAAAA..."
+  * const icon = NewBase64WoxImage(pngData)
+  *
+  * // Use in a result
+  * const result = {
+  *   Title: "My Result",
+  *   Icon: icon
+  * }
+  * ```
+  */
  export function NewBase64WoxImage(imageData) {
    return {
      ImageType: "base64",
package/dist/index.js CHANGED
@@ -1,2 +1,22 @@
+ /**
+  * Wox Plugin SDK for TypeScript/JavaScript
+  *
+  * This package provides utility functions for developing Wox plugins in TypeScript/JavaScript.
+  *
+  * @module
+  *
+  * @example
+  * ```typescript
+  * import { NewContext, NewBase64WoxImage } from "@wox-launcher/wox-plugin"
+  *
+  * // Create a new context
+  * const ctx = NewContext()
+  *
+  * // Create a base64 image
+  * const icon = NewBase64WoxImage("data:image/png;base64,...")
+  * ```
+  */
+ // Re-export all context-related functions
  export * from "./context.js";
+ // Re-export all image-related functions
  export * from "./image.js";
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@wox-launcher/wox-plugin",
-   "version": "0.0.107",
+   "version": "0.0.111",
    "description": "All nodejs plugin for Wox should use types in this package",
    "repository": {
      "type": "git",
package/types/ai.d.ts CHANGED
@@ -1,21 +1,370 @@
+ /**
+  * AI/LLM related types for streaming chat conversations.
+  *
+  * This namespace provides types for interacting with AI models through Wox's
+  * streaming chat interface. It supports conversation management, streaming
+  * responses, and tool calling capabilities.
+  *
+  * @example
+  * ```typescript
+  * import { AI, PublicAPI } from "@wox-launcher/wox-plugin"
+  *
+  * // Prepare conversation history
+  * const conversations: AI.Conversation[] = [
+  *   { Role: "system", Text: "You are a helpful assistant.", Timestamp: Date.now() },
+  *   { Role: "user", Text: "What is the capital of France?", Timestamp: Date.now() }
+  * ]
+  *
+  * // Stream the response
+  * await api.LLMStream(ctx, conversations, (streamData) => {
+  *   if (streamData.Status === "streaming") {
+  *     console.log("Chunk:", streamData.Data)
+  *   } else if (streamData.Status === "finished") {
+  *     console.log("Complete:", streamData.Data)
+  *   }
+  * })
+  * ```
+  */
  export namespace AI {
-   export type ConversationRole = "user" | "system"
-   export type ChatStreamDataType = "streaming" | "finished" | "error"
+   /**
+    * Role of a message in a conversation.
+    *
+    * - `system`: System prompt that sets AI behavior
+    * - `user`: Message from the user
+    * - `assistant`: Response from the AI model
+    * - `tool`: Result from a tool/function call execution
+    */
+   export type ConversationRole = "user" | "system" | "assistant" | "tool"

+   /**
+    * Status of a streaming chat response.
+    *
+    * - `streaming`: actively receiving response chunks
+    * - `streamed`: all content received, ready to process tool calls (if any)
+    * - `running_tool_call`: executing tool calls
+    * - `finished`: all content and tool calls completed
+    * - `error`: an error occurred during streaming or tool execution
+    */
+   export type ChatStreamDataType = "streaming" | "streamed" | "running_tool_call" | "finished" | "error"
+
+   /**
+    * Status of a tool call execution.
+    *
+    * - `streaming`: tool call arguments are being streamed
+    * - `pending`: streaming finished, ready to execute
+    * - `running`: currently executing
+    * - `succeeded`: executed successfully
+    * - `failed`: execution failed
+    */
+   export type ToolCallStatus = "streaming" | "pending" | "running" | "succeeded" | "failed"
+
+   /**
+    * Represents a single message in an AI conversation.
+    *
+    * Conversations are passed to the AI model in order to maintain context
+    * across multiple turns of dialogue.
+    *
+    * @example
+    * ```typescript
+    * const systemMessage: AI.Conversation = {
+    *   Role: "system",
+    *   Text: "You are a helpful coding assistant.",
+    *   Timestamp: Date.now()
+    * }
+    *
+    * const userMessage: AI.Conversation = {
+    *   Role: "user",
+    *   Text: "How do I reverse a string in JavaScript?",
+    *   Timestamp: Date.now()
+    * }
+    * ```
+    */
    export interface Conversation {
+     /**
+      * The role of the message sender.
+      *
+      * - `system`: Sets behavior/context for the AI
+      * - `user`: Human input
+      * - `assistant`: AI response
+      * - `tool`: Result from tool execution
+      */
      Role: ConversationRole
+
+     /**
+      * The text content of the message.
+      *
+      * For system messages, this defines the AI's behavior.
+      * For user messages, this is the user's input.
+      * For assistant messages, this is the AI's response.
+      * For tool messages, this is the stringified tool result.
+      */
      Text: string
+
+     /**
+      * Reasoning content from models that support reasoning (e.g., DeepSeek, OpenAI o1, Qwen).
+      *
+      * This is the model's internal thinking process, separate from the final response.
+      * It's useful for understanding how the model arrived at its answer.
+      *
+      * Only present for assistant messages when using models that support reasoning.
+      */
+     Reasoning?: string
+
+     /**
+      * List of PNG image bytes for vision models.
+      *
+      * Only applicable for user messages when using vision-capable models.
+      * Images should be in PNG format as raw byte arrays.
+      */
+     Images?: Uint8Array[]
+
+     /**
+      * Tool call information for assistant or tool messages.
+      *
+      * - For assistant messages: describes which tool(s) the AI wants to call
+      * - For tool messages: describes the result of a tool execution
+      */
+     ToolCallInfo?: ToolCallInfo
+
+     /**
+      * Unix timestamp in milliseconds when this message was created.
+      *
+      * Used for ordering messages in the conversation history.
+      */
      Timestamp: number
    }

+   /**
+    * Information about a tool/function call requested by the AI model.
+    *
+    * When the AI decides to use a tool, it populates this structure with
+    * the tool name and arguments. After execution, the result is stored here.
+    *
+    * @example
+    * ```typescript
+    * const toolCall: AI.ToolCallInfo = {
+    *   Id: "call_123",
+    *   Name: "get_weather",
+    *   Arguments: { city: "London", unit: "celsius" },
+    *   Status: "running",
+    *   Delta: "",
+    *   Response: "",
+    *   StartTimestamp: Date.now(),
+    *   EndTimestamp: 0
+    * }
+    * ```
+    */
+   export interface ToolCallInfo {
+     /**
+      * Unique identifier for this tool call.
+      *
+      * Used to match tool requests with responses.
+      */
+     Id: string
+
+     /**
+      * Name of the tool/function to call.
+      *
+      * This should match a registered tool name in the Wox system.
+      */
+     Name: string
+
+     /**
+      * Arguments to pass to the tool function.
+      *
+      * Key-value pairs representing the parameters for the tool call.
+      * The schema depends on the tool's definition.
+      */
+     Arguments: Record<string, any>
+
+     /**
+      * Current status of the tool call.
+      *
+      * Progresses through: streaming -> pending -> running -> succeeded/failed
+      */
+     Status: ToolCallStatus
+
+     /**
+      * Delta content when tool call is being streamed.
+      *
+      * As the model streams the tool call arguments, partial content
+      * appears here. Once complete, Arguments contains the full data.
+      */
+     Delta: string
+
+     /**
+      * The response/result from tool execution.
+      *
+      * After the tool completes, this contains the stringified result
+      * that will be sent back to the AI model.
+      */
+     Response: string
+
+     /**
+      * Unix timestamp in milliseconds when tool execution started.
+      */
+     StartTimestamp: number
+
+     /**
+      * Unix timestamp in milliseconds when tool execution finished.
+      *
+      * Zero if the tool hasn't finished yet.
+      */
+     EndTimestamp: number
+   }
+
+   /**
+    * Data returned during a streaming chat response.
+    *
+    * As the AI model generates its response, this structure is updated
+    * with new content. It supports regular text, reasoning content,
+    * and tool calling.
+    *
+    * @example
+    * ```typescript
+    * await api.LLMStream(ctx, conversations, (data: AI.ChatStreamData) => {
+    *   switch (data.Status) {
+    *     case "streaming":
+    *       // Still receiving content
+    *       process.stdout.write(data.Data)
+    *       break
+    *     case "streamed":
+    *       // All content received, checking for tool calls
+    *       console.log("\nContent complete. Tool calls:", data.ToolCalls)
+    *       break
+    *     case "running_tool_call":
+    *       // Executing tools
+    *       data.ToolCalls.forEach(tc => {
+    *         console.log(`Running ${tc.Name}: ${tc.Status}`)
+    *       })
+    *       break
+    *     case "finished":
+    *       // Completely done
+    *       console.log("\nFinal result:", data.Data)
+    *       break
+    *     case "error":
+    *       console.error("Error:", data.Data)
+    *       break
+    *   }
+    * })
+    * ```
+    */
    export interface ChatStreamData {
-     /** Stream status */
+     /**
+      * Current status of the stream.
+      *
+      * Indicates what phase the chat is in: streaming content,
+      * executing tools, completed, or errored.
+      */
      Status: ChatStreamDataType
-     /** Aggregated content data */
+
+     /**
+      * Aggregated text content from the AI.
+      *
+      * This field accumulates content as it's streamed. For example:
+      * - Chunk 1: Data = "Hello"
+      * - Chunk 2: Data = "Hello world"
+      * - Chunk 3: Data = "Hello world!"
+      *
+      * Use this for the final complete response.
+      */
      Data: string
-     /** Reasoning content from models that support reasoning (e.g., DeepSeek, OpenAI o1). Separate from Data for clean processing. */
+
+     /**
+      * Reasoning content from models that support thinking.
+      *
+      * Models like DeepSeek-R1, OpenAI o1, and Qwen separate their
+      * internal reasoning from the final response. This field contains
+      * the model's thinking process.
+      *
+      * To display reasoning nicely, you can format it with a "> " prefix:
+      * ```typescript
+      * const reasoning = data.Reasoning
+      *   .split('\n')
+      *   .map(line => `> ${line}`)
+      *   .join('\n')
+      * ```
+      */
      Reasoning: string
+
+     /**
+      * Tool calls requested by the AI model.
+      *
+      * Populated when the model decides to use one or more tools.
+      * Each tool call includes its name, arguments, and execution status.
+      *
+      * @example
+      * ```typescript
+      * if (data.ToolCalls.length > 0) {
+      *   for (const toolCall of data.ToolCalls) {
+      *     console.log(`Tool: ${toolCall.Name}`)
+      *     console.log(`Args: ${JSON.stringify(toolCall.Arguments)}`)
+      *     console.log(`Status: ${toolCall.Status}`)
+      *   }
+      * }
+      * ```
+      */
+     ToolCalls: ToolCallInfo[]
    }

+   /**
+    * Callback function type for receiving streaming chat data.
+    *
+    * Passed to `PublicAPI.LLMStream()` and called repeatedly as
+    * the AI generates its response.
+    *
+    * @param streamData - The current streaming data chunk
+    *
+    * @example
+    * ```typescript
+    * const callback: AI.ChatStreamFunc = (streamData) => {
+    *   if (streamData.Status === "streaming") {
+    *     // Show the aggregated content received so far
+    *     display.textContent = streamData.Data
+    *   }
+    * }
+    *
+    * await api.LLMStream(ctx, conversations, callback)
+    * ```
+    */
    export type ChatStreamFunc = (streamData: ChatStreamData) => void
+
+   /**
+    * Definition of an AI model (provider and model name).
+    *
+    * Used when specifying which model to use for chat completions.
+    *
+    * @example
+    * ```typescript
+    * const model: AI.AIModel = {
+    *   Provider: "openai",
+    *   ProviderAlias: "my-openai-account", // optional, for multiple configs
+    *   Name: "gpt-4"
+    * }
+    * ```
+    */
+   export interface AIModel {
+     /**
+      * The model provider name.
+      *
+      * Common values: "openai", "anthropic", "deepseek", "ollama", etc.
+      */
+     Provider: string
+
+     /**
+      * Optional provider alias.
+      *
+      * When you have multiple configurations of the same provider
+      * (e.g., two different OpenAI API keys), use this to specify
+      * which one to use. If omitted, uses the default configuration.
+      */
+     ProviderAlias?: string
+
+     /**
+      * The specific model name.
+      *
+      * Examples: "gpt-4", "claude-3-opus", "deepseek-chat", "llama3:70b"
+      */
+     Name: string
+   }
  }