heyi 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) Tobias Reich
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in
13
+ all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21
+ THE SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,108 @@
1
+ # heyi
2
+
3
+ > CLI tool to execute AI prompts with flexible output formatting
4
+
5
+ Execute AI prompts directly from your terminal with support for multiple models and structured output formats using OpenRouter and the Vercel AI SDK.
6
+
7
+ ## Install
8
+
9
+ ```sh
10
+ npm install heyi -g
11
+ ```
12
+
13
+ ## Usage
14
+
15
+ ### CLI
16
+
17
+ ```sh
18
+ heyi [prompt] [options]
19
+ ```
20
+
21
+ #### Options
22
+
23
+ - `-m, --model <model>` - AI model to use (default: `openai/gpt-4o-mini`)
24
+ - `-f, --format <format>` - Output format: `string`, `number`, `object`, `array` (default: `string`)
25
+ - `-s, --schema <schema>` - Zod schema for object/array format (required when format is `object` or `array`)
26
+ - `--file <path>` - Read content from file and include as context
27
+ - `-h, --help` - Display help information
28
+ - `-V, --version` - Display version number
29
+
30
+ #### Environment Variables
31
+
32
+ - `API_KEY` - OpenRouter API key (required, can be set via environment or `.env` file)
33
+ - `MODEL` - Default AI model to use (optional, can be overridden with `--model` flag)
34
+
35
+ ### Examples
36
+
37
+ ```sh
38
+ # Simple text prompt
39
+ heyi "What is the capital of France?"
40
+
41
+ # Use a different model
42
+ heyi "Explain quantum computing" --model google/gemini-2.0-flash-exp
43
+
44
+ # Get structured output as array of strings
45
+ heyi "List 5 programming languages" --format array --schema "z.string()"
46
+
47
+ # Get structured output as array of objects
48
+ heyi "List 3 countries with their capitals" --format array --schema "z.object({name:z.string(),capital:z.string()})"
49
+
50
+ # Get structured output as single object
51
+ heyi "Analyze: revenue 100k, costs 60k" --format object --schema "z.object({revenue:z.number(),costs:z.number()})"
52
+
53
+ # Complex nested schema
54
+ heyi "Analyze top 3 tech companies" --format array --schema "z.object({name:z.string(),founded:z.number(),products:z.array(z.string())})"
55
+
56
+ # Set default model via environment variable
57
+ MODEL=perplexity/sonar heyi "Explain AI"
58
+
59
+ # Set API key via environment variable
60
+ API_KEY=your-key heyi "Hello, AI!"
61
+
62
+ # Input from file as context
63
+ heyi "Summarize this content" --file input.txt
64
+
65
+ # Input from stdin
66
+ cat article.md | heyi "Extract all URLs mentioned"
67
+ echo "Analyze this text" | heyi
68
+ ```
69
+
70
+ ## Output Formats
71
+
72
+ - **string** (default): Plain text response from the AI model
73
+ - **number**: Numeric response from the AI model
74
+ - **object**: Single JSON object with structured data (requires `--schema` flag)
75
+ - **array**: JSON array with structured data (requires `--schema` flag)
76
+
77
+ The tool uses Zod schemas to ensure the AI model returns data in the requested format. When using `object` or `array` formats, you must provide a Zod schema string via the `--schema` flag.
78
+
79
+ ### Schema Examples
80
+
81
+ - String array: `--format array --schema "z.string()"`
82
+ - URL array: `--format array --schema "z.url()"` (not supported by all models)
83
+ - Object array: `--format array --schema "z.object({name:z.string(),age:z.number()})"`
84
+ - Single object: `--format object --schema "z.object({total:z.number(),items:z.array(z.string())})"`
85
+
86
+ ## Development
87
+
88
+ ```sh
89
+ # Install dependencies
90
+ npm install
91
+
92
+ # Run tests
93
+ npm test
94
+
95
+ # Lint and format code
96
+ npm run format
97
+
98
+ # Run the CLI in development
99
+ npm start -- "Your prompt here"
100
+
101
+ # Or run directly
102
+ ./bin/index.js "Your prompt here"
103
+ ```
104
+
105
+ ## Related
106
+
107
+ - [Vercel AI SDK](https://sdk.vercel.ai/) - Toolkit for building AI applications
108
+ - [OpenRouter](https://openrouter.ai/) - Unified API for LLMs
package/bin/index.js ADDED
@@ -0,0 +1,88 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { Command } from 'commander'
4
+ import pkg from '../package.json' with { type: 'json' }
5
+ import { executePrompt } from '../src/index.js'
6
+ import { hasStdinData, readFileContent, readStdin } from '../src/utils/input.js'
7
+
8
+ const DEFAULT_MODEL = 'openai/gpt-4o-mini'
9
+
10
+ const program = new Command()
11
+
12
+ const helpText = `
13
+ Examples:
14
+ $ heyi "What is the capital of France?"
15
+ $ heyi "What is quantum computing?" --model google/gemini-2.5-pro
16
+
17
+ # Different output formats
18
+ $ heyi "List 5 programming languages" --format array --schema "z.string()"
19
+ $ heyi "Analyze this data" --format object --schema "z.object({revenue:z.number(),costs:z.number()})"
20
+ $ heyi "List 3 countries" --format array --schema "z.object({name:z.string(),capital:z.string()})"
21
+
22
+ # Environment variables
23
+ $ MODEL=perplexity/sonar heyi "Explain AI"
24
+ $ API_KEY=your-key heyi "Hello, AI!"
25
+
26
+ # Input from stdin or file
27
+ $ heyi "Summarize this content" --file input.txt
28
+ $ cat prompt.txt | heyi
29
+ `
30
+
31
+ const action = async (prompt, options) => {
32
+ try {
33
+ // Validate that schema is provided for object/array formats
34
+ if ((options.format === 'object' || options.format === 'array') && !options.schema) {
35
+ throw new Error(`--schema or -s is required when format is '${options.format}'`)
36
+ }
37
+
38
+ // Handle file content as context
39
+ let fileContent = null
40
+ if (options.file) {
41
+ fileContent = await readFileContent(options.file)
42
+ }
43
+
44
+ // Handle stdin input
45
+ let stdinContent = null
46
+ if (hasStdinData()) {
47
+ stdinContent = await readStdin()
48
+ }
49
+
50
+ // Validate that we have a prompt
51
+ if (!prompt && !stdinContent) {
52
+ throw new Error('A prompt is required. Provide it as an argument or via stdin.')
53
+ }
54
+
55
+ // Build the final prompt
56
+ let finalPrompt = prompt ?? stdinContent
57
+ if (fileContent) {
58
+ finalPrompt = `${finalPrompt}\n\nContext from file:\n${fileContent}`
59
+ }
60
+
61
+ const result = await executePrompt(finalPrompt, {
62
+ model: options.model,
63
+ format: options.format,
64
+ schema: options.schema,
65
+ })
66
+
67
+ console.log(result)
68
+ } catch (error) {
69
+ const relevantFields = Object.keys(error).filter((key) => ['stack', 'isRetryable', 'data'].includes(key) === false)
70
+ const relevantError = Object.fromEntries(relevantFields.map((key) => [key, error[key]]))
71
+ console.error(relevantError)
72
+
73
+ process.exit(1)
74
+ }
75
+ }
76
+
77
// Wire up the CLI: metadata comes straight from package.json, a single
// optional prompt argument (stdin may replace it), and flags for model,
// output format, schema, and file context. `--model` falls back to the
// MODEL env var, then to the built-in default. `.parse()` reads
// process.argv and dispatches to `action`.
program
  .name(pkg.name)
  .description(pkg.description)
  .version(pkg.version)
  .argument('[prompt]', 'The AI prompt to execute (optional when using stdin)')
  .option('-m, --model <model>', 'AI model to use', process.env.MODEL ?? DEFAULT_MODEL)
  .option('-f, --format <format>', 'Output format: string, number, object, array', 'string')
  .option('-s, --schema <schema>', 'Zod schema for object/array format (required when format is object or array)')
  .option('--file <path>', 'Read content from file and include as context')
  .addHelpText('after', helpText)
  .action(action)
  .parse()
package/package.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "name": "heyi",
3
+ "version": "1.0.0",
4
+ "description": "CLI tool to execute AI prompts with flexible output formatting",
5
+ "keywords": [
6
+ "ai",
7
+ "cli",
8
+ "openrouter",
9
+ "llm",
10
+ "prompt"
11
+ ],
12
+ "license": "MIT",
13
+ "type": "module",
14
+ "bin": "./bin/index.js",
15
+ "files": [
16
+ "bin",
17
+ "src"
18
+ ],
19
+ "scripts": {
20
+ "eslint": "eslint \"**/*.js\"",
21
+ "format": "npm run eslint -- --fix && npm run prettier -- --write",
22
+ "lint": "npm run eslint && npm run prettier -- --check",
23
+ "prettier": "prettier --ignore-path .gitignore \"**/*.{js,json,md,yml}\"",
24
+ "start": "./bin/index.js",
25
+ "test": "npm run lint"
26
+ },
27
+ "dependencies": {
28
+ "@openrouter/ai-sdk-provider": "^1.5.3",
29
+ "ai": "^5.0.113",
30
+ "commander": "^14.0.2",
31
+ "dotenv": "^14.3.2",
32
+ "zod": "^4.2.0"
33
+ },
34
+ "devDependencies": {
35
+ "@electerious/eslint-config": "^5.2.1",
36
+ "@electerious/prettier-config": "^4.0.0",
37
+ "eslint": "^9.39.2",
38
+ "prettier": "^3.7.4"
39
+ },
40
+ "engines": {
41
+ "node": ">=22"
42
+ },
43
+ "author": "Tobias Reich <tobias@electerious.com>"
46
+ }
package/src/index.js ADDED
@@ -0,0 +1,51 @@
1
+ import { createOpenRouter } from '@openrouter/ai-sdk-provider'
2
+ import { generateObject } from 'ai'
3
+ import { config } from 'dotenv'
4
+ import { getFormatSchema } from './utils/schema.js'
5
+
6
+ // Load environment variables from .env file
7
+ config()
8
+
9
+ /**
10
+ * Execute an AI prompt with the specified model and format.
11
+ *
12
+ * @param {string} prompt - The user's prompt
13
+ * @param {object} options - Configuration options
14
+ * @param {string} options.model - The AI model to use
15
+ * @param {string} options.format - The output format (string, number, object, array)
16
+ * @param {string} options.schema - The Zod schema string for object/array format
17
+ * @returns {Promise<string>} The formatted AI response
18
+ */
19
+ export const executePrompt = async (prompt, options = {}) => {
20
+ const { model, format = 'string', schema } = options
21
+
22
+ const apiKey = process.env.API_KEY
23
+ if (!apiKey) {
24
+ throw new Error('API_KEY environment variable is required. Set it via environment or .env file.')
25
+ }
26
+
27
+ const openrouter = createOpenRouter({
28
+ apiKey,
29
+ })
30
+
31
+ const zodSchema = getFormatSchema(format, schema)
32
+ const { object } = await generateObject({
33
+ model: openrouter(model),
34
+ prompt,
35
+ schema: zodSchema,
36
+ })
37
+
38
+ switch (format) {
39
+ case 'string':
40
+ case 'number': {
41
+ return object.result
42
+ }
43
+ case 'object':
44
+ case 'array': {
45
+ return JSON.stringify(object.result, null, 2)
46
+ }
47
+ default: {
48
+ throw new Error(`Can't format response for unknown format '${format}'`)
49
+ }
50
+ }
51
+ }
@@ -0,0 +1,56 @@
1
+ import { readFile } from 'node:fs/promises'
2
+ import { createInterface } from 'node:readline'
3
+
4
+ /**
5
+ * Read content from a file.
6
+ *
7
+ * @param {string} filePath - Path to the file to read
8
+ * @returns {Promise<string>} The file content
9
+ */
10
+ export const readFileContent = async (filePath) => {
11
+ try {
12
+ return await readFile(filePath, 'utf8')
13
+ } catch (error) {
14
+ throw new Error(`Failed to read file '${filePath}'`, { cause: error })
15
+ }
16
+ }
17
+
18
+ /**
19
+ * Read content from stdin.
20
+ *
21
+ * @returns {Promise<string>} The stdin content
22
+ */
23
+ export const readStdin = () => {
24
+ const { promise, resolve, reject } = Promise.withResolvers()
25
+
26
+ let data = ''
27
+
28
+ const rl = createInterface({
29
+ input: process.stdin,
30
+ output: process.stdout,
31
+ terminal: false,
32
+ })
33
+
34
+ rl.on('line', (line) => {
35
+ data += line + '\n'
36
+ })
37
+
38
+ rl.on('close', () => {
39
+ resolve(data.trim())
40
+ })
41
+
42
+ rl.on('error', (error) => {
43
+ reject(new Error(`Failed to read stdin`, { cause: error }))
44
+ })
45
+
46
+ return promise
47
+ }
48
+
49
+ /**
50
+ * Check if stdin has data available.
51
+ *
52
+ * @returns {boolean} True if stdin has data
53
+ */
54
+ export const hasStdinData = () => {
55
+ return !process.stdin.isTTY
56
+ }
@@ -0,0 +1,44 @@
1
+ import { z } from 'zod'
2
+
3
+ /**
4
+ * Get the appropriate Zod schema for the requested format.
5
+ *
6
+ * @param {string} format - The output format
7
+ * @param {string} schemaString - The Zod schema string for object/array format
8
+ * @returns {z.ZodType} The Zod schema for the format
9
+ */
10
+ export const getFormatSchema = (format, schemaString) => {
11
+ switch (format) {
12
+ case 'string': {
13
+ return z.object({
14
+ result: z.string(),
15
+ })
16
+ }
17
+ case 'number': {
18
+ return z.object({
19
+ result: z.number(),
20
+ })
21
+ }
22
+ case 'object': {
23
+ // Parse the schema string (e.g., "z.object({name:z.string()})")
24
+ // We need to evaluate it in the context of zod
25
+ // eslint-disable-next-line no-eval
26
+ const parsedSchema = eval(schemaString)
27
+ return z.object({
28
+ result: parsedSchema,
29
+ })
30
+ }
31
+ case 'array': {
32
+ // Parse the schema string (e.g., "z.string()" or "z.object({name:z.string()})")
33
+ // We need to evaluate it in the context of zod
34
+ // eslint-disable-next-line no-eval
35
+ const parsedSchema = eval(schemaString)
36
+ return z.object({
37
+ result: z.array(parsedSchema),
38
+ })
39
+ }
40
+ default: {
41
+ throw new Error(`Can't create schema for unknown format '${format}'`)
42
+ }
43
+ }
44
+ }