n8n-nodes-githubmodels 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,58 @@
1
+ # n8n-nodes-githubmodels
2
+
3
+ This package provides an n8n community Language Model node for GitHub Models.
4
+
5
+ It allows you to use GitHub-hosted LLMs (for example, OpenAI and other partner models exposed through GitHub Models) as the model provider in n8n AI workflows.
6
+
7
+ [n8n](https://n8n.io/) is a [fair-code licensed](https://docs.n8n.io/sustainable-use-license/) workflow automation platform.
8
+
9
+ - [Installation](#installation)
10
+ - [Operations](#operations)
11
+ - [Credentials](#credentials)
12
+ - [Compatibility](#compatibility)
13
+ - [Usage](#usage)
14
+ - [Resources](#resources)
15
+ - [Version history](#version-history)
16
+
17
+ ## Installation
18
+
19
+ Follow the [installation guide](https://docs.n8n.io/integrations/community-nodes/installation/) in the n8n community nodes documentation.
20
+
21
+ ## Operations
22
+
23
+ This package includes one AI Language Model node:
24
+
25
+ - **GitHub Models Chat Model**: supplies a chat-capable language model to n8n AI nodes (for example, AI Agent, Basic LLM Chain, and related nodes that accept a Language Model input).
26
+
27
+ ## Credentials
28
+
29
+ Create a GitHub personal access token and configure the **GitHub Models API** credential in n8n:
30
+
31
+ - **API Key**: your GitHub token.
32
+ - **Base URL**: defaults to `https://models.github.ai/inference`.
33
+
34
+ Token setup docs: [GitHub Models documentation](https://docs.github.com/en/github-models).
35
+
36
+ ## Compatibility
37
+
38
+ Designed for modern n8n 1.x releases with AI node support.
39
+
40
+ ## Usage
41
+
42
+ 1. Add **GitHub Models Chat Model** to your workflow.
43
+ 2. Select the model name (for example `openai/gpt-4.1-mini`).
44
+ 3. Optionally set sampling temperature and max tokens.
45
+ 4. Connect the node output to a node that accepts a **Language Model** input.
46
+
47
+ Browse available models: [GitHub Models Marketplace](https://github.com/marketplace/models).
48
+
49
+ ## Resources
50
+
51
+ * [n8n community nodes documentation](https://docs.n8n.io/integrations/#community-nodes)
52
+ * [n8n AI nodes documentation](https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/)
53
+ * [GitHub Models documentation](https://docs.github.com/en/github-models)
54
+ * [GitHub Models marketplace](https://github.com/marketplace/models)
55
+
56
+ ## Version history
57
+
58
+ - **0.0.1**: Initial release with GitHub Models Language Model support for n8n AI workflows.
@@ -0,0 +1,10 @@
1
+ import type { ICredentialDataDecryptedObject, ICredentialTestRequest, ICredentialType, IHttpRequestOptions, INodeProperties, Icon } from 'n8n-workflow';
2
+ /**
+  * n8n credential type for the GitHub Models inference API.
+  * Stores a GitHub personal access token ("API Key") and the inference base URL.
+  */
+ export declare class GitHubModelsApi implements ICredentialType {
3
+ name: string;
4
+ displayName: string;
5
+ documentationUrl: string;
6
+ icon: Icon;
7
+ properties: INodeProperties[];
8
+ // Credential test request executed by n8n when the user clicks "Test".
+ test: ICredentialTestRequest;
9
+ /** Adds an `Authorization: Bearer <apiKey>` header to the outgoing request options. */
+ authenticate(credentials: ICredentialDataDecryptedObject, requestOptions: IHttpRequestOptions): Promise<IHttpRequestOptions>;
10
+ }
@@ -0,0 +1,53 @@
1
+ // Compiled (tsc) output of GitHubModelsApi.credentials.ts.
+ // NOTE(review): paired with the adjacent .js.map — keep tokens unchanged or the sourcemap desyncs.
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.GitHubModelsApi = void 0;
+ // Credential type: GitHub PAT ("apiKey") + inference base URL ("url").
+ class GitHubModelsApi {
4
5
+ constructor() {
6
+ this.name = 'gitHubModelsApi';
7
+ this.displayName = 'GitHub Models API';
8
+ this.documentationUrl = 'https://docs.github.com/en/github-models';
9
+ this.icon = {
10
+ light: 'file:../icons/githubmodels.svg',
11
+ dark: 'file:../icons/githubmodels.dark.svg',
12
+ };
13
+ this.properties = [
14
+ {
15
+ displayName: 'API Key',
16
+ name: 'apiKey',
17
+ type: 'string',
18
+ typeOptions: { password: true },
19
+ required: true,
20
+ default: '',
21
+ description: 'Your GitHub personal access token. Generate one at https://github.com/settings/tokens',
22
+ },
23
+ {
24
+ displayName: 'Base URL',
25
+ name: 'url',
26
+ type: 'string',
27
+ default: 'https://models.github.ai/inference',
28
+ description: 'Base URL for the GitHub Models inference API',
29
+ },
30
+ ];
+ // Credential test: sends a minimal 1-max_token chat completion to
+ // `<url>/chat/completions` to verify the token works. Uses a hard-coded
+ // model id, so the test consumes a (tiny) amount of model quota.
+ this.test = {
31
32
+ request: {
33
+ baseURL: '={{$credentials?.url}}',
34
+ url: '/chat/completions',
35
+ method: 'POST',
36
+ body: {
37
+ model: 'openai/gpt-4.1-mini',
38
+ messages: [{ role: 'user', content: 'Hi' }],
39
+ max_tokens: 1,
40
+ },
41
+ json: true,
42
+ },
43
+ };
44
+ }
+ // Injects the bearer token into every request made with this credential.
+ async authenticate(credentials, requestOptions) {
45
46
+ var _a;
+ // Downleveled `requestOptions.headers ??= {}` — ensure headers object exists.
+ (_a = requestOptions.headers) !== null && _a !== void 0 ? _a : (requestOptions.headers = {});
47
48
+ requestOptions.headers['Authorization'] = `Bearer ${credentials.apiKey}`;
49
+ return requestOptions;
50
+ }
51
+ }
52
+ exports.GitHubModelsApi = GitHubModelsApi;
53
+ //# sourceMappingURL=GitHubModelsApi.credentials.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"GitHubModelsApi.credentials.js","sourceRoot":"","sources":["../../credentials/GitHubModelsApi.credentials.ts"],"names":[],"mappings":";;;AASA,MAAa,eAAe;IAA5B;QACC,SAAI,GAAG,iBAAiB,CAAC;QAEzB,gBAAW,GAAG,mBAAmB,CAAC;QAElC,qBAAgB,GAAG,0CAA0C,CAAC;QAE9D,SAAI,GAAS;YACZ,KAAK,EAAE,gCAAgC;YACvC,IAAI,EAAE,qCAAqC;SAC3C,CAAC;QAEF,eAAU,GAAsB;YAC/B;gBACC,WAAW,EAAE,SAAS;gBACtB,IAAI,EAAE,QAAQ;gBACd,IAAI,EAAE,QAAQ;gBACd,WAAW,EAAE,EAAE,QAAQ,EAAE,IAAI,EAAE;gBAC/B,QAAQ,EAAE,IAAI;gBACd,OAAO,EAAE,EAAE;gBACX,WAAW,EACV,uFAAuF;aACxF;YACD;gBACC,WAAW,EAAE,UAAU;gBACvB,IAAI,EAAE,KAAK;gBACX,IAAI,EAAE,QAAQ;gBACd,OAAO,EAAE,oCAAoC;gBAC7C,WAAW,EAAE,8CAA8C;aAC3D;SACD,CAAC;QAEF,SAAI,GAA2B;YAC9B,OAAO,EAAE;gBACR,OAAO,EAAE,wBAAwB;gBACjC,GAAG,EAAE,mBAAmB;gBACxB,MAAM,EAAE,MAAM;gBACd,IAAI,EAAE;oBACL,KAAK,EAAE,qBAAqB;oBAC5B,QAAQ,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC;oBAC3C,UAAU,EAAE,CAAC;iBACb;gBACD,IAAI,EAAE,IAAI;aACV;SACD,CAAC;IAUH,CAAC;IARA,KAAK,CAAC,YAAY,CACjB,WAA2C,EAC3C,cAAmC;;QAEnC,MAAA,cAAc,CAAC,OAAO,oCAAtB,cAAc,CAAC,OAAO,GAAK,EAAE,EAAC;QAC9B,cAAc,CAAC,OAAO,CAAC,eAAe,CAAC,GAAG,UAAU,WAAW,CAAC,MAAM,EAAE,CAAC;QACzE,OAAO,cAAc,CAAC;IACvB,CAAC;CACD;AAtDD,0CAsDC"}
@@ -0,0 +1,3 @@
1
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="#f0f6fc">
2
+ <path d="M12 0C5.37 0 0 5.37 0 12c0 5.31 3.435 9.795 8.205 11.385.6.105.825-.255.825-.57 0-.285-.015-1.23-.015-2.235-3.015.555-3.795-.735-4.035-1.41-.135-.345-.72-1.41-1.23-1.695-.42-.225-1.02-.78-.015-.795.945-.015 1.62.87 1.845 1.23 1.08 1.815 2.805 1.305 3.495.99.105-.78.42-1.305.765-1.605-2.67-.3-5.46-1.335-5.46-5.925 0-1.305.465-2.385 1.23-3.225-.12-.3-.54-1.53.12-3.18 0 0 1.005-.315 3.3 1.23.96-.27 1.98-.405 3-.405s2.04.135 3 .405c2.295-1.56 3.3-1.23 3.3-1.23.66 1.65.24 2.88.12 3.18.765.84 1.23 1.905 1.23 3.225 0 4.605-2.805 5.625-5.475 5.925.435.375.81 1.095.81 2.22 0 1.605-.015 2.895-.015 3.3 0 .315.225.69.825.57A12.02 12.02 0 0 0 24 12c0-6.63-5.37-12-12-12z"/>
3
+ </svg>
@@ -0,0 +1,3 @@
1
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="#24292f">
2
+ <path d="M12 0C5.37 0 0 5.37 0 12c0 5.31 3.435 9.795 8.205 11.385.6.105.825-.255.825-.57 0-.285-.015-1.23-.015-2.235-3.015.555-3.795-.735-4.035-1.41-.135-.345-.72-1.41-1.23-1.695-.42-.225-1.02-.78-.015-.795.945-.015 1.62.87 1.845 1.23 1.08 1.815 2.805 1.305 3.495.99.105-.78.42-1.305.765-1.605-2.67-.3-5.46-1.335-5.46-5.925 0-1.305.465-2.385 1.23-3.225-.12-.3-.54-1.53.12-3.18 0 0 1.005-.315 3.3 1.23.96-.27 1.98-.405 3-.405s2.04.135 3 .405c2.295-1.56 3.3-1.23 3.3-1.23.66 1.65.24 2.88.12 3.18.765.84 1.23 1.905 1.23 3.225 0 4.605-2.805 5.625-5.475 5.925.435.375.81 1.095.81 2.22 0 1.605-.015 2.895-.015 3.3 0 .315.225.69.825.57A12.02 12.02 0 0 0 24 12c0-6.63-5.37-12-12-12z"/>
3
+ </svg>
@@ -0,0 +1,5 @@
1
+ import type { INodeType, INodeTypeDescription, ISupplyDataFunctions } from 'n8n-workflow';
2
+ /**
+  * n8n node that supplies a GitHub Models-backed chat model to AI root nodes
+  * (it has no main inputs; its single output is an AiLanguageModel connection).
+  */
+ export declare class GitHubChatModelNode implements INodeType {
3
+ description: INodeTypeDescription;
4
+ /** Builds a GitHubChatModel from the credential + node parameters and wraps it via supplyModel. */
+ supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<import("n8n-workflow").SupplyData>;
5
+ }
@@ -0,0 +1,115 @@
1
+ // Compiled (tsc) output of GitHubChatModel.node.ts.
+ // NOTE(review): paired with the adjacent .js.map — keep tokens unchanged or the sourcemap desyncs.
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.GitHubChatModelNode = void 0;
4
+ const n8n_workflow_1 = require("n8n-workflow");
5
+ const ai_node_sdk_1 = require("@n8n/ai-node-sdk");
6
+ const model_1 = require("./model");
+ // Sub-node: emits an AiLanguageModel connection ("Model" output), no main inputs.
+ class GitHubChatModelNode {
7
8
+ constructor() {
9
+ this.description = {
10
+ displayName: 'GitHub Models Chat Model',
11
+ name: 'gitHubChatModel',
12
+ icon: {
13
+ light: 'file:../../icons/githubmodels.svg',
14
+ dark: 'file:../../icons/githubmodels.dark.svg',
15
+ },
16
+ group: ['transform'],
17
+ version: [1],
18
+ description: 'Use GitHub Models as an LLM provider for AI chains and agents',
19
+ defaults: {
20
+ name: 'GitHub Models Chat Model',
21
+ },
22
+ codex: {
23
+ categories: ['AI'],
24
+ subcategories: {
25
+ AI: ['Language Models', 'Root Nodes'],
26
+ 'Language Models': ['Chat Models (Recommended)'],
27
+ },
28
+ resources: {
29
+ primaryDocumentation: [
30
+ {
31
+ url: 'https://docs.github.com/en/github-models',
32
+ },
33
+ ],
34
+ },
35
+ },
36
+ inputs: [],
37
+ outputs: [n8n_workflow_1.NodeConnectionTypes.AiLanguageModel],
38
+ outputNames: ['Model'],
39
+ credentials: [
40
+ {
41
+ name: 'gitHubModelsApi',
42
+ required: true,
43
+ },
44
+ ],
45
+ properties: [
46
+ {
47
+ displayName: 'Model',
48
+ name: 'model',
49
+ type: 'string',
50
+ default: 'openai/gpt-4.1-mini',
51
+ description: 'The model to use for generating completions. See the <a href="https://github.com/marketplace/models" target="_blank">GitHub Models Marketplace</a> for available models.',
52
+ placeholder: 'e.g. openai/gpt-4.1, meta/Llama-3.3-70B-Instruct',
53
+ },
54
+ {
55
+ displayName: 'Options',
56
+ name: 'options',
57
+ placeholder: 'Add Option',
58
+ description: 'Additional options to configure the model behaviour',
59
+ type: 'collection',
60
+ default: {},
61
+ options: [
62
+ {
63
+ displayName: 'Sampling Temperature',
64
+ name: 'temperature',
65
+ type: 'number',
66
+ default: 0.7,
67
+ typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 2 },
68
+ description: 'Controls randomness: lower values produce more focused and deterministic outputs, higher values produce more random outputs. Range: 0–2.',
69
+ },
70
+ {
71
+ displayName: 'Max Tokens',
72
+ name: 'maxTokens',
73
+ type: 'number',
74
+ default: -1,
75
+ typeOptions: { minValue: -1 },
76
+ description: 'Maximum number of tokens to generate in the response. Set to -1 to use the model default.',
77
+ },
78
+ ],
79
+ },
80
+ ],
81
+ };
82
+ }
+ // Reads the credential and node parameters, constructs a GitHubChatModel,
+ // and hands it to downstream AI nodes via supplyModel.
+ async supplyData(itemIndex) {
83
84
+ const credentials = await this.getCredentials('gitHubModelsApi');
85
+ const modelName = this.getNodeParameter('model', itemIndex);
86
+ const options = this.getNodeParameter('options', itemIndex, {});
87
+ const apiKey = credentials.apiKey;
+ // Fall back to the public inference endpoint when the credential has no URL.
+ const url = credentials.url || 'https://models.github.ai/inference';
88
89
+ const model = new model_1.GitHubChatModel(modelName, {
+ // Adapter: non-streaming calls from the model are routed through
+ // n8n's request helper rather than a raw fetch.
+ httpRequest: async (method, reqUrl, body, headers) => {
90
91
+ const response = await this.helpers.httpRequest({
92
+ method,
93
+ url: reqUrl,
94
+ body,
95
+ headers: {
96
+ 'Content-Type': 'application/json',
97
+ ...headers,
98
+ },
99
+ json: true,
100
+ });
101
+ return { body: response };
102
+ },
103
+ }, {
104
+ url,
105
+ apiKey,
106
+ temperature: options.temperature,
+ // The -1 UI sentinel (and any negative value) maps to undefined so the
+ // model default applies; only non-negative values are forwarded.
+ maxTokens: options.maxTokens !== undefined && options.maxTokens >= 0
107
108
+ ? options.maxTokens
109
+ : undefined,
110
+ });
111
+ return (0, ai_node_sdk_1.supplyModel)(this, model);
112
+ }
113
+ }
114
+ exports.GitHubChatModelNode = GitHubChatModelNode;
115
+ //# sourceMappingURL=GitHubChatModel.node.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"GitHubChatModel.node.js","sourceRoot":"","sources":["../../../nodes/GitHubChatModel/GitHubChatModel.node.ts"],"names":[],"mappings":";;;AACA,+CAAmD;AACnD,kDAA+C;AAC/C,mCAA0C;AAO1C,MAAa,mBAAmB;IAAhC;QACC,gBAAW,GAAyB;YACnC,WAAW,EAAE,0BAA0B;YACvC,IAAI,EAAE,iBAAiB;YACvB,IAAI,EAAE;gBACL,KAAK,EAAE,mCAAmC;gBAC1C,IAAI,EAAE,wCAAwC;aAC9C;YACD,KAAK,EAAE,CAAC,WAAW,CAAC;YACpB,OAAO,EAAE,CAAC,CAAC,CAAC;YACZ,WAAW,EAAE,+DAA+D;YAC5E,QAAQ,EAAE;gBACT,IAAI,EAAE,0BAA0B;aAChC;YACD,KAAK,EAAE;gBACN,UAAU,EAAE,CAAC,IAAI,CAAC;gBAClB,aAAa,EAAE;oBACd,EAAE,EAAE,CAAC,iBAAiB,EAAE,YAAY,CAAC;oBACrC,iBAAiB,EAAE,CAAC,2BAA2B,CAAC;iBAChD;gBACD,SAAS,EAAE;oBACV,oBAAoB,EAAE;wBACrB;4BACC,GAAG,EAAE,0CAA0C;yBAC/C;qBACD;iBACD;aACD;YACD,MAAM,EAAE,EAAE;YACV,OAAO,EAAE,CAAC,kCAAmB,CAAC,eAAe,CAAC;YAC9C,WAAW,EAAE,CAAC,OAAO,CAAC;YACtB,WAAW,EAAE;gBACZ;oBACC,IAAI,EAAE,iBAAiB;oBACvB,QAAQ,EAAE,IAAI;iBACd;aACD;YACD,UAAU,EAAE;gBACX;oBACC,WAAW,EAAE,OAAO;oBACpB,IAAI,EAAE,OAAO;oBACb,IAAI,EAAE,QAAQ;oBACd,OAAO,EAAE,qBAAqB;oBAC9B,WAAW,EACV,0KAA0K;oBAC3K,WAAW,EAAE,kDAAkD;iBAC/D;gBACD;oBACC,WAAW,EAAE,SAAS;oBACtB,IAAI,EAAE,SAAS;oBACf,WAAW,EAAE,YAAY;oBACzB,WAAW,EAAE,qDAAqD;oBAClE,IAAI,EAAE,YAAY;oBAClB,OAAO,EAAE,EAAE;oBACX,OAAO,EAAE;wBACR;4BACC,WAAW,EAAE,sBAAsB;4BACnC,IAAI,EAAE,aAAa;4BACnB,IAAI,EAAE,QAAQ;4BACd,OAAO,EAAE,GAAG;4BACZ,WAAW,EAAE,EAAE,QAAQ,EAAE,CAAC,EAAE,QAAQ,EAAE,CAAC,EAAE,eAAe,EAAE,CAAC,EAAE;4BAC7D,WAAW,EACV,0IAA0I;yBAC3I;wBACD;4BACC,WAAW,EAAE,YAAY;4BACzB,IAAI,EAAE,WAAW;4BACjB,IAAI,EAAE,QAAQ;4BACd,OAAO,EAAE,CAAC,CAAC;4BACX,WAAW,EAAE,EAAE,QAAQ,EAAE,CAAC,CAAC,EAAE;4BAC7B,WAAW,EACV,2FAA2F;yBAC5F;qBACD;iBACD;aACD;SACD,CAAC;IAuCH,CAAC;IArCA,KAAK,CAAC,UAAU,CAA6B,SAAiB;QAC7D,MAAM,WAAW,GAAG,MAAM,IAAI,CAAC,cAAc,CAAC,iBAAiB,CAAC,CAAC;QACjE,MAAM,SAAS,GAAG,IAAI,CAAC,gBAAgB,CAAC,OAAO,EAAE,SAAS,CAAW,CAAC;QACtE,MAAM,OAAO,GAAG,IAAI,CAAC,gBAAgB,CAAC,SAAS,EAAE,SAAS,EAAE,EAAE,CAAiB,CAAC;QAEhF,MAAM,MAAM,GAAG,WAAW,CAAC,MAAgB,CAAC;QAC5C,MAAM,GAAG,GAAI,WAAW,CAAC,GAAc,IAAI,oCAAoC,CAAC;QAEhF,MAAM,KAAK,GAAG,IAAI,uBAAe,CAChC,
SAAS,EACT;YACC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,IAAI,EAAE,OAAO,EAAE,EAAE;gBACpD,MAAM,QAAQ,GAAG,MAAM,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC;oBAC/C,MAAM;oBACN,GAAG,EAAE,MAAM;oBACX,IAAI;oBACJ,OAAO,EAAE;wBACR,cAAc,EAAE,kBAAkB;wBAClC,GAAG,OAAO;qBACV;oBACD,IAAI,EAAE,IAAI;iBACV,CAAC,CAAC;gBACH,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,CAAC;YAC3B,CAAC;SACD,EACD;YACC,GAAG;YACH,MAAM;YACN,WAAW,EAAE,OAAO,CAAC,WAAW;YAChC,SAAS,EAAE,OAAO,CAAC,SAAS,KAAK,SAAS,IAAI,OAAO,CAAC,SAAS,IAAI,CAAC;gBACnE,CAAC,CAAC,OAAO,CAAC,SAAS;gBACnB,CAAC,CAAC,SAAS;SACZ,CACD,CAAC;QAEF,OAAO,IAAA,yBAAW,EAAC,IAAI,EAAE,KAAK,CAAC,CAAC;IACjC,CAAC;CACD;AAnHD,kDAmHC"}
@@ -0,0 +1,18 @@
1
+ {
2
+ "node": "n8n-nodes-githubmodels.gitHubChatModel",
3
+ "nodeVersion": "1.0",
4
+ "codexVersion": "1.0",
5
+ "categories": ["AI", "Developer Tools"],
6
+ "resources": {
7
+ "credentialDocumentation": [
8
+ {
9
+ "url": "https://docs.github.com/en/github-models"
10
+ }
11
+ ],
12
+ "primaryDocumentation": [
13
+ {
14
+ "url": "https://docs.github.com/en/github-models"
15
+ }
16
+ ]
17
+ }
18
+ }
@@ -0,0 +1,19 @@
1
+ import { BaseChatModel, type ChatModelConfig, type GenerateResult, type Message, type StreamChunk } from '@n8n/ai-node-sdk';
2
+ /** Model configuration: inference base URL plus the bearer token, on top of the SDK config. */
+ export interface GitHubModelsConfig extends ChatModelConfig {
3
+ url: string;
4
+ apiKey: string;
5
+ }
6
+ /** Injected transport used for non-streaming requests (streaming uses fetch directly in the impl). */
+ interface RequestConfig {
7
+ httpRequest: (method: 'POST', url: string, body: object, headers: Record<string, string>) => Promise<{
8
+ body: unknown;
9
+ }>;
10
+ }
11
+ /** Chat model implementation backed by the GitHub Models OpenAI-compatible /chat/completions API. */
+ export declare class GitHubChatModel extends BaseChatModel<GitHubModelsConfig> {
12
+ private requests;
13
+ private baseURL;
14
+ private apiKey;
15
+ constructor(modelId: string, requests: RequestConfig, config: GitHubModelsConfig);
16
+ /** Single, non-streaming completion request. */
+ generate(messages: Message[], config?: GitHubModelsConfig): Promise<GenerateResult>;
17
+ /** Streaming completion via server-sent events; yields text deltas then a finish chunk. */
+ stream(messages: Message[], config?: GitHubModelsConfig): AsyncIterable<StreamChunk>;
18
+ }
19
+ export {};
@@ -0,0 +1,147 @@
1
+ // Compiled (tsc) output of nodes/GitHubChatModel/model.ts.
+ // NOTE(review): paired with the adjacent .js.map — keep tokens unchanged or the sourcemap desyncs.
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.GitHubChatModel = void 0;
4
+ const ai_node_sdk_1 = require("@n8n/ai-node-sdk");
+ // Converts one SDK content part to the OpenAI chat format:
+ // text passes through; a string (base64) file part becomes an image_url data
+ // URI (mediaType defaulting to image/jpeg); anything else returns null and is
+ // silently dropped by the caller.
+ function contentToOpenAI(content) {
5
6
+ var _a;
7
+ if (content.type === 'text') {
8
+ return { type: 'text', text: content.text };
9
+ }
10
+ if (content.type === 'file') {
11
+ const data = content.data;
12
+ if (typeof data === 'string') {
13
+ const mimeType = (_a = content.mediaType) !== null && _a !== void 0 ? _a : 'image/jpeg';
14
+ return {
15
+ type: 'image_url',
16
+ image_url: { url: `data:${mimeType};base64,${data}` },
17
+ };
18
+ }
19
+ }
20
+ return null;
21
+ }
+ // Maps SDK messages to OpenAI chat messages. When every part is text, the
+ // parts are collapsed into one newline-joined string; otherwise the parts
+ // array is sent as-is.
+ function messagesToOpenAI(messages) {
22
23
+ return messages.map((m) => {
24
+ const parts = m.content
25
+ .map((c) => contentToOpenAI(c))
26
+ .filter((p) => p !== null);
27
+ const hasNonText = parts.some((p) => p.type !== 'text');
28
+ const content = hasNonText
29
+ ? parts
30
+ : parts.map((p) => p.text).join('\n');
31
+ return { role: m.role, content };
32
+ });
33
+ }
34
+ class GitHubChatModel extends ai_node_sdk_1.BaseChatModel {
35
+ constructor(modelId, requests, config) {
36
+ super('github-models', modelId, config);
37
+ this.requests = requests;
+ // Strip one trailing slash so path concatenation below stays clean.
+ this.baseURL = config.url.replace(/\/$/, '');
38
39
+ this.apiKey = config.apiKey;
40
+ }
+ // Non-streaming completion through the injected httpRequest transport.
+ async generate(messages, config) {
41
42
+ var _a, _b, _c, _d, _e, _f, _g, _h;
43
+ const merged = this.mergeConfig(config);
44
+ const requestBody = {
45
+ model: this.modelId,
46
+ messages: messagesToOpenAI(messages),
47
+ stream: false,
48
+ };
49
+ if (merged.temperature !== undefined)
50
+ requestBody.temperature = merged.temperature;
51
+ if (merged.maxTokens !== undefined)
52
+ requestBody.max_tokens = merged.maxTokens;
53
+ const response = await this.requests.httpRequest('POST', `${this.baseURL}/chat/completions`, requestBody, { Authorization: `Bearer ${this.apiKey}` });
54
+ const body = response.body;
+ // NOTE(review): assumes body.choices is a non-empty array — an empty or
+ // malformed API response would throw here; consider guarding.
+ const choice = body.choices[0];
55
56
+ const message = {
57
+ role: 'assistant',
58
+ content: [{ type: 'text', text: (_a = choice.message.content) !== null && _a !== void 0 ? _a : '' }],
59
+ };
60
+ return {
61
+ id: body.id,
62
+ finishReason: (_b = choice.finish_reason) !== null && _b !== void 0 ? _b : 'stop',
63
+ usage: {
64
+ promptTokens: (_d = (_c = body.usage) === null || _c === void 0 ? void 0 : _c.prompt_tokens) !== null && _d !== void 0 ? _d : 0,
65
+ completionTokens: (_f = (_e = body.usage) === null || _e === void 0 ? void 0 : _e.completion_tokens) !== null && _f !== void 0 ? _f : 0,
66
+ totalTokens: (_h = (_g = body.usage) === null || _g === void 0 ? void 0 : _g.total_tokens) !== null && _h !== void 0 ? _h : 0,
67
+ },
68
+ message,
69
+ };
70
+ }
+ // Streaming completion: POSTs with stream:true and parses the SSE response
+ // line-by-line, yielding text-delta chunks and a trailing finish chunk.
+ // NOTE(review): unlike generate(), this calls global fetch directly instead
+ // of this.requests.httpRequest, so it bypasses n8n's request helper —
+ // confirm that is intentional.
+ async *stream(messages, config) {
71
72
+ var _a, _b, _c;
73
+ const merged = this.mergeConfig(config);
74
+ const requestBody = {
75
+ model: this.modelId,
76
+ messages: messagesToOpenAI(messages),
77
+ stream: true,
78
+ };
79
+ if (merged.temperature !== undefined)
80
+ requestBody.temperature = merged.temperature;
81
+ if (merged.maxTokens !== undefined)
82
+ requestBody.max_tokens = merged.maxTokens;
83
+ const url = `${this.baseURL}/chat/completions`;
84
+ const reqHeaders = {
85
+ Authorization: `Bearer ${this.apiKey}`,
86
+ 'Content-Type': 'application/json',
87
+ Accept: 'text/event-stream',
88
+ };
89
+ const response = await fetch(url, {
90
+ method: 'POST',
91
+ headers: reqHeaders,
92
+ body: JSON.stringify(requestBody),
93
+ });
94
+ if (!response.ok) {
95
+ const errorText = await response.text();
96
+ throw new Error(`GitHub Models API error ${response.status}: ${errorText}`);
97
+ }
98
+ if (!response.body) {
99
+ throw new Error('No response body for streaming request');
100
+ }
101
+ const reader = response.body.getReader();
102
+ const decoder = new TextDecoder();
103
+ let buffer = '';
104
+ let finishReason = 'stop';
105
+ try {
106
+ while (true) {
107
+ const { done, value } = await reader.read();
108
+ if (done)
109
+ break;
+ // Accumulate decoded bytes; the last (possibly partial) line is kept
+ // in `buffer` for the next read.
+ buffer += decoder.decode(value, { stream: true });
110
111
+ const lines = buffer.split('\n');
112
+ buffer = (_a = lines.pop()) !== null && _a !== void 0 ? _a : '';
113
+ for (const line of lines) {
114
+ const trimmed = line.trim();
+ // Skip blank lines and SSE comment lines (leading ':').
+ if (!trimmed || trimmed.startsWith(':'))
115
116
+ continue;
117
+ if (trimmed.startsWith('data:')) {
118
+ const data = trimmed.slice(5).trim();
+ // [DONE] only breaks the inner per-line loop; the outer loop
+ // ends when the stream itself closes.
+ if (data === '[DONE]')
119
120
+ break;
121
+ try {
122
+ const parsed = JSON.parse(data);
123
+ const streamChoice = (_b = parsed.choices) === null || _b === void 0 ? void 0 : _b[0];
124
+ if (!streamChoice)
125
+ continue;
126
+ if (streamChoice.finish_reason) {
127
+ finishReason = streamChoice.finish_reason;
128
+ }
129
+ const content = (_c = streamChoice.delta) === null || _c === void 0 ? void 0 : _c.content;
130
+ if (content) {
131
+ yield { type: 'text-delta', delta: content };
132
+ }
133
+ }
+ // Malformed SSE payloads are deliberately ignored (best-effort parse).
+ catch {
134
135
+ }
136
+ }
137
+ }
138
+ }
139
+ }
140
+ finally {
141
+ reader.releaseLock();
142
+ }
+ // NOTE(review): any bytes still in `buffer` (or pending in the decoder —
+ // no final decoder.decode() flush) are discarded here; harmless for
+ // well-formed SSE that ends with a newline, but worth confirming.
+ yield { type: 'finish', finishReason };
143
144
+ }
145
+ }
146
+ exports.GitHubChatModel = GitHubChatModel;
147
+ //# sourceMappingURL=model.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"model.js","sourceRoot":"","sources":["../../../nodes/GitHubChatModel/model.ts"],"names":[],"mappings":";;;AAAA,kDAQ0B;AAsD1B,SAAS,eAAe,CAAC,OAAuB;;IAC/C,IAAI,OAAO,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;QAC7B,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,OAAO,CAAC,IAAI,EAAE,CAAC;IAC7C,CAAC;IACD,IAAI,OAAO,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;QAE7B,MAAM,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;QAC1B,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE,CAAC;YAC9B,MAAM,QAAQ,GAAG,MAAA,OAAO,CAAC,SAAS,mCAAI,YAAY,CAAC;YACnD,OAAO;gBACN,IAAI,EAAE,WAAW;gBACjB,SAAS,EAAE,EAAE,GAAG,EAAE,QAAQ,QAAQ,WAAW,IAAI,EAAE,EAAE;aACrD,CAAC;QACH,CAAC;IACF,CAAC;IAED,OAAO,IAAI,CAAC;AACb,CAAC;AAED,SAAS,gBAAgB,CAAC,QAAmB;IAC5C,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE;QACzB,MAAM,KAAK,GAAG,CAAC,CAAC,OAAO;aACrB,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC;aAC9B,MAAM,CAAC,CAAC,CAAC,EAA0B,EAAE,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC;QAGpD,MAAM,UAAU,GAAG,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC,CAAC;QACxD,MAAM,OAAO,GAAiC,UAAU;YACvD,CAAC,CAAC,KAAK;YACP,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAE,CAAuB,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QAE9D,OAAO,EAAE,IAAI,EAAE,CAAC,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC;IAClC,CAAC,CAAC,CAAC;AACJ,CAAC;AAED,MAAa,eAAgB,SAAQ,2BAAiC;IAIrE,YACC,OAAe,EACP,QAAuB,EAC/B,MAA0B;QAE1B,KAAK,CAAC,eAAe,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;QAHhC,aAAQ,GAAR,QAAQ,CAAe;QAI/B,IAAI,CAAC,OAAO,GAAG,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;QAC7C,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,CAAC;IAC7B,CAAC;IAED,KAAK,CAAC,QAAQ,CAAC,QAAmB,EAAE,MAA2B;;QAC9D,MAAM,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;QACxC,MAAM,WAAW,GAA4B;YAC5C,KAAK,EAAE,IAAI,CAAC,OAAO;YACnB,QAAQ,EAAE,gBAAgB,CAAC,QAAQ,CAAC;YACpC,MAAM,EAAE,KAAK;SACb,CAAC;QAEF,IAAI,MAAM,CAAC,WAAW,KAAK,SAAS;YAAE,WAAW,CAAC,WAAW,GAAG,MAAM,CAAC,WAAW,CAAC;QACnF,IAAI,MAAM,CAAC,SAAS,KAAK,SAAS;YAAE,WAAW,CAAC,UAAU,GAAG,MAAM,CAAC,SAAS,CAAC;QAE9E,MAAM,QAAQ,GAAG,MAAM,IAAI,CAAC,QAAQ,CAAC,WAAW,CAC/C,MAAM,EACN,GAAG,IAAI,CAAC,OAAO,mBAAmB,EAClC
,WAAW,EACX,EAAE,aAAa,EAAE,UAAU,IAAI,CAAC,MAAM,EAAE,EAAE,CAC1C,CAAC;QAEF,MAAM,IAAI,GAAG,QAAQ,CAAC,IAA0B,CAAC;QACjD,MAAM,MAAM,GAAG,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;QAE/B,MAAM,OAAO,GAAY;YACxB,IAAI,EAAE,WAAW;YACjB,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,MAAA,MAAM,CAAC,OAAO,CAAC,OAAO,mCAAI,EAAE,EAAE,CAAC;SAC/D,CAAC;QAEF,OAAO;YACN,EAAE,EAAE,IAAI,CAAC,EAAE;YACX,YAAY,EAAE,MAAC,MAAM,CAAC,aAA8B,mCAAI,MAAM;YAC9D,KAAK,EAAE;gBACN,YAAY,EAAE,MAAA,MAAA,IAAI,CAAC,KAAK,0CAAE,aAAa,mCAAI,CAAC;gBAC5C,gBAAgB,EAAE,MAAA,MAAA,IAAI,CAAC,KAAK,0CAAE,iBAAiB,mCAAI,CAAC;gBACpD,WAAW,EAAE,MAAA,MAAA,IAAI,CAAC,KAAK,0CAAE,YAAY,mCAAI,CAAC;aAC1C;YACD,OAAO;SACP,CAAC;IACH,CAAC;IAED,KAAK,CAAC,CAAC,MAAM,CAAC,QAAmB,EAAE,MAA2B;;QAC7D,MAAM,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;QACxC,MAAM,WAAW,GAA4B;YAC5C,KAAK,EAAE,IAAI,CAAC,OAAO;YACnB,QAAQ,EAAE,gBAAgB,CAAC,QAAQ,CAAC;YACpC,MAAM,EAAE,IAAI;SACZ,CAAC;QAEF,IAAI,MAAM,CAAC,WAAW,KAAK,SAAS;YAAE,WAAW,CAAC,WAAW,GAAG,MAAM,CAAC,WAAW,CAAC;QACnF,IAAI,MAAM,CAAC,SAAS,KAAK,SAAS;YAAE,WAAW,CAAC,UAAU,GAAG,MAAM,CAAC,SAAS,CAAC;QAE9E,MAAM,GAAG,GAAG,GAAG,IAAI,CAAC,OAAO,mBAAmB,CAAC;QAC/C,MAAM,UAAU,GAA2B;YAC1C,aAAa,EAAE,UAAU,IAAI,CAAC,MAAM,EAAE;YACtC,cAAc,EAAE,kBAAkB;YAClC,MAAM,EAAE,mBAAmB;SAC3B,CAAC;QAGF,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,GAAG,EAAE;YACjC,MAAM,EAAE,MAAM;YACd,OAAO,EAAE,UAAU;YACnB,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC,WAAW,CAAC;SACjC,CAAC,CAAC;QAEH,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,CAAC;YAClB,MAAM,SAAS,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;YACxC,MAAM,IAAI,KAAK,CAAC,2BAA2B,QAAQ,CAAC,MAAM,KAAK,SAAS,EAAE,CAAC,CAAC;QAC7E,CAAC;QAED,IAAI,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC;YACpB,MAAM,IAAI,KAAK,CAAC,wCAAwC,CAAC,CAAC;QAC3D,CAAC;QAED,MAAM,MAAM,GAAI,QAAQ,CAAC,IAAmC,CAAC,SAAS,EAAE,CAAC;QACzE,MAAM,OAAO,GAAG,IAAI,WAAW,EAAE,CAAC;QAClC,IAAI,MAAM,GAAG,EAAE,CAAC;QAChB,IAAI,YAAY,GAAiB,MAAM,CAAC;QAExC,IAAI,CAAC;YACJ,OAAO,IAAI,EAAE,CAAC;gBACb,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;gBAC5C,IAAI,IAAI;oBAAE,MAAM;gBAEhB,MAAM,IAAI,OAAO,CAAC,MAAM,CAAC,KAAK,EAAE,EAAE,MAAM,EAAE,IAA
I,EAAE,CAAC,CAAC;gBAClD,MAAM,KAAK,GAAG,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;gBACjC,MAAM,GAAG,MAAA,KAAK,CAAC,GAAG,EAAE,mCAAI,EAAE,CAAC;gBAE3B,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE,CAAC;oBAC1B,MAAM,OAAO,GAAG,IAAI,CAAC,IAAI,EAAE,CAAC;oBAC5B,IAAI,CAAC,OAAO,IAAI,OAAO,CAAC,UAAU,CAAC,GAAG,CAAC;wBAAE,SAAS;oBAElD,IAAI,OAAO,CAAC,UAAU,CAAC,OAAO,CAAC,EAAE,CAAC;wBACjC,MAAM,IAAI,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC;wBACrC,IAAI,IAAI,KAAK,QAAQ;4BAAE,MAAM;wBAE7B,IAAI,CAAC;4BACJ,MAAM,MAAM,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAA0B,CAAC;4BACzD,MAAM,YAAY,GAAG,MAAA,MAAM,CAAC,OAAO,0CAAG,CAAC,CAAC,CAAC;4BACzC,IAAI,CAAC,YAAY;gCAAE,SAAS;4BAE5B,IAAI,YAAY,CAAC,aAAa,EAAE,CAAC;gCAChC,YAAY,GAAG,YAAY,CAAC,aAA6B,CAAC;4BAC3D,CAAC;4BAED,MAAM,OAAO,GAAG,MAAA,YAAY,CAAC,KAAK,0CAAE,OAAO,CAAC;4BAC5C,IAAI,OAAO,EAAE,CAAC;gCACb,MAAM,EAAE,IAAI,EAAE,YAAY,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC;4BAC9C,CAAC;wBACF,CAAC;wBAAC,MAAM,CAAC;wBAET,CAAC;oBACF,CAAC;gBACF,CAAC;YACF,CAAC;QACF,CAAC;gBAAS,CAAC;YACV,MAAM,CAAC,WAAW,EAAE,CAAC;QACtB,CAAC;QAED,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,YAAY,EAAE,CAAC;IACxC,CAAC;CACD;AArID,0CAqIC"}
@@ -0,0 +1,60 @@
1
+ {
2
+ "name": "n8n-nodes-githubmodels",
3
+ "version": "0.0.2",
4
+ "description": "n8n Language Model node for GitHub Models",
5
+ "license": "MIT",
6
+ "homepage": "https://github.com/hintdesk/n8n-nodes-githubmodels",
7
+ "keywords": [
8
+ "n8n-community-node-package",
9
+ "n8n",
10
+ "n8n-nodes",
11
+ "github-models",
12
+ "llm",
13
+ "ai"
14
+ ],
15
+ "author": {
16
+ "name": "hintdesk",
17
+ "email": "hintdesk@gmail.com"
18
+ },
19
+ "repository": {
20
+ "type": "git",
21
+ "url": "https://github.com/hintdesk/n8n-nodes-githubmodels.git"
22
+ },
23
+ "bugs": {
24
+ "url": "https://github.com/hintdesk/n8n-nodes-githubmodels/issues"
25
+ },
26
+ "scripts": {
27
+ "build": "n8n-node build",
28
+ "build:watch": "tsc --watch",
29
+ "dev": "n8n-node dev",
30
+ "lint": "n8n-node lint",
31
+ "lint:fix": "n8n-node lint --fix",
32
+ "release": "n8n-node release",
33
+ "prepublishOnly": "n8n-node prerelease"
34
+ },
35
+ "files": [
36
+ "dist"
37
+ ],
38
+ "n8n": {
39
+ "n8nNodesApiVersion": 1,
40
+ "aiNodeSdkVersion": 1,
41
+ "strict": true,
42
+ "credentials": [
43
+ "dist/credentials/GitHubModelsApi.credentials.js"
44
+ ],
45
+ "nodes": [
46
+ "dist/nodes/GitHubChatModel/GitHubChatModel.node.js"
47
+ ]
48
+ },
49
+ "devDependencies": {
50
+ "@n8n/node-cli": "*",
51
+ "eslint": "9.32.0",
52
+ "prettier": "3.6.2",
53
+ "release-it": "^19.0.4",
54
+ "typescript": "5.9.2"
55
+ },
56
+ "peerDependencies": {
57
+ "n8n-workflow": "*",
58
+ "@n8n/ai-node-sdk": "*"
59
+ }
60
+ }