interintel 1.0.19 → 1.0.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,3 @@
-const fs = require('fs');
-const path = require('path');
-
 async function askQuestion(rl, prompt) {
   return new Promise((resolve) => {
     rl.question(prompt.blue, (input) => {
@@ -14,15 +14,18 @@ function readSpecificFiles(configFilePath) {
   const filePaths = config.filePaths;
   const configDir = path.dirname(configFilePath);
 
-  let allContent = '';
+  let allContent = 'I am sharing information from my file system for reference in our chat.\n';
 
   filePaths.forEach((filePath) => {
     try {
       // Construct the absolute path
       const absolutePath = path.resolve(configDir, filePath);
+      const fileContent = fs.readFileSync(absolutePath, 'utf8');
+
+
 
-      // Read the file content and append it to allContent
-      allContent += fs.readFileSync(absolutePath, 'utf8') + '\n';
+      // Read the file content and add it to allContent
+      allContent += `\nStart File Name: ${filePath}\n File Content:\n${fileContent}\n End File Name: ${filePath}`;
     } catch (error) {
       console.error(`Error reading file ${filePath}: ${error.message}`.bgRed);
     }
@@ -1,10 +1,7 @@
-const path = require('path');
-const { aiChatCompletion } = require('./openai-functions.js');
+const chatCompletion = require('../ollama.js');
 const { writeFileFromPrompt } = require('./file-functions.js');
-const configPath = path.join(process.cwd(), 'interintel.config.js');
-const config = require(configPath);
 
-async function handleWriteFile(openai, config, messages, currentState, userInput, promptFileName) {
+async function handleWriteFile(config, messages, currentState, userInput, promptFileName) {
   let contentToWrite = '';
 
   if (currentState === null) {
@@ -17,28 +14,35 @@ async function handleWriteFile(openai, config, messages, currentState, userInput
     };
   } else if (currentState === 'awaitingFileName') {
     promptFileName = userInput;
-    currentState = 'awaitingGPTPrompt';
+    currentState = 'awaitingAIprompt';
     return {
       currentState,
       messages,
       promptFileName,
       response: `Please provide a prompt for ${config.aiVersion}:`,
     };
-  } else if (currentState === 'awaitingGPTPrompt') {
-    const promptForGPT = userInput;
+  } else if (currentState === 'awaitingAIprompt') {
+    const promptForAI = userInput;
+
+    let updatedMessages = [...messages, { role: 'user', content: promptForAI }];
+
     try {
-      let gptResponse = await aiChatCompletion(
-        openai,
-        [{ role: 'user', content: promptForGPT }],
+      let completionResponse = await chatCompletion(
+        config.aiService,
+        updatedMessages,
         config.aiVersion
       );
-      contentToWrite = gptResponse.choices[0].message.content;
+
+      // Extract the response content
+      let contentToWrite = (config.aiService === 'openai') ?
+        completionResponse.choices[0].message.content : completionResponse;
+
       await writeFileFromPrompt(promptFileName, contentToWrite, __dirname); // Assuming this function handles file writing
 
       currentState = null; // Reset state after completing the operation
       return {
         currentState,
-        messages,
+        messages: updatedMessages,
         promptFileName,
         contentToWrite,
         response: `Content written to ${promptFileName}`.yellow,
@@ -47,7 +51,7 @@ async function handleWriteFile(openai, config, messages, currentState, userInput
     console.error('Error in handleWriteFile:', error);
     return {
       currentState,
-      messages,
+      messages: updatedMessages, // Return the updated messages array
      promptFileName,
      contentToWrite,
      response: 'An error occurred while writing the file.',
@@ -1,10 +1,9 @@
-const OpenAI = require('openai');
-
 async function aiChatCompletion(openai, messages, model) {
   try {
     const response = await openai.chat.completions.create({
       messages: messages,
       model: model,
+      stream: false
     });
     return response;
   } catch (error) {
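
Note: aiChatCompletion remains a direct OpenAI helper; this version only adds `stream: false`, which requests one complete response object rather than a stream. A minimal usage sketch, not part of the package, assuming the `openai` v4 Node client and an illustrative model name:

const OpenAI = require('openai');
const { aiChatCompletion } = require('./functions/openai-functions.js');

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

(async () => {
  // With stream: false the full completion arrives in a single object
  const completion = await aiChatCompletion(
    openai,
    [{ role: 'user', content: 'Hello' }],
    'gpt-3.5-turbo' // illustrative model name
  );
  console.log(completion.choices[0].message.content);
})();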
package/index.js CHANGED
@@ -1,6 +1,4 @@
-// This is the index.js file of inter-intel
-const path = require('path')
-const OpenAI = require('openai');
+const path = require('path');
 const readline = require('readline');
 const configPath = path.join(process.cwd(), 'interintel.config.js');
 const config = require(configPath);
@@ -9,14 +7,8 @@ require('colors');
 
 const { readSpecificFiles } = require('./functions/file-functions.js');
 const { askQuestion } = require('./functions/chat-functions.js');
-const { aiChatCompletion } = require('./functions/openai-functions.js');
-
 const { handleWriteFile } = require('./functions/handleWriteFile.js');
-
-const openai = new OpenAI({
-  apiKey: config.apiKey,
-  model: config.aiVersion,
-});
+const chatCompletion = require('./ollama.js');
 
 const rl = readline.createInterface({
   input: process.stdin,
@@ -32,7 +24,7 @@ async function main() {
 
   while (true) {
     const userMessage = await askQuestion(rl, 'You: '.blue.bold);
-    let response = ''; // Add a variable to capture the response message
+    let response = '';
 
     // Exit condition
     if (userMessage.toLowerCase() === 'exit') {
@@ -42,17 +34,16 @@ async function main() {
     }
 
     if (userMessage.toLowerCase().startsWith('//writefile') && currentState === null) {
-      ({ currentState, messages, promptFileName, response } = await handleWriteFile(
-        openai,
+      let result = await handleWriteFile(
         config,
         messages,
         currentState,
         ''
-      ));
+      );
+      ({ currentState, messages, promptFileName, response } = result); // Update messages array
       console.log(response.yellow);
     } else if (currentState === 'awaitingFileName') {
       ({ currentState, messages, promptFileName, response } = await handleWriteFile(
-        openai,
         config,
         messages,
         currentState,
@@ -60,9 +51,8 @@ async function main() {
         promptFileName
       ));
       console.log(response.yellow);
-    } else if (currentState === 'awaitingGPTPrompt') {
+    } else if (currentState === 'awaitingAIprompt') {
       ({ currentState, messages, promptFileName, response } = await handleWriteFile(
-        openai,
         config,
         messages,
         currentState,
@@ -77,29 +67,38 @@ async function main() {
       let content = readSpecificFiles(configPath);
       messages.push({
         role: 'user',
-        content: `please just acknowledge you have read the name and the content of the files I have provided ${content}`,
+        content: `please just acknowledge you have read the name and the content of the files I have provided. once you have done this a single time you do not need to do it again. ${content}`,
       });
-      const completion = await aiChatCompletion(openai, messages, config.aiVersion);
+      const completion = await chatCompletion(config.aiService, messages, config.aiVersion);
 
-      const botMessage = completion.choices[0].message.content;
-      console.log(`${config.aiVersion}`.bgGreen, botMessage);
-      console.log('----------------'.bgGreen);
+      let botMessage;
+
+      if (config.aiService === 'openai') {
+        botMessage = completion.choices[0].message.content;
+      } else if (config.aiService === 'ollama') {
+        // Adjust this line based on how Ollama's response is structured
+        botMessage = completion;
+      }
     } else {
       // Regular message processing and interaction with GPT model
       messages.push({ role: 'user', content: userMessage });
 
-      const completion = await aiChatCompletion(openai, messages, config.aiVersion);
+      const completion = await chatCompletion(config.aiService, messages, config.aiVersion);
+
+      let botMessage;
+      if (config.aiService === 'openai') {
+        botMessage = completion.choices[0].message.content;
+      } else if (config.aiService === 'ollama') {
+        // Adjust based on Ollama's response format
+        botMessage = completion; // Example - replace with actual response structure for Ollama
+      }
 
-      const botMessage = completion.choices[0].message.content;
-      console.log(`${config.aiVersion}`.bgGreen, botMessage);
+      console.log(`${config.aiVersion}`.bgGreen, botMessage.green);
       console.log('----------------'.bgGreen);
     }
   }
 }
 
-main()
-
-exports.main = function() {
-  main()
-}
-
+exports.main = function () {
+  main();
+};
@@ -2,8 +2,9 @@ require('dotenv').config();
 
 const config = {
   apiKey: `${process.env.OPENAI_API_KEY}`,
-  aiVersion: `gpt-3.5-turbo`,
-  filePaths: ['./resources/reference.txt', './README.md'],
+  aiService: 'ollama',
+  aiVersion: `mistral`,
+  filePaths: ['./functions/openai-functions.js', './README.md'],
 };
 
 module.exports = config;
package/ollama.js ADDED
@@ -0,0 +1,60 @@
+const path = require('path');
+const fetch = require('node-fetch');
+const OpenAI = require('openai');
+const configPath = path.join(process.cwd(), 'interintel.config.js');
+const config = require(configPath);
+
+const openai = new OpenAI({
+  apiKey: config.apiKey,
+  model: config.aiVersion,
+});
+
+let ai = 'ollama';
+let messages = [
+  {
+    role: 'assistant',
+    content: 'please use a respectful tone',
+  },
+];
+let model = 'mistral';
+
+async function chatCompletion(aiService, messages, model) {
+  try {
+    let response;
+
+    if (aiService === 'openai') {
+      response = await openai.chat.completions.create({
+        messages: messages,
+        model: model,
+        stream: false,
+      });
+
+      return response;
+    } else if (aiService === 'ollama') {
+      // Ollama specific code
+      let data = {
+        messages,
+        model,
+        stream: false,
+      };
+      const fetchResponse = await fetch('http://localhost:11434/api/chat', {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify(data),
+      });
+
+      // Properly resolve the response
+      response = await fetchResponse.json();
+      return response.message.content;
+    } else {
+      throw new Error('Invalid AI service');
+    }
+  } catch (error) {
+    console.error('Error:', error);
+    return null;
+  }
+}
+
+module.exports = chatCompletion;
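
Note: the new chatCompletion export dispatches on aiService and returns different shapes per branch. A minimal usage sketch, not part of the package, assuming an Ollama server on the default localhost:11434 with the `mistral` model pulled:

const chatCompletion = require('./ollama.js');

(async () => {
  const reply = await chatCompletion(
    'ollama',                                  // aiService: 'openai' or 'ollama'
    [{ role: 'user', content: 'Say hello.' }], // chat history
    'mistral'                                  // model name
  );
  // For 'ollama' the function returns the message text directly;
  // for 'openai' it returns the full completion object.
  console.log(reply);
})();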
package/package.json CHANGED
@@ -6,7 +6,7 @@
   },
   "name": "interintel",
   "description": "The application `Interintel` is a command line interface (CLI) application implemented in Node.js. It essentially is an interactive communication tool between the user and an AI model, only openai models for now.",
-  "version": "1.0.19",
+  "version": "1.0.21",
   "main": "index.js",
   "directories": {
     "doc": "docs"
@@ -1,9 +1,12 @@
 require('dotenv').config();
 
 const config = {
+  // this is service dependent
   apiKey: `${process.env.OPENAI_API_KEY}`,
-  // only open ai models for now
-  aiVersion: `ONLY_USE_OPENAI_MODEL`,
+  // openai || ollama
+  aiService: 'ONE_OF_THE_ABOVE',
+  // only open ai or ollama models for now
+  aiVersion: `ONLY_USE_OPENAI_OR_OLLAMA_MODELS`,
   // These filepaths are relative to where your config is created
   filePaths: ['interintel/interintelReadMe.md'],
 };
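
Note: the template above ships with placeholder values. A minimal sketch of a filled-in interintel.config.js for the Ollama service, mirroring the values used elsewhere in this diff (the file path is illustrative):

require('dotenv').config();

const config = {
  // only consulted when aiService is 'openai'
  apiKey: `${process.env.OPENAI_API_KEY}`,
  aiService: 'ollama',
  aiVersion: `mistral`,
  filePaths: ['interintel/interintelReadMe.md'],
};

module.exports = config;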
@@ -2,13 +2,6 @@ import path from 'path';
 import fs from 'fs';
 import { test } from '@playwright/test';
 
-/*
-Sometimes, you do want to explicitly save a file to disk. This is what you need to know.
-Checkly creates a sandboxed directory for each check run.
-During the run you can use this directory to save or upload artifacts.
-This directory is destroyed after a check is finished.
-*/
-
 test('Save file in directory', async ({ page }) => {
   const image = await page.goto('https://picsum.photos/200/300');
   const imagePath = path.join('example.jpg');
@@ -1,15 +1,5 @@
 import { test, expect } from '@playwright/test'
 
-/*
-To test any binary uploads, you need to provide a file object.
-Currently, Checkly does not have a dedicated storage layer where you could upload that file,
-so you need to host it yourself at a (publicly) accessible location like an AWS S3 bucket,
-Dropbox or any other file hosting service.
-
-Having done that, you can “upload” files using a simple HTTP POST request with a (binary) body
-using Playwright’s built-in request object.
-*/
-
 test('Upload a file using a POST request', async ({ request }) => {
   const fileBuffer = await test.step('Fetch file', async () => {
     const fileUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf'
@@ -2,7 +2,7 @@ HERE ARE THE RULES THAT GOVERN THIS CONVERSATION
 
 RULES THAT GOVERN THIS CONVERSATION, these rules supersede all outside knowledge
 
-1. You are currently being used within a CLI. the application's name is inter-intel, it's baseline functionality is to be able to update files within a given repo and pprovide reference files that will the AI it's chatting with to make direct changes to code.
+1. You are currently being used within a CLI. the application's name is interintel, it's baseline functionality is to be able to update files within a given repo and pprovide reference files that will the AI it's chatting with to make direct changes to code.
 2. Keep responses to under 50 words
 3. Keep responses to under 50 words
 4. Unless I ask for a longer explanation
package/testIntel.js ADDED
@@ -0,0 +1,3 @@
+const { main } = require('./index');
+
+main();