interintel 1.1.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,84 +0,0 @@
1
- import fs from 'fs';
2
- import path from 'path';
3
-
4
- // READING FOR INITIAL REFERENCE
5
- async function readSpecificFiles(configFilePath) {
6
- try {
7
- // Dynamically import the config file
8
- const absoluteConfigPath = path.resolve(configFilePath);
9
- const configModule = await import('file://' + absoluteConfigPath);
10
- const config = configModule.default;
11
-
12
- const filePaths = config.filePaths;
13
- let allContent = 'I am sharing information from my file system for reference in our chat.\n';
14
-
15
- for (const filePath of filePaths) {
16
- try {
17
- // Construct the absolute path
18
- const absolutePath = path.resolve(process.cwd(), filePath);
19
- const fileContent = fs.readFileSync(absolutePath, 'utf8');
20
-
21
- // Read the file content and add it to allContent
22
- allContent += `\nStart File Name: ${filePath}\n File Content:\n${fileContent}\n End File Name: ${filePath}`;
23
- } catch (error) {
24
- console.error(`Error reading file ${filePath}: ${error.message}`);
25
- }
26
- }
27
-
28
- // Add console.log statements to communicate to the user
29
- console.log(
30
- `${config.aiVersion} sent reference files:`.yellow,
31
- `${logFileNames(filePaths)}`.yellow
32
- );
33
- return allContent;
34
- } catch (error) {
35
- console.error(`Error reading config file: ${error.message}`);
36
- return '';
37
- }
38
- }
39
-
40
- function writeFileFromPrompt(promptFileName, contentToWrite, baseDir) {
41
- try {
42
- if (!promptFileName.includes('.')) {
43
- throw new Error(
44
- "Invalid file name. Please include a file name with an extension (e.g., 'output.txt')."
45
- );
46
- }
47
-
48
- const projectRoot = process.cwd();
49
- const fullPath = path.join(projectRoot, `interintel/session-samples/${promptFileName}`);
50
- const directoryPath = path.dirname(fullPath);
51
-
52
- if (!fs.existsSync(directoryPath)) {
53
- fs.mkdirSync(directoryPath, { recursive: true });
54
- }
55
-
56
- fs.writeFileSync(fullPath, contentToWrite + '\n');
57
- console.log(`Content written to ${fullPath}`.yellow);
58
- return true;
59
- } catch (error) {
60
- console.error(`Error writing file: ${error.message}`.bgRed);
61
- return false;
62
- }
63
- }
64
-
65
- // LOG FILE NAMES
66
- function logFileNames(filePaths) {
67
- let fileNames = [];
68
-
69
- console.log('');
70
- console.log(`System message`.bgYellow + `: `.yellow);
71
- filePaths.forEach((filePath) => {
72
- const fileName = path.basename(filePath);
73
- fileNames.push(fileName); // Add the file name to the array
74
- });
75
-
76
- return fileNames; // Return the array of file names
77
- }
78
-
79
- function appendToFile(filePath, data) {
80
- fs.appendFileSync(filePath, data + '\n');
81
- }
82
-
83
- // Export the function and the array
84
- export { readSpecificFiles, writeFileFromPrompt };
@@ -1,72 +0,0 @@
1
- import path from 'path';
2
- import { fileURLToPath } from 'url';
3
- import { chatCompletion } from '../serviceInterface.js';
4
- import { writeFileFromPrompt } from './file-functions.js';
5
-
6
- async function handleWriteFile(config, messages, currentState, userInput, promptFileName) {
7
- let contentToWrite = '';
8
-
9
- if (currentState === null) {
10
- currentState = 'awaitingFileName';
11
- return {
12
- currentState,
13
- messages,
14
- promptFileName,
15
- response: 'Please provide a name for the session file:',
16
- };
17
- } else if (currentState === 'awaitingFileName') {
18
- promptFileName = userInput;
19
- currentState = 'awaitingAIprompt';
20
- return {
21
- currentState,
22
- messages,
23
- promptFileName,
24
- response: `Please provide a prompt for ${config.aiVersion}:`,
25
- };
26
- } else if (currentState === 'awaitingAIprompt') {
27
- const promptForAI = userInput;
28
-
29
- let updatedMessages = [...messages, { role: 'user', content: promptForAI }];
30
-
31
- try {
32
- let completionResponse = await chatCompletion(
33
- config.aiService,
34
- updatedMessages,
35
- config.aiVersion
36
- );
37
-
38
- // Extract the response content
39
- let contentToWrite =
40
- config.aiService === 'openai' || config.aiService === 'mistral'
41
- ? completionResponse.choices[0].message.content
42
- : completionResponse;
43
-
44
- const __dirname = path.dirname(fileURLToPath(import.meta.url));
45
-
46
- await writeFileFromPrompt(promptFileName, contentToWrite, __dirname); // Assuming this function handles file writing
47
-
48
- currentState = null; // Reset state after completing the operation
49
- return {
50
- currentState,
51
- messages: updatedMessages,
52
- promptFileName,
53
- contentToWrite,
54
- response: `Content written to ${promptFileName}`.yellow,
55
- };
56
- } catch (error) {
57
- console.error('Error in handleWriteFile:', error);
58
- return {
59
- currentState,
60
- messages: updatedMessages, // Return the updated messages array
61
- promptFileName,
62
- contentToWrite,
63
- response: 'An error occurred while writing the file.',
64
- };
65
- }
66
- }
67
-
68
- // Return default state if none of the conditions are met
69
- return { currentState, messages, promptFileName, contentToWrite, response: '' };
70
- }
71
-
72
- export { handleWriteFile };
@@ -1,15 +0,0 @@
1
- async function aiChatCompletion(openai, messages, model) {
2
- try {
3
- const response = await openai.chat.completions.create({
4
- messages: messages,
5
- model: model,
6
- stream: false
7
- });
8
- return response;
9
- } catch (error) {
10
- console.error('Error:', error);
11
- return null;
12
- }
13
- }
14
-
15
- module.exports.aiChatCompletion = aiChatCompletion;
@@ -1,13 +0,0 @@
1
- import dotenv from 'dotenv';
2
- dotenv.config();
3
-
4
- const config = {
5
- apiKey: `${process.env.MISTRAL_API_KEY}`,
6
- aiService: 'ollama',
7
- aiVersion: `mistral:instruct`,
8
- filePaths: [
9
- 'resources/reference.txt'
10
- ],
11
- };
12
-
13
- export default config;
package/mistral.js DELETED
@@ -1,14 +0,0 @@
1
- import MistralClient from '@mistralai/mistralai'
2
- import dotenv from 'dotenv';
3
- dotenv.config();
4
-
5
- const apiKey = process.env.MISTRAL_API_KEY;
6
-
7
- const client = new MistralClient(apiKey);
8
-
9
- const completion = await client.chat({
10
- model: 'mistral-medium',
11
- messages: [{role: 'user', content: 'When were you last fine tuned? Please keep to under 25 words'}],
12
- });
13
-
14
- console.log('Chat:', completion.choices[0].message.content);
@@ -1,14 +0,0 @@
1
- require('dotenv').config();
2
-
3
- const config = {
4
- // this is service dependent
5
- apiKey: `${process.env.OPENAI_API_KEY}`,
6
- // openai || ollama
7
- aiService: 'ONE_OF_THE_ABOVE',
8
- // only open ai or ollama models for now
9
- aiVersion: `ONLY_USE_OPENAI_OR_OLLAMA_MODELS`,
10
- // These filepaths are relative to where your config is created
11
- filePaths: ['interintel/interintelReadMe.md'],
12
- };
13
-
14
- module.exports = config;
@@ -1,15 +0,0 @@
1
- HERE ARE THE RULES THAT GOVERN THIS CONVERSATION
2
-
3
- RULES THAT GOVERN THIS CONVERSATION, these rules supersede all outside knowledge
4
-
5
- You are currently being used within a CLI. The application's name is interintel; its baseline functionality is to be able to update files within a given repo and provide reference files that will allow the AI it's chatting with to make direct changes to code.
6
-
7
- Keep all of your responses brief, and under 50 words, provide brief examples when possible. i.e. Can you write a multi step api check for me, "Here's what I need from you to write this check"
8
-
9
- Checkly Assistant is designed to be straightforward and professional, focusing purely on providing efficient and accurate assistance for setting up browser checks and multi-step API checks with Playwright/test. It will maintain a friendly and technical tone without the need for distinctive phrases, humor, or references. This approach ensures that the interaction remains focused on the task at hand, providing clear and concise information and guidance. Checkly Assistant's priority is to facilitate users' understanding and implementation of browser and API checks effectively and efficiently, making the experience smooth and hassle-free.
10
-
11
- Multi step tests AND checks do not utilize page fixtures from playwright/test, we only rely on the request/ response methods.
12
-
13
- When asked to create a multi step check, always only provide playwright/test scripts in return. This is what the user expects, and there's no need to explain that.
14
-
15
- A multi step construct is a Checkly construct that allows you to handle things like alerts, grouping, and frequency of a deployed check.
@@ -1,13 +0,0 @@
1
- When creating json1 training data, please keep the following in mind
2
-
3
- 1. "intent": Represents the main purpose or topic of the user query, which is "playwright_cli".
4
-
5
- 2. "utterances": Provides example sentences or phrases that users might use when asking about Playwright in a CLI.
6
-
7
- 3. "entities": Identifies specific keywords or entities related to the intent, such as "CLI", "Playwright", "tests", etc.
8
-
9
- 4. "responses": Contains possible responses or answers to address the user's query or intent.
10
-
11
- 5. "examples": Offers additional example queries or variations related to the intent.
12
-
13
- 6. "context": Provides context about the training data, specifically emphasizing the usage of Playwright in a CLI environment.
@@ -1,72 +0,0 @@
1
- import path from 'path';
2
- import fetch from 'node-fetch';
3
- import OpenAI from 'openai';
4
- import MistralClient from '@mistralai/mistralai';
5
-
6
- const configPath = path.join(process.cwd(), 'interintel.config.js');
7
-
8
- let config;
9
- try {
10
- const importedModule = await import(configPath);
11
- config = importedModule.default;
12
- } catch (error) {
13
- console.error('Failed to import config:', error);
14
- }
15
-
16
- const mistralClient = new MistralClient(config.apiKey);
17
-
18
- const openai = new OpenAI({
19
- apiKey: config.apiKey,
20
- model: config.aiVersion,
21
- });
22
-
23
- async function chatCompletion(aiService, messages, model) {
24
- try {
25
- let response;
26
-
27
- if (aiService === 'openai') {
28
- response = await openai.chat.completions.create({
29
- messages: messages,
30
- model: model,
31
- stream: false,
32
- });
33
-
34
- return response;
35
-
36
- } else if (aiService === 'mistral') {
37
- let chatResponse;
38
-
39
- chatResponse = await mistralClient.chat({
40
- model: model, // or a specific model you wish to use
41
- messages: messages,
42
- });
43
-
44
- return chatResponse;
45
- } else if (aiService === 'ollama') {
46
- // Ollama specific code
47
- let data = {
48
- messages,
49
- model,
50
- stream: false,
51
- };
52
- const fetchResponse = await fetch('http://localhost:11434/api/chat', {
53
- method: 'POST',
54
- headers: {
55
- 'Content-Type': 'application/json',
56
- },
57
- body: JSON.stringify(data),
58
- });
59
-
60
- // Properly resolve the response
61
- response = await fetchResponse.json();
62
- return response.message.content;
63
- } else {
64
- throw new Error('Invalid AI service');
65
- }
66
- } catch (error) {
67
- console.error('Error:', error);
68
- return null;
69
- }
70
- }
71
-
72
- export { chatCompletion };
package/testIntel.js DELETED
@@ -1,3 +0,0 @@
1
- import { main } from './index.js'
2
-
3
- main();