mobbdev 1.0.213 → 1.0.215
This diff shows the content of publicly available package versions as released to one of the supported registries; it is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -96,6 +96,7 @@ type Scalars = {
 type AiBlameInferenceFinalizeInput = {
   aiBlameInferenceId: Scalars['String']['input'];
   aiResponseAt: Scalars['Timestamp']['input'];
+  blameType?: InputMaybe<AiBlameInferenceType>;
   inferenceKey: Scalars['String']['input'];
   metadataJSON?: InputMaybe<Scalars['String']['input']>;
   model?: InputMaybe<Scalars['String']['input']>;

@@ -104,12 +105,18 @@ type AiBlameInferenceFinalizeInput = {
 };
 type AiBlameInferenceInitInput = {
   aiResponseAt: Scalars['Timestamp']['input'];
+  blameType?: InputMaybe<AiBlameInferenceType>;
   inferenceFileName: Scalars['String']['input'];
   metadataJSON?: InputMaybe<Scalars['String']['input']>;
   model?: InputMaybe<Scalars['String']['input']>;
   promptFileName: Scalars['String']['input'];
   toolName?: InputMaybe<Scalars['String']['input']>;
 };
+declare enum AiBlameInferenceType {
+  Chat = "CHAT",
+  HumanEdit = "HUMAN_EDIT",
+  TabAutocomplete = "TAB_AUTOCOMPLETE"
+}
 /** Boolean expression to compare columns of type "Boolean". All fields are combined with logical 'AND'. */
 type Boolean_Comparison_Exp = {
   _eq?: InputMaybe<Scalars['Boolean']['input']>;

@@ -4569,8 +4576,10 @@ type UploadAiBlameOptions = {
   aiResponseAt?: string[];
   model?: string[];
   toolName?: string[];
+  blameType?: AiBlameInferenceType[];
   'ai-response-at'?: string[];
   'tool-name'?: string[];
+  'blame-type'?: AiBlameInferenceType[];
 };
 declare function uploadAiBlameBuilder(args: Yargs.Argv<unknown>): Yargs.Argv<UploadAiBlameOptions>;
 declare function uploadAiBlameHandlerFromExtension(args: {

@@ -4579,6 +4588,7 @@ declare function uploadAiBlameHandlerFromExtension(args: {
   model: string;
   tool: string;
   responseTime: string;
+  blameType?: AiBlameInferenceType;
 }): Promise<void>;
 /**
  * Initializes and authenticates a GQL client for AI Blame upload operations.

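Net effect of the declaration changes: `blameType` is optional everywhere it appears, so 1.0.213 callers keep compiling. A minimal sketch of a conforming init payload under these declarations — the import path and all literal values are illustrative assumptions, not taken from the diff:

import { AiBlameInferenceType } from "mobbdev"; // assumed export path

// Shape matches AiBlameInferenceInitInput as declared above; omitting
// blameType remains valid, which is what keeps older callers working.
const initInput = {
  aiResponseAt: new Date().toISOString(),          // the handler below passes ISO strings
  blameType: AiBlameInferenceType.TabAutocomplete, // the new optional field
  inferenceFileName: "inference-0.json",           // hypothetical file names
  promptFileName: "prompt-0.json",
  model: "gpt-4o",
  toolName: "vscode",
};
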
@@ -112,6 +112,12 @@ import * as dotenv from "dotenv";
 import { z as z8 } from "zod";

 // src/features/analysis/scm/generates/client_generates.ts
+var AiBlameInferenceType = /* @__PURE__ */ ((AiBlameInferenceType2) => {
+  AiBlameInferenceType2["Chat"] = "CHAT";
+  AiBlameInferenceType2["HumanEdit"] = "HUMAN_EDIT";
+  AiBlameInferenceType2["TabAutocomplete"] = "TAB_AUTOCOMPLETE";
+  return AiBlameInferenceType2;
+})(AiBlameInferenceType || {});
 var FixQuestionInputType = /* @__PURE__ */ ((FixQuestionInputType2) => {
   FixQuestionInputType2["Number"] = "NUMBER";
   FixQuestionInputType2["Select"] = "SELECT";

@@ -5118,6 +5124,13 @@ function uploadAiBlameBuilder(args) {
     type: "string",
     array: true,
     describe: chalk3.bold("Tool/IDE name(s) (optional, one per session)")
+  }).option("blame-type", {
+    type: "string",
+    array: true,
+    choices: Object.values(AiBlameInferenceType),
+    describe: chalk3.bold(
+      "Blame type(s) (optional, one per session, defaults to CHAT)"
+    )
   }).strict();
 }
 async function uploadAiBlameHandlerFromExtension(args) {

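The new option is validated at parse time: `choices: Object.values(AiBlameInferenceType)` lets yargs reject anything outside the enum before the handler runs. A standalone sketch of the same pattern — the wiring here is illustrative, not the package's actual entry point:

import yargs from "yargs";

// Values mirror the AiBlameInferenceType enum introduced in this release.
const BLAME_TYPES = ["CHAT", "HUMAN_EDIT", "TAB_AUTOCOMPLETE"];

const argv = yargs(process.argv.slice(2))
  .option("blame-type", {
    type: "string",
    array: true,          // one entry per upload session
    choices: BLAME_TYPES, // parse-time validation; any other value is an error
    describe: "Blame type(s) (optional, one per session, defaults to CHAT)",
  })
  .strict()
  .parseSync();

console.log(argv["blame-type"]); // e.g. ["CHAT", "TAB_AUTOCOMPLETE"]
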
@@ -5126,7 +5139,8 @@ async function uploadAiBlameHandlerFromExtension(args) {
     inference: [],
     model: [],
     toolName: [],
-    aiResponseAt: []
+    aiResponseAt: [],
+    blameType: []
   };
   await withFile(async (promptFile) => {
     await fsPromises2.writeFile(

@@ -5141,6 +5155,7 @@ async function uploadAiBlameHandlerFromExtension(args) {
       uploadArgs.model.push(args.model);
       uploadArgs.toolName.push(args.tool);
       uploadArgs.aiResponseAt.push(args.responseTime);
+      uploadArgs.blameType.push(args.blameType || "CHAT" /* Chat */);
       await uploadAiBlameHandler(uploadArgs, false);
     });
   });

@@ -5163,6 +5178,7 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
   const models = args.model || [];
   const tools = args.toolName || args["tool-name"] || [];
   const responseTimes = args.aiResponseAt || args["ai-response-at"] || [];
+  const blameTypes = args.blameType || args["blame-type"] || [];
   if (prompts.length !== inferences.length) {
     const errorMsg = "prompt and inference must have the same number of entries";
     console.error(chalk3.red(errorMsg));

@@ -5194,7 +5210,8 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
       inferenceFileName: path6.basename(inferencePath),
       aiResponseAt: responseTimes[i] || nowIso,
       model: models[i],
-      toolName: tools[i]
+      toolName: tools[i],
+      blameType: blameTypes[i] || "CHAT" /* Chat */
     });
   }
   const authenticatedClient = await getAuthenticatedGQLClientForIdeExtension();

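Note the per-index defaulting: session `i` draws `prompts[i]`, `inferences[i]`, and `blameTypes[i]`, and a missing blame type falls back to `"CHAT"`. A small illustration of that rule with hypothetical session data:

// blameTypes may be shorter than the session list; absent entries default to CHAT,
// matching `blameTypes[i] || "CHAT"` above.
const blameTypes: string[] = ["TAB_AUTOCOMPLETE"]; // only session 0 is explicit
for (let i = 0; i < 2; i++) {
  console.log(`session ${i}: ${blameTypes[i] || "CHAT"}`);
}
// session 0: TAB_AUTOCOMPLETE
// session 1: CHAT
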
@@ -5214,18 +5231,22 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
     const us = uploadSessions[i];
     const promptPath = String(prompts[i]);
     const inferencePath = String(inferences[i]);
-    await
+    await Promise.all([
+      // Prompt
+      uploadFile({
+        file: promptPath,
+        url: us.prompt.url,
+        uploadFields: JSON.parse(us.prompt.uploadFieldsJSON),
+        uploadKey: us.prompt.uploadKey
+      }),
+      // Inference
+      uploadFile({
+        file: inferencePath,
+        url: us.inference.url,
+        uploadFields: JSON.parse(us.inference.uploadFieldsJSON),
+        uploadKey: us.inference.uploadKey
+      })
+    ]);
   }
   const finalizeSessions = uploadSessions.map((us, i) => {
     const s = sessions[i];

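The upload loop now awaits the prompt and inference uploads together (the removed lines are truncated in this rendering, but only a lone `await` survives of them). With `Promise.all`, both `uploadFile` calls start before either is awaited, so per-session latency is bounded by the slower upload, and the first rejection fails the pair. The shape of the change, in generic form — this is not the package's `uploadFile`:

// Starting both promises before awaiting runs the uploads concurrently:
// total time is max(a, b) rather than a + b.
async function uploadPair(
  uploadPrompt: () => Promise<void>,
  uploadInference: () => Promise<void>,
): Promise<void> {
  await Promise.all([uploadPrompt(), uploadInference()]);
}
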
@@ -5235,7 +5256,8 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
       inferenceKey: us.inference.uploadKey,
       aiResponseAt: s.aiResponseAt,
       model: s.model,
-      toolName: s.toolName
+      toolName: s.toolName,
+      blameType: s.blameType
     };
   });
   const finRes = await authenticatedClient.finalizeAIBlameInferencesUploadRaw({

package/dist/index.mjs CHANGED
@@ -1371,6 +1371,12 @@ import { z as z14 } from "zod";
 import { z as z2 } from "zod";

 // src/features/analysis/scm/generates/client_generates.ts
+var AiBlameInferenceType = /* @__PURE__ */ ((AiBlameInferenceType2) => {
+  AiBlameInferenceType2["Chat"] = "CHAT";
+  AiBlameInferenceType2["HumanEdit"] = "HUMAN_EDIT";
+  AiBlameInferenceType2["TabAutocomplete"] = "TAB_AUTOCOMPLETE";
+  return AiBlameInferenceType2;
+})(AiBlameInferenceType || {});
 var FixQuestionInputType = /* @__PURE__ */ ((FixQuestionInputType2) => {
   FixQuestionInputType2["Number"] = "NUMBER";
   FixQuestionInputType2["Select"] = "SELECT";

@@ -7942,6 +7948,23 @@ function getGithubSdk(params = {}) {
         per_page: 100
       });
     },
+    async getRecentCommits(params2) {
+      const commits = await octokit.paginate(octokit.rest.repos.listCommits, {
+        owner: params2.owner,
+        repo: params2.repo,
+        since: params2.since,
+        per_page: 100
+      });
+      return { data: commits };
+    },
+    async getRateLimitStatus() {
+      const response = await octokit.rest.rateLimit.get();
+      return {
+        remaining: response.data.rate.remaining,
+        reset: new Date(response.data.rate.reset * 1e3),
+        limit: response.data.rate.limit
+      };
+    },
     async getRepoPullRequests(params2) {
       return octokit.rest.pulls.list({
         owner: params2.owner,

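Two GitHub SDK helpers are new here: `getRecentCommits` pages through `repos.listCommits` filtered by `since`, and `getRateLimitStatus` converts the core rate-limit payload (whose `reset` field is epoch seconds) into a `Date`. A standalone sketch of the same Octokit calls — the token, owner, and repo are placeholders:

import { Octokit } from "@octokit/rest";

const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN });

// Equivalent of getRecentCommits: collect every commit newer than `since`.
const commits = await octokit.paginate(octokit.rest.repos.listCommits, {
  owner: "octocat",     // placeholder
  repo: "hello-world",  // placeholder
  since: new Date(Date.now() - 24 * 3600 * 1000).toISOString(),
  per_page: 100,
});

// Equivalent of getRateLimitStatus: `reset` arrives as epoch seconds.
const { data } = await octokit.rest.rateLimit.get();
console.log(commits.length, data.rate.remaining, new Date(data.rate.reset * 1e3));
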
@@ -8075,6 +8098,14 @@ var GithubSCMLib = class extends SCMLib {
     const branches = await this.githubSdk.getGithubBranchList(this.url);
     return branches.data.map((branch) => branch.name);
   }
+  async getRecentCommits(since) {
+    this._validateAccessTokenAndUrl();
+    const { owner, repo } = parseGithubOwnerAndRepo(this.url);
+    return await this.githubSdk.getRecentCommits({ owner, repo, since });
+  }
+  async getRateLimitStatus() {
+    return await this.githubSdk.getRateLimitStatus();
+  }
   get scmLibType() {
     return "GITHUB" /* GITHUB */;
   }

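On `GithubSCMLib` these are thin wrappers: `getRecentCommits` validates the access token and URL, parses owner/repo from the instance URL, and delegates to the SDK, forwarding `since` to the GitHub API's ISO-8601 filter. A hedged usage sketch against an already-initialized instance — construction is outside this hunk, so the instance shape is declared rather than built:

// Declared shape only; obtaining a real GithubSCMLib instance is not shown here.
declare const scm: {
  getRecentCommits(since: string): Promise<{ data: unknown[] }>;
  getRateLimitStatus(): Promise<{ remaining: number; reset: Date; limit: number }>;
};

const { data: recent } = await scm.getRecentCommits("2025-01-01T00:00:00Z");
const limits = await scm.getRateLimitStatus();
console.log(
  `${recent.length} commits; ${limits.remaining}/${limits.limit} calls left until ${limits.reset.toISOString()}`
);
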
@@ -21073,6 +21104,13 @@ function uploadAiBlameBuilder(args) {
     type: "string",
     array: true,
     describe: chalk10.bold("Tool/IDE name(s) (optional, one per session)")
+  }).option("blame-type", {
+    type: "string",
+    array: true,
+    choices: Object.values(AiBlameInferenceType),
+    describe: chalk10.bold(
+      "Blame type(s) (optional, one per session, defaults to CHAT)"
+    )
   }).strict();
 }
 var config5 = new Configstore6(packageJson.name, { apiToken: "" });

@@ -21093,6 +21131,7 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
   const models = args.model || [];
   const tools = args.toolName || args["tool-name"] || [];
   const responseTimes = args.aiResponseAt || args["ai-response-at"] || [];
+  const blameTypes = args.blameType || args["blame-type"] || [];
   if (prompts.length !== inferences.length) {
     const errorMsg = "prompt and inference must have the same number of entries";
     console.error(chalk10.red(errorMsg));

@@ -21124,7 +21163,8 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
       inferenceFileName: path17.basename(inferencePath),
       aiResponseAt: responseTimes[i] || nowIso,
       model: models[i],
-      toolName: tools[i]
+      toolName: tools[i],
+      blameType: blameTypes[i] || "CHAT" /* Chat */
     });
   }
   const authenticatedClient = await getAuthenticatedGQLClientForIdeExtension();

@@ -21144,18 +21184,22 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
     const us = uploadSessions[i];
     const promptPath = String(prompts[i]);
     const inferencePath = String(inferences[i]);
-    await
+    await Promise.all([
+      // Prompt
+      uploadFile({
+        file: promptPath,
+        url: us.prompt.url,
+        uploadFields: JSON.parse(us.prompt.uploadFieldsJSON),
+        uploadKey: us.prompt.uploadKey
+      }),
+      // Inference
+      uploadFile({
+        file: inferencePath,
+        url: us.inference.url,
+        uploadFields: JSON.parse(us.inference.uploadFieldsJSON),
+        uploadKey: us.inference.uploadKey
+      })
+    ]);
   }
   const finalizeSessions = uploadSessions.map((us, i) => {
     const s = sessions[i];

@@ -21165,7 +21209,8 @@ async function uploadAiBlameHandler(args, exitOnError = true) {
       inferenceKey: us.inference.uploadKey,
       aiResponseAt: s.aiResponseAt,
       model: s.model,
-      toolName: s.toolName
+      toolName: s.toolName,
+      blameType: s.blameType
     };
   });
   const finRes = await authenticatedClient.finalizeAIBlameInferencesUploadRaw({