@hongymagic/q 0.3.2 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +0 -3
  2. package/dist/q.js +19 -841
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -35,7 +35,6 @@ export ANTHROPIC_API_KEY="your-key-here"
35
35
 
36
36
  ```bash
37
37
  q how do I restart docker
38
- q --stream explain git rebase
39
38
  q --copy what is a kubernetes pod
40
39
  ```
41
40
 
@@ -45,9 +44,7 @@ q --copy what is a kubernetes pod
45
44
  |--------|-------------|
46
45
  | `-p, --provider <name>` | Override the default provider |
47
46
  | `-m, --model <id>` | Override the default model |
48
- | `--stream` | Stream response as it arrives |
49
47
  | `--copy` | Copy answer to clipboard |
50
- | `--json` | Output structured JSON |
51
48
  | `-h, --help` | Show help message |
52
49
 
53
50
  ### Commands
package/dist/q.js CHANGED
@@ -13396,9 +13396,7 @@ USAGE:
13396
13396
  OPTIONS:
13397
13397
  -p, --provider <name> Override the default provider
13398
13398
  -m, --model <id> Override the default model
13399
- --stream Stream response tokens as they arrive
13400
13399
  --copy Copy answer to clipboard
13401
- --json Output structured JSON
13402
13400
  --debug Enable debug logging to stderr
13403
13401
  -h, --help Show this help message
13404
13402
  -v, --version Show version
@@ -13415,20 +13413,17 @@ CONFIG:
13415
13413
 
13416
13414
  EXAMPLES:
13417
13415
  q how do I restart docker
13418
- q --stream explain git rebase
13419
13416
  q -p openai --model gpt-4o what is recursion
13420
13417
  q config init
13421
13418
  `;
13422
- var VERSION = "0.3.1";
13419
+ var VERSION = "0.4.0";
13423
13420
  function parseCliArgs(argv = Bun.argv.slice(2)) {
13424
13421
  const { values, positionals } = parseArgs({
13425
13422
  args: argv,
13426
13423
  options: {
13427
13424
  provider: { type: "string", short: "p" },
13428
13425
  model: { type: "string", short: "m" },
13429
- stream: { type: "boolean", default: false },
13430
13426
  copy: { type: "boolean", default: false },
13431
- json: { type: "boolean", default: false },
13432
13427
  debug: { type: "boolean", default: false },
13433
13428
  help: { type: "boolean", short: "h", default: false },
13434
13429
  version: { type: "boolean", short: "v", default: false }
@@ -13439,9 +13434,7 @@ function parseCliArgs(argv = Bun.argv.slice(2)) {
13439
13434
  const options = {
13440
13435
  provider: values.provider,
13441
13436
  model: values.model,
13442
- stream: values.stream ?? false,
13443
13437
  copy: values.copy ?? false,
13444
- json: values.json ?? false,
13445
13438
  debug: values.debug ?? false,
13446
13439
  help: values.help ?? false,
13447
13440
  version: values.version ?? false
@@ -27462,19 +27455,6 @@ function logDebug2(message, debug) {
27462
27455
  }
27463
27456
  }
27464
27457
 
27465
- // src/output.ts
27466
- function formatOutput(options) {
27467
- if (options.json) {
27468
- const output = {
27469
- text: options.text,
27470
- provider: options.providerName,
27471
- model: options.modelId
27472
- };
27473
- return JSON.stringify(output, null, 2);
27474
- }
27475
- return options.text;
27476
- }
27477
-
27478
27458
  // src/env-info.ts
27479
27459
  import * as os3 from "node:os";
27480
27460
  function getEnvironmentInfo2() {
@@ -49525,18 +49505,6 @@ async function executeToolCall({
49525
49505
  }
49526
49506
  });
49527
49507
  }
49528
- function extractReasoningContent(content) {
49529
- const parts = content.filter((content2) => content2.type === "reasoning");
49530
- return parts.length === 0 ? undefined : parts.map((content2) => content2.text).join(`
49531
- `);
49532
- }
49533
- function extractTextContent(content) {
49534
- const parts = content.filter((content2) => content2.type === "text");
49535
- if (parts.length === 0) {
49536
- return;
49537
- }
49538
- return parts.map((content2) => content2.text).join("");
49539
- }
49540
49508
  var DefaultGeneratedFile = class {
49541
49509
  constructor({
49542
49510
  data,
@@ -50574,763 +50542,6 @@ var originalGenerateId = createIdGenerator({
50574
50542
  prefix: "aitxt",
50575
50543
  size: 24
50576
50544
  });
50577
- async function generateText({
50578
- model: modelArg,
50579
- tools,
50580
- toolChoice,
50581
- system,
50582
- prompt,
50583
- messages,
50584
- maxRetries: maxRetriesArg,
50585
- abortSignal,
50586
- timeout,
50587
- headers,
50588
- stopWhen = stepCountIs(1),
50589
- experimental_output,
50590
- output = experimental_output,
50591
- experimental_telemetry: telemetry,
50592
- providerOptions,
50593
- experimental_activeTools,
50594
- activeTools = experimental_activeTools,
50595
- experimental_prepareStep,
50596
- prepareStep = experimental_prepareStep,
50597
- experimental_repairToolCall: repairToolCall,
50598
- experimental_download: download2,
50599
- experimental_context,
50600
- experimental_include: include,
50601
- _internal: { generateId: generateId2 = originalGenerateId } = {},
50602
- experimental_onStart: onStart,
50603
- experimental_onStepStart: onStepStart,
50604
- experimental_onToolCallStart: onToolCallStart,
50605
- experimental_onToolCallFinish: onToolCallFinish,
50606
- onStepFinish,
50607
- onFinish,
50608
- ...settings
50609
- }) {
50610
- const model = resolveLanguageModel(modelArg);
50611
- const stopConditions = asArray(stopWhen);
50612
- const totalTimeoutMs = getTotalTimeoutMs(timeout);
50613
- const stepTimeoutMs = getStepTimeoutMs(timeout);
50614
- const stepAbortController = stepTimeoutMs != null ? new AbortController : undefined;
50615
- const mergedAbortSignal = mergeAbortSignals(abortSignal, totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : undefined, stepAbortController == null ? undefined : stepAbortController.signal);
50616
- const { maxRetries, retry } = prepareRetries({
50617
- maxRetries: maxRetriesArg,
50618
- abortSignal: mergedAbortSignal
50619
- });
50620
- const callSettings = prepareCallSettings(settings);
50621
- const headersWithUserAgent = withUserAgentSuffix(headers != null ? headers : {}, `ai/${VERSION7}`);
50622
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
50623
- model,
50624
- telemetry,
50625
- headers: headersWithUserAgent,
50626
- settings: { ...callSettings, maxRetries }
50627
- });
50628
- const modelInfo = { provider: model.provider, modelId: model.modelId };
50629
- const initialPrompt = await standardizePrompt({
50630
- system,
50631
- prompt,
50632
- messages
50633
- });
50634
- try {
50635
- await (onStart == null ? undefined : onStart({
50636
- model: modelInfo,
50637
- system,
50638
- prompt,
50639
- messages,
50640
- tools,
50641
- toolChoice,
50642
- activeTools,
50643
- maxOutputTokens: callSettings.maxOutputTokens,
50644
- temperature: callSettings.temperature,
50645
- topP: callSettings.topP,
50646
- topK: callSettings.topK,
50647
- presencePenalty: callSettings.presencePenalty,
50648
- frequencyPenalty: callSettings.frequencyPenalty,
50649
- stopSequences: callSettings.stopSequences,
50650
- seed: callSettings.seed,
50651
- maxRetries,
50652
- timeout,
50653
- headers,
50654
- providerOptions,
50655
- stopWhen,
50656
- output,
50657
- abortSignal,
50658
- include,
50659
- functionId: telemetry == null ? undefined : telemetry.functionId,
50660
- metadata: telemetry == null ? undefined : telemetry.metadata,
50661
- experimental_context
50662
- }));
50663
- } catch (_ignored) {}
50664
- const tracer = getTracer(telemetry);
50665
- try {
50666
- return await recordSpan({
50667
- name: "ai.generateText",
50668
- attributes: selectTelemetryAttributes({
50669
- telemetry,
50670
- attributes: {
50671
- ...assembleOperationName({
50672
- operationId: "ai.generateText",
50673
- telemetry
50674
- }),
50675
- ...baseTelemetryAttributes,
50676
- "ai.model.provider": model.provider,
50677
- "ai.model.id": model.modelId,
50678
- "ai.prompt": {
50679
- input: () => JSON.stringify({ system, prompt, messages })
50680
- }
50681
- }
50682
- }),
50683
- tracer,
50684
- fn: async (span) => {
50685
- var _a21, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
50686
- const initialMessages = initialPrompt.messages;
50687
- const responseMessages = [];
50688
- const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
50689
- const localApprovedToolApprovals = approvedToolApprovals.filter((toolApproval) => !toolApproval.toolCall.providerExecuted);
50690
- if (deniedToolApprovals.length > 0 || localApprovedToolApprovals.length > 0) {
50691
- const toolOutputs = await executeTools({
50692
- toolCalls: localApprovedToolApprovals.map((toolApproval) => toolApproval.toolCall),
50693
- tools,
50694
- tracer,
50695
- telemetry,
50696
- messages: initialMessages,
50697
- abortSignal: mergedAbortSignal,
50698
- experimental_context,
50699
- stepNumber: 0,
50700
- model: modelInfo,
50701
- onToolCallStart,
50702
- onToolCallFinish
50703
- });
50704
- const toolContent = [];
50705
- for (const output2 of toolOutputs) {
50706
- const modelOutput = await createToolModelOutput({
50707
- toolCallId: output2.toolCallId,
50708
- input: output2.input,
50709
- tool: tools == null ? undefined : tools[output2.toolName],
50710
- output: output2.type === "tool-result" ? output2.output : output2.error,
50711
- errorMode: output2.type === "tool-error" ? "json" : "none"
50712
- });
50713
- toolContent.push({
50714
- type: "tool-result",
50715
- toolCallId: output2.toolCallId,
50716
- toolName: output2.toolName,
50717
- output: modelOutput
50718
- });
50719
- }
50720
- for (const toolApproval of deniedToolApprovals) {
50721
- toolContent.push({
50722
- type: "tool-result",
50723
- toolCallId: toolApproval.toolCall.toolCallId,
50724
- toolName: toolApproval.toolCall.toolName,
50725
- output: {
50726
- type: "execution-denied",
50727
- reason: toolApproval.approvalResponse.reason,
50728
- ...toolApproval.toolCall.providerExecuted && {
50729
- providerOptions: {
50730
- openai: {
50731
- approvalId: toolApproval.approvalResponse.approvalId
50732
- }
50733
- }
50734
- }
50735
- }
50736
- });
50737
- }
50738
- responseMessages.push({
50739
- role: "tool",
50740
- content: toolContent
50741
- });
50742
- }
50743
- const providerExecutedToolApprovals = [
50744
- ...approvedToolApprovals,
50745
- ...deniedToolApprovals
50746
- ].filter((toolApproval) => toolApproval.toolCall.providerExecuted);
50747
- if (providerExecutedToolApprovals.length > 0) {
50748
- responseMessages.push({
50749
- role: "tool",
50750
- content: providerExecutedToolApprovals.map((toolApproval) => ({
50751
- type: "tool-approval-response",
50752
- approvalId: toolApproval.approvalResponse.approvalId,
50753
- approved: toolApproval.approvalResponse.approved,
50754
- reason: toolApproval.approvalResponse.reason,
50755
- providerExecuted: true
50756
- }))
50757
- });
50758
- }
50759
- const callSettings2 = prepareCallSettings(settings);
50760
- let currentModelResponse;
50761
- let clientToolCalls = [];
50762
- let clientToolOutputs = [];
50763
- const steps = [];
50764
- const pendingDeferredToolCalls = /* @__PURE__ */ new Map;
50765
- do {
50766
- const stepTimeoutId = stepTimeoutMs != null ? setTimeout(() => stepAbortController.abort(), stepTimeoutMs) : undefined;
50767
- try {
50768
- const stepInputMessages = [...initialMessages, ...responseMessages];
50769
- const prepareStepResult = await (prepareStep == null ? undefined : prepareStep({
50770
- model,
50771
- steps,
50772
- stepNumber: steps.length,
50773
- messages: stepInputMessages,
50774
- experimental_context
50775
- }));
50776
- const stepModel = resolveLanguageModel((_a21 = prepareStepResult == null ? undefined : prepareStepResult.model) != null ? _a21 : model);
50777
- const stepModelInfo = {
50778
- provider: stepModel.provider,
50779
- modelId: stepModel.modelId
50780
- };
50781
- const promptMessages = await convertToLanguageModelPrompt({
50782
- prompt: {
50783
- system: (_b16 = prepareStepResult == null ? undefined : prepareStepResult.system) != null ? _b16 : initialPrompt.system,
50784
- messages: (_c = prepareStepResult == null ? undefined : prepareStepResult.messages) != null ? _c : stepInputMessages
50785
- },
50786
- supportedUrls: await stepModel.supportedUrls,
50787
- download: download2
50788
- });
50789
- experimental_context = (_d = prepareStepResult == null ? undefined : prepareStepResult.experimental_context) != null ? _d : experimental_context;
50790
- const stepActiveTools = (_e = prepareStepResult == null ? undefined : prepareStepResult.activeTools) != null ? _e : activeTools;
50791
- const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
50792
- tools,
50793
- toolChoice: (_f = prepareStepResult == null ? undefined : prepareStepResult.toolChoice) != null ? _f : toolChoice,
50794
- activeTools: stepActiveTools
50795
- });
50796
- const stepMessages = (_g = prepareStepResult == null ? undefined : prepareStepResult.messages) != null ? _g : stepInputMessages;
50797
- const stepSystem = (_h = prepareStepResult == null ? undefined : prepareStepResult.system) != null ? _h : initialPrompt.system;
50798
- const stepProviderOptions = mergeObjects(providerOptions, prepareStepResult == null ? undefined : prepareStepResult.providerOptions);
50799
- try {
50800
- await (onStepStart == null ? undefined : onStepStart({
50801
- stepNumber: steps.length,
50802
- model: stepModelInfo,
50803
- system: stepSystem,
50804
- messages: stepMessages,
50805
- tools,
50806
- toolChoice: stepToolChoice,
50807
- activeTools: stepActiveTools,
50808
- steps: [...steps],
50809
- providerOptions: stepProviderOptions,
50810
- timeout,
50811
- headers,
50812
- stopWhen,
50813
- output,
50814
- abortSignal,
50815
- include,
50816
- functionId: telemetry == null ? undefined : telemetry.functionId,
50817
- metadata: telemetry == null ? undefined : telemetry.metadata,
50818
- experimental_context
50819
- }));
50820
- } catch (_ignored) {}
50821
- currentModelResponse = await retry(() => {
50822
- var _a222;
50823
- return recordSpan({
50824
- name: "ai.generateText.doGenerate",
50825
- attributes: selectTelemetryAttributes({
50826
- telemetry,
50827
- attributes: {
50828
- ...assembleOperationName({
50829
- operationId: "ai.generateText.doGenerate",
50830
- telemetry
50831
- }),
50832
- ...baseTelemetryAttributes,
50833
- "ai.model.provider": stepModel.provider,
50834
- "ai.model.id": stepModel.modelId,
50835
- "ai.prompt.messages": {
50836
- input: () => stringifyForTelemetry(promptMessages)
50837
- },
50838
- "ai.prompt.tools": {
50839
- input: () => stepTools == null ? undefined : stepTools.map((tool2) => JSON.stringify(tool2))
50840
- },
50841
- "ai.prompt.toolChoice": {
50842
- input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : undefined
50843
- },
50844
- "gen_ai.system": stepModel.provider,
50845
- "gen_ai.request.model": stepModel.modelId,
50846
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
50847
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
50848
- "gen_ai.request.presence_penalty": settings.presencePenalty,
50849
- "gen_ai.request.stop_sequences": settings.stopSequences,
50850
- "gen_ai.request.temperature": (_a222 = settings.temperature) != null ? _a222 : undefined,
50851
- "gen_ai.request.top_k": settings.topK,
50852
- "gen_ai.request.top_p": settings.topP
50853
- }
50854
- }),
50855
- tracer,
50856
- fn: async (span2) => {
50857
- var _a232, _b23, _c2, _d2, _e2, _f2, _g2, _h2;
50858
- const result = await stepModel.doGenerate({
50859
- ...callSettings2,
50860
- tools: stepTools,
50861
- toolChoice: stepToolChoice,
50862
- responseFormat: await (output == null ? undefined : output.responseFormat),
50863
- prompt: promptMessages,
50864
- providerOptions: stepProviderOptions,
50865
- abortSignal: mergedAbortSignal,
50866
- headers: headersWithUserAgent
50867
- });
50868
- const responseData = {
50869
- id: (_b23 = (_a232 = result.response) == null ? undefined : _a232.id) != null ? _b23 : generateId2(),
50870
- timestamp: (_d2 = (_c2 = result.response) == null ? undefined : _c2.timestamp) != null ? _d2 : /* @__PURE__ */ new Date,
50871
- modelId: (_f2 = (_e2 = result.response) == null ? undefined : _e2.modelId) != null ? _f2 : stepModel.modelId,
50872
- headers: (_g2 = result.response) == null ? undefined : _g2.headers,
50873
- body: (_h2 = result.response) == null ? undefined : _h2.body
50874
- };
50875
- span2.setAttributes(await selectTelemetryAttributes({
50876
- telemetry,
50877
- attributes: {
50878
- "ai.response.finishReason": result.finishReason.unified,
50879
- "ai.response.text": {
50880
- output: () => extractTextContent(result.content)
50881
- },
50882
- "ai.response.reasoning": {
50883
- output: () => extractReasoningContent(result.content)
50884
- },
50885
- "ai.response.toolCalls": {
50886
- output: () => {
50887
- const toolCalls = asToolCalls(result.content);
50888
- return toolCalls == null ? undefined : JSON.stringify(toolCalls);
50889
- }
50890
- },
50891
- "ai.response.id": responseData.id,
50892
- "ai.response.model": responseData.modelId,
50893
- "ai.response.timestamp": responseData.timestamp.toISOString(),
50894
- "ai.response.providerMetadata": JSON.stringify(result.providerMetadata),
50895
- "ai.usage.promptTokens": result.usage.inputTokens.total,
50896
- "ai.usage.completionTokens": result.usage.outputTokens.total,
50897
- "gen_ai.response.finish_reasons": [
50898
- result.finishReason.unified
50899
- ],
50900
- "gen_ai.response.id": responseData.id,
50901
- "gen_ai.response.model": responseData.modelId,
50902
- "gen_ai.usage.input_tokens": result.usage.inputTokens.total,
50903
- "gen_ai.usage.output_tokens": result.usage.outputTokens.total
50904
- }
50905
- }));
50906
- return { ...result, response: responseData };
50907
- }
50908
- });
50909
- });
50910
- const stepToolCalls = await Promise.all(currentModelResponse.content.filter((part) => part.type === "tool-call").map((toolCall) => parseToolCall({
50911
- toolCall,
50912
- tools,
50913
- repairToolCall,
50914
- system,
50915
- messages: stepInputMessages
50916
- })));
50917
- const toolApprovalRequests = {};
50918
- for (const toolCall of stepToolCalls) {
50919
- if (toolCall.invalid) {
50920
- continue;
50921
- }
50922
- const tool2 = tools == null ? undefined : tools[toolCall.toolName];
50923
- if (tool2 == null) {
50924
- continue;
50925
- }
50926
- if ((tool2 == null ? undefined : tool2.onInputAvailable) != null) {
50927
- await tool2.onInputAvailable({
50928
- input: toolCall.input,
50929
- toolCallId: toolCall.toolCallId,
50930
- messages: stepInputMessages,
50931
- abortSignal: mergedAbortSignal,
50932
- experimental_context
50933
- });
50934
- }
50935
- if (await isApprovalNeeded({
50936
- tool: tool2,
50937
- toolCall,
50938
- messages: stepInputMessages,
50939
- experimental_context
50940
- })) {
50941
- toolApprovalRequests[toolCall.toolCallId] = {
50942
- type: "tool-approval-request",
50943
- approvalId: generateId2(),
50944
- toolCall
50945
- };
50946
- }
50947
- }
50948
- const invalidToolCalls = stepToolCalls.filter((toolCall) => toolCall.invalid && toolCall.dynamic);
50949
- clientToolOutputs = [];
50950
- for (const toolCall of invalidToolCalls) {
50951
- clientToolOutputs.push({
50952
- type: "tool-error",
50953
- toolCallId: toolCall.toolCallId,
50954
- toolName: toolCall.toolName,
50955
- input: toolCall.input,
50956
- error: getErrorMessage2(toolCall.error),
50957
- dynamic: true
50958
- });
50959
- }
50960
- clientToolCalls = stepToolCalls.filter((toolCall) => !toolCall.providerExecuted);
50961
- if (tools != null) {
50962
- clientToolOutputs.push(...await executeTools({
50963
- toolCalls: clientToolCalls.filter((toolCall) => !toolCall.invalid && toolApprovalRequests[toolCall.toolCallId] == null),
50964
- tools,
50965
- tracer,
50966
- telemetry,
50967
- messages: stepInputMessages,
50968
- abortSignal: mergedAbortSignal,
50969
- experimental_context,
50970
- stepNumber: steps.length,
50971
- model: stepModelInfo,
50972
- onToolCallStart,
50973
- onToolCallFinish
50974
- }));
50975
- }
50976
- for (const toolCall of stepToolCalls) {
50977
- if (!toolCall.providerExecuted)
50978
- continue;
50979
- const tool2 = tools == null ? undefined : tools[toolCall.toolName];
50980
- if ((tool2 == null ? undefined : tool2.type) === "provider" && tool2.supportsDeferredResults) {
50981
- const hasResultInResponse = currentModelResponse.content.some((part) => part.type === "tool-result" && part.toolCallId === toolCall.toolCallId);
50982
- if (!hasResultInResponse) {
50983
- pendingDeferredToolCalls.set(toolCall.toolCallId, {
50984
- toolName: toolCall.toolName
50985
- });
50986
- }
50987
- }
50988
- }
50989
- for (const part of currentModelResponse.content) {
50990
- if (part.type === "tool-result") {
50991
- pendingDeferredToolCalls.delete(part.toolCallId);
50992
- }
50993
- }
50994
- const stepContent = asContent({
50995
- content: currentModelResponse.content,
50996
- toolCalls: stepToolCalls,
50997
- toolOutputs: clientToolOutputs,
50998
- toolApprovalRequests: Object.values(toolApprovalRequests),
50999
- tools
51000
- });
51001
- responseMessages.push(...await toResponseMessages({
51002
- content: stepContent,
51003
- tools
51004
- }));
51005
- const stepRequest = ((_i = include == null ? undefined : include.requestBody) != null ? _i : true) ? (_j = currentModelResponse.request) != null ? _j : {} : { ...currentModelResponse.request, body: undefined };
51006
- const stepResponse = {
51007
- ...currentModelResponse.response,
51008
- messages: structuredClone(responseMessages),
51009
- body: ((_k = include == null ? undefined : include.responseBody) != null ? _k : true) ? (_l = currentModelResponse.response) == null ? undefined : _l.body : undefined
51010
- };
51011
- const stepNumber = steps.length;
51012
- const currentStepResult = new DefaultStepResult({
51013
- stepNumber,
51014
- model: stepModelInfo,
51015
- functionId: telemetry == null ? undefined : telemetry.functionId,
51016
- metadata: telemetry == null ? undefined : telemetry.metadata,
51017
- experimental_context,
51018
- content: stepContent,
51019
- finishReason: currentModelResponse.finishReason.unified,
51020
- rawFinishReason: currentModelResponse.finishReason.raw,
51021
- usage: asLanguageModelUsage(currentModelResponse.usage),
51022
- warnings: currentModelResponse.warnings,
51023
- providerMetadata: currentModelResponse.providerMetadata,
51024
- request: stepRequest,
51025
- response: stepResponse
51026
- });
51027
- logWarnings({
51028
- warnings: (_m = currentModelResponse.warnings) != null ? _m : [],
51029
- provider: stepModelInfo.provider,
51030
- model: stepModelInfo.modelId
51031
- });
51032
- steps.push(currentStepResult);
51033
- await (onStepFinish == null ? undefined : onStepFinish(currentStepResult));
51034
- } finally {
51035
- if (stepTimeoutId != null) {
51036
- clearTimeout(stepTimeoutId);
51037
- }
51038
- }
51039
- } while ((clientToolCalls.length > 0 && clientToolOutputs.length === clientToolCalls.length || pendingDeferredToolCalls.size > 0) && !await isStopConditionMet({ stopConditions, steps }));
51040
- span.setAttributes(await selectTelemetryAttributes({
51041
- telemetry,
51042
- attributes: {
51043
- "ai.response.finishReason": currentModelResponse.finishReason.unified,
51044
- "ai.response.text": {
51045
- output: () => extractTextContent(currentModelResponse.content)
51046
- },
51047
- "ai.response.reasoning": {
51048
- output: () => extractReasoningContent(currentModelResponse.content)
51049
- },
51050
- "ai.response.toolCalls": {
51051
- output: () => {
51052
- const toolCalls = asToolCalls(currentModelResponse.content);
51053
- return toolCalls == null ? undefined : JSON.stringify(toolCalls);
51054
- }
51055
- },
51056
- "ai.response.providerMetadata": JSON.stringify(currentModelResponse.providerMetadata),
51057
- "ai.usage.promptTokens": currentModelResponse.usage.inputTokens.total,
51058
- "ai.usage.completionTokens": currentModelResponse.usage.outputTokens.total
51059
- }
51060
- }));
51061
- const lastStep = steps[steps.length - 1];
51062
- const totalUsage = steps.reduce((totalUsage2, step) => {
51063
- return addLanguageModelUsage(totalUsage2, step.usage);
51064
- }, {
51065
- inputTokens: undefined,
51066
- outputTokens: undefined,
51067
- totalTokens: undefined,
51068
- reasoningTokens: undefined,
51069
- cachedInputTokens: undefined
51070
- });
51071
- await (onFinish == null ? undefined : onFinish({
51072
- stepNumber: lastStep.stepNumber,
51073
- model: lastStep.model,
51074
- functionId: lastStep.functionId,
51075
- metadata: lastStep.metadata,
51076
- experimental_context: lastStep.experimental_context,
51077
- finishReason: lastStep.finishReason,
51078
- rawFinishReason: lastStep.rawFinishReason,
51079
- usage: lastStep.usage,
51080
- content: lastStep.content,
51081
- text: lastStep.text,
51082
- reasoningText: lastStep.reasoningText,
51083
- reasoning: lastStep.reasoning,
51084
- files: lastStep.files,
51085
- sources: lastStep.sources,
51086
- toolCalls: lastStep.toolCalls,
51087
- staticToolCalls: lastStep.staticToolCalls,
51088
- dynamicToolCalls: lastStep.dynamicToolCalls,
51089
- toolResults: lastStep.toolResults,
51090
- staticToolResults: lastStep.staticToolResults,
51091
- dynamicToolResults: lastStep.dynamicToolResults,
51092
- request: lastStep.request,
51093
- response: lastStep.response,
51094
- warnings: lastStep.warnings,
51095
- providerMetadata: lastStep.providerMetadata,
51096
- steps,
51097
- totalUsage
51098
- }));
51099
- let resolvedOutput;
51100
- if (lastStep.finishReason === "stop") {
51101
- const outputSpecification = output != null ? output : text();
51102
- resolvedOutput = await outputSpecification.parseCompleteOutput({ text: lastStep.text }, {
51103
- response: lastStep.response,
51104
- usage: lastStep.usage,
51105
- finishReason: lastStep.finishReason
51106
- });
51107
- }
51108
- return new DefaultGenerateTextResult({
51109
- steps,
51110
- totalUsage,
51111
- output: resolvedOutput
51112
- });
51113
- }
51114
- });
51115
- } catch (error48) {
51116
- throw wrapGatewayError(error48);
51117
- }
51118
- }
51119
- async function executeTools({
51120
- toolCalls,
51121
- tools,
51122
- tracer,
51123
- telemetry,
51124
- messages,
51125
- abortSignal,
51126
- experimental_context,
51127
- stepNumber,
51128
- model,
51129
- onToolCallStart,
51130
- onToolCallFinish
51131
- }) {
51132
- const toolOutputs = await Promise.all(toolCalls.map(async (toolCall) => executeToolCall({
51133
- toolCall,
51134
- tools,
51135
- tracer,
51136
- telemetry,
51137
- messages,
51138
- abortSignal,
51139
- experimental_context,
51140
- stepNumber,
51141
- model,
51142
- onToolCallStart,
51143
- onToolCallFinish
51144
- })));
51145
- return toolOutputs.filter((output) => output != null);
51146
- }
51147
- var DefaultGenerateTextResult = class {
51148
- constructor(options) {
51149
- this.steps = options.steps;
51150
- this._output = options.output;
51151
- this.totalUsage = options.totalUsage;
51152
- }
51153
- get finalStep() {
51154
- return this.steps[this.steps.length - 1];
51155
- }
51156
- get content() {
51157
- return this.finalStep.content;
51158
- }
51159
- get text() {
51160
- return this.finalStep.text;
51161
- }
51162
- get files() {
51163
- return this.finalStep.files;
51164
- }
51165
- get reasoningText() {
51166
- return this.finalStep.reasoningText;
51167
- }
51168
- get reasoning() {
51169
- return this.finalStep.reasoning;
51170
- }
51171
- get toolCalls() {
51172
- return this.finalStep.toolCalls;
51173
- }
51174
- get staticToolCalls() {
51175
- return this.finalStep.staticToolCalls;
51176
- }
51177
- get dynamicToolCalls() {
51178
- return this.finalStep.dynamicToolCalls;
51179
- }
51180
- get toolResults() {
51181
- return this.finalStep.toolResults;
51182
- }
51183
- get staticToolResults() {
51184
- return this.finalStep.staticToolResults;
51185
- }
51186
- get dynamicToolResults() {
51187
- return this.finalStep.dynamicToolResults;
51188
- }
51189
- get sources() {
51190
- return this.finalStep.sources;
51191
- }
51192
- get finishReason() {
51193
- return this.finalStep.finishReason;
51194
- }
51195
- get rawFinishReason() {
51196
- return this.finalStep.rawFinishReason;
51197
- }
51198
- get warnings() {
51199
- return this.finalStep.warnings;
51200
- }
51201
- get providerMetadata() {
51202
- return this.finalStep.providerMetadata;
51203
- }
51204
- get response() {
51205
- return this.finalStep.response;
51206
- }
51207
- get request() {
51208
- return this.finalStep.request;
51209
- }
51210
- get usage() {
51211
- return this.finalStep.usage;
51212
- }
51213
- get experimental_output() {
51214
- return this.output;
51215
- }
51216
- get output() {
51217
- if (this._output == null) {
51218
- throw new NoOutputGeneratedError;
51219
- }
51220
- return this._output;
51221
- }
51222
- };
51223
- function asToolCalls(content) {
51224
- const parts = content.filter((part) => part.type === "tool-call");
51225
- if (parts.length === 0) {
51226
- return;
51227
- }
51228
- return parts.map((toolCall) => ({
51229
- toolCallId: toolCall.toolCallId,
51230
- toolName: toolCall.toolName,
51231
- input: toolCall.input
51232
- }));
51233
- }
51234
- function asContent({
51235
- content,
51236
- toolCalls,
51237
- toolOutputs,
51238
- toolApprovalRequests,
51239
- tools
51240
- }) {
51241
- const contentParts = [];
51242
- for (const part of content) {
51243
- switch (part.type) {
51244
- case "text":
51245
- case "reasoning":
51246
- case "source":
51247
- contentParts.push(part);
51248
- break;
51249
- case "file": {
51250
- contentParts.push({
51251
- type: "file",
51252
- file: new DefaultGeneratedFile(part),
51253
- ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
51254
- });
51255
- break;
51256
- }
51257
- case "tool-call": {
51258
- contentParts.push(toolCalls.find((toolCall) => toolCall.toolCallId === part.toolCallId));
51259
- break;
51260
- }
51261
- case "tool-result": {
51262
- const toolCall = toolCalls.find((toolCall2) => toolCall2.toolCallId === part.toolCallId);
51263
- if (toolCall == null) {
51264
- const tool2 = tools == null ? undefined : tools[part.toolName];
51265
- const supportsDeferredResults = (tool2 == null ? undefined : tool2.type) === "provider" && tool2.supportsDeferredResults;
51266
- if (!supportsDeferredResults) {
51267
- throw new Error(`Tool call ${part.toolCallId} not found.`);
51268
- }
51269
- if (part.isError) {
51270
- contentParts.push({
51271
- type: "tool-error",
51272
- toolCallId: part.toolCallId,
51273
- toolName: part.toolName,
51274
- input: undefined,
51275
- error: part.result,
51276
- providerExecuted: true,
51277
- dynamic: part.dynamic
51278
- });
51279
- } else {
51280
- contentParts.push({
51281
- type: "tool-result",
51282
- toolCallId: part.toolCallId,
51283
- toolName: part.toolName,
51284
- input: undefined,
51285
- output: part.result,
51286
- providerExecuted: true,
51287
- dynamic: part.dynamic
51288
- });
51289
- }
51290
- break;
51291
- }
51292
- if (part.isError) {
51293
- contentParts.push({
51294
- type: "tool-error",
51295
- toolCallId: part.toolCallId,
51296
- toolName: part.toolName,
51297
- input: toolCall.input,
51298
- error: part.result,
51299
- providerExecuted: true,
51300
- dynamic: toolCall.dynamic
51301
- });
51302
- } else {
51303
- contentParts.push({
51304
- type: "tool-result",
51305
- toolCallId: part.toolCallId,
51306
- toolName: part.toolName,
51307
- input: toolCall.input,
51308
- output: part.result,
51309
- providerExecuted: true,
51310
- dynamic: toolCall.dynamic
51311
- });
51312
- }
51313
- break;
51314
- }
51315
- case "tool-approval-request": {
51316
- const toolCall = toolCalls.find((toolCall2) => toolCall2.toolCallId === part.toolCallId);
51317
- if (toolCall == null) {
51318
- throw new ToolCallNotFoundForApprovalError({
51319
- toolCallId: part.toolCallId,
51320
- approvalId: part.approvalId
51321
- });
51322
- }
51323
- contentParts.push({
51324
- type: "tool-approval-request",
51325
- approvalId: part.approvalId,
51326
- toolCall
51327
- });
51328
- break;
51329
- }
51330
- }
51331
- }
51332
- return [...contentParts, ...toolOutputs, ...toolApprovalRequests];
51333
- }
51334
50545
  function prepareHeaders(headers, defaultHeaders) {
51335
50546
  const responseHeaders = new Headers(headers != null ? headers : {});
51336
50547
  for (const [key, value] of Object.entries(defaultHeaders)) {
@@ -54484,43 +53695,29 @@ var defaultDownload2 = createDownload();
54484
53695
 
54485
53696
  // src/run.ts
54486
53697
  async function runQuery(options) {
54487
- const { model, query, systemPrompt, stream } = options;
53698
+ const { model, query, systemPrompt } = options;
54488
53699
  try {
54489
- if (stream) {
54490
- return await runStreamingQuery(model, query, systemPrompt);
53700
+ const result = streamText({
53701
+ model,
53702
+ system: systemPrompt,
53703
+ prompt: query
53704
+ });
53705
+ let fullText = "";
53706
+ for await (const textPart of result.textStream) {
53707
+ process.stdout.write(textPart);
53708
+ fullText += textPart;
54491
53709
  }
54492
- return await runNonStreamingQuery(model, query, systemPrompt);
53710
+ if (!fullText.endsWith(`
53711
+ `)) {
53712
+ process.stdout.write(`
53713
+ `);
53714
+ }
53715
+ return { text: fullText };
54493
53716
  } catch (err) {
54494
53717
  const message = err instanceof Error ? err.message : String(err);
54495
53718
  throw new ProviderError(`AI request failed: ${message}`);
54496
53719
  }
54497
53720
  }
54498
- async function runNonStreamingQuery(model, query, systemPrompt) {
54499
- const result = await generateText({
54500
- model,
54501
- system: systemPrompt,
54502
- prompt: query
54503
- });
54504
- return { text: result.text };
54505
- }
54506
- async function runStreamingQuery(model, query, systemPrompt) {
54507
- const result = streamText({
54508
- model,
54509
- system: systemPrompt,
54510
- prompt: query
54511
- });
54512
- let fullText = "";
54513
- for await (const textPart of result.textStream) {
54514
- process.stdout.write(textPart);
54515
- fullText += textPart;
54516
- }
54517
- if (!fullText.endsWith(`
54518
- `)) {
54519
- process.stdout.write(`
54520
- `);
54521
- }
54522
- return { text: fullText };
54523
- }
54524
53721
 
54525
53722
  // src/cli.ts
54526
53723
  async function main() {
@@ -54564,31 +53761,12 @@ async function main() {
54564
53761
  const envInfo = getEnvironmentInfo();
54565
53762
  logDebug2(`Query: ${query}`, debug);
54566
53763
  logDebug2(`Provider: ${providerName}, Model: ${modelId}`, debug);
54567
- logDebug2(`Stream: ${args.options.stream}`, debug);
54568
53764
  logDebug2(formatEnvForDebug(envInfo), debug);
54569
53765
  const result = await runQuery({
54570
53766
  model,
54571
53767
  query,
54572
- systemPrompt: buildSystemPrompt(envInfo),
54573
- stream: args.options.stream
54574
- });
54575
- if (!args.options.stream) {
54576
- const output = formatOutput({
54577
- text: result.text,
54578
- providerName,
54579
- modelId,
54580
- json: args.options.json
54581
- });
54582
- console.log(output);
54583
- } else if (args.options.json) {
54584
- const output = formatOutput({
54585
- text: result.text,
54586
- providerName,
54587
- modelId,
54588
- json: true
54589
- });
54590
- console.log(output);
54591
- }
53768
+ systemPrompt: buildSystemPrompt(envInfo)
53769
+ });
54592
53770
  if (args.options.copy) {
54593
53771
  await clipboardy_default.write(result.text);
54594
53772
  logDebug2("Copied to clipboard", debug);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hongymagic/q",
3
- "version": "0.3.2",
3
+ "version": "0.4.0",
4
4
  "description": "Quick AI answers from the command line",
5
5
  "main": "dist/q.js",
6
6
  "type": "module",