llmist 0.1.5 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +5 -3
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +8 -4
- package/dist/cli.js.map +1 -1
- package/package.json +1 -1
package/dist/cli.cjs
CHANGED
|
@@ -4253,7 +4253,7 @@ var import_commander3 = require("commander");
|
|
|
4253
4253
|
// package.json
|
|
4254
4254
|
var package_default = {
|
|
4255
4255
|
name: "llmist",
|
|
4256
|
-
version: "0.1.5",
|
|
4256
|
+
version: "0.1.6",
|
|
4257
4257
|
description: "Universal TypeScript LLM client with streaming-first agent framework. Works with any model - no structured outputs or native tool calling required. Implements its own flexible grammar for function calling.",
|
|
4258
4258
|
type: "module",
|
|
4259
4259
|
main: "dist/index.cjs",
|
|
@@ -5206,17 +5206,19 @@ function registerAgentCommand(program, env) {
|
|
|
5206
5206
|
|
|
5207
5207
|
// src/cli/complete-command.ts
|
|
5208
5208
|
init_messages();
|
|
5209
|
+
init_model_shortcuts();
|
|
5209
5210
|
init_constants2();
|
|
5210
5211
|
async function handleCompleteCommand(promptArg, options, env) {
|
|
5211
5212
|
const prompt = await resolvePrompt(promptArg, env);
|
|
5212
5213
|
const client = env.createClient();
|
|
5214
|
+
const model = resolveModel(options.model);
|
|
5213
5215
|
const builder = new LLMMessageBuilder();
|
|
5214
5216
|
if (options.system) {
|
|
5215
5217
|
builder.addSystem(options.system);
|
|
5216
5218
|
}
|
|
5217
5219
|
builder.addUser(prompt);
|
|
5218
5220
|
const stream2 = client.stream({
|
|
5219
|
-
model
|
|
5221
|
+
model,
|
|
5220
5222
|
messages: builder.build(),
|
|
5221
5223
|
temperature: options.temperature,
|
|
5222
5224
|
maxTokens: options.maxTokens
|
|
@@ -5225,7 +5227,7 @@ async function handleCompleteCommand(promptArg, options, env) {
|
|
|
5225
5227
|
const stderrTTY = env.stderr.isTTY === true;
|
|
5226
5228
|
const progress = new StreamProgress(env.stderr, stderrTTY, client.modelRegistry);
|
|
5227
5229
|
const estimatedInputTokens = Math.round(prompt.length / FALLBACK_CHARS_PER_TOKEN);
|
|
5228
|
-
progress.startCall(options.model, estimatedInputTokens);
|
|
5230
|
+
progress.startCall(model, estimatedInputTokens);
|
|
5229
5231
|
let finishReason;
|
|
5230
5232
|
let usage;
|
|
5231
5233
|
let totalChars = 0;
|