llama-stack-client 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. package/_shims/index.d.ts +2 -0
  2. package/_shims/index.js +5 -1
  3. package/_shims/index.mjs +5 -1
  4. package/core.d.ts +11 -0
  5. package/core.d.ts.map +1 -1
  6. package/core.js +3 -1
  7. package/core.js.map +1 -1
  8. package/core.mjs +4 -2
  9. package/core.mjs.map +1 -1
  10. package/index.d.mts +4 -7
  11. package/index.d.ts +4 -7
  12. package/index.d.ts.map +1 -1
  13. package/index.js +0 -3
  14. package/index.js.map +1 -1
  15. package/index.mjs +0 -3
  16. package/index.mjs.map +1 -1
  17. package/internal/decoders/line.d.ts +1 -0
  18. package/internal/decoders/line.d.ts.map +1 -1
  19. package/package.json +3 -2
  20. package/resources/datasets.d.ts +13 -6
  21. package/resources/datasets.d.ts.map +1 -1
  22. package/resources/datasets.js.map +1 -1
  23. package/resources/datasets.mjs.map +1 -1
  24. package/resources/eval/eval.d.ts +1 -1
  25. package/resources/eval/eval.d.ts.map +1 -1
  26. package/resources/index.d.ts +2 -3
  27. package/resources/index.d.ts.map +1 -1
  28. package/resources/index.js +1 -3
  29. package/resources/index.js.map +1 -1
  30. package/resources/index.mjs +0 -1
  31. package/resources/index.mjs.map +1 -1
  32. package/resources/inference.d.ts +86 -1
  33. package/resources/inference.d.ts.map +1 -1
  34. package/resources/inference.js +6 -0
  35. package/resources/inference.js.map +1 -1
  36. package/resources/inference.mjs +6 -0
  37. package/resources/inference.mjs.map +1 -1
  38. package/resources/post-training/job.d.ts +1 -1
  39. package/resources/post-training/job.d.ts.map +1 -1
  40. package/resources/post-training/job.js.map +1 -1
  41. package/resources/post-training/job.mjs.map +1 -1
  42. package/resources/shared.d.ts +23 -0
  43. package/resources/shared.d.ts.map +1 -1
  44. package/resources/tool-runtime/index.d.ts +1 -1
  45. package/resources/tool-runtime/index.d.ts.map +1 -1
  46. package/resources/tool-runtime/index.js.map +1 -1
  47. package/resources/tool-runtime/index.mjs.map +1 -1
  48. package/resources/tool-runtime/tool-runtime.d.ts +4 -4
  49. package/resources/tool-runtime/tool-runtime.d.ts.map +1 -1
  50. package/resources/tool-runtime/tool-runtime.js +1 -9
  51. package/resources/tool-runtime/tool-runtime.js.map +1 -1
  52. package/resources/tool-runtime/tool-runtime.mjs +1 -9
  53. package/resources/tool-runtime/tool-runtime.mjs.map +1 -1
  54. package/src/_shims/index.d.ts +2 -0
  55. package/src/_shims/index.js +5 -1
  56. package/src/_shims/index.mjs +5 -1
  57. package/src/core.ts +20 -1
  58. package/src/index.ts +8 -15
  59. package/src/resources/datasets.ts +13 -6
  60. package/src/resources/eval/eval.ts +1 -1
  61. package/src/resources/index.ts +4 -6
  62. package/src/resources/inference.ts +121 -0
  63. package/src/resources/post-training/job.ts +1 -2
  64. package/src/resources/shared.ts +24 -0
  65. package/src/resources/tool-runtime/index.ts +1 -0
  66. package/src/resources/tool-runtime/tool-runtime.ts +11 -12
  67. package/src/version.ts +1 -1
  68. package/version.d.ts +1 -1
  69. package/version.js +1 -1
  70. package/version.mjs +1 -1
  71. package/internal/decoders/jsonl.d.ts +0 -12
  72. package/internal/decoders/jsonl.d.ts.map +0 -1
  73. package/internal/decoders/jsonl.js +0 -35
  74. package/internal/decoders/jsonl.js.map +0 -1
  75. package/internal/decoders/jsonl.mjs +0 -31
  76. package/internal/decoders/jsonl.mjs.map +0 -1
  77. package/resources/batch-inference.d.ts +0 -66
  78. package/resources/batch-inference.d.ts.map +0 -1
  79. package/resources/batch-inference.js +0 -15
  80. package/resources/batch-inference.js.map +0 -1
  81. package/resources/batch-inference.mjs +0 -11
  82. package/resources/batch-inference.mjs.map +0 -1
  83. package/src/internal/decoders/jsonl.ts +0 -41
  84. package/src/resources/batch-inference.ts +0 -103
package/src/resources/tool-runtime/tool-runtime.ts CHANGED
@@ -6,7 +6,6 @@ import * as Core from '../../core';
  import * as Shared from '../shared';
  import * as RagToolAPI from './rag-tool';
  import { RagTool, RagToolInsertParams, RagToolQueryParams } from './rag-tool';
- import { JSONLDecoder } from '../../internal/decoders/jsonl';
 
  export class ToolRuntime extends APIResource {
    ragTool: RagToolAPI.RagTool = new RagToolAPI.RagTool(this._client);
@@ -24,23 +23,20 @@ export class ToolRuntime extends APIResource {
    listTools(
      query?: ToolRuntimeListToolsParams,
      options?: Core.RequestOptions,
-   ): Core.APIPromise<JSONLDecoder<ToolDef>>;
-   listTools(options?: Core.RequestOptions): Core.APIPromise<JSONLDecoder<ToolDef>>;
+   ): Core.APIPromise<ToolRuntimeListToolsResponse>;
+   listTools(options?: Core.RequestOptions): Core.APIPromise<ToolRuntimeListToolsResponse>;
    listTools(
      query: ToolRuntimeListToolsParams | Core.RequestOptions = {},
      options?: Core.RequestOptions,
-   ): Core.APIPromise<JSONLDecoder<ToolDef>> {
+   ): Core.APIPromise<ToolRuntimeListToolsResponse> {
      if (isRequestOptions(query)) {
        return this.listTools({}, query);
      }
-     return this._client
-       .get('/v1/tool-runtime/list-tools', {
-         query,
-         ...options,
-         headers: { Accept: 'application/jsonl', ...options?.headers },
-         __binaryResponse: true,
-       })
-       ._thenUnwrap((_, props) => JSONLDecoder.fromResponse(props.response, props.controller));
+     return (
+       this._client.get('/v1/tool-runtime/list-tools', { query, ...options }) as Core.APIPromise<{
+         data: ToolRuntimeListToolsResponse;
+       }>
+     )._thenUnwrap((obj) => obj.data);
    }
  }
 
@@ -81,6 +77,8 @@ export interface ToolInvocationResult {
    metadata?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
  }
 
+ export type ToolRuntimeListToolsResponse = Array<ToolDef>;
+
  export interface ToolRuntimeInvokeToolParams {
    kwargs: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
 
@@ -105,6 +103,7 @@ export declare namespace ToolRuntime {
    export {
      type ToolDef as ToolDef,
      type ToolInvocationResult as ToolInvocationResult,
+     type ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse,
      type ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams,
      type ToolRuntimeListToolsParams as ToolRuntimeListToolsParams,
    };
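Net effect of these hunks: in 0.2.0, listTools() resolved to a JSONLDecoder<ToolDef> that streamed an application/jsonl response line by line; in 0.2.2 it resolves to ToolRuntimeListToolsResponse (a plain Array<ToolDef>) unwrapped from a { data: [...] } JSON envelope. A minimal before/after sketch of a call site, assuming the generated client exposes the resource as client.toolRuntime:

  import LlamaStackClient from 'llama-stack-client';

  const client = new LlamaStackClient();

  async function main() {
    // 0.2.0: the promise resolved to an async-iterable JSONL stream of ToolDef:
    //   for await (const tool of await client.toolRuntime.listTools()) { ... }

    // 0.2.2: the promise resolves to Array<ToolDef>, already fully buffered.
    const tools = await client.toolRuntime.listTools();
    for (const tool of tools) {
      console.log(tool);
    }
  }

  main();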
package/src/version.ts CHANGED
@@ -1 +1 @@
- export const VERSION = '0.2.0';
+ export const VERSION = '0.2.2';
package/version.d.ts CHANGED
@@ -1,2 +1,2 @@
- export declare const VERSION = "0.2.0";
+ export declare const VERSION = "0.2.2";
  //# sourceMappingURL=version.d.ts.map
package/version.js CHANGED
@@ -1,5 +1,5 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.VERSION = void 0;
- exports.VERSION = '0.2.0';
+ exports.VERSION = '0.2.2';
  //# sourceMappingURL=version.js.map
package/version.mjs CHANGED
@@ -1,2 +1,2 @@
- export const VERSION = '0.2.0';
+ export const VERSION = '0.2.2';
  //# sourceMappingURL=version.mjs.map
package/internal/decoders/jsonl.d.ts DELETED
@@ -1,12 +0,0 @@
-
- import { type Response } from "../../_shims/index.js";
- import { type Bytes } from "./line.js";
- export declare class JSONLDecoder<T> {
-   private iterator;
-   controller: AbortController;
-   constructor(iterator: AsyncIterableIterator<Bytes>, controller: AbortController);
-   private decoder;
-   [Symbol.asyncIterator](): AsyncIterator<T>;
-   static fromResponse<T>(response: Response, controller: AbortController): JSONLDecoder<T>;
- }
- //# sourceMappingURL=jsonl.d.ts.map
package/internal/decoders/jsonl.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"jsonl.d.ts","sourceRoot":"","sources":["../../src/internal/decoders/jsonl.ts"],"names":[],"mappings":";AAEA,OAAO,EAAE,KAAK,QAAQ,EAAE,MAAM,oBAAoB,CAAC;AACnD,OAAO,EAAe,KAAK,KAAK,EAAE,MAAM,QAAQ,CAAC;AAEjD,qBAAa,YAAY,CAAC,CAAC;IAIvB,OAAO,CAAC,QAAQ;IAHlB,UAAU,EAAE,eAAe,CAAC;gBAGlB,QAAQ,EAAE,qBAAqB,CAAC,KAAK,CAAC,EAC9C,UAAU,EAAE,eAAe;YAKd,OAAO;IAatB,CAAC,MAAM,CAAC,aAAa,CAAC,IAAI,aAAa,CAAC,CAAC,CAAC;IAI1C,MAAM,CAAC,YAAY,CAAC,CAAC,EAAE,QAAQ,EAAE,QAAQ,EAAE,UAAU,EAAE,eAAe,GAAG,YAAY,CAAC,CAAC,CAAC;CAQzF"}
package/internal/decoders/jsonl.js DELETED
@@ -1,35 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.JSONLDecoder = void 0;
- const error_1 = require("../../error.js");
- const stream_utils_1 = require("../stream-utils.js");
- const line_1 = require("./line.js");
- class JSONLDecoder {
-   constructor(iterator, controller) {
-     this.iterator = iterator;
-     this.controller = controller;
-   }
-   async *decoder() {
-     const lineDecoder = new line_1.LineDecoder();
-     for await (const chunk of this.iterator) {
-       for (const line of lineDecoder.decode(chunk)) {
-         yield JSON.parse(line);
-       }
-     }
-     for (const line of lineDecoder.flush()) {
-       yield JSON.parse(line);
-     }
-   }
-   [Symbol.asyncIterator]() {
-     return this.decoder();
-   }
-   static fromResponse(response, controller) {
-     if (!response.body) {
-       controller.abort();
-       throw new error_1.LlamaStackClientError(`Attempted to iterate over a response with no body`);
-     }
-     return new JSONLDecoder((0, stream_utils_1.ReadableStreamToAsyncIterable)(response.body), controller);
-   }
- }
- exports.JSONLDecoder = JSONLDecoder;
- //# sourceMappingURL=jsonl.js.map
package/internal/decoders/jsonl.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"jsonl.js","sourceRoot":"","sources":["../../src/internal/decoders/jsonl.ts"],"names":[],"mappings":";;;AAAA,0CAAoD;AACpD,qDAAgE;AAEhE,oCAAiD;AAEjD,MAAa,YAAY;IAGvB,YACU,QAAsC,EAC9C,UAA2B;QADnB,aAAQ,GAAR,QAAQ,CAA8B;QAG9C,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;IAEO,KAAK,CAAC,CAAC,OAAO;QACpB,MAAM,WAAW,GAAG,IAAI,kBAAW,EAAE,CAAC;QACtC,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,IAAI,CAAC,QAAQ,EAAE;YACvC,KAAK,MAAM,IAAI,IAAI,WAAW,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE;gBAC5C,MAAM,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;aACxB;SACF;QAED,KAAK,MAAM,IAAI,IAAI,WAAW,CAAC,KAAK,EAAE,EAAE;YACtC,MAAM,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;SACxB;IACH,CAAC;IAED,CAAC,MAAM,CAAC,aAAa,CAAC;QACpB,OAAO,IAAI,CAAC,OAAO,EAAE,CAAC;IACxB,CAAC;IAED,MAAM,CAAC,YAAY,CAAI,QAAkB,EAAE,UAA2B;QACpE,IAAI,CAAC,QAAQ,CAAC,IAAI,EAAE;YAClB,UAAU,CAAC,KAAK,EAAE,CAAC;YACnB,MAAM,IAAI,6BAAqB,CAAC,mDAAmD,CAAC,CAAC;SACtF;QAED,OAAO,IAAI,YAAY,CAAC,IAAA,4CAA6B,EAAQ,QAAQ,CAAC,IAAI,CAAC,EAAE,UAAU,CAAC,CAAC;IAC3F,CAAC;CACF;AAnCD,oCAmCC"}
package/internal/decoders/jsonl.mjs DELETED
@@ -1,31 +0,0 @@
- import { LlamaStackClientError } from "../../error.mjs";
- import { ReadableStreamToAsyncIterable } from "../stream-utils.mjs";
- import { LineDecoder } from "./line.mjs";
- export class JSONLDecoder {
-   constructor(iterator, controller) {
-     this.iterator = iterator;
-     this.controller = controller;
-   }
-   async *decoder() {
-     const lineDecoder = new LineDecoder();
-     for await (const chunk of this.iterator) {
-       for (const line of lineDecoder.decode(chunk)) {
-         yield JSON.parse(line);
-       }
-     }
-     for (const line of lineDecoder.flush()) {
-       yield JSON.parse(line);
-     }
-   }
-   [Symbol.asyncIterator]() {
-     return this.decoder();
-   }
-   static fromResponse(response, controller) {
-     if (!response.body) {
-       controller.abort();
-       throw new LlamaStackClientError(`Attempted to iterate over a response with no body`);
-     }
-     return new JSONLDecoder(ReadableStreamToAsyncIterable(response.body), controller);
-   }
- }
- //# sourceMappingURL=jsonl.mjs.map
package/internal/decoders/jsonl.mjs.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"jsonl.mjs","sourceRoot":"","sources":["../../src/internal/decoders/jsonl.ts"],"names":[],"mappings":"OAAO,EAAE,qBAAqB,EAAE;OACzB,EAAE,6BAA6B,EAAE;OAEjC,EAAE,WAAW,EAAc;AAElC,MAAM,OAAO,YAAY;IAGvB,YACU,QAAsC,EAC9C,UAA2B;QADnB,aAAQ,GAAR,QAAQ,CAA8B;QAG9C,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;IAEO,KAAK,CAAC,CAAC,OAAO;QACpB,MAAM,WAAW,GAAG,IAAI,WAAW,EAAE,CAAC;QACtC,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,IAAI,CAAC,QAAQ,EAAE;YACvC,KAAK,MAAM,IAAI,IAAI,WAAW,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE;gBAC5C,MAAM,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;aACxB;SACF;QAED,KAAK,MAAM,IAAI,IAAI,WAAW,CAAC,KAAK,EAAE,EAAE;YACtC,MAAM,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;SACxB;IACH,CAAC;IAED,CAAC,MAAM,CAAC,aAAa,CAAC;QACpB,OAAO,IAAI,CAAC,OAAO,EAAE,CAAC;IACxB,CAAC;IAED,MAAM,CAAC,YAAY,CAAI,QAAkB,EAAE,UAA2B;QACpE,IAAI,CAAC,QAAQ,CAAC,IAAI,EAAE;YAClB,UAAU,CAAC,KAAK,EAAE,CAAC;YACnB,MAAM,IAAI,qBAAqB,CAAC,mDAAmD,CAAC,CAAC;SACtF;QAED,OAAO,IAAI,YAAY,CAAC,6BAA6B,CAAQ,QAAQ,CAAC,IAAI,CAAC,EAAE,UAAU,CAAC,CAAC;IAC3F,CAAC;CACF"}
package/resources/batch-inference.d.ts DELETED
@@ -1,66 +0,0 @@
- import { APIResource } from "../resource.js";
- import * as Core from "../core.js";
- import * as Shared from "./shared.js";
- export declare class BatchInference extends APIResource {
-   chatCompletion(body: BatchInferenceChatCompletionParams, options?: Core.RequestOptions): Core.APIPromise<BatchInferenceChatCompletionResponse>;
-   completion(body: BatchInferenceCompletionParams, options?: Core.RequestOptions): Core.APIPromise<Shared.BatchCompletion>;
- }
- export interface BatchInferenceChatCompletionResponse {
-   batch: Array<Shared.ChatCompletionResponse>;
- }
- export interface BatchInferenceChatCompletionParams {
-   messages_batch: Array<Array<Shared.Message>>;
-   model: string;
-   logprobs?: BatchInferenceChatCompletionParams.Logprobs;
-   /**
-    * Configuration for JSON schema-guided response generation.
-    */
-   response_format?: Shared.ResponseFormat;
-   sampling_params?: Shared.SamplingParams;
-   /**
-    * Whether tool use is required or automatic. This is a hint to the model which may
-    * not be followed. It depends on the Instruction Following capabilities of the
-    * model.
-    */
-   tool_choice?: 'auto' | 'required' | 'none';
-   /**
-    * Prompt format for calling custom / zero shot tools.
-    */
-   tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
-   tools?: Array<BatchInferenceChatCompletionParams.Tool>;
- }
- export declare namespace BatchInferenceChatCompletionParams {
-   interface Logprobs {
-     /**
-      * How many tokens (for each position) to return log probabilities for.
-      */
-     top_k?: number;
-   }
-   interface Tool {
-     tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-     description?: string;
-     parameters?: Record<string, Shared.ToolParamDefinition>;
-   }
- }
- export interface BatchInferenceCompletionParams {
-   content_batch: Array<Shared.InterleavedContent>;
-   model: string;
-   logprobs?: BatchInferenceCompletionParams.Logprobs;
-   /**
-    * Configuration for JSON schema-guided response generation.
-    */
-   response_format?: Shared.ResponseFormat;
-   sampling_params?: Shared.SamplingParams;
- }
- export declare namespace BatchInferenceCompletionParams {
-   interface Logprobs {
-     /**
-      * How many tokens (for each position) to return log probabilities for.
-      */
-     top_k?: number;
-   }
- }
- export declare namespace BatchInference {
-   export { type BatchInferenceChatCompletionResponse as BatchInferenceChatCompletionResponse, type BatchInferenceChatCompletionParams as BatchInferenceChatCompletionParams, type BatchInferenceCompletionParams as BatchInferenceCompletionParams, };
- }
- //# sourceMappingURL=batch-inference.d.ts.map
package/resources/batch-inference.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"batch-inference.d.ts","sourceRoot":"","sources":["../src/resources/batch-inference.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAC1C,OAAO,KAAK,IAAI,MAAM,SAAS,CAAC;AAChC,OAAO,KAAK,MAAM,MAAM,UAAU,CAAC;AAEnC,qBAAa,cAAe,SAAQ,WAAW;IAC7C,cAAc,CACZ,IAAI,EAAE,kCAAkC,EACxC,OAAO,CAAC,EAAE,IAAI,CAAC,cAAc,GAC5B,IAAI,CAAC,UAAU,CAAC,oCAAoC,CAAC;IAIxD,UAAU,CACR,IAAI,EAAE,8BAA8B,EACpC,OAAO,CAAC,EAAE,IAAI,CAAC,cAAc,GAC5B,IAAI,CAAC,UAAU,CAAC,MAAM,CAAC,eAAe,CAAC;CAG3C;AAED,MAAM,WAAW,oCAAoC;IACnD,KAAK,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,CAAC,CAAC;CAC7C;AAED,MAAM,WAAW,kCAAkC;IACjD,cAAc,EAAE,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC;IAE7C,KAAK,EAAE,MAAM,CAAC;IAEd,QAAQ,CAAC,EAAE,kCAAkC,CAAC,QAAQ,CAAC;IAEvD;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC,cAAc,CAAC;IAExC,eAAe,CAAC,EAAE,MAAM,CAAC,cAAc,CAAC;IAExC;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,GAAG,UAAU,GAAG,MAAM,CAAC;IAE3C;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,cAAc,GAAG,aAAa,CAAC;IAE7D,KAAK,CAAC,EAAE,KAAK,CAAC,kCAAkC,CAAC,IAAI,CAAC,CAAC;CACxD;AAED,yBAAiB,kCAAkC,CAAC;IAClD,UAAiB,QAAQ;QACvB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;KAChB;IAED,UAAiB,IAAI;QACnB,SAAS,EAAE,cAAc,GAAG,eAAe,GAAG,UAAU,GAAG,kBAAkB,GAAG,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC;QAE9F,WAAW,CAAC,EAAE,MAAM,CAAC;QAErB,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,mBAAmB,CAAC,CAAC;KACzD;CACF;AAED,MAAM,WAAW,8BAA8B;IAC7C,aAAa,EAAE,KAAK,CAAC,MAAM,CAAC,kBAAkB,CAAC,CAAC;IAEhD,KAAK,EAAE,MAAM,CAAC;IAEd,QAAQ,CAAC,EAAE,8BAA8B,CAAC,QAAQ,CAAC;IAEnD;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC,cAAc,CAAC;IAExC,eAAe,CAAC,EAAE,MAAM,CAAC,cAAc,CAAC;CACzC;AAED,yBAAiB,8BAA8B,CAAC;IAC9C,UAAiB,QAAQ;QACvB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;KAChB;CACF;AAED,MAAM,CAAC,OAAO,WAAW,cAAc,CAAC;IACtC,OAAO,EACL,KAAK,oCAAoC,IAAI,oCAAoC,EACjF,KAAK,kCAAkC,IAAI,kCAAkC,EAC7E,KAAK,8BAA8B,IAAI,8BAA8B,GACtE,CAAC;CACH"}
package/resources/batch-inference.js DELETED
@@ -1,15 +0,0 @@
- "use strict";
- // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.BatchInference = void 0;
- const resource_1 = require("../resource.js");
- class BatchInference extends resource_1.APIResource {
-   chatCompletion(body, options) {
-     return this._client.post('/v1/batch-inference/chat-completion', { body, ...options });
-   }
-   completion(body, options) {
-     return this._client.post('/v1/batch-inference/completion', { body, ...options });
-   }
- }
- exports.BatchInference = BatchInference;
- //# sourceMappingURL=batch-inference.js.map
package/resources/batch-inference.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"batch-inference.js","sourceRoot":"","sources":["../src/resources/batch-inference.ts"],"names":[],"mappings":";AAAA,sFAAsF;;;AAEtF,6CAA0C;AAI1C,MAAa,cAAe,SAAQ,sBAAW;IAC7C,cAAc,CACZ,IAAwC,EACxC,OAA6B;QAE7B,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,qCAAqC,EAAE,EAAE,IAAI,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IACxF,CAAC;IAED,UAAU,CACR,IAAoC,EACpC,OAA6B;QAE7B,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,gCAAgC,EAAE,EAAE,IAAI,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IACnF,CAAC;CACF;AAdD,wCAcC"}
package/resources/batch-inference.mjs DELETED
@@ -1,11 +0,0 @@
- // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- import { APIResource } from "../resource.mjs";
- export class BatchInference extends APIResource {
-   chatCompletion(body, options) {
-     return this._client.post('/v1/batch-inference/chat-completion', { body, ...options });
-   }
-   completion(body, options) {
-     return this._client.post('/v1/batch-inference/completion', { body, ...options });
-   }
- }
- //# sourceMappingURL=batch-inference.mjs.map
package/resources/batch-inference.mjs.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"batch-inference.mjs","sourceRoot":"","sources":["../src/resources/batch-inference.ts"],"names":[],"mappings":"AAAA,sFAAsF;OAE/E,EAAE,WAAW,EAAE;AAItB,MAAM,OAAO,cAAe,SAAQ,WAAW;IAC7C,cAAc,CACZ,IAAwC,EACxC,OAA6B;QAE7B,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,qCAAqC,EAAE,EAAE,IAAI,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IACxF,CAAC;IAED,UAAU,CACR,IAAoC,EACpC,OAA6B;QAE7B,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,gCAAgC,EAAE,EAAE,IAAI,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IACnF,CAAC;CACF"}
package/src/internal/decoders/jsonl.ts DELETED
@@ -1,41 +0,0 @@
- import { LlamaStackClientError } from '../../error';
- import { ReadableStreamToAsyncIterable } from '../stream-utils';
- import { type Response } from '../../_shims/index';
- import { LineDecoder, type Bytes } from './line';
-
- export class JSONLDecoder<T> {
-   controller: AbortController;
-
-   constructor(
-     private iterator: AsyncIterableIterator<Bytes>,
-     controller: AbortController,
-   ) {
-     this.controller = controller;
-   }
-
-   private async *decoder(): AsyncIterator<T, any, undefined> {
-     const lineDecoder = new LineDecoder();
-     for await (const chunk of this.iterator) {
-       for (const line of lineDecoder.decode(chunk)) {
-         yield JSON.parse(line);
-       }
-     }
-
-     for (const line of lineDecoder.flush()) {
-       yield JSON.parse(line);
-     }
-   }
-
-   [Symbol.asyncIterator](): AsyncIterator<T> {
-     return this.decoder();
-   }
-
-   static fromResponse<T>(response: Response, controller: AbortController): JSONLDecoder<T> {
-     if (!response.body) {
-       controller.abort();
-       throw new LlamaStackClientError(`Attempted to iterate over a response with no body`);
-     }
-
-     return new JSONLDecoder(ReadableStreamToAsyncIterable<Bytes>(response.body), controller);
-   }
- }
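For reference before it leaves the tree: the decoder's technique is to buffer incoming chunks, split on line boundaries, JSON.parse each complete line, and flush any trailing partial line at end of stream. A self-contained sketch of the same idea over string chunks (simplified relative to the removed LineDecoder, which also handled byte chunks and mixed newline conventions):

  // Minimal JSONL decoding over string chunks; illustrative only.
  async function* decodeJsonl<T>(chunks: AsyncIterable<string>): AsyncGenerator<T> {
    let buffer = '';
    for await (const chunk of chunks) {
      buffer += chunk;
      const lines = buffer.split('\n');
      buffer = lines.pop() ?? ''; // retain the trailing partial line
      for (const line of lines) {
        if (line.trim()) yield JSON.parse(line) as T;
      }
    }
    if (buffer.trim()) yield JSON.parse(buffer) as T; // flush, like lineDecoder.flush()
  }

  async function demo() {
    async function* chunks() {
      yield '{"identifier":"web_'; // a JSON object split across two chunks
      yield 'search"}\n{"identifier":"wolfram_alpha"}\n';
    }
    for await (const obj of decodeJsonl<{ identifier: string }>(chunks())) {
      console.log(obj.identifier); // web_search, then wolfram_alpha
    }
  }

  demo();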
package/src/resources/batch-inference.ts DELETED
@@ -1,103 +0,0 @@
- // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
- import { APIResource } from '../resource';
- import * as Core from '../core';
- import * as Shared from './shared';
-
- export class BatchInference extends APIResource {
-   chatCompletion(
-     body: BatchInferenceChatCompletionParams,
-     options?: Core.RequestOptions,
-   ): Core.APIPromise<BatchInferenceChatCompletionResponse> {
-     return this._client.post('/v1/batch-inference/chat-completion', { body, ...options });
-   }
-
-   completion(
-     body: BatchInferenceCompletionParams,
-     options?: Core.RequestOptions,
-   ): Core.APIPromise<Shared.BatchCompletion> {
-     return this._client.post('/v1/batch-inference/completion', { body, ...options });
-   }
- }
-
- export interface BatchInferenceChatCompletionResponse {
-   batch: Array<Shared.ChatCompletionResponse>;
- }
-
- export interface BatchInferenceChatCompletionParams {
-   messages_batch: Array<Array<Shared.Message>>;
-
-   model: string;
-
-   logprobs?: BatchInferenceChatCompletionParams.Logprobs;
-
-   /**
-    * Configuration for JSON schema-guided response generation.
-    */
-   response_format?: Shared.ResponseFormat;
-
-   sampling_params?: Shared.SamplingParams;
-
-   /**
-    * Whether tool use is required or automatic. This is a hint to the model which may
-    * not be followed. It depends on the Instruction Following capabilities of the
-    * model.
-    */
-   tool_choice?: 'auto' | 'required' | 'none';
-
-   /**
-    * Prompt format for calling custom / zero shot tools.
-    */
-   tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
-
-   tools?: Array<BatchInferenceChatCompletionParams.Tool>;
- }
-
- export namespace BatchInferenceChatCompletionParams {
-   export interface Logprobs {
-     /**
-      * How many tokens (for each position) to return log probabilities for.
-      */
-     top_k?: number;
-   }
-
-   export interface Tool {
-     tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
-     description?: string;
-
-     parameters?: Record<string, Shared.ToolParamDefinition>;
-   }
- }
-
- export interface BatchInferenceCompletionParams {
-   content_batch: Array<Shared.InterleavedContent>;
-
-   model: string;
-
-   logprobs?: BatchInferenceCompletionParams.Logprobs;
-
-   /**
-    * Configuration for JSON schema-guided response generation.
-    */
-   response_format?: Shared.ResponseFormat;
-
-   sampling_params?: Shared.SamplingParams;
- }
-
- export namespace BatchInferenceCompletionParams {
-   export interface Logprobs {
-     /**
-      * How many tokens (for each position) to return log probabilities for.
-      */
-     top_k?: number;
-   }
- }
-
- export declare namespace BatchInference {
-   export {
-     type BatchInferenceChatCompletionResponse as BatchInferenceChatCompletionResponse,
-     type BatchInferenceChatCompletionParams as BatchInferenceChatCompletionParams,
-     type BatchInferenceCompletionParams as BatchInferenceCompletionParams,
-   };
- }
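This removal, together with the index.ts and resources/index.ts edits counted above, drops the /v1/batch-inference/chat-completion and /v1/batch-inference/completion endpoints from the SDK surface entirely. For readers auditing the break, a hypothetical 0.2.0-era call site; the batchInference accessor name and the message shape are assumptions reconstructed from the removed types, and the model id is a placeholder:

  import LlamaStackClient from 'llama-stack-client';

  const client = new LlamaStackClient();

  async function main() {
    // Compiled only against llama-stack-client 0.2.0; the resource no longer exists in 0.2.2.
    const response = await client.batchInference.chatCompletion({
      model: 'example-model-id', // placeholder
      messages_batch: [
        [{ role: 'user', content: 'Hello!' }],
        [{ role: 'user', content: 'Summarize JSONL in one sentence.' }],
      ],
    });

    // BatchInferenceChatCompletionResponse: one ChatCompletionResponse per input conversation
    for (const completion of response.batch) {
      console.log(completion);
    }
  }

  main();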