@huggingface/tasks 0.5.2 → 0.6.1
This diff shows the changes between publicly released versions of the package, as published to a supported public registry; it is provided for informational purposes only.
- package/dist/index.cjs +46 -0
- package/dist/index.d.ts +230 -22
- package/dist/index.js +46 -0
- package/package.json +2 -2
- package/src/model-libraries-snippets.ts +12 -0
- package/src/model-libraries.ts +36 -0
- package/src/tasks/chat-completion/inference.ts +158 -0
- package/src/tasks/chat-completion/spec/input.json +63 -0
- package/src/tasks/chat-completion/spec/output.json +58 -0
- package/src/tasks/chat-completion/spec/output_stream.json +48 -0
- package/src/tasks/index.ts +15 -4
- package/src/tasks/text-generation/inference.ts +75 -19
- package/src/tasks/text-generation/spec/input.json +4 -0
- package/src/tasks/text-generation/spec/output.json +101 -56
- package/src/tasks/text-generation/spec/output_stream.json +47 -0

package/src/tasks/text-generation/spec/output.json:

@@ -10,43 +10,45 @@
       "description": "The generated text"
     },
     "details": {
-      "
-      "
-      "allOf": [
-        { "$ref": "#/$defs/SequenceDetails" },
-        {
-          "type": "object",
-          "properties": {
-            "best_of_sequences": {
-              "type": "array",
-              "description": "Details about additional sequences when best_of is provided",
-              "items": {
-                "allOf": [
-                  { "$ref": "#/$defs/SequenceDetails" },
-                  {
-                    "type": "object",
-                    "properties": {
-                      "generated_text": {
-                        "type": "integer",
-                        "description": "The generated text"
-                      }
-                    },
-                    "required": ["generated_text"]
-                  }
-                ]
-              }
-            }
-          }
-        }
-      ]
+      "$ref": "#/$defs/Details",
+      "description": "When enabled, details about the generation"
     }
   },
   "required": ["generated_text"],
-
   "$defs": {
+    "FinishReason": {
+      "type": "string",
+      "title": "TextGenerationFinishReason",
+      "description": "The reason why the generation was stopped.",
+      "oneOf": [
+        { "const": "length", "description": "length: The generated sequence reached the maximum allowed length" },
+        { "const": "eos_token", "description": "eos_token: The model generated an end-of-sentence (EOS) token" },
+        {
+          "const": "stop_sequence",
+          "description": "stop_sequence: One of the sequence in stop_sequences was generated"
+        }
+      ]
+    },
+    "PrefillToken": {
+      "title": "TextGenerationPrefillToken",
+      "type": "object",
+      "properties": {
+        "id": {
+          "type": "integer"
+        },
+        "logprob": {
+          "type": "number"
+        },
+        "text": {
+          "type": "string",
+          "description": "The text associated with that token"
+        }
+      },
+      "required": ["id", "logprob", "text"]
+    },
     "Token": {
       "type": "object",
-      "title": "
+      "title": "TextGenerationOutputToken",
       "properties": {
         "id": {
           "type": "integer"
@@ -63,20 +65,15 @@
           "description": "The text associated with that token"
         }
       },
-      "required": ["id", "
+      "required": ["id", "special", "text"]
     },
-    "
+    "Details": {
       "type": "object",
-      "title": "
+      "title": "TextGenerationOutputDetails",
       "properties": {
         "finish_reason": {
-          "
-          "description": "The reason why the generation was stopped.",
-          "oneOf": [
-            { "const": "length", "description": "The generated sequence reached the maximum allowed length" },
-            { "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
-            { "const": "stop_sequence", "description": "One of the sequence in stop_sequences was generated" }
-          ]
+          "$ref": "#/$defs/FinishReason",
+          "description": "The reason why the generation was stopped."
        },
        "generated_tokens": {
          "type": "integer",
@@ -85,21 +82,7 @@
         "prefill": {
           "type": "array",
           "items": {
-            "
-            "type": "object",
-            "properties": {
-              "id": {
-                "type": "integer"
-              },
-              "logprob": {
-                "type": "number"
-              },
-              "text": {
-                "type": "string",
-                "description": "The text associated with that token"
-              }
-            },
-            "required": ["id", "logprob", "text"]
+            "$ref": "#/$defs/PrefillToken"
           }
         },
         "seed": {
@@ -112,9 +95,71 @@
           "items": {
             "$ref": "#/$defs/Token"
           }
+        },
+        "top_tokens": {
+          "type": "array",
+          "description": "Most likely tokens",
+          "items": {
+            "type": "array",
+            "items": {
+              "$ref": "#/$defs/Token"
+            }
+          }
+        },
+        "best_of_sequences": {
+          "type": "array",
+          "description": "Details about additional sequences when best_of is provided",
+          "items": {
+            "$ref": "#/$defs/SequenceDetails"
+          }
         }
       },
       "required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+    },
+    "SequenceDetails": {
+      "type": "object",
+      "title": "TextGenerationOutputSequenceDetails",
+      "properties": {
+        "generated_text": {
+          "type": "string",
+          "description": "The generated text"
+        },
+        "finish_reason": {
+          "$ref": "#/$defs/FinishReason"
+        },
+        "generated_tokens": {
+          "type": "integer",
+          "description": "The number of generated tokens"
+        },
+        "prefill": {
+          "type": "array",
+          "items": {
+            "$ref": "#/$defs/PrefillToken"
+          }
+        },
+        "seed": {
+          "type": "integer",
+          "description": "The random seed used for generation"
+        },
+        "tokens": {
+          "type": "array",
+          "description": "The generated tokens and associated details",
+          "items": {
+            "$ref": "#/$defs/Token"
+          }
+        },
+        "top_tokens": {
+          "type": "array",
+          "description": "Most likely tokens",
+          "items": {
+            "type": "array",
+            "items": {
+              "$ref": "#/$defs/Token"
+            }
+          }
+        }
+      },
+      "required": ["generated_text", "finish_reason", "generated_tokens", "prefill", "tokens"]
     }
   }
 }
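The refactor above replaces the inlined `allOf` definitions with named `$defs` (`FinishReason`, `PrefillToken`, `Details`, `SequenceDetails`) and adds `top_tokens` and `best_of_sequences` as reusable references. As a reading aid, here is a hand-written TypeScript sketch of the shapes those definitions describe, derived only from the hunks shown and with optionality taken from each `required` list. It is not the generated code in `src/tasks/text-generation/inference.ts` (not included in this excerpt), and the fields marked as assumptions are not visible in the hunks.

```ts
// Hand-written sketch of the shapes described by the updated output.json schema.
// Derived from the diff hunks above only; the generated declarations in
// src/tasks/text-generation/inference.ts are authoritative and may differ.

/** The reason why the generation was stopped. */
export type TextGenerationFinishReason = "length" | "eos_token" | "stop_sequence";

export interface TextGenerationPrefillToken {
  id: number;
  logprob: number;
  /** The text associated with that token */
  text: string;
}

export interface TextGenerationOutputToken {
  id: number;
  /** Listed in `required` above; its type is not visible in the excerpt, boolean assumed. */
  special: boolean;
  /** The text associated with that token */
  text: string;
  /** Assumption: not visible in the excerpted hunks, mirrors the prefill token. */
  logprob?: number;
}

export interface TextGenerationOutputSequenceDetails {
  /** The generated text */
  generated_text: string;
  finish_reason: TextGenerationFinishReason;
  /** The number of generated tokens */
  generated_tokens: number;
  prefill: TextGenerationPrefillToken[];
  /** The random seed used for generation (optional: not in `required`) */
  seed?: number;
  /** The generated tokens and associated details */
  tokens: TextGenerationOutputToken[];
  /** Most likely tokens (optional: not in `required`) */
  top_tokens?: TextGenerationOutputToken[][];
}

export interface TextGenerationOutputDetails {
  /** The reason why the generation was stopped. */
  finish_reason: TextGenerationFinishReason;
  generated_tokens: number;
  prefill: TextGenerationPrefillToken[];
  seed?: number;
  tokens: TextGenerationOutputToken[];
  top_tokens?: TextGenerationOutputToken[][];
  /** Details about additional sequences when best_of is provided */
  best_of_sequences?: TextGenerationOutputSequenceDetails[];
}

export interface TextGenerationOutput {
  /** The generated text */
  generated_text: string;
  /** When enabled, details about the generation */
  details?: TextGenerationOutputDetails;
}
```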
package/src/tasks/text-generation/spec/output_stream.json (new file):

@@ -0,0 +1,47 @@
+{
+  "$id": "/inference/schemas/text-generation/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Text Generation Stream Output",
+  "title": "TextGenerationStreamOutput",
+  "type": "object",
+  "properties": {
+    "token": {
+      "$ref": "#/$defs/Token",
+      "description": "Generated token."
+    },
+    "index": {
+      "type": "integer",
+      "description": "The token index within the stream. Optional to support older clients that omit it."
+    },
+    "generated_text": {
+      "type": "string",
+      "description": "The complete generated text. Only available when the generation is finished."
+    },
+    "details": {
+      "$ref": "#/$defs/StreamDetails",
+      "description": "Generation details. Only available when the generation is finished."
+    }
+  },
+  "required": ["token"],
+  "$defs": {
+    "StreamDetails": {
+      "type": "object",
+      "title": "TextGenerationStreamDetails",
+      "properties": {
+        "finish_reason": {
+          "$ref": "#/$defs/FinishReason",
+          "description": "The reason why the generation was stopped."
+        },
+        "generated_tokens": {
+          "type": "integer",
+          "description": "The number of generated tokens"
+        },
+        "seed": {
+          "type": "integer",
+          "description": "The random seed used for generation"
+        }
+      },
+      "required": ["finish_reason", "generated_tokens", "seed"]
+    }
+  }
+}
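The new streaming spec in the same style, assuming its `Token` and `FinishReason` `$ref`s resolve to the definitions shown in the output.json diff above; this sketch reuses the types from the previous one and is likewise hand-derived, not the generated code.

```ts
// Hand-written sketch of the new output_stream.json schema; reuses
// TextGenerationFinishReason and TextGenerationOutputToken from the sketch above.

export interface TextGenerationStreamDetails {
  /** The reason why the generation was stopped. */
  finish_reason: TextGenerationFinishReason;
  /** The number of generated tokens */
  generated_tokens: number;
  /** The random seed used for generation */
  seed: number;
}

export interface TextGenerationStreamOutput {
  /** Generated token. */
  token: TextGenerationOutputToken;
  /** The token index within the stream. Optional to support older clients that omit it. */
  index?: number;
  /** The complete generated text. Only available when the generation is finished. */
  generated_text?: string;
  /** Generation details. Only available when the generation is finished. */
  details?: TextGenerationStreamDetails;
}
```

Since only `token` is required, a consumer can treat `generated_text` and `details` as present only on the final streamed chunk, which is what the schema descriptions state.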