@gapi/openai 1.8.140 → 1.8.143
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +35 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/index.js.map +1 -1
- package/dist/openai.controller.js +2 -6
- package/dist/openai.controller.js.map +1 -1
- package/dist/types/create-completion-input.type.js +35 -1
- package/dist/types/create-completion-input.type.js.map +1 -1
- package/package.json +2 -2
- package/src/index.ts +1 -0
- package/src/openai.controller.ts +1 -2
- package/src/types/create-completion-input.type.ts +51 -1
package/README.md
CHANGED
@@ -54,3 +54,38 @@ mutation {
 }
 }
 ```
+
+#### Extending the OpenAPI graphql endpoints using internal provided injection token `OpenAI`
+
+```typescript
+import { Controller, GraphQLNonNull, Inject, Mutation, Type } from '@gapi/core';
+import {
+  CreateCompletionInputType,
+  CreateCompletionRequest,
+  CreateCompletionType,
+  OpenAI,
+} from '@gapi/openai';
+import { from } from 'rxjs';
+import { map } from 'rxjs/operators';
+
+@Controller()
+export class CustomControllerController {
+  constructor(@Inject(OpenAI) private openai: OpenAI) {}
+
+  @Type(CreateCompletionType)
+  @Mutation({
+    payload: {
+      type: new GraphQLNonNull(CreateCompletionInputType),
+    },
+  })
+  createCompletion(root, { payload }: { payload: CreateCompletionRequest }) {
+    return from(
+      this.openai.createCompletion({
+        ...payload,
+        max_tokens: payload.max_tokens ?? 2048,
+      }),
+    ).pipe(map((res) => res.data));
+  }
+}
+
+```
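The hunk above is the entire README addition: a custom controller that injects the `OpenAI` token and wires `createCompletion` into the GraphQL schema. For orientation, here is a minimal sketch of how a client might invoke that mutation over HTTP. The endpoint URL and the selected response fields are illustrative assumptions, not something this diff specifies; the actual response shape comes from `CreateCompletionType` and should be checked via introspection.

```typescript
// Hypothetical client for the mutation shown in the README example.
// Assumes a @gapi server at http://localhost:9000/graphql and that
// CreateCompletionType exposes `id` and `choices { text }` — both are
// assumptions for illustration only.
const query = /* GraphQL */ `
  mutation ($payload: CreateCompletionInputType!) {
    createCompletion(payload: $payload) {
      id
      choices {
        text
      }
    }
  }
`;

async function main() {
  const res = await fetch('http://localhost:9000/graphql', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query,
      variables: {
        payload: { model: 'text-davinci-003', prompt: 'Say hello', max_tokens: 16 },
      },
    }),
  });
  console.log(JSON.stringify(await res.json(), null, 2));
}

main().catch(console.error);
```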
package/dist/index.d.ts
CHANGED
package/dist/index.js
CHANGED
@@ -15,4 +15,5 @@ __exportStar(require("./openai.module"), exports);
 __exportStar(require("./openai.tokens"), exports);
 __exportStar(require("./types/create-completion-input.type"), exports);
 __exportStar(require("./types/create-completion.type"), exports);
+__exportStar(require("openai"), exports);
 //# sourceMappingURL=index.js.map
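The single added line is the compiled form of a new `export * from 'openai';` in `package/src/index.ts` (listed as changed above, but collapsed in this view). It re-exports the whole `openai` v3 SDK surface from the package root, which is what lets the README example import `CreateCompletionRequest` from `@gapi/openai` rather than from `openai` directly. A small sketch of the effect:

```typescript
// Before this release, SDK types had to be imported from 'openai' itself:
// import { CreateCompletionRequest } from 'openai';

// As of 1.8.143 the re-export makes the same name resolvable from the
// package root, so consumers need no direct 'openai' dependency:
import { CreateCompletionRequest } from '@gapi/openai';

const request: CreateCompletionRequest = {
  model: 'text-davinci-003', // required in the v3 request type
  prompt: 'Say hello',
};
```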
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,sDAAoC;AACpC,kDAAgC;AAChC,kDAAgC;AAChC,uEAAqD;AACrD,iEAA+C"}
+{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,sDAAoC;AACpC,kDAAgC;AAChC,kDAAgC;AAChC,uEAAqD;AACrD,iEAA+C;AAC/C,yCAAuB"}
package/dist/openai.controller.js
CHANGED
@@ -24,12 +24,8 @@ let OpenAIController = class OpenAIController {
         this.openai = openai;
     }
     createCompletion(root, { payload }) {
-        var _a, _b;
-        return rxjs_1.from(this.openai.createCompletion({
-            model: (_a = payload.model) !== null && _a !== void 0 ? _a : 'text-davinci-003',
-            prompt: payload.prompt,
-            max_tokens: (_b = payload.max_tokens) !== null && _b !== void 0 ? _b : 2048,
-        })).pipe(operators_1.map((res) => res.data));
+        var _a;
+        return rxjs_1.from(this.openai.createCompletion(Object.assign(Object.assign({}, payload), { max_tokens: (_a = payload.max_tokens) !== null && _a !== void 0 ? _a : 2048 }))).pipe(operators_1.map((res) => res.data));
     }
 };
 __decorate([
package/dist/openai.controller.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"openai.controller.js","sourceRoot":"","sources":["../src/openai.controller.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;AAAA,qCAAgF;AAEhF,+BAA4B;AAC5B,8CAAqC;AAErC,mDAAyC;AACzC,uFAAiF;AACjF,2EAAsE;AAGtE,IAAa,gBAAgB,GAA7B,MAAa,gBAAgB;IAC3B,YAAoC,MAAc;QAAd,WAAM,GAAN,MAAM,CAAQ;IAAG,CAAC;IAQtD,gBAAgB,CAAC,IAAI,EAAE,EAAE,OAAO,EAAwC;;QACtE,OAAO,WAAI,CACT,IAAI,CAAC,MAAM,CAAC,gBAAgB,
+{"version":3,"file":"openai.controller.js","sourceRoot":"","sources":["../src/openai.controller.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;AAAA,qCAAgF;AAEhF,+BAA4B;AAC5B,8CAAqC;AAErC,mDAAyC;AACzC,uFAAiF;AACjF,2EAAsE;AAGtE,IAAa,gBAAgB,GAA7B,MAAa,gBAAgB;IAC3B,YAAoC,MAAc;QAAd,WAAM,GAAN,MAAM,CAAQ;IAAG,CAAC;IAQtD,gBAAgB,CAAC,IAAI,EAAE,EAAE,OAAO,EAAwC;;QACtE,OAAO,WAAI,CACT,IAAI,CAAC,MAAM,CAAC,gBAAgB,iCACvB,OAAO,KACV,UAAU,QAAE,OAAO,CAAC,UAAU,mCAAI,IAAI,IACtC,CACH,CAAC,IAAI,CAAC,eAAG,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC;IACjC,CAAC;CACF,CAAA;AARC;IANC,WAAI,CAAC,6CAAoB,CAAC;IAC1B,eAAQ,CAAC;QACR,OAAO,EAAE;YACP,IAAI,EAAE,IAAI,qBAAc,CAAC,wDAAyB,CAAC;SACpD;KACF,CAAC;;;;wDAQD;AAhBU,gBAAgB;IAD5B,iBAAU,EAAE;IAEE,WAAA,aAAM,CAAC,sBAAM,CAAC,CAAA;;GADhB,gBAAgB,CAiB5B;AAjBY,4CAAgB"}
package/dist/types/create-completion-input.type.js
CHANGED
@@ -6,13 +6,47 @@ exports.CreateCompletionInputType = new graphql_1.GraphQLInputObjectType({
     name: 'CreateCompletionInputType',
     fields: () => ({
         model: {
+            description: 'ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.',
             type: new graphql_1.GraphQLNonNull(graphql_1.GraphQLString),
         },
         prompt: {
             type: new graphql_1.GraphQLNonNull(graphql_1.GraphQLString),
         },
         max_tokens: {
-
+            description: "The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).",
+            type: graphql_1.GraphQLInt,
+        },
+        suffix: {
+            description: 'The suffix that comes after a completion of inserted text.',
+            type: graphql_1.GraphQLString,
+        },
+        temperature: {
+            description: 'What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.',
+            type: graphql_1.GraphQLInt,
+        },
+        top_p: {
+            description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.',
+            type: graphql_1.GraphQLInt,
+        },
+        n: {
+            description: 'How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.',
+            type: graphql_1.GraphQLInt,
+        },
+        logprobs: {
+            description: 'Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.',
+            type: graphql_1.GraphQLString,
+        },
+        presence_penalty: {
+            description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)",
+            type: graphql_1.GraphQLInt,
+        },
+        frequency_penalty: {
+            description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)",
+            type: graphql_1.GraphQLInt,
+        },
+        best_of: {
+            description: 'Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.',
+            type: graphql_1.GraphQLInt,
         },
     }),
 });
package/dist/types/create-completion-input.type.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"create-completion-input.type.js","sourceRoot":"","sources":["../../src/types/create-completion-input.type.ts"],"names":[],"mappings":";;;AAAA,qCAKiB;AAEJ,QAAA,yBAAyB,GAAG,IAAI,gCAAsB,CAAC;IAClE,IAAI,EAAE,2BAA2B;IACjC,MAAM,EAAE,GAAG,EAAE,CAAC,CAAC;QACb,KAAK,EAAE;YACL,IAAI,EAAE,IAAI,wBAAc,CAAC,uBAAa,CAAC;SACxC;QACD,MAAM,EAAE;YACN,IAAI,EAAE,IAAI,wBAAc,CAAC,uBAAa,CAAC;SACxC;QACD,UAAU,EAAE;YACV,IAAI,EAAE,IAAI,
+{"version":3,"file":"create-completion-input.type.js","sourceRoot":"","sources":["../../src/types/create-completion-input.type.ts"],"names":[],"mappings":";;;AAAA,qCAKiB;AAEJ,QAAA,yBAAyB,GAAG,IAAI,gCAAsB,CAAC;IAClE,IAAI,EAAE,2BAA2B;IACjC,MAAM,EAAE,GAAG,EAAE,CAAC,CAAC;QACb,KAAK,EAAE;YACL,WAAW,EACT,yOAAyO;YAC3O,IAAI,EAAE,IAAI,wBAAc,CAAC,uBAAa,CAAC;SACxC;QACD,MAAM,EAAE;YACN,IAAI,EAAE,IAAI,wBAAc,CAAC,uBAAa,CAAC;SACxC;QACD,UAAU,EAAE;YACV,WAAW,EACT,4QAA4Q;YAC9Q,IAAI,EAAE,oBAAU;SACjB;QAED,MAAM,EAAE;YACN,WAAW,EAAE,4DAA4D;YACzE,IAAI,EAAE,uBAAa;SACpB;QAED,WAAW,EAAE;YACX,WAAW,EACT,6UAA6U;YAC/U,IAAI,EAAE,oBAAU;SACjB;QAED,KAAK,EAAE;YACL,WAAW,EACT,kTAAkT;YACpT,IAAI,EAAE,oBAAU;SACjB;QAED,CAAC,EAAE;YACD,WAAW,EACT,kPAAkP;YACpP,IAAI,EAAE,oBAAU;SACjB;QACD,QAAQ,EAAE;YACR,WAAW,EACT,0dAA0d;YAC5d,IAAI,EAAE,uBAAa;SACpB;QAED,gBAAgB,EAAE;YAChB,WAAW,EACT,iTAAiT;YACnT,IAAI,EAAE,oBAAU;SACjB;QAED,iBAAiB,EAAE;YACjB,WAAW,EACT,8TAA8T;YAChU,IAAI,EAAE,oBAAU;SACjB;QAED,OAAO,EAAE;YACP,WAAW,EACT,4eAA4e;YAC9e,IAAI,EAAE,oBAAU;SACjB;KACF,CAAC;CACH,CAAC,CAAC"}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@gapi/openai",
-  "version": "1.8.140",
+  "version": "1.8.143",
   "repository": {
     "type": "git",
     "url": "https://github.com/Stradivario/gapi.git"
@@ -30,7 +30,7 @@
   },
   "dependencies": {
     "openai": "^3.1.0",
-    "@gapi/core": "^1.8.
+    "@gapi/core": "^1.8.142"
   },
   "devDependencies": {
     "@types/node": "^13.11.1",
package/src/index.ts
CHANGED
package/src/openai.controller.ts
CHANGED
@@ -20,8 +20,7 @@ export class OpenAIController {
   createCompletion(root, { payload }: { payload: CreateCompletionRequest }) {
     return from(
       this.openai.createCompletion({
-        model: payload.model ?? 'text-davinci-003',
-        prompt: payload.prompt,
+        ...payload,
         max_tokens: payload.max_tokens ?? 2048,
       }),
     ).pipe(map((res) => res.data));
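Replacing the two hand-picked fields with an object spread changes behavior in two ways: every field of the (now much richer) input type is forwarded to `createCompletion` instead of being silently dropped, and the old server-side `model` fallback to `'text-davinci-003'` disappears, which is consistent with `model` being non-null in the input type. A standalone sketch of the difference, using a simplified stand-in for the payload type rather than the package's own:

```typescript
// Simplified stand-in for CreateCompletionRequest; illustrative only.
interface Payload {
  model: string;
  prompt?: string;
  max_tokens?: number;
  temperature?: number;
}

// Old shape: only three fields ever reached the OpenAI API.
function oldForwarding(payload: Payload) {
  return {
    model: payload.model, // previously defaulted to 'text-davinci-003'
    prompt: payload.prompt,
    max_tokens: payload.max_tokens ?? 2048,
  };
}

// New shape: everything is forwarded; only max_tokens keeps a default.
function newForwarding(payload: Payload) {
  return { ...payload, max_tokens: payload.max_tokens ?? 2048 };
}

const p: Payload = { model: 'text-davinci-003', prompt: 'hi', temperature: 0.9 };
console.log('old:', oldForwarding(p)); // temperature is dropped
console.log('new:', newForwarding(p)); // temperature survives
```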
package/src/types/create-completion-input.type.ts
CHANGED
@@ -9,13 +9,63 @@ export const CreateCompletionInputType = new GraphQLInputObjectType({
   name: 'CreateCompletionInputType',
   fields: () => ({
     model: {
+      description:
+        'ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.',
       type: new GraphQLNonNull(GraphQLString),
     },
     prompt: {
       type: new GraphQLNonNull(GraphQLString),
     },
     max_tokens: {
-
+      description:
+        "The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).",
+      type: GraphQLInt,
+    },
+
+    suffix: {
+      description: 'The suffix that comes after a completion of inserted text.',
+      type: GraphQLString,
+    },
+
+    temperature: {
+      description:
+        'What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.',
+      type: GraphQLInt,
+    },
+
+    top_p: {
+      description:
+        'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.',
+      type: GraphQLInt,
+    },
+
+    n: {
+      description:
+        'How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.',
+      type: GraphQLInt,
+    },
+    logprobs: {
+      description:
+        'Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.',
+      type: GraphQLString,
+    },
+
+    presence_penalty: {
+      description:
+        "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)",
+      type: GraphQLInt,
+    },
+
+    frequency_penalty: {
+      description:
+        "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)",
+      type: GraphQLInt,
+    },
+
+    best_of: {
+      description:
+        'Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.',
+      type: GraphQLInt,
     },
   }),
 });
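The bulk of the release is this block: the input type gains eight new optional parameters, plus `description` strings on the existing `model` and `max_tokens` fields, each carrying OpenAI's own parameter documentation so it surfaces through GraphQL introspection and in tools such as GraphiQL. (One quirk worth noting: `temperature` and `top_p` are declared as `GraphQLInt`, even though the OpenAI API accepts fractional values such as 0.9.) A minimal sketch of how such descriptions can be read back, using only the `graphql` package and a cut-down stand-in type rather than the package's own:

```typescript
import {
  GraphQLInputObjectType,
  GraphQLInt,
  GraphQLNonNull,
  GraphQLString,
} from 'graphql';

// Cut-down stand-in for CreateCompletionInputType, built the same way.
const ExampleInput = new GraphQLInputObjectType({
  name: 'ExampleCreateCompletionInput',
  fields: () => ({
    model: {
      description: 'ID of the model to use.',
      type: new GraphQLNonNull(GraphQLString),
    },
    max_tokens: {
      description: 'The maximum number of tokens to generate in the completion.',
      type: GraphQLInt,
    },
  }),
});

// Descriptions are plain metadata on the field map — this is what
// introspection queries and GraphiQL's docs panel ultimately read:
for (const [name, field] of Object.entries(ExampleInput.getFields())) {
  console.log(`${name}: ${field.description}`);
}
```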