Skip to content
This repository was archived by the owner on Mar 6, 2024. It is now read-only.

Commit 70069ea

Browse files
authored
set tokens based on model (#99)
<!-- This is an auto-generated comment: release notes by openai --> ### Summary by OpenAI Release Notes: - New Feature: Users can now set maximum token limits for models, requests, and responses using the `Bot` and `Options` classes. > "Tokens set with ease, > Limits to put your mind at peace. > With this new feature, > Your API usage will be a pleasure." <!-- end of auto-generated comment: release notes by openai -->
1 parent c826d63 commit 70069ea

File tree

3 files changed

+39
-11
lines changed

3 files changed

+39
-11
lines changed

dist/index.js

Lines changed: 19 additions & 6 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/bot.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ export class Bot {
2323
systemMessage: options.system_message,
2424
apiKey: process.env.OPENAI_API_KEY,
2525
debug: options.debug,
26+
maxModelTokens: options.max_model_tokens,
27+
maxResponseTokens: options.max_tokens_for_response,
2628
completionParams: {
2729
temperature: options.openai_model_temperature,
2830
model: options.openai_model

src/options.ts

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -203,6 +203,9 @@ export class Options {
203203
openai_retries: number
204204
openai_timeout_ms: number
205205
openai_concurrency_limit: number
206+
max_model_tokens: number
207+
max_tokens_for_request: number
208+
max_tokens_for_response: number
206209
max_tokens_for_extra_content: number
207210

208211
constructor(
@@ -231,14 +234,21 @@ export class Options {
231234
this.openai_concurrency_limit = parseInt(openai_concurrency_limit)
232235

233236
if (this.openai_model === 'gpt-4-32k') {
234-
this.max_tokens_for_extra_content = 30000
237+
this.max_model_tokens = 32700
238+
this.max_tokens_for_response = 4000
235239
} else if (this.openai_model === 'gpt-4') {
236-
this.max_tokens_for_extra_content = 6000
237-
} else if (this.openai_model === 'gpt-3.5-turbo') {
238-
this.max_tokens_for_extra_content = 2000
240+
this.max_model_tokens = 8100
241+
this.max_tokens_for_response = 2000
239242
} else {
240-
this.max_tokens_for_extra_content = 1000
243+
this.max_model_tokens = 4000
244+
this.max_tokens_for_response = 1000
241245
}
246+
247+
// calculate the max tokens for the request and response
248+
this.max_tokens_for_request =
249+
this.max_model_tokens - this.max_tokens_for_response
250+
// use two-thirds of the request tokens for extra content
251+
this.max_tokens_for_extra_content = this.max_tokens_for_request / 1.5
242252
}
243253

244254
// print all options using core.info
@@ -254,6 +264,9 @@ export class Options {
254264
core.info(`openai_retries: ${this.openai_retries}`)
255265
core.info(`openai_timeout_ms: ${this.openai_timeout_ms}`)
256266
core.info(`openai_concurrency_limit: ${this.openai_concurrency_limit}`)
267+
core.info(`max_model_tokens: ${this.max_model_tokens}`)
268+
core.info(`max_tokens_for_request: ${this.max_tokens_for_request}`)
269+
core.info(`max_tokens_for_response: ${this.max_tokens_for_response}`)
257270
core.info(
258271
`max_tokens_for_extra_content: ${this.max_tokens_for_extra_content}`
259272
)

0 commit comments

Comments (0)