Skip to content

Commit a942648

Browse files
authored
feat: Add max_tokens for chat completion requests (#50)
- Add the `max_tokens` field to the `CreateChatCompletionRequest` struct.
1 parent 208123f commit a942648

File tree

3 files changed

+8
-0
lines changed

3 files changed

+8
-0
lines changed

async-openai/src/types/types.rs

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -724,6 +724,12 @@ pub struct CreateChatCompletionRequest {
724724
/// The messages to generate chat completions for, in the [chat format](https://platform.openai.com/docs/guides/chat/introduction).
725725
pub messages: Vec<ChatCompletionRequestMessage>, // min: 1
726726

727+
/// The maximum number of [tokens](/tokenizer) to generate in the completion.
728+
///
729+
/// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
730+
#[serde(skip_serializing_if = "Option::is_none")]
731+
pub max_tokens: Option<u16>,
732+
727733
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
728734
///
729735
/// We generally recommend altering this or `top_p` but not both.

examples/chat-stream/src/main.rs

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -12,6 +12,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
1212

1313
let request = CreateChatCompletionRequestArgs::default()
1414
.model("gpt-3.5-turbo")
15+
.max_tokens(1024u16)
1516
.messages([ChatCompletionRequestMessageArgs::default()
1617
.content("write a song if Coldplay and AR Rahman collaborated together")
1718
.role(Role::User)

examples/chat/src/main.rs

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
1010
let client = Client::new();
1111

1212
let request = CreateChatCompletionRequestArgs::default()
13+
.max_tokens(512u16)
1314
.model("gpt-3.5-turbo")
1415
.messages([
1516
ChatCompletionRequestMessageArgs::default()

0 commit comments

Comments (0)