Skip to content

Commit a0220d0

Browse files
authored
Add missing fields (#51)
* Update `max_tokens` position and doc to match the OpenAI docs; add `user` field in `CreateChatCompletionRequest`
* Add `language` to `CreateTranscriptionRequest`
* Sync openapi spec from openai-openapi
1 parent a942648 commit a0220d0

File tree

2 files changed

+52
-39
lines changed

2 files changed

+52
-39
lines changed

async-openai/src/types/types.rs

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -724,12 +724,6 @@ pub struct CreateChatCompletionRequest {
724724
/// The messages to generate chat completions for, in the [chat format](https://platform.openai.com/docs/guides/chat/introduction).
725725
pub messages: Vec<ChatCompletionRequestMessage>, // min: 1
726726

727-
/// The maximum number of [tokens](/tokenizer) to generate in the completion.
728-
///
729-
/// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
730-
#[serde(skip_serializing_if = "Option::is_none")]
731-
pub max_tokens: Option<u16>,
732-
733727
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
734728
///
735729
/// We generally recommend altering this or `top_p` but not both.
@@ -754,6 +748,10 @@ pub struct CreateChatCompletionRequest {
754748
#[serde(skip_serializing_if = "Option::is_none")]
755749
pub stop: Option<Stop>,
756750

751+
/// The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
752+
#[serde(skip_serializing_if = "Option::is_none")]
753+
pub max_tokens: Option<u16>, // default: inf
754+
757755
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
758756
///
759757
/// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)
@@ -771,6 +769,9 @@ pub struct CreateChatCompletionRequest {
771769
/// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
772770
#[serde(skip_serializing_if = "Option::is_none")]
773771
pub logit_bias: Option<HashMap<String, serde_json::Value>>, // default: null
772+
773+
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
774+
pub user: Option<String>,
774775
}
775776

776777
#[derive(Debug, Deserialize)]
@@ -856,6 +857,9 @@ pub struct CreateTranscriptionRequest {
856857

857858
/// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
858859
pub temperature: Option<f32>, // default: 0
860+
861+
/// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
862+
pub language: Option<String>,
859863
}
860864

861865
#[derive(Debug, Deserialize)]

openapi.yaml

Lines changed: 42 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -2223,7 +2223,7 @@ components:
22232223
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
22242224
required:
22252225
- model
2226-
2226+
22272227
CreateCompletionResponse:
22282228
type: object
22292229
properties:
@@ -2275,11 +2275,11 @@ components:
22752275
type: integer
22762276
total_tokens:
22772277
type: integer
2278-
required:
2278+
required:
22792279
- prompt_tokens
22802280
- completion_tokens
22812281
- total_tokens
2282-
required:
2282+
required:
22832283
- id
22842284
- object
22852285
- created
@@ -2299,7 +2299,7 @@ components:
22992299
name:
23002300
type: string
23012301
description: The name of the user in a multi-user chat
2302-
required:
2302+
required:
23032303
- role
23042304
- content
23052305

@@ -2313,7 +2313,7 @@ components:
23132313
content:
23142314
type: string
23152315
description: The contents of the message
2316-
required:
2316+
required:
23172317
- role
23182318
- content
23192319

@@ -2372,6 +2372,11 @@ components:
23722372
maxItems: 4
23732373
items:
23742374
type: string
2375+
max_tokens:
2376+
description: |
2377+
The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
2378+
default: inf
2379+
type: integer
23752380
presence_penalty:
23762381
type: number
23772382
default: 0
@@ -2431,11 +2436,11 @@ components:
24312436
type: integer
24322437
total_tokens:
24332438
type: integer
2434-
required:
2439+
required:
24352440
- prompt_tokens
24362441
- completion_tokens
24372442
- total_tokens
2438-
required:
2443+
required:
24392444
- id
24402445
- object
24412446
- created
@@ -2536,11 +2541,11 @@ components:
25362541
type: integer
25372542
total_tokens:
25382543
type: integer
2539-
required:
2544+
required:
25402545
- prompt_tokens
25412546
- completion_tokens
25422547
- total_tokens
2543-
required:
2548+
required:
25442549
- object
25452550
- created
25462551
- choices
@@ -2690,7 +2695,7 @@ components:
26902695
type: boolean
26912696
violence/graphic:
26922697
type: boolean
2693-
required:
2698+
required:
26942699
- hate
26952700
- hate/threatening
26962701
- self-harm
@@ -2715,19 +2720,19 @@ components:
27152720
type: number
27162721
violence/graphic:
27172722
type: number
2718-
required:
2723+
required:
27192724
- hate
27202725
- hate/threatening
27212726
- self-harm
27222727
- sexual
27232728
- sexual/minors
27242729
- violence
27252730
- violence/graphic
2726-
required:
2731+
required:
27272732
- flagged
27282733
- categories
27292734
- category_scores
2730-
required:
2735+
required:
27312736
- id
27322737
- model
27332738
- results
@@ -2810,7 +2815,7 @@ components:
28102815
type: array
28112816
items:
28122817
$ref: '#/components/schemas/OpenAIFile'
2813-
required:
2818+
required:
28142819
- object
28152820
- data
28162821

@@ -2845,7 +2850,7 @@ components:
28452850
type: string
28462851
deleted:
28472852
type: boolean
2848-
required:
2853+
required:
28492854
- id
28502855
- object
28512856
- deleted
@@ -3249,7 +3254,7 @@ components:
32493254
type: array
32503255
items:
32513256
$ref: '#/components/schemas/FineTune'
3252-
required:
3257+
required:
32533258
- object
32543259
- data
32553260

@@ -3262,7 +3267,7 @@ components:
32623267
type: array
32633268
items:
32643269
$ref: '#/components/schemas/FineTuneEvent'
3265-
required:
3270+
required:
32663271
- object
32673272
- data
32683273

@@ -3322,7 +3327,7 @@ components:
33223327
type: array
33233328
items:
33243329
type: number
3325-
required:
3330+
required:
33263331
- index
33273332
- object
33283333
- embedding
@@ -3333,10 +3338,10 @@ components:
33333338
type: integer
33343339
total_tokens:
33353340
type: integer
3336-
required:
3341+
required:
33373342
- prompt_tokens
33383343
- total_tokens
3339-
required:
3344+
required:
33403345
- object
33413346
- model
33423347
- data
@@ -3346,12 +3351,12 @@ components:
33463351
type: object
33473352
additionalProperties: false
33483353
properties:
3349-
file:
3354+
file:
33503355
description: |
33513356
The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
33523357
type: string
33533358
format: binary
3354-
model:
3359+
model:
33553360
description: |
33563361
ID of the model to use. Only `whisper-1` is currently available.
33573362
type: string
@@ -3369,29 +3374,33 @@ components:
33693374
The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
33703375
type: number
33713376
default: 0
3377+
language:
3378+
description: |
3379+
The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
3380+
type: string
33723381
required:
33733382
- file
33743383
- model
33753384

3376-
# Note: This does not currently support the non-default response format types.
3385+
# Note: This does not currently support the non-default response format types.
33773386
CreateTranscriptionResponse:
33783387
type: object
33793388
properties:
33803389
text:
33813390
type: string
3382-
required:
3391+
required:
33833392
- text
33843393

33853394
CreateTranslationRequest:
33863395
type: object
33873396
additionalProperties: false
33883397
properties:
3389-
file:
3398+
file:
33903399
description: |
33913400
The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
33923401
type: string
33933402
format: binary
3394-
model:
3403+
model:
33953404
description: |
33963405
ID of the model to use. Only `whisper-1` is currently available.
33973406
type: string
@@ -3413,13 +3422,13 @@ components:
34133422
- file
34143423
- model
34153424

3416-
# Note: This does not currently support the non-default response format types.
3425+
# Note: This does not currently support the non-default response format types.
34173426
CreateTranslationResponse:
34183427
type: object
34193428
properties:
34203429
text:
34213430
type: string
3422-
required:
3431+
required:
34233432
- text
34243433

34253434
Engine:
@@ -3434,7 +3443,7 @@ components:
34343443
nullable: true
34353444
ready:
34363445
type: boolean
3437-
required:
3446+
required:
34383447
- id
34393448
- object
34403449
- created
@@ -3451,7 +3460,7 @@ components:
34513460
type: integer
34523461
owned_by:
34533462
type: string
3454-
required:
3463+
required:
34553464
- id
34563465
- object
34573466
- created
@@ -3477,7 +3486,7 @@ components:
34773486
status_details:
34783487
type: object
34793488
nullable: true
3480-
required:
3489+
required:
34813490
- id
34823491
- object
34833492
- bytes
@@ -3523,7 +3532,7 @@ components:
35233532
type: array
35243533
items:
35253534
$ref: '#/components/schemas/FineTuneEvent'
3526-
required:
3535+
required:
35273536
- id
35283537
- object
35293538
- created_at
@@ -3548,7 +3557,7 @@ components:
35483557
type: string
35493558
message:
35503559
type: string
3551-
required:
3560+
required:
35523561
- object
35533562
- created_at
35543563
- level

0 commit comments

Comments (0)