
Commit 00b49ae

fix: add missing prompt_cache_key & safety_identifier params
1 parent a3315d9 commit 00b49ae

2 files changed: +24 −0 lines changed


src/openai/resources/chat/completions/completions.py

Lines changed: 16 additions & 0 deletions
@@ -101,7 +101,9 @@ def parse(
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -197,8 +199,10 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
                 "parallel_tool_calls": parallel_tool_calls,
                 "prediction": prediction,
                 "presence_penalty": presence_penalty,
+                "prompt_cache_key": prompt_cache_key,
                 "reasoning_effort": reasoning_effort,
                 "response_format": _type_to_response_format(response_format),
+                "safety_identifier": safety_identifier,
                 "seed": seed,
                 "service_tier": service_tier,
                 "stop": stop,
@@ -1378,7 +1382,9 @@ def stream(
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -1445,7 +1451,9 @@ def stream(
             parallel_tool_calls=parallel_tool_calls,
             prediction=prediction,
             presence_penalty=presence_penalty,
+            prompt_cache_key=prompt_cache_key,
             reasoning_effort=reasoning_effort,
+            safety_identifier=safety_identifier,
             seed=seed,
             service_tier=service_tier,
             store=store,
@@ -1514,7 +1522,9 @@ async def parse(
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -1610,8 +1620,10 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
                 "parallel_tool_calls": parallel_tool_calls,
                 "prediction": prediction,
                 "presence_penalty": presence_penalty,
+                "prompt_cache_key": prompt_cache_key,
                 "reasoning_effort": reasoning_effort,
                 "response_format": _type_to_response_format(response_format),
+                "safety_identifier": safety_identifier,
                 "seed": seed,
                 "service_tier": service_tier,
                 "store": store,
@@ -2791,7 +2803,9 @@ def stream(
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -2859,7 +2873,9 @@ def stream(
             parallel_tool_calls=parallel_tool_calls,
             prediction=prediction,
             presence_penalty=presence_penalty,
+            prompt_cache_key=prompt_cache_key,
             reasoning_effort=reasoning_effort,
+            safety_identifier=safety_identifier,
             seed=seed,
             service_tier=service_tier,
             stop=stop,
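
For context, a minimal usage sketch of the two parameters now accepted by the chat-completions helpers. Per the API docs, prompt_cache_key routes requests for better prompt-cache hit rates and safety_identifier is a stable per-end-user ID used for abuse detection; the model name, cache key, and identifier values below are illustrative, not taken from this commit. Before this change, parse() and stream() did not accept these keywords even though create() already did.

from pydantic import BaseModel

from openai import OpenAI

client = OpenAI()


class Answer(BaseModel):
    value: int


# Both keywords are now forwarded to POST /chat/completions by parse() (and stream()).
completion = client.chat.completions.parse(
    model="gpt-4o-2024-08-06",  # illustrative model choice
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    response_format=Answer,
    prompt_cache_key="faq-prompt-v1",  # illustrative cache-routing key
    safety_identifier="user-1234",  # illustrative stable end-user ID
)
print(completion.choices[0].message.parsed)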

src/openai/resources/responses/responses.py

Lines changed: 8 additions & 0 deletions
@@ -1001,7 +1001,9 @@ def parse(
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -1053,7 +1055,9 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
                 "parallel_tool_calls": parallel_tool_calls,
                 "previous_response_id": previous_response_id,
                 "prompt": prompt,
+                "prompt_cache_key": prompt_cache_key,
                 "reasoning": reasoning,
+                "safety_identifier": safety_identifier,
                 "service_tier": service_tier,
                 "store": store,
                 "stream": stream,
@@ -2316,7 +2320,9 @@ async def parse(
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -2368,7 +2374,9 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
                 "parallel_tool_calls": parallel_tool_calls,
                 "previous_response_id": previous_response_id,
                 "prompt": prompt,
+                "prompt_cache_key": prompt_cache_key,
                 "reasoning": reasoning,
+                "safety_identifier": safety_identifier,
                 "service_tier": service_tier,
                 "store": store,
                 "stream": stream,
