Skip to content

Commit a96ad8c

Browse files
Stainless Bot (stainless-app[bot])
authored and committed
feat(api): OpenAPI spec update via Stainless API (#139)
1 parent 70c1a9a commit a96ad8c

File tree

11 files changed

+1266
-1273
lines changed

11 files changed

+1266
-1273
lines changed

.github/workflows/ci.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ on:
66
pull_request:
77
branches:
88
- main
9+
- next
910

1011
jobs:
1112
lint:

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 21
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-5d4722a755a01f8917b975ab7e6528e590f53d09891baac758abba1e28df15d1.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-a552977d2b7ab16d16020c083420cadbca97d5766ea0df7b49b3ed275626b995.yml

README.md

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ client = AsyncPromptFoundry(
143143

144144
async def main() -> None:
145145
model_parameters = await client.prompts.get_parameters(
146-
"1212121",
146+
id="1212121",
147147
variables={"hello": "world"},
148148
)
149149
print(model_parameters.parameters)
@@ -180,7 +180,7 @@ client = PromptFoundry()
180180

181181
try:
182182
client.prompts.get_parameters(
183-
"1212121",
183+
id="1212121",
184184
)
185185
except prompt_foundry_python_sdk.APIConnectionError as e:
186186
print("The server could not be reached")
@@ -225,7 +225,7 @@ client = PromptFoundry(
225225

226226
# Or, configure per-request:
227227
client.with_options(max_retries=5).prompts.get_parameters(
228-
"1212121",
228+
id="1212121",
229229
)
230230
```
231231

@@ -250,7 +250,7 @@ client = PromptFoundry(
250250

251251
# Override per-request:
252252
client.with_options(timeout=5.0).prompts.get_parameters(
253-
"1212121",
253+
id="1212121",
254254
)
255255
```
256256

@@ -291,7 +291,7 @@ from prompt_foundry_python_sdk import PromptFoundry
291291

292292
client = PromptFoundry()
293293
response = client.prompts.with_raw_response.get_parameters(
294-
"1212121",
294+
id="1212121",
295295
)
296296
print(response.headers.get('X-My-Header'))
297297

@@ -311,7 +311,7 @@ To stream the response body, use `.with_streaming_response` instead, which requi
311311

312312
```python
313313
with client.prompts.with_streaming_response.get_parameters(
314-
"1212121",
314+
id="1212121",
315315
) as response:
316316
print(response.headers.get("X-My-Header"))
317317

src/prompt_foundry_python_sdk/resources/evaluation_assertions.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,7 @@
2525
async_to_raw_response_wrapper,
2626
async_to_streamed_response_wrapper,
2727
)
28-
from .._base_client import (
29-
make_request_options,
30-
)
28+
from .._base_client import make_request_options
3129
from ..types.evaluation_assertion import EvaluationAssertion
3230
from ..types.evaluation_assertion_list_response import EvaluationAssertionListResponse
3331
from ..types.evaluation_assertion_delete_response import EvaluationAssertionDeleteResponse

src/prompt_foundry_python_sdk/resources/evaluations.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,7 @@
2020
async_to_raw_response_wrapper,
2121
async_to_streamed_response_wrapper,
2222
)
23-
from .._base_client import (
24-
make_request_options,
25-
)
23+
from .._base_client import make_request_options
2624
from ..types.evaluation import Evaluation
2725
from ..types.evaluation_list_response import EvaluationListResponse
2826
from ..types.evaluation_delete_response import EvaluationDeleteResponse

src/prompt_foundry_python_sdk/resources/prompts.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,7 @@
2020
async_to_raw_response_wrapper,
2121
async_to_streamed_response_wrapper,
2222
)
23-
from .._base_client import (
24-
make_request_options,
25-
)
23+
from .._base_client import make_request_options
2624
from ..types.model_parameters import ModelParameters
2725
from ..types.prompt_configuration import PromptConfiguration
2826
from ..types.prompt_list_response import PromptListResponse

src/prompt_foundry_python_sdk/resources/tools.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,7 @@
2121
async_to_streamed_response_wrapper,
2222
)
2323
from ..types.tool import Tool
24-
from .._base_client import (
25-
make_request_options,
26-
)
24+
from .._base_client import make_request_options
2725
from ..types.tool_list_response import ToolListResponse
2826
from ..types.tool_delete_response import ToolDeleteResponse
2927

tests/api_resources/test_evaluation_assertions.py

Lines changed: 64 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -24,21 +24,21 @@ class TestEvaluationAssertions:
2424
@parametrize
2525
def test_method_create(self, client: PromptFoundry) -> None:
2626
evaluation_assertion = client.evaluation_assertions.create(
27-
evaluation_id="string",
28-
json_path="string",
29-
target_value="string",
30-
tool_name="string",
27+
evaluation_id="evaluationId",
28+
json_path="jsonPath",
29+
target_value="targetValue",
30+
tool_name="toolName",
3131
type="CONTAINS",
3232
)
3333
assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
3434

3535
@parametrize
3636
def test_raw_response_create(self, client: PromptFoundry) -> None:
3737
response = client.evaluation_assertions.with_raw_response.create(
38-
evaluation_id="string",
39-
json_path="string",
40-
target_value="string",
41-
tool_name="string",
38+
evaluation_id="evaluationId",
39+
json_path="jsonPath",
40+
target_value="targetValue",
41+
tool_name="toolName",
4242
type="CONTAINS",
4343
)
4444

@@ -50,10 +50,10 @@ def test_raw_response_create(self, client: PromptFoundry) -> None:
5050
@parametrize
5151
def test_streaming_response_create(self, client: PromptFoundry) -> None:
5252
with client.evaluation_assertions.with_streaming_response.create(
53-
evaluation_id="string",
54-
json_path="string",
55-
target_value="string",
56-
tool_name="string",
53+
evaluation_id="evaluationId",
54+
json_path="jsonPath",
55+
target_value="targetValue",
56+
tool_name="toolName",
5757
type="CONTAINS",
5858
) as response:
5959
assert not response.is_closed
@@ -67,23 +67,23 @@ def test_streaming_response_create(self, client: PromptFoundry) -> None:
6767
@parametrize
6868
def test_method_update(self, client: PromptFoundry) -> None:
6969
evaluation_assertion = client.evaluation_assertions.update(
70-
"1212121",
71-
evaluation_id="string",
72-
json_path="string",
73-
target_value="string",
74-
tool_name="string",
70+
id="1212121",
71+
evaluation_id="evaluationId",
72+
json_path="jsonPath",
73+
target_value="targetValue",
74+
tool_name="toolName",
7575
type="CONTAINS",
7676
)
7777
assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
7878

7979
@parametrize
8080
def test_raw_response_update(self, client: PromptFoundry) -> None:
8181
response = client.evaluation_assertions.with_raw_response.update(
82-
"1212121",
83-
evaluation_id="string",
84-
json_path="string",
85-
target_value="string",
86-
tool_name="string",
82+
id="1212121",
83+
evaluation_id="evaluationId",
84+
json_path="jsonPath",
85+
target_value="targetValue",
86+
tool_name="toolName",
8787
type="CONTAINS",
8888
)
8989

@@ -95,11 +95,11 @@ def test_raw_response_update(self, client: PromptFoundry) -> None:
9595
@parametrize
9696
def test_streaming_response_update(self, client: PromptFoundry) -> None:
9797
with client.evaluation_assertions.with_streaming_response.update(
98-
"1212121",
99-
evaluation_id="string",
100-
json_path="string",
101-
target_value="string",
102-
tool_name="string",
98+
id="1212121",
99+
evaluation_id="evaluationId",
100+
json_path="jsonPath",
101+
target_value="targetValue",
102+
tool_name="toolName",
103103
type="CONTAINS",
104104
) as response:
105105
assert not response.is_closed
@@ -114,11 +114,11 @@ def test_streaming_response_update(self, client: PromptFoundry) -> None:
114114
def test_path_params_update(self, client: PromptFoundry) -> None:
115115
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
116116
client.evaluation_assertions.with_raw_response.update(
117-
"",
118-
evaluation_id="string",
119-
json_path="string",
120-
target_value="string",
121-
tool_name="string",
117+
id="",
118+
evaluation_id="evaluationId",
119+
json_path="jsonPath",
120+
target_value="targetValue",
121+
tool_name="toolName",
122122
type="CONTAINS",
123123
)
124124

@@ -237,21 +237,21 @@ class TestAsyncEvaluationAssertions:
237237
@parametrize
238238
async def test_method_create(self, async_client: AsyncPromptFoundry) -> None:
239239
evaluation_assertion = await async_client.evaluation_assertions.create(
240-
evaluation_id="string",
241-
json_path="string",
242-
target_value="string",
243-
tool_name="string",
240+
evaluation_id="evaluationId",
241+
json_path="jsonPath",
242+
target_value="targetValue",
243+
tool_name="toolName",
244244
type="CONTAINS",
245245
)
246246
assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
247247

248248
@parametrize
249249
async def test_raw_response_create(self, async_client: AsyncPromptFoundry) -> None:
250250
response = await async_client.evaluation_assertions.with_raw_response.create(
251-
evaluation_id="string",
252-
json_path="string",
253-
target_value="string",
254-
tool_name="string",
251+
evaluation_id="evaluationId",
252+
json_path="jsonPath",
253+
target_value="targetValue",
254+
tool_name="toolName",
255255
type="CONTAINS",
256256
)
257257

@@ -263,10 +263,10 @@ async def test_raw_response_create(self, async_client: AsyncPromptFoundry) -> No
263263
@parametrize
264264
async def test_streaming_response_create(self, async_client: AsyncPromptFoundry) -> None:
265265
async with async_client.evaluation_assertions.with_streaming_response.create(
266-
evaluation_id="string",
267-
json_path="string",
268-
target_value="string",
269-
tool_name="string",
266+
evaluation_id="evaluationId",
267+
json_path="jsonPath",
268+
target_value="targetValue",
269+
tool_name="toolName",
270270
type="CONTAINS",
271271
) as response:
272272
assert not response.is_closed
@@ -280,23 +280,23 @@ async def test_streaming_response_create(self, async_client: AsyncPromptFoundry)
280280
@parametrize
281281
async def test_method_update(self, async_client: AsyncPromptFoundry) -> None:
282282
evaluation_assertion = await async_client.evaluation_assertions.update(
283-
"1212121",
284-
evaluation_id="string",
285-
json_path="string",
286-
target_value="string",
287-
tool_name="string",
283+
id="1212121",
284+
evaluation_id="evaluationId",
285+
json_path="jsonPath",
286+
target_value="targetValue",
287+
tool_name="toolName",
288288
type="CONTAINS",
289289
)
290290
assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
291291

292292
@parametrize
293293
async def test_raw_response_update(self, async_client: AsyncPromptFoundry) -> None:
294294
response = await async_client.evaluation_assertions.with_raw_response.update(
295-
"1212121",
296-
evaluation_id="string",
297-
json_path="string",
298-
target_value="string",
299-
tool_name="string",
295+
id="1212121",
296+
evaluation_id="evaluationId",
297+
json_path="jsonPath",
298+
target_value="targetValue",
299+
tool_name="toolName",
300300
type="CONTAINS",
301301
)
302302

@@ -308,11 +308,11 @@ async def test_raw_response_update(self, async_client: AsyncPromptFoundry) -> No
308308
@parametrize
309309
async def test_streaming_response_update(self, async_client: AsyncPromptFoundry) -> None:
310310
async with async_client.evaluation_assertions.with_streaming_response.update(
311-
"1212121",
312-
evaluation_id="string",
313-
json_path="string",
314-
target_value="string",
315-
tool_name="string",
311+
id="1212121",
312+
evaluation_id="evaluationId",
313+
json_path="jsonPath",
314+
target_value="targetValue",
315+
tool_name="toolName",
316316
type="CONTAINS",
317317
) as response:
318318
assert not response.is_closed
@@ -327,11 +327,11 @@ async def test_streaming_response_update(self, async_client: AsyncPromptFoundry)
327327
async def test_path_params_update(self, async_client: AsyncPromptFoundry) -> None:
328328
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
329329
await async_client.evaluation_assertions.with_raw_response.update(
330-
"",
331-
evaluation_id="string",
332-
json_path="string",
333-
target_value="string",
334-
tool_name="string",
330+
id="",
331+
evaluation_id="evaluationId",
332+
json_path="jsonPath",
333+
target_value="targetValue",
334+
tool_name="toolName",
335335
type="CONTAINS",
336336
)
337337

0 commit comments

Comments
 (0)