Commit e1b6d19

release: 0.4.0 (#58)
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent dd0ea41 · commit e1b6d19

13 files changed: +83 additions, -17 deletions


.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,3 +1,3 @@
 {
-  ".": "0.3.0"
+  ".": "0.4.0"
 }
```

.stats.yml

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,2 +1,2 @@
 configured_endpoints: 46
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-5298551c424bb999f258bdd6c311e96c80c70701ad59bbce19b46c788ee13bd4.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-f43814080090927ee22816c5c7f517d8a7eb7f346329ada67915608e32124321.yml
```

CHANGELOG.md

Lines changed: 19 additions & 0 deletions
```diff
@@ -1,5 +1,24 @@
 # Changelog
 
+## 0.4.0 (2025-03-03)
+
+Full Changelog: [v0.3.0...v0.4.0](https://github.com/ContextualAI/contextual-client-python/compare/v0.3.0...v0.4.0)
+
+### Features
+
+* Add special snowflake path for internal dns usage ([#52](https://github.com/ContextualAI/contextual-client-python/issues/52)) ([dd0ea41](https://github.com/ContextualAI/contextual-client-python/commit/dd0ea4117c37eb53620304a30f736747f30f6ce6))
+* **api:** update via SDK Studio ([#59](https://github.com/ContextualAI/contextual-client-python/issues/59)) ([9b116a4](https://github.com/ContextualAI/contextual-client-python/commit/9b116a4e1d935a32ab8a44a36042891edf4d2125))
+
+
+### Chores
+
+* **docs:** update client docstring ([#55](https://github.com/ContextualAI/contextual-client-python/issues/55)) ([ef1ee6e](https://github.com/ContextualAI/contextual-client-python/commit/ef1ee6e351e2c1a84af871f70742045df23fbe7f))
+
+
+### Documentation
+
+* update URLs from stainlessapi.com to stainless.com ([#53](https://github.com/ContextualAI/contextual-client-python/issues/53)) ([4162888](https://github.com/ContextualAI/contextual-client-python/commit/41628880bfb7d72cb3759ea06f1c09c11bb60e1a))
+
 ## 0.3.0 (2025-02-26)
 
 Full Changelog: [v0.2.0...v0.3.0](https://github.com/ContextualAI/contextual-client-python/compare/v0.2.0...v0.3.0)
```

SECURITY.md

Lines changed: 2 additions & 2 deletions
```diff
@@ -2,9 +2,9 @@
 
 ## Reporting Security Issues
 
-This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken.
+This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken.
 
-To report a security issue, please contact the Stainless team at security@stainlessapi.com.
+To report a security issue, please contact the Stainless team at security@stainless.com.
 
 ## Responsible Disclosure
 
```

pyproject.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,6 +1,6 @@
 [project]
 name = "contextual-client"
-version = "0.3.0"
+version = "0.4.0"
 description = "The official Python library for the Contextual AI API"
 dynamic = ["readme"]
 license = "Apache-2.0"
```

src/contextual/_client.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -85,7 +85,7 @@ def __init__(
         # part of our public interface in the future.
         _strict_response_validation: bool = False,
     ) -> None:
-        """Construct a new synchronous Contextual AI client instance.
+        """Construct a new synchronous ContextualAI client instance.
 
         This automatically infers the `api_key` argument from the `CONTEXTUAL_API_KEY` environment variable if it is not provided.
         """
@@ -276,7 +276,7 @@ def __init__(
         # part of our public interface in the future.
         _strict_response_validation: bool = False,
     ) -> None:
-        """Construct a new async Contextual AI client instance.
+        """Construct a new async AsyncContextualAI client instance.
 
         This automatically infers the `api_key` argument from the `CONTEXTUAL_API_KEY` environment variable if it is not provided.
         """
```

src/contextual/_version.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "contextual"
-__version__ = "0.3.0"  # x-release-please-version
+__version__ = "0.4.0"  # x-release-please-version
```

src/contextual/resources/generate.py

Lines changed: 30 additions & 0 deletions
```diff
@@ -53,7 +53,10 @@ def create(
         messages: Iterable[generate_create_params.Message],
         model: str,
         avoid_commentary: bool | NotGiven = NOT_GIVEN,
+        max_new_tokens: int | NotGiven = NOT_GIVEN,
         system_prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -83,9 +86,18 @@ def create(
               context. However, commentary may provide useful context which improves the
               helpfulness of responses.
 
+          max_new_tokens: The maximum number of tokens that the model can generate in the response.
+
           system_prompt: Instructions that the model follows when generating responses. Note that we do
               not guarantee that the model follows these instructions exactly.
 
+          temperature: The sampling temperature, which affects the randomness in the response. Note
+              that higher temperature values can reduce groundedness
+
+          top_p: A parameter for nucleus sampling, an alternative to temperature which also
+              affects the randomness of the response. Note that higher top_p values can reduce
+              groundedness
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -102,7 +114,10 @@ def create(
                     "messages": messages,
                     "model": model,
                     "avoid_commentary": avoid_commentary,
+                    "max_new_tokens": max_new_tokens,
                     "system_prompt": system_prompt,
+                    "temperature": temperature,
+                    "top_p": top_p,
                 },
                 generate_create_params.GenerateCreateParams,
             ),
@@ -140,7 +155,10 @@ async def create(
         messages: Iterable[generate_create_params.Message],
         model: str,
         avoid_commentary: bool | NotGiven = NOT_GIVEN,
+        max_new_tokens: int | NotGiven = NOT_GIVEN,
         system_prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -170,9 +188,18 @@ async def create(
               context. However, commentary may provide useful context which improves the
               helpfulness of responses.
 
+          max_new_tokens: The maximum number of tokens that the model can generate in the response.
+
           system_prompt: Instructions that the model follows when generating responses. Note that we do
               not guarantee that the model follows these instructions exactly.
 
+          temperature: The sampling temperature, which affects the randomness in the response. Note
+              that higher temperature values can reduce groundedness
+
+          top_p: A parameter for nucleus sampling, an alternative to temperature which also
+              affects the randomness of the response. Note that higher top_p values can reduce
+              groundedness
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -189,7 +216,10 @@ async def create(
                     "messages": messages,
                     "model": model,
                     "avoid_commentary": avoid_commentary,
+                    "max_new_tokens": max_new_tokens,
                     "system_prompt": system_prompt,
+                    "temperature": temperature,
+                    "top_p": top_p,
                 },
                 generate_create_params.GenerateCreateParams,
             ),
```
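Taken together, the three new options are plain keyword arguments on `generate.create()`. A hedged usage sketch — the model identifier and message text below are placeholders, and only the parameter names, types, and groundedness caveats come from this diff:

```python
from contextual import ContextualAI

client = ContextualAI()  # api_key inferred from CONTEXTUAL_API_KEY

response = client.generate.create(
    model="your-model-id",  # placeholder; not a value from this diff
    messages=[{"role": "user", "content": "Summarize the retrieved passages."}],
    max_new_tokens=256,  # new: cap on tokens the model can generate
    temperature=0.2,     # new: higher values can reduce groundedness
    top_p=0.9,           # new: nucleus sampling; higher values can reduce groundedness
)
```

Parameters left unset default to the `NOT_GIVEN` sentinel, so they are simply omitted from the request body rather than sent as nulls.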

src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -31,6 +31,9 @@ class EvaluationRound(BaseModel):
     num_predictions: Optional[int] = None
     """Total number of predictions made during the evaluation round"""
 
+    num_processed_predictions: Optional[int] = None
+    """Number of predictions that have been processed during the evaluation round"""
+
     num_successful_predictions: Optional[int] = None
     """Number of predictions that were successful during the evaluation round"""
 
```
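The new counter slots between the existing totals and makes a simple progress readout possible. A sketch using only the fields visible in this model — the `EvaluationRound` instance itself would come from the evaluation-jobs listing call:

```python
from contextual.types.agents.evaluate.list_evaluation_jobs_response import EvaluationRound

def round_progress(round_: EvaluationRound) -> float | None:
    """Fraction of predictions processed so far, or None when counts are unavailable."""
    if not round_.num_predictions or round_.num_processed_predictions is None:
        return None
    return round_.num_processed_predictions / round_.num_predictions
```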

src/contextual/types/generate_create_params.py

Lines changed: 17 additions & 1 deletion
```diff
@@ -30,16 +30,32 @@ class GenerateCreateParams(TypedDict, total=False):
     helpfulness of responses.
     """
 
+    max_new_tokens: int
+    """The maximum number of tokens that the model can generate in the response."""
+
     system_prompt: str
     """Instructions that the model follows when generating responses.
 
     Note that we do not guarantee that the model follows these instructions exactly.
     """
 
+    temperature: float
+    """The sampling temperature, which affects the randomness in the response.
+
+    Note that higher temperature values can reduce groundedness
+    """
+
+    top_p: float
+    """
+    A parameter for nucleus sampling, an alternative to temperature which also
+    affects the randomness of the response. Note that higher top_p values can reduce
+    groundedness
+    """
+
 
 class Message(TypedDict, total=False):
     content: Required[str]
     """Content of the message"""
 
-    role: Required[Literal["user", "system", "assistant", "knowledge"]]
+    role: Required[Literal["user", "assistant"]]
     """Role of the sender"""
```
