From 2b015bcc3594f8ccf7ff1aaf5502fba70987d7ec Mon Sep 17 00:00:00 2001 From: Carson Date: Thu, 29 May 2025 17:14:32 -0500 Subject: [PATCH 1/3] Rename .stream()/.stream_async()'s content parameter to stream --- CHANGELOG.md | 4 ++++ chatlas/_chat.py | 45 +++++++++++++++++++++++---------------------- tests/conftest.py | 2 +- 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 473e54fa..b85b64cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * `ChatSnowflake()` now supports tool calling. (#98) * `Chat` instances can now be deep copied, which is useful for forking the chat session. (#96) +### Breaking changes + +* The `.stream()`/`.stream_async()` method's `content` parameter was renamed to `stream`. Set `stream` to `"content"` to gain access to tool request/result content objects. () + ### Changes * `ChatDatabricks()`'s `model` now defaults to `databricks-claude-3-7-sonnet` instead of `databricks-dbrx-instruct`. (#95) diff --git a/chatlas/_chat.py b/chatlas/_chat.py index a16c6ada..20a24a3e 100644 --- a/chatlas/_chat.py +++ b/chatlas/_chat.py @@ -458,7 +458,7 @@ async def _(user_input: str): user_input, kwargs=kwargs, echo=echo or "none", - content=content, + stream="content" if content == "all" else "text", ) ) else: @@ -569,7 +569,7 @@ def chat( self._chat_impl( turn, echo=echo, - content="text", + yield_content=False, stream=stream, kwargs=kwargs, ) @@ -619,7 +619,7 @@ async def chat_async( self._chat_impl_async( turn, echo=echo, - content="text", + yield_content=False, stream=stream, kwargs=kwargs, ), @@ -635,7 +635,7 @@ async def chat_async( def stream( self, *args: Content | str, - content: Literal["text"], + stream: Literal["text"], echo: EchoOptions = "none", kwargs: Optional[SubmitInputArgsT] = None, ) -> Generator[str, None, None]: ... 
@@ -644,7 +644,7 @@ def stream( def stream( self, *args: Content | str, - content: Literal["all"], + stream: Literal["content"], echo: EchoOptions = "none", kwargs: Optional[SubmitInputArgsT] = None, ) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: ... @@ -653,7 +653,7 @@ def stream( self, *args: Content | str, echo: EchoOptions = "none", - content: Literal["text", "all"] = "text", + stream: Literal["text", "content"] = "text", kwargs: Optional[SubmitInputArgsT] = None, ) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: """ @@ -667,7 +667,8 @@ def stream( Whether to echo text content, all content (i.e., tool calls), or no content. content - Whether to yield just text content, or all content (i.e., tool calls). + Whether to yield just text content, or rich content objects (e.g., tool + calls) when relevant. kwargs Additional keyword arguments to pass to the method used for requesting the response. @@ -686,7 +687,7 @@ def stream( turn, stream=True, echo=echo, - content=content, + yield_content=stream == "content", kwargs=kwargs, ) @@ -703,7 +704,7 @@ def wrapper() -> Generator[ async def stream_async( self, *args: Content | str, - content: Literal["text"], + stream: Literal["text"], echo: EchoOptions = "none", kwargs: Optional[SubmitInputArgsT] = None, ) -> AsyncGenerator[str, None]: ... @@ -712,7 +713,7 @@ async def stream_async( async def stream_async( self, *args: Content | str, - content: Literal["all"], + stream: Literal["content"], echo: EchoOptions = "none", kwargs: Optional[SubmitInputArgsT] = None, ) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: ... 
@@ -721,7 +722,7 @@ async def stream_async( self, *args: Content | str, echo: EchoOptions = "none", - content: Literal["text", "all"] = "text", + stream: Literal["text", "content"] = "text", kwargs: Optional[SubmitInputArgsT] = None, ) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: """ @@ -758,7 +759,7 @@ async def wrapper() -> AsyncGenerator[ turn, stream=True, echo=echo, - content=content, + yield_content=stream == "content", kwargs=kwargs, ): yield chunk @@ -1192,7 +1193,7 @@ def _chat_impl( self, user_turn: Turn, echo: EchoOptions, - content: Literal["text"], + yield_content: Literal[False], stream: bool, kwargs: Optional[SubmitInputArgsT] = None, ) -> Generator[str, None, None]: ... @@ -1202,7 +1203,7 @@ def _chat_impl( self, user_turn: Turn, echo: EchoOptions, - content: Literal["all"], + yield_content: Literal[True], stream: bool, kwargs: Optional[SubmitInputArgsT] = None, ) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: ... @@ -1211,7 +1212,7 @@ def _chat_impl( self, user_turn: Turn, echo: EchoOptions, - content: Literal["text", "all"], + yield_content: bool, stream: bool, kwargs: Optional[SubmitInputArgsT] = None, ) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: @@ -1234,12 +1235,12 @@ def _chat_impl( if isinstance(x, ContentToolRequest): if echo == "output": self._echo_content(f"\n\n{x}\n\n") - if content == "all": + if yield_content: yield x res = self._invoke_tool(x) if echo == "output": self._echo_content(f"\n\n{res}\n\n") - if content == "all": + if yield_content: yield res results.append(res) @@ -1251,7 +1252,7 @@ def _chat_impl_async( self, user_turn: Turn, echo: EchoOptions, - content: Literal["text"], + yield_content: Literal[False], stream: bool, kwargs: Optional[SubmitInputArgsT] = None, ) -> AsyncGenerator[str, None]: ... 
@@ -1261,7 +1262,7 @@ def _chat_impl_async( self, user_turn: Turn, echo: EchoOptions, - content: Literal["all"], + yield_content: Literal[True], stream: bool, kwargs: Optional[SubmitInputArgsT] = None, ) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: ... @@ -1270,7 +1271,7 @@ async def _chat_impl_async( self, user_turn: Turn, echo: EchoOptions, - content: Literal["text", "all"], + yield_content: bool, stream: bool, kwargs: Optional[SubmitInputArgsT] = None, ) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: @@ -1293,12 +1294,12 @@ async def _chat_impl_async( if isinstance(x, ContentToolRequest): if echo == "output": self._echo_content(f"\n\n{x}\n\n") - if content == "all": + if yield_content: yield x res = await self._invoke_tool_async(x) if echo == "output": self._echo_content(f"\n\n{res}\n\n") - if content == "all": + if yield_content: yield res else: yield "\n\n" diff --git a/tests/conftest.py b/tests/conftest.py index c6f3286d..87f66758 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -93,7 +93,7 @@ def get_date(): chat.register_tool(get_date) - response = chat.stream("What's the current date in Y-M-D format?", content="all") + response = chat.stream("What's the current date in Y-M-D format?", stream="content",) chunks = [chunk for chunk in response] request = [x for x in chunks if isinstance(x, ContentToolRequest)] assert len(request) == 1 From f061634a02829f6be5c05eaa51f40388246e39f5 Mon Sep 17 00:00:00 2001 From: Carson Date: Thu, 29 May 2025 17:42:29 -0500 Subject: [PATCH 2/3] Clarify echo parameter docs --- chatlas/_chat.py | 67 +++++++++++++++++++++++++++++++++-------------- tests/conftest.py | 2 +- 2 files changed, 48 insertions(+), 21 deletions(-) diff --git a/chatlas/_chat.py b/chatlas/_chat.py index 20a24a3e..3e9c8fda 100644 --- a/chatlas/_chat.py +++ b/chatlas/_chat.py @@ -418,9 +418,12 @@ def app( Whether to run the app in a background thread.
If `None`, the app will run in a background thread if the current environment is a notebook. echo - Whether to echo text content, all content (i.e., tool calls), or no - content. Defaults to `"none"` when `stream=True` and `"text"` when - `stream=False`. + One of the following (defaults to `"none"` when `stream=True` and `"text"` when + `stream=False`): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. + - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. content Whether to display text content or all content (i.e., tool calls). kwargs @@ -508,8 +511,11 @@ def console( Parameters ---------- echo - Whether to echo text content, all content (i.e., tool calls), or no - content. + One of the following (default is "output"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. + - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. stream Whether to stream the response (i.e., have the response appear in chunks). kwargs @@ -546,8 +552,11 @@ def chat( args The user input(s) to generate a response from. echo - Whether to echo text content, all content (i.e., tool calls), or no - content. + One of the following (default is "output"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. + - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. stream Whether to stream the response (i.e., have the response appear in chunks). @@ -596,8 +605,11 @@ async def chat_async( args The user input(s) to generate a response from. echo - Whether to echo text content, all content (i.e., tool calls, images, - etc), or no content. + One of the following (default is "output"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. 
+ - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. stream Whether to stream the response (i.e., have the response appear in chunks). @@ -664,11 +676,14 @@ def stream( args The user input(s) to generate a response from. echo - Whether to echo text content, all content (i.e., tool calls), or no - content. - content - Whether to yield just text content, or rich content objects (e.g., tool - calls) when relevant. + One of the following (default is "none"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. + - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. + stream + Whether to yield just text content or include rich content objects + (e.g., tool calls) when relevant. kwargs Additional keyword arguments to pass to the method used for requesting the response. @@ -733,10 +748,14 @@ async def stream_async( args The user input(s) to generate a response from. echo - Whether to echo text content, all content (i.e., tool calls), or no - content. - content - Whether to yield just text content, or all content (i.e., tool calls). + One of the following (default is "none"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. + - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. + stream + Whether to yield just text content or include rich content objects + (e.g., tool calls) when relevant. kwargs Additional keyword arguments to pass to the method used for requesting the response. @@ -783,7 +802,11 @@ def extract_data( data_model A Pydantic model describing the structure of the data to extract. echo - Whether to echo text content, all content (i.e., tool calls), or no content. + One of the following (default is "none"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. 
+ - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. stream Whether to stream the response (i.e., have the response appear in chunks). @@ -841,7 +864,11 @@ async def extract_data_async( data_model A Pydantic model describing the structure of the data to extract. echo - Whether to echo text content, all content (i.e., tool calls), or no content + One of the following (default is "none"): + - `"text"`: Echo just the text content of the response. + - `"output"`: Echo text and tool call content. + - `"all"`: Echo both the assistant and user turn. + - `"none"`: Do not echo any content. stream Whether to stream the response (i.e., have the response appear in chunks). Defaults to `True` if `echo` is not "none". diff --git a/tests/conftest.py b/tests/conftest.py index 87f66758..e4e4585e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -93,7 +93,7 @@ def get_date(): chat.register_tool(get_date) - response = chat.stream("What's the current date in Y-M-D format?", stream="content",) + response = chat.stream("What's the current date in Y-M-D format?", stream="content") chunks = [chunk for chunk in response] request = [x for x in chunks if isinstance(x, ContentToolRequest)] assert len(request) == 1 From 5996f680ef88336c1a342f38ef6fada6ad67a5e6 Mon Sep 17 00:00:00 2001 From: Carson Sievert Date: Thu, 29 May 2025 17:53:05 -0500 Subject: [PATCH 3/3] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b85b64cb..fe380627 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Breaking changes -* The `.stream()`/`.stream_async()` method's `content` parameter was renamed to `stream`. Set `stream` to `"content"` to gain access to tool request/result content objects. () +* The `.stream()`/`.stream_async()` method's `content` parameter was renamed to `stream`. 
Set `stream` to `"content"` to gain access to tool request/result content objects. (#102) ### Changes