From 457f985d6c1dd11526cf02245f43802a98cb44dd Mon Sep 17 00:00:00 2001 From: Nimar Date: Tue, 24 Jun 2025 15:37:24 +0200 Subject: [PATCH 01/28] init commit placeholder messages --- langfuse/api/__init__.py | 8 ++++ langfuse/api/reference.md | 6 +-- langfuse/api/resources/__init__.py | 8 ++++ langfuse/api/resources/prompts/__init__.py | 8 ++++ langfuse/api/resources/prompts/client.py | 12 ++--- .../api/resources/prompts/types/__init__.py | 12 ++++- .../resources/prompts/types/chat_message.py | 44 ++++++++++++++++++- .../prompts/types/placeholder_message.py | 42 ++++++++++++++++++ .../prompts/types/prompt_chat_message.py | 43 ++++++++++++++++++ 9 files changed, 172 insertions(+), 11 deletions(-) create mode 100644 langfuse/api/resources/prompts/types/placeholder_message.py create mode 100644 langfuse/api/resources/prompts/types/prompt_chat_message.py diff --git a/langfuse/api/__init__.py b/langfuse/api/__init__.py index bf6bdb508..374664ce4 100644 --- a/langfuse/api/__init__.py +++ b/langfuse/api/__init__.py @@ -21,6 +21,8 @@ CategoricalScore, CategoricalScoreV1, ChatMessage, + ChatMessage_Placeholder, + ChatMessage_Promptchatmessage, ChatPrompt, Comment, CommentObjectType, @@ -126,10 +128,12 @@ PaginatedModels, PaginatedSessions, PatchMediaBody, + PlaceholderMessage, Project, ProjectDeletionResponse, Projects, Prompt, + PromptChatMessage, PromptMeta, PromptMetaListResponse, Prompt_Chat, @@ -230,6 +234,8 @@ "CategoricalScore", "CategoricalScoreV1", "ChatMessage", + "ChatMessage_Placeholder", + "ChatMessage_Promptchatmessage", "ChatPrompt", "Comment", "CommentObjectType", @@ -335,10 +341,12 @@ "PaginatedModels", "PaginatedSessions", "PatchMediaBody", + "PlaceholderMessage", "Project", "ProjectDeletionResponse", "Projects", "Prompt", + "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/reference.md b/langfuse/api/reference.md index e5595df8c..223361d41 100644 --- a/langfuse/api/reference.md +++ 
b/langfuse/api/reference.md @@ -4290,7 +4290,7 @@ Create a new version for the prompt with the given `name`
```python -from langfuse import ChatMessage, CreatePromptRequest_Chat +from langfuse import ChatMessage_Promptchatmessage, CreatePromptRequest_Chat from langfuse.client import FernLangfuse client = FernLangfuse( @@ -4305,11 +4305,11 @@ client.prompts.create( request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage( + ChatMessage_Promptchatmessage( role="role", content="content", ), - ChatMessage( + ChatMessage_Promptchatmessage( role="role", content="content", ), diff --git a/langfuse/api/resources/__init__.py b/langfuse/api/resources/__init__.py index 6ddea00eb..2ea0974b1 100644 --- a/langfuse/api/resources/__init__.py +++ b/langfuse/api/resources/__init__.py @@ -173,13 +173,17 @@ from .prompts import ( BasePrompt, ChatMessage, + ChatMessage_Placeholder, + ChatMessage_Promptchatmessage, ChatPrompt, CreateChatPromptRequest, CreatePromptRequest, CreatePromptRequest_Chat, CreatePromptRequest_Text, CreateTextPromptRequest, + PlaceholderMessage, Prompt, + PromptChatMessage, PromptMeta, PromptMetaListResponse, Prompt_Chat, @@ -242,6 +246,8 @@ "CategoricalScore", "CategoricalScoreV1", "ChatMessage", + "ChatMessage_Placeholder", + "ChatMessage_Promptchatmessage", "ChatPrompt", "Comment", "CommentObjectType", @@ -347,10 +353,12 @@ "PaginatedModels", "PaginatedSessions", "PatchMediaBody", + "PlaceholderMessage", "Project", "ProjectDeletionResponse", "Projects", "Prompt", + "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/resources/prompts/__init__.py b/langfuse/api/resources/prompts/__init__.py index 73dcf38b0..308be46cd 100644 --- a/langfuse/api/resources/prompts/__init__.py +++ b/langfuse/api/resources/prompts/__init__.py @@ -3,13 +3,17 @@ from .types import ( BasePrompt, ChatMessage, + ChatMessage_Placeholder, + ChatMessage_Promptchatmessage, ChatPrompt, CreateChatPromptRequest, CreatePromptRequest, CreatePromptRequest_Chat, CreatePromptRequest_Text, CreateTextPromptRequest, + PlaceholderMessage, Prompt, + 
PromptChatMessage, PromptMeta, PromptMetaListResponse, Prompt_Chat, @@ -20,13 +24,17 @@ __all__ = [ "BasePrompt", "ChatMessage", + "ChatMessage_Placeholder", + "ChatMessage_Promptchatmessage", "ChatPrompt", "CreateChatPromptRequest", "CreatePromptRequest", "CreatePromptRequest_Chat", "CreatePromptRequest_Text", "CreateTextPromptRequest", + "PlaceholderMessage", "Prompt", + "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/resources/prompts/client.py b/langfuse/api/resources/prompts/client.py index b41ab5642..123b79fb6 100644 --- a/langfuse/api/resources/prompts/client.py +++ b/langfuse/api/resources/prompts/client.py @@ -228,7 +228,7 @@ def create( Examples -------- - from langfuse import ChatMessage, CreatePromptRequest_Chat + from langfuse import ChatMessage_Promptchatmessage, CreatePromptRequest_Chat from langfuse.client import FernLangfuse client = FernLangfuse( @@ -243,11 +243,11 @@ def create( request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage( + ChatMessage_Promptchatmessage( role="role", content="content", ), - ChatMessage( + ChatMessage_Promptchatmessage( role="role", content="content", ), @@ -512,7 +512,7 @@ async def create( -------- import asyncio - from langfuse import ChatMessage, CreatePromptRequest_Chat + from langfuse import ChatMessage_Promptchatmessage, CreatePromptRequest_Chat from langfuse.client import AsyncFernLangfuse client = AsyncFernLangfuse( @@ -530,11 +530,11 @@ async def main() -> None: request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage( + ChatMessage_Promptchatmessage( role="role", content="content", ), - ChatMessage( + ChatMessage_Promptchatmessage( role="role", content="content", ), diff --git a/langfuse/api/resources/prompts/types/__init__.py b/langfuse/api/resources/prompts/types/__init__.py index cb5ba920c..13e994fa5 100644 --- a/langfuse/api/resources/prompts/types/__init__.py +++ b/langfuse/api/resources/prompts/types/__init__.py @@ -1,7 
+1,11 @@ # This file was auto-generated by Fern from our API Definition. from .base_prompt import BasePrompt -from .chat_message import ChatMessage +from .chat_message import ( + ChatMessage, + ChatMessage_Placeholder, + ChatMessage_Promptchatmessage, +) from .chat_prompt import ChatPrompt from .create_chat_prompt_request import CreateChatPromptRequest from .create_prompt_request import ( @@ -10,7 +14,9 @@ CreatePromptRequest_Text, ) from .create_text_prompt_request import CreateTextPromptRequest +from .placeholder_message import PlaceholderMessage from .prompt import Prompt, Prompt_Chat, Prompt_Text +from .prompt_chat_message import PromptChatMessage from .prompt_meta import PromptMeta from .prompt_meta_list_response import PromptMetaListResponse from .text_prompt import TextPrompt @@ -18,13 +24,17 @@ __all__ = [ "BasePrompt", "ChatMessage", + "ChatMessage_Placeholder", + "ChatMessage_Promptchatmessage", "ChatPrompt", "CreateChatPromptRequest", "CreatePromptRequest", "CreatePromptRequest_Chat", "CreatePromptRequest_Text", "CreateTextPromptRequest", + "PlaceholderMessage", "Prompt", + "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/resources/prompts/types/chat_message.py b/langfuse/api/resources/prompts/types/chat_message.py index d009bc8cf..ff207dcd6 100644 --- a/langfuse/api/resources/prompts/types/chat_message.py +++ b/langfuse/api/resources/prompts/types/chat_message.py @@ -1,5 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
+from __future__ import annotations + import datetime as dt import typing @@ -7,9 +9,46 @@ from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 -class ChatMessage(pydantic_v1.BaseModel): +class ChatMessage_Promptchatmessage(pydantic_v1.BaseModel): role: str content: str + type: typing.Literal["promptchatmessage"] = "promptchatmessage" + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} + + +class ChatMessage_Placeholder(pydantic_v1.BaseModel): + name: str + type: typing.Literal["placeholder"] = "placeholder" def json(self, **kwargs: typing.Any) -> str: kwargs_with_defaults: typing.Any = { @@ -41,3 +80,6 @@ class Config: smart_union = True extra = pydantic_v1.Extra.allow json_encoders = {dt.datetime: serialize_datetime} + + +ChatMessage = typing.Union[ChatMessage_Promptchatmessage, ChatMessage_Placeholder] diff --git a/langfuse/api/resources/prompts/types/placeholder_message.py b/langfuse/api/resources/prompts/types/placeholder_message.py new file mode 100644 index 000000000..a3352b391 --- /dev/null +++ b/langfuse/api/resources/prompts/types/placeholder_message.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class PlaceholderMessage(pydantic_v1.BaseModel): + name: str + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/prompts/types/prompt_chat_message.py b/langfuse/api/resources/prompts/types/prompt_chat_message.py new file mode 100644 index 000000000..048784bc4 --- /dev/null +++ b/langfuse/api/resources/prompts/types/prompt_chat_message.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class PromptChatMessage(pydantic_v1.BaseModel): + role: str + content: str + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} From 0288197c1d021b78a89819694f848a9851ae6329 Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 14:21:56 +0200 Subject: [PATCH 02/28] make the types beautiful again --- langfuse/api/__init__.py | 12 ++--- langfuse/api/reference.md | 6 +-- langfuse/api/resources/__init__.py | 12 ++--- langfuse/api/resources/prompts/__init__.py | 12 ++--- langfuse/api/resources/prompts/client.py | 12 ++--- .../api/resources/prompts/types/__init__.py | 18 ++++---- .../resources/prompts/types/chat_message.py | 44 +------------------ .../prompts/types/prompt_chat_message.py | 43 ------------------ 8 files changed, 37 insertions(+), 122 deletions(-) delete mode 100644 langfuse/api/resources/prompts/types/prompt_chat_message.py diff --git a/langfuse/api/__init__.py b/langfuse/api/__init__.py index 374664ce4..2a274a811 100644 --- a/langfuse/api/__init__.py +++ b/langfuse/api/__init__.py @@ -21,8 +21,9 @@ CategoricalScore, CategoricalScoreV1, ChatMessage, - 
ChatMessage_Placeholder, - ChatMessage_Promptchatmessage, + ChatMessageWithPlaceholders, + ChatMessageWithPlaceholders_Chatmessage, + ChatMessageWithPlaceholders_Placeholder, ChatPrompt, Comment, CommentObjectType, @@ -133,7 +134,6 @@ ProjectDeletionResponse, Projects, Prompt, - PromptChatMessage, PromptMeta, PromptMetaListResponse, Prompt_Chat, @@ -234,8 +234,9 @@ "CategoricalScore", "CategoricalScoreV1", "ChatMessage", - "ChatMessage_Placeholder", - "ChatMessage_Promptchatmessage", + "ChatMessageWithPlaceholders", + "ChatMessageWithPlaceholders_Chatmessage", + "ChatMessageWithPlaceholders_Placeholder", "ChatPrompt", "Comment", "CommentObjectType", @@ -346,7 +347,6 @@ "ProjectDeletionResponse", "Projects", "Prompt", - "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/reference.md b/langfuse/api/reference.md index 223361d41..e5595df8c 100644 --- a/langfuse/api/reference.md +++ b/langfuse/api/reference.md @@ -4290,7 +4290,7 @@ Create a new version for the prompt with the given `name`
```python -from langfuse import ChatMessage_Promptchatmessage, CreatePromptRequest_Chat +from langfuse import ChatMessage, CreatePromptRequest_Chat from langfuse.client import FernLangfuse client = FernLangfuse( @@ -4305,11 +4305,11 @@ client.prompts.create( request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage_Promptchatmessage( + ChatMessage( role="role", content="content", ), - ChatMessage_Promptchatmessage( + ChatMessage( role="role", content="content", ), diff --git a/langfuse/api/resources/__init__.py b/langfuse/api/resources/__init__.py index 2ea0974b1..453774283 100644 --- a/langfuse/api/resources/__init__.py +++ b/langfuse/api/resources/__init__.py @@ -173,8 +173,9 @@ from .prompts import ( BasePrompt, ChatMessage, - ChatMessage_Placeholder, - ChatMessage_Promptchatmessage, + ChatMessageWithPlaceholders, + ChatMessageWithPlaceholders_Chatmessage, + ChatMessageWithPlaceholders_Placeholder, ChatPrompt, CreateChatPromptRequest, CreatePromptRequest, @@ -183,7 +184,6 @@ CreateTextPromptRequest, PlaceholderMessage, Prompt, - PromptChatMessage, PromptMeta, PromptMetaListResponse, Prompt_Chat, @@ -246,8 +246,9 @@ "CategoricalScore", "CategoricalScoreV1", "ChatMessage", - "ChatMessage_Placeholder", - "ChatMessage_Promptchatmessage", + "ChatMessageWithPlaceholders", + "ChatMessageWithPlaceholders_Chatmessage", + "ChatMessageWithPlaceholders_Placeholder", "ChatPrompt", "Comment", "CommentObjectType", @@ -358,7 +359,6 @@ "ProjectDeletionResponse", "Projects", "Prompt", - "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/resources/prompts/__init__.py b/langfuse/api/resources/prompts/__init__.py index 308be46cd..77c27486d 100644 --- a/langfuse/api/resources/prompts/__init__.py +++ b/langfuse/api/resources/prompts/__init__.py @@ -3,8 +3,9 @@ from .types import ( BasePrompt, ChatMessage, - ChatMessage_Placeholder, - ChatMessage_Promptchatmessage, + ChatMessageWithPlaceholders, + 
ChatMessageWithPlaceholders_Chatmessage, + ChatMessageWithPlaceholders_Placeholder, ChatPrompt, CreateChatPromptRequest, CreatePromptRequest, @@ -13,7 +14,6 @@ CreateTextPromptRequest, PlaceholderMessage, Prompt, - PromptChatMessage, PromptMeta, PromptMetaListResponse, Prompt_Chat, @@ -24,8 +24,9 @@ __all__ = [ "BasePrompt", "ChatMessage", - "ChatMessage_Placeholder", - "ChatMessage_Promptchatmessage", + "ChatMessageWithPlaceholders", + "ChatMessageWithPlaceholders_Chatmessage", + "ChatMessageWithPlaceholders_Placeholder", "ChatPrompt", "CreateChatPromptRequest", "CreatePromptRequest", @@ -34,7 +35,6 @@ "CreateTextPromptRequest", "PlaceholderMessage", "Prompt", - "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/resources/prompts/client.py b/langfuse/api/resources/prompts/client.py index 123b79fb6..b41ab5642 100644 --- a/langfuse/api/resources/prompts/client.py +++ b/langfuse/api/resources/prompts/client.py @@ -228,7 +228,7 @@ def create( Examples -------- - from langfuse import ChatMessage_Promptchatmessage, CreatePromptRequest_Chat + from langfuse import ChatMessage, CreatePromptRequest_Chat from langfuse.client import FernLangfuse client = FernLangfuse( @@ -243,11 +243,11 @@ def create( request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage_Promptchatmessage( + ChatMessage( role="role", content="content", ), - ChatMessage_Promptchatmessage( + ChatMessage( role="role", content="content", ), @@ -512,7 +512,7 @@ async def create( -------- import asyncio - from langfuse import ChatMessage_Promptchatmessage, CreatePromptRequest_Chat + from langfuse import ChatMessage, CreatePromptRequest_Chat from langfuse.client import AsyncFernLangfuse client = AsyncFernLangfuse( @@ -530,11 +530,11 @@ async def main() -> None: request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage_Promptchatmessage( + ChatMessage( role="role", content="content", ), - ChatMessage_Promptchatmessage( + ChatMessage( 
role="role", content="content", ), diff --git a/langfuse/api/resources/prompts/types/__init__.py b/langfuse/api/resources/prompts/types/__init__.py index 13e994fa5..101b946a2 100644 --- a/langfuse/api/resources/prompts/types/__init__.py +++ b/langfuse/api/resources/prompts/types/__init__.py @@ -1,10 +1,11 @@ # This file was auto-generated by Fern from our API Definition. from .base_prompt import BasePrompt -from .chat_message import ( - ChatMessage, - ChatMessage_Placeholder, - ChatMessage_Promptchatmessage, +from .chat_message import ChatMessage +from .chat_message_with_placeholders import ( + ChatMessageWithPlaceholders, + ChatMessageWithPlaceholders_Chatmessage, + ChatMessageWithPlaceholders_Placeholder, ) from .chat_prompt import ChatPrompt from .create_chat_prompt_request import CreateChatPromptRequest @@ -12,11 +13,10 @@ CreatePromptRequest, CreatePromptRequest_Chat, CreatePromptRequest_Text, -) + from .create_text_prompt_request import CreateTextPromptRequest from .placeholder_message import PlaceholderMessage from .prompt import Prompt, Prompt_Chat, Prompt_Text -from .prompt_chat_message import PromptChatMessage from .prompt_meta import PromptMeta from .prompt_meta_list_response import PromptMetaListResponse from .text_prompt import TextPrompt @@ -24,8 +24,9 @@ __all__ = [ "BasePrompt", "ChatMessage", - "ChatMessage_Placeholder", - "ChatMessage_Promptchatmessage", + "ChatMessageWithPlaceholders", + "ChatMessageWithPlaceholders_Chatmessage", + "ChatMessageWithPlaceholders_Placeholder", "ChatPrompt", "CreateChatPromptRequest", "CreatePromptRequest", @@ -34,7 +35,6 @@ "CreateTextPromptRequest", "PlaceholderMessage", "Prompt", - "PromptChatMessage", "PromptMeta", "PromptMetaListResponse", "Prompt_Chat", diff --git a/langfuse/api/resources/prompts/types/chat_message.py b/langfuse/api/resources/prompts/types/chat_message.py index ff207dcd6..d009bc8cf 100644 --- a/langfuse/api/resources/prompts/types/chat_message.py +++ 
b/langfuse/api/resources/prompts/types/chat_message.py @@ -1,7 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations - import datetime as dt import typing @@ -9,46 +7,9 @@ from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 -class ChatMessage_Promptchatmessage(pydantic_v1.BaseModel): +class ChatMessage(pydantic_v1.BaseModel): role: str content: str - type: typing.Literal["promptchatmessage"] = "promptchatmessage" - - def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = { - "by_alias": True, - "exclude_unset": True, - **kwargs, - } - return super().json(**kwargs_with_defaults) - - def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = { - "by_alias": True, - "exclude_unset": True, - **kwargs, - } - kwargs_with_defaults_exclude_none: typing.Any = { - "by_alias": True, - "exclude_none": True, - **kwargs, - } - - return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), - super().dict(**kwargs_with_defaults_exclude_none), - ) - - class Config: - frozen = True - smart_union = True - extra = pydantic_v1.Extra.allow - json_encoders = {dt.datetime: serialize_datetime} - - -class ChatMessage_Placeholder(pydantic_v1.BaseModel): - name: str - type: typing.Literal["placeholder"] = "placeholder" def json(self, **kwargs: typing.Any) -> str: kwargs_with_defaults: typing.Any = { @@ -80,6 +41,3 @@ class Config: smart_union = True extra = pydantic_v1.Extra.allow json_encoders = {dt.datetime: serialize_datetime} - - -ChatMessage = typing.Union[ChatMessage_Promptchatmessage, ChatMessage_Placeholder] diff --git a/langfuse/api/resources/prompts/types/prompt_chat_message.py b/langfuse/api/resources/prompts/types/prompt_chat_message.py deleted file mode 100644 index 048784bc4..000000000 --- a/langfuse/api/resources/prompts/types/prompt_chat_message.py +++ /dev/null @@ -1,43 +0,0 @@ -# 
This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -from ....core.datetime_utils import serialize_datetime -from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 - - -class PromptChatMessage(pydantic_v1.BaseModel): - role: str - content: str - - def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = { - "by_alias": True, - "exclude_unset": True, - **kwargs, - } - return super().json(**kwargs_with_defaults) - - def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = { - "by_alias": True, - "exclude_unset": True, - **kwargs, - } - kwargs_with_defaults_exclude_none: typing.Any = { - "by_alias": True, - "exclude_none": True, - **kwargs, - } - - return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), - super().dict(**kwargs_with_defaults_exclude_none), - ) - - class Config: - frozen = True - smart_union = True - extra = pydantic_v1.Extra.allow - json_encoders = {dt.datetime: serialize_datetime} From e130dbf10498e685a9202c749f7729e379022e1b Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 14:24:10 +0200 Subject: [PATCH 03/28] fix type --- langfuse/api/resources/prompts/types/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/langfuse/api/resources/prompts/types/__init__.py b/langfuse/api/resources/prompts/types/__init__.py index 101b946a2..456fd933e 100644 --- a/langfuse/api/resources/prompts/types/__init__.py +++ b/langfuse/api/resources/prompts/types/__init__.py @@ -13,6 +13,7 @@ CreatePromptRequest, CreatePromptRequest_Chat, CreatePromptRequest_Text, +) from .create_text_prompt_request import CreateTextPromptRequest from .placeholder_message import PlaceholderMessage From d349c38fa4583d4e098de45c61628a32fb31745b Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 15:29:43 +0200 Subject: [PATCH 04/28] update types --- 
langfuse/api/resources/prompts/types/__init__.py | 1 - langfuse/api/resources/prompts/types/chat_prompt.py | 4 ++-- langfuse/api/resources/prompts/types/prompt.py | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/langfuse/api/resources/prompts/types/__init__.py b/langfuse/api/resources/prompts/types/__init__.py index 456fd933e..3067f9f04 100644 --- a/langfuse/api/resources/prompts/types/__init__.py +++ b/langfuse/api/resources/prompts/types/__init__.py @@ -14,7 +14,6 @@ CreatePromptRequest_Chat, CreatePromptRequest_Text, ) - from .create_text_prompt_request import CreateTextPromptRequest from .placeholder_message import PlaceholderMessage from .prompt import Prompt, Prompt_Chat, Prompt_Text diff --git a/langfuse/api/resources/prompts/types/chat_prompt.py b/langfuse/api/resources/prompts/types/chat_prompt.py index 7699d288d..494449ea2 100644 --- a/langfuse/api/resources/prompts/types/chat_prompt.py +++ b/langfuse/api/resources/prompts/types/chat_prompt.py @@ -6,11 +6,11 @@ from ....core.datetime_utils import serialize_datetime from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 from .base_prompt import BasePrompt -from .chat_message import ChatMessage +from .chat_message_with_placeholders import ChatMessageWithPlaceholders class ChatPrompt(BasePrompt): - prompt: typing.List[ChatMessage] + prompt: typing.List[ChatMessageWithPlaceholders] def json(self, **kwargs: typing.Any) -> str: kwargs_with_defaults: typing.Any = { diff --git a/langfuse/api/resources/prompts/types/prompt.py b/langfuse/api/resources/prompts/types/prompt.py index 2fee54b70..1ad894879 100644 --- a/langfuse/api/resources/prompts/types/prompt.py +++ b/langfuse/api/resources/prompts/types/prompt.py @@ -7,11 +7,11 @@ from ....core.datetime_utils import serialize_datetime from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 -from .chat_message import ChatMessage +from .chat_message_with_placeholders import 
ChatMessageWithPlaceholders class Prompt_Chat(pydantic_v1.BaseModel): - prompt: typing.List[ChatMessage] + prompt: typing.List[ChatMessageWithPlaceholders] name: str version: int config: typing.Any From d7fb9696eaccd12407c17ca048fc4b79b6524bb8 Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 16:31:32 +0200 Subject: [PATCH 05/28] placeholer compile --- langfuse/model.py | 90 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 86 insertions(+), 4 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 6380bf5f2..e3051a494 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -2,7 +2,7 @@ import re from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union +from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict, Union from langfuse.api.resources.commons.types.dataset import ( Dataset, # noqa: F401 @@ -54,6 +54,26 @@ class ChatMessageDict(TypedDict): content: str +class ChatMessageWithPlaceholdersDict_Message(TypedDict): + type: Literal["message"] + role: str + content: str + name: None + + +class ChatMessageWithPlaceholdersDict_Placeholder(TypedDict): + type: Literal["placeholder"] + role: None + content: None + name: str + + +ChatMessageWithPlaceholdersDict = Union[ + ChatMessageWithPlaceholdersDict_Message, + ChatMessageWithPlaceholdersDict_Placeholder, +] + + class TemplateParser: OPENING = "{{" CLOSING = "}}" @@ -208,9 +228,27 @@ def get_langchain_prompt(self, **kwargs) -> str: class ChatPromptClient(BasePromptClient): def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): super().__init__(prompt, is_fallback) - self.prompt = [ - ChatMessageDict(role=p.role, content=p.content) for p in prompt.prompt - ] + self.prompt: List[ChatMessageWithPlaceholdersDict] = [] + + for p in prompt.prompt: + if hasattr(p, "type") and p.type == "placeholder": + self.prompt.append( + ChatMessageWithPlaceholdersDict_Placeholder( + type="placeholder", + role=None, + 
content=None, + name=p.name, + ) + ) + elif hasattr(p, "type") and p.type == "message": + self.prompt.append( + ChatMessageWithPlaceholdersDict_Message( + type="message", + role=p.role, + content=p.content, + name=None, + ) + ) def compile(self, **kwargs) -> List[ChatMessageDict]: return [ @@ -221,6 +259,7 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: role=chat_message["role"], ) for chat_message in self.prompt + if chat_message["type"] == "message" ] @property @@ -229,6 +268,7 @@ def variables(self) -> List[str]: return [ variable for chat_message in self.prompt + if chat_message["type"] == "message" for variable in TemplateParser.find_variable_names(chat_message["content"]) ] @@ -246,6 +286,47 @@ def __eq__(self, other): return False + def compileWithPlaceholders( + self, + variables: Dict[str, Any], + placeholders: Dict[str, List[ChatMessage]], + ) -> List[ChatMessageDict]: + """Compile chat prompt by first replacing placeholders, then expanding variables. + + Args: + variables: Dictionary of variable names to values for template substitution + placeholders: Dictionary of placeholder names to lists of ChatMessage objects + + Returns: + List[ChatMessageDict]: Compiled chat messages + """ + messages_with_placeholders_replaced: List[ChatMessage] = [] + + # Subsitute the placeholders for their supplied ChatMessages + for item in self.prompt: + if item["type"] == "placeholder" and item["name"] in placeholders: + messages_with_placeholders_replaced.extend(placeholders[item["name"]]) + elif item["type"] == "message": + messages_with_placeholders_replaced.append( + ChatMessage( + role=item["role"], + content=item["content"], + ) + ) + + # Then, replace the variables in the ChatMessage content. 
+ return [ + ChatMessageDict( + content=TemplateParser.compile_template( + chat_message.content, variables, + ), + role=chat_message.role, + ) + for chat_message in messages_with_placeholders_replaced + if hasattr(chat_message, "role") and hasattr(chat_message, "content") + ] + + def get_langchain_prompt(self, **kwargs): """Convert Langfuse prompt into string compatible with Langchain ChatPromptTemplate. @@ -269,6 +350,7 @@ def get_langchain_prompt(self, **kwargs): ), ) for msg in self.prompt + if msg["type"] == "message" ] From 3487207451eac38d3014dc8b1044e101de3ebed8 Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 16:39:14 +0200 Subject: [PATCH 06/28] fix model --- langfuse/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index e3051a494..7d61d3948 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -231,7 +231,7 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): self.prompt: List[ChatMessageWithPlaceholdersDict] = [] for p in prompt.prompt: - if hasattr(p, "type") and p.type == "placeholder": + if hasattr(p, "type") and hasattr(p, "name"): self.prompt.append( ChatMessageWithPlaceholdersDict_Placeholder( type="placeholder", @@ -240,7 +240,7 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): name=p.name, ) ) - elif hasattr(p, "type") and p.type == "message": + elif hasattr(p, "role") and hasattr(p, "content"): self.prompt.append( ChatMessageWithPlaceholdersDict_Message( type="message", From 401bf6498269695477a491d94cc59455386d9f3a Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 17:13:23 +0200 Subject: [PATCH 07/28] fix types --- langfuse/_client/client.py | 5 +- langfuse/model.py | 17 ++-- tests/test_prompt.py | 167 +++++++++++++++++++++++++++++++++++++ 3 files changed, 176 insertions(+), 13 deletions(-) diff --git a/langfuse/_client/client.py b/langfuse/_client/client.py index 100e3a053..0bbb12184 100644 --- 
a/langfuse/_client/client.py +++ b/langfuse/_client/client.py @@ -55,6 +55,7 @@ from langfuse.media import LangfuseMedia from langfuse.model import ( ChatMessageDict, + ChatMessageWithPlaceholdersDict, ChatPromptClient, CreateDatasetItemRequest, CreateDatasetRequest, @@ -2127,7 +2128,7 @@ def create_prompt( self, *, name: str, - prompt: List[ChatMessageDict], + prompt: List[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict]], labels: List[str] = [], tags: Optional[List[str]] = None, type: Optional[Literal["chat"]], @@ -2152,7 +2153,7 @@ def create_prompt( self, *, name: str, - prompt: Union[str, List[ChatMessageDict]], + prompt: Union[str, List[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict]]], labels: List[str] = [], tags: Optional[List[str]] = None, type: Optional[Literal["chat", "text"]] = "text", diff --git a/langfuse/model.py b/langfuse/model.py index 7d61d3948..6778854a9 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -58,13 +58,10 @@ class ChatMessageWithPlaceholdersDict_Message(TypedDict): type: Literal["message"] role: str content: str - name: None class ChatMessageWithPlaceholdersDict_Placeholder(TypedDict): type: Literal["placeholder"] - role: None - content: None name: str @@ -235,8 +232,6 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): self.prompt.append( ChatMessageWithPlaceholdersDict_Placeholder( type="placeholder", - role=None, - content=None, name=p.name, ) ) @@ -246,7 +241,6 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): type="message", role=p.role, content=p.content, - name=None, ) ) @@ -280,6 +274,7 @@ def __eq__(self, other): and all( m1["role"] == m2["role"] and m1["content"] == m2["content"] for m1, m2 in zip(self.prompt, other.prompt) + if m1["type"] == "message" and m2["type"] == "message" ) and self.config == other.config ) @@ -289,7 +284,7 @@ def __eq__(self, other): def compileWithPlaceholders( self, variables: Dict[str, Any], - placeholders: Dict[str, 
List[ChatMessage]], + placeholders: Dict[str, List[ChatMessageDict]], ) -> List[ChatMessageDict]: """Compile chat prompt by first replacing placeholders, then expanding variables. @@ -300,7 +295,7 @@ def compileWithPlaceholders( Returns: List[ChatMessageDict]: Compiled chat messages """ - messages_with_placeholders_replaced: List[ChatMessage] = [] + messages_with_placeholders_replaced: List[ChatMessageDict] = [] # Subsitute the placeholders for their supplied ChatMessages for item in self.prompt: @@ -308,7 +303,7 @@ def compileWithPlaceholders( messages_with_placeholders_replaced.extend(placeholders[item["name"]]) elif item["type"] == "message": messages_with_placeholders_replaced.append( - ChatMessage( + ChatMessageDict( role=item["role"], content=item["content"], ) @@ -318,9 +313,9 @@ def compileWithPlaceholders( return [ ChatMessageDict( content=TemplateParser.compile_template( - chat_message.content, variables, + chat_message["content"], variables, ), - role=chat_message.role, + role=chat_message["role"], ) for chat_message in messages_with_placeholders_replaced if hasattr(chat_message, "role") and hasattr(chat_message, "content") diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 5630bdd41..51f5b3c46 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -90,6 +90,57 @@ def test_create_chat_prompt(): assert prompt_client.config == {} +def test_create_chat_prompt_with_placeholders(): + langfuse = Langfuse() + prompt_name = create_uuid() + + prompt_client = langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "You are a {{role}} assistant"}, + {"type": "placeholder", "name": "history"}, + {"role": "user", "content": "Help me with {{task}}"}, + ], + labels=["production"], + tags=["test"], + type="chat", + commit_message="initial commit", + ) + + second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") + + messages = second_prompt_client.compileWithPlaceholders( + variables={"role": "helpful", 
"task": "coding"}, + placeholders={ + "history": [ + {"role": "user", "content": "Example: {{task}}"}, + {"role": "assistant", "content": "Example response"}, + ], + }, + ) + + # Create a test generation using compiled messages + completion = openai.OpenAI().chat.completions.create( + model="gpt-4", + messages=messages, + ) + + assert len(completion.choices) > 0 + assert len(messages) == 4 + assert messages[0]["content"] == "You are a helpful assistant" + assert messages[1]["content"] == "Example: coding" + assert messages[2]["content"] == "Example response" + assert messages[3]["content"] == "Help me with coding" + + assert prompt_client.name == second_prompt_client.name + assert prompt_client.version == second_prompt_client.version + assert prompt_client.config == second_prompt_client.config + assert prompt_client.labels == ["production", "latest"] + assert prompt_client.tags == second_prompt_client.tags + assert prompt_client.commit_message == second_prompt_client.commit_message + assert prompt_client.config == {} + + def test_compiling_chat_prompt(): langfuse = Langfuse() prompt_name = create_uuid() @@ -1114,3 +1165,119 @@ def test_update_prompt(): expected_labels = sorted(["latest", "doe", "production", "john"]) assert sorted(fetched_prompt.labels) == expected_labels assert sorted(updated_prompt.labels) == expected_labels + + +def test_compile_with_placeholders(): + """Test compileWithPlaceholders method with chat prompt.""" + langfuse = Langfuse() + prompt_name = create_uuid() + + prompt_client = langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "You are a {{role}} assistant"}, + {"role": "user", "content": "Help me with {{task}}"}, + ], + labels=["production"], + type="chat", + ) + + second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") + + # Test compileWithPlaceholders with no placeholders (should work like compile) + result = second_prompt_client.compileWithPlaceholders( + variables={"role": "helpful", 
"task": "coding"}, placeholders={} + ) + + assert len(result) == 2 + assert result[0]["role"] == "system" + assert result[0]["content"] == "You are a helpful assistant" + assert result[1]["role"] == "user" + assert result[1]["content"] == "Help me with coding" + + +def test_compile_with_placeholders_valid(): + """Test compileWithPlaceholders with valid placeholders.""" + from langfuse.api.resources.prompts.types import PromptChatMessage + + langfuse = Langfuse() + prompt_name = create_uuid() + + # Create prompt with placeholders + langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "You are a {{role}} assistant"}, + {"type": "placeholder", "name": "examples"}, + {"role": "user", "content": "Help me with {{task}}"}, + ], + type="chat", + ) + + prompt_client = langfuse.get_prompt(prompt_name, type="chat") + + # Test compileWithPlaceholders with valid placeholders + result = prompt_client.compileWithPlaceholders( + variables={"role": "helpful", "task": "coding"}, + placeholders={ + "examples": [ + PromptChatMessage(role="user", content="Example: {{task}}"), + PromptChatMessage(role="assistant", content="Example response"), + ] + }, + ) + + assert len(result) == 4 + assert result[0]["content"] == "You are a helpful assistant" + assert result[1]["content"] == "Example: coding" + assert result[2]["content"] == "Example response" + assert result[3]["content"] == "Help me with coding" + + +def test_create_prompt_with_placeholders(): + """Test creating a prompt with placeholder messages.""" + langfuse = Langfuse() + prompt_name = create_uuid() + + prompt_client = langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "System message"}, + {"type": "placeholder", "name": "context"}, + {"role": "user", "content": "User message"}, + ], + type="chat", + ) + + # Verify prompt_with_placeholders structure + assert len(prompt_client.prompt_with_placeholders) == 3 + assert 
prompt_client.prompt_with_placeholders[0]["type"] == "message" + assert prompt_client.prompt_with_placeholders[1]["type"] == "placeholder" + assert prompt_client.prompt_with_placeholders[1]["name"] == "context" + assert prompt_client.prompt_with_placeholders[2]["type"] == "message" + + # Regular prompt should only contain messages + assert len(prompt_client.prompt) == 2 + + +def test_get_prompt_with_placeholders(): + """Test retrieving a prompt with placeholders.""" + langfuse = Langfuse() + prompt_name = create_uuid() + + langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "You are {{name}}"}, + {"type": "placeholder", "name": "history"}, + {"role": "user", "content": "{{question}}"}, + ], + type="chat", + ) + + prompt_client = langfuse.get_prompt(prompt_name, type="chat") + + # Verify placeholder structure is preserved + assert len(prompt_client.prompt_with_placeholders) == 3 + assert prompt_client.prompt_with_placeholders[1]["type"] == "placeholder" + assert prompt_client.prompt_with_placeholders[1]["name"] == "history" From 2759684f2828ef6431d7fb9ceb7f752d7d830596 Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 17:26:11 +0200 Subject: [PATCH 08/28] update api --- langfuse/_client/client.py | 4 +++- langfuse/api/reference.md | 9 ++++++--- langfuse/api/resources/prompts/client.py | 18 ++++++++++++------ .../types/create_chat_prompt_request.py | 4 ++-- .../prompts/types/create_prompt_request.py | 4 ++-- 5 files changed, 25 insertions(+), 14 deletions(-) diff --git a/langfuse/_client/client.py b/langfuse/_client/client.py index 0bbb12184..a0710c5c2 100644 --- a/langfuse/_client/client.py +++ b/langfuse/_client/client.py @@ -2153,7 +2153,9 @@ def create_prompt( self, *, name: str, - prompt: Union[str, List[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict]]], + prompt: Union[ + str, List[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict]] + ], labels: List[str] = [], tags: Optional[List[str]] = None, type: 
Optional[Literal["chat", "text"]] = "text", diff --git a/langfuse/api/reference.md b/langfuse/api/reference.md index e5595df8c..994933edc 100644 --- a/langfuse/api/reference.md +++ b/langfuse/api/reference.md @@ -4290,7 +4290,10 @@ Create a new version for the prompt with the given `name`
```python -from langfuse import ChatMessage, CreatePromptRequest_Chat +from langfuse import ( + ChatMessageWithPlaceholders_Chatmessage, + CreatePromptRequest_Chat, +) from langfuse.client import FernLangfuse client = FernLangfuse( @@ -4305,11 +4308,11 @@ client.prompts.create( request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage( + ChatMessageWithPlaceholders_Chatmessage( role="role", content="content", ), - ChatMessage( + ChatMessageWithPlaceholders_Chatmessage( role="role", content="content", ), diff --git a/langfuse/api/resources/prompts/client.py b/langfuse/api/resources/prompts/client.py index b41ab5642..c38c20156 100644 --- a/langfuse/api/resources/prompts/client.py +++ b/langfuse/api/resources/prompts/client.py @@ -228,7 +228,10 @@ def create( Examples -------- - from langfuse import ChatMessage, CreatePromptRequest_Chat + from langfuse import ( + ChatMessageWithPlaceholders_Chatmessage, + CreatePromptRequest_Chat, + ) from langfuse.client import FernLangfuse client = FernLangfuse( @@ -243,11 +246,11 @@ def create( request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage( + ChatMessageWithPlaceholders_Chatmessage( role="role", content="content", ), - ChatMessage( + ChatMessageWithPlaceholders_Chatmessage( role="role", content="content", ), @@ -512,7 +515,10 @@ async def create( -------- import asyncio - from langfuse import ChatMessage, CreatePromptRequest_Chat + from langfuse import ( + ChatMessageWithPlaceholders_Chatmessage, + CreatePromptRequest_Chat, + ) from langfuse.client import AsyncFernLangfuse client = AsyncFernLangfuse( @@ -530,11 +536,11 @@ async def main() -> None: request=CreatePromptRequest_Chat( name="name", prompt=[ - ChatMessage( + ChatMessageWithPlaceholders_Chatmessage( role="role", content="content", ), - ChatMessage( + ChatMessageWithPlaceholders_Chatmessage( role="role", content="content", ), diff --git a/langfuse/api/resources/prompts/types/create_chat_prompt_request.py 
b/langfuse/api/resources/prompts/types/create_chat_prompt_request.py index 95d55c88a..1442164a6 100644 --- a/langfuse/api/resources/prompts/types/create_chat_prompt_request.py +++ b/langfuse/api/resources/prompts/types/create_chat_prompt_request.py @@ -5,12 +5,12 @@ from ....core.datetime_utils import serialize_datetime from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 -from .chat_message import ChatMessage +from .chat_message_with_placeholders import ChatMessageWithPlaceholders class CreateChatPromptRequest(pydantic_v1.BaseModel): name: str - prompt: typing.List[ChatMessage] + prompt: typing.List[ChatMessageWithPlaceholders] config: typing.Optional[typing.Any] = None labels: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None) """ diff --git a/langfuse/api/resources/prompts/types/create_prompt_request.py b/langfuse/api/resources/prompts/types/create_prompt_request.py index 76cba7ff9..b9518a7c4 100644 --- a/langfuse/api/resources/prompts/types/create_prompt_request.py +++ b/langfuse/api/resources/prompts/types/create_prompt_request.py @@ -7,12 +7,12 @@ from ....core.datetime_utils import serialize_datetime from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 -from .chat_message import ChatMessage +from .chat_message_with_placeholders import ChatMessageWithPlaceholders class CreatePromptRequest_Chat(pydantic_v1.BaseModel): name: str - prompt: typing.List[ChatMessage] + prompt: typing.List[ChatMessageWithPlaceholders] config: typing.Optional[typing.Any] = None labels: typing.Optional[typing.List[str]] = None tags: typing.Optional[typing.List[str]] = None From 31f3dc9f7a4d31ff8c8efbf18676c4d74b5629cc Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 17:27:05 +0200 Subject: [PATCH 09/28] fix tests --- .../types/chat_message_with_placeholders.py | 87 +++++++++++++++++++ langfuse/model.py | 6 +- 2 files changed, 90 insertions(+), 3 deletions(-) create mode 100644 
langfuse/api/resources/prompts/types/chat_message_with_placeholders.py diff --git a/langfuse/api/resources/prompts/types/chat_message_with_placeholders.py b/langfuse/api/resources/prompts/types/chat_message_with_placeholders.py new file mode 100644 index 000000000..dc12d5073 --- /dev/null +++ b/langfuse/api/resources/prompts/types/chat_message_with_placeholders.py @@ -0,0 +1,87 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class ChatMessageWithPlaceholders_Chatmessage(pydantic_v1.BaseModel): + role: str + content: str + type: typing.Literal["chatmessage"] = "chatmessage" + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} + + +class ChatMessageWithPlaceholders_Placeholder(pydantic_v1.BaseModel): + name: str + type: typing.Literal["placeholder"] = "placeholder" + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> 
typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} + + +ChatMessageWithPlaceholders = typing.Union[ + ChatMessageWithPlaceholders_Chatmessage, ChatMessageWithPlaceholders_Placeholder +] diff --git a/langfuse/model.py b/langfuse/model.py index 6778854a9..92471fb84 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -313,15 +313,15 @@ def compileWithPlaceholders( return [ ChatMessageDict( content=TemplateParser.compile_template( - chat_message["content"], variables, + chat_message["content"], + variables, ), role=chat_message["role"], ) for chat_message in messages_with_placeholders_replaced - if hasattr(chat_message, "role") and hasattr(chat_message, "content") + if "role" in chat_message and "content" in chat_message ] - def get_langchain_prompt(self, **kwargs): """Convert Langfuse prompt into string compatible with Langchain ChatPromptTemplate. 
From 6ddee624dc563fac062500e821432c7f5c24a04d Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 18:45:54 +0200 Subject: [PATCH 10/28] tests --- tests/test_prompt.py | 271 +++++++++++++++++++++++++------------------ 1 file changed, 155 insertions(+), 116 deletions(-) diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 51f5b3c46..f8aa42243 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -141,6 +141,161 @@ def test_create_chat_prompt_with_placeholders(): assert prompt_client.config == {} +def test_create_prompt_with_placeholders(): + """Test creating a prompt with placeholder messages.""" + langfuse = Langfuse() + prompt_name = create_uuid() + prompt_client = langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "System message"}, + {"type": "placeholder", "name": "context"}, + {"role": "user", "content": "User message"}, + ], + type="chat", + ) + + # Verify the full prompt structure with placeholders + assert len(prompt_client.prompt) == 3 + + # First message - system + assert prompt_client.prompt[0]["type"] == "message" + assert prompt_client.prompt[0]["role"] == "system" + assert prompt_client.prompt[0]["content"] == "System message" + # Placeholder + assert prompt_client.prompt[1]["type"] == "placeholder" + assert prompt_client.prompt[1]["name"] == "context" + # Third message - user + assert prompt_client.prompt[2]["type"] == "message" + assert prompt_client.prompt[2]["role"] == "user" + assert prompt_client.prompt[2]["content"] == "User message" + + +def test_get_prompt_with_placeholders(): + """Test retrieving a prompt with placeholders.""" + langfuse = Langfuse() + prompt_name = create_uuid() + + langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "You are {{name}}"}, + {"type": "placeholder", "name": "history"}, + {"role": "user", "content": "{{question}}"}, + ], + type="chat", + ) + + prompt_client = langfuse.get_prompt(prompt_name, type="chat", version=1) 
+ + # Verify placeholder structure is preserved + assert len(prompt_client.prompt) == 3 + + # First message - system with variable + assert prompt_client.prompt[0]["type"] == "message" + assert prompt_client.prompt[0]["role"] == "system" + assert prompt_client.prompt[0]["content"] == "You are {{name}}" + # Placeholder + assert prompt_client.prompt[1]["type"] == "placeholder" + assert prompt_client.prompt[1]["name"] == "history" + # Third message - user with variable + assert prompt_client.prompt[2]["type"] == "message" + assert prompt_client.prompt[2]["role"] == "user" + assert prompt_client.prompt[2]["content"] == "{{question}}" + + +@pytest.mark.parametrize( + "variables,placeholders,expected_len,expected_contents", + [ + # Variables only, no placeholders + ( + {"role": "helpful", "task": "coding"}, + {}, + 2, + ["You are a helpful assistant", "Help me with coding"], + ), + # No variables, no placeholders + ({}, {}, 2, ["You are a {{role}} assistant", "Help me with {{task}}"]), + # Placeholders only, no variables + ( + {}, + { + "examples": [ + {"role": "user", "content": "Example question"}, + {"role": "assistant", "content": "Example answer"}, + ] + }, + 4, + [ + "You are a {{role}} assistant", + "Example question", + "Example answer", + "Help me with {{task}}", + ], + ), + # Both variables and placeholders + ( + {"role": "helpful", "task": "coding"}, + { + "examples": [ + {"role": "user", "content": "Show me {{task}}"}, + {"role": "assistant", "content": "Here's {{task}}"}, + ] + }, + 4, + [ + "You are a helpful assistant", + "Show me coding", + "Here's coding", + "Help me with coding", + ], + ), + # Empty placeholder array + ( + {"role": "helpful", "task": "coding"}, + {"examples": []}, + 2, + ["You are a helpful assistant", "Help me with coding"], + ), + # Unused placeholders + ( + {"role": "helpful", "task": "coding"}, + {"unused": [{"role": "user", "content": "Won't appear"}]}, + 2, + ["You are a helpful assistant", "Help me with coding"], + ), + ], +) 
+def test_compile_with_placeholders( + variables, placeholders, expected_len, expected_contents +): + """Test compileWithPlaceholders with different variable/placeholder combinations.""" + from langfuse.api.resources.prompts import Prompt_Chat + from langfuse.model import ChatPromptClient + + mock_prompt = Prompt_Chat( + name="test_prompt", + version=1, + type="chat", + config={}, + tags=[], + labels=[], + prompt=[ + {"role": "system", "content": "You are a {{role}} assistant"}, + {"type": "placeholder", "name": "examples"}, + {"role": "user", "content": "Help me with {{task}}"}, + ], + ) + + result = ChatPromptClient(mock_prompt).compileWithPlaceholders( + variables, placeholders + ) + + assert len(result) == expected_len + for i, expected_content in enumerate(expected_contents): + assert result[i]["content"] == expected_content + + def test_compiling_chat_prompt(): langfuse = Langfuse() prompt_name = create_uuid() @@ -1165,119 +1320,3 @@ def test_update_prompt(): expected_labels = sorted(["latest", "doe", "production", "john"]) assert sorted(fetched_prompt.labels) == expected_labels assert sorted(updated_prompt.labels) == expected_labels - - -def test_compile_with_placeholders(): - """Test compileWithPlaceholders method with chat prompt.""" - langfuse = Langfuse() - prompt_name = create_uuid() - - prompt_client = langfuse.create_prompt( - name=prompt_name, - prompt=[ - {"role": "system", "content": "You are a {{role}} assistant"}, - {"role": "user", "content": "Help me with {{task}}"}, - ], - labels=["production"], - type="chat", - ) - - second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") - - # Test compileWithPlaceholders with no placeholders (should work like compile) - result = second_prompt_client.compileWithPlaceholders( - variables={"role": "helpful", "task": "coding"}, placeholders={} - ) - - assert len(result) == 2 - assert result[0]["role"] == "system" - assert result[0]["content"] == "You are a helpful assistant" - assert 
result[1]["role"] == "user" - assert result[1]["content"] == "Help me with coding" - - -def test_compile_with_placeholders_valid(): - """Test compileWithPlaceholders with valid placeholders.""" - from langfuse.api.resources.prompts.types import PromptChatMessage - - langfuse = Langfuse() - prompt_name = create_uuid() - - # Create prompt with placeholders - langfuse.create_prompt( - name=prompt_name, - prompt=[ - {"role": "system", "content": "You are a {{role}} assistant"}, - {"type": "placeholder", "name": "examples"}, - {"role": "user", "content": "Help me with {{task}}"}, - ], - type="chat", - ) - - prompt_client = langfuse.get_prompt(prompt_name, type="chat") - - # Test compileWithPlaceholders with valid placeholders - result = prompt_client.compileWithPlaceholders( - variables={"role": "helpful", "task": "coding"}, - placeholders={ - "examples": [ - PromptChatMessage(role="user", content="Example: {{task}}"), - PromptChatMessage(role="assistant", content="Example response"), - ] - }, - ) - - assert len(result) == 4 - assert result[0]["content"] == "You are a helpful assistant" - assert result[1]["content"] == "Example: coding" - assert result[2]["content"] == "Example response" - assert result[3]["content"] == "Help me with coding" - - -def test_create_prompt_with_placeholders(): - """Test creating a prompt with placeholder messages.""" - langfuse = Langfuse() - prompt_name = create_uuid() - - prompt_client = langfuse.create_prompt( - name=prompt_name, - prompt=[ - {"role": "system", "content": "System message"}, - {"type": "placeholder", "name": "context"}, - {"role": "user", "content": "User message"}, - ], - type="chat", - ) - - # Verify prompt_with_placeholders structure - assert len(prompt_client.prompt_with_placeholders) == 3 - assert prompt_client.prompt_with_placeholders[0]["type"] == "message" - assert prompt_client.prompt_with_placeholders[1]["type"] == "placeholder" - assert prompt_client.prompt_with_placeholders[1]["name"] == "context" - assert 
prompt_client.prompt_with_placeholders[2]["type"] == "message" - - # Regular prompt should only contain messages - assert len(prompt_client.prompt) == 2 - - -def test_get_prompt_with_placeholders(): - """Test retrieving a prompt with placeholders.""" - langfuse = Langfuse() - prompt_name = create_uuid() - - langfuse.create_prompt( - name=prompt_name, - prompt=[ - {"role": "system", "content": "You are {{name}}"}, - {"type": "placeholder", "name": "history"}, - {"role": "user", "content": "{{question}}"}, - ], - type="chat", - ) - - prompt_client = langfuse.get_prompt(prompt_name, type="chat") - - # Verify placeholder structure is preserved - assert len(prompt_client.prompt_with_placeholders) == 3 - assert prompt_client.prompt_with_placeholders[1]["type"] == "placeholder" - assert prompt_client.prompt_with_placeholders[1]["name"] == "history" From 0c7ec1d8a34e4907d04b0b0ccf875d8afce8bda1 Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 25 Jun 2025 18:54:42 +0200 Subject: [PATCH 11/28] prvoide equality --- langfuse/model.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 92471fb84..927e12352 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -271,10 +271,16 @@ def __eq__(self, other): return ( self.name == other.name and self.version == other.version + and len(self.prompt) == len(other.prompt) and all( - m1["role"] == m2["role"] and m1["content"] == m2["content"] + # chatmessage equality + (m1["type"] == "message" and m2["type"] == "message" + and m1["role"] == m2["role"] and m1["content"] == m2["content"]) + or + # placeholder equality + (m1["type"] == "placeholder" and m2["type"] == "placeholder" + and m1["name"] == m2["name"]) for m1, m2 in zip(self.prompt, other.prompt) - if m1["type"] == "message" and m2["type"] == "message" ) and self.config == other.config ) From 39c0ae7b8e051abad6287c7f3cf93319d0cb75c1 Mon Sep 17 00:00:00 2001 From: Nimar Date: Thu, 26 Jun 2025 21:05:22 +0200 
Subject: [PATCH 12/28] code review --- langfuse/model.py | 60 ++++++++++++++++++++++++++++++-------------- tests/test_prompt.py | 6 ++--- 2 files changed, 44 insertions(+), 22 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index ec39e06ab..927748bcb 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -2,6 +2,7 @@ import re from abc import ABC, abstractmethod +from collections.abc import Iterable from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict, Union from langfuse.api.resources.commons.types.dataset import ( @@ -296,7 +297,7 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): self.prompt: List[ChatMessageWithPlaceholdersDict] = [] for p in prompt.prompt: - if hasattr(p, "type") and hasattr(p, "name"): + if hasattr(p, "type") and hasattr(p, "name") and p.type == "placeholder": self.prompt.append( ChatMessageWithPlaceholdersDict_Placeholder( type="placeholder", @@ -313,16 +314,21 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): ) def compile(self, **kwargs) -> List[ChatMessageDict]: - return [ - ChatMessageDict( - content=TemplateParser.compile_template( - chat_message["content"], kwargs - ), - role=chat_message["role"], - ) - for chat_message in self.prompt - if chat_message["type"] == "message" - ] + compiled_messages: List[ChatMessageDict] = [] + for chat_message in self.prompt: + if chat_message["type"] == "message": + compiled_messages.append( + ChatMessageDict( + content=TemplateParser.compile_template( + chat_message["content"], kwargs + ), + role=chat_message["role"], + ) + ) + elif chat_message["type"] == "placeholder": + placeholder_in_compile_error = f"Called compile on chat client with placeholder: {chat_message['name']}. Please use compile_with_placeholders instead." 
+ raise ValueError(placeholder_in_compile_error) + return compiled_messages @property def variables(self) -> List[str]: @@ -342,12 +348,19 @@ def __eq__(self, other): and len(self.prompt) == len(other.prompt) and all( # chatmessage equality - (m1["type"] == "message" and m2["type"] == "message" - and m1["role"] == m2["role"] and m1["content"] == m2["content"]) + ( + m1["type"] == "message" + and m2["type"] == "message" + and m1["role"] == m2["role"] + and m1["content"] == m2["content"] + ) or # placeholder equality - (m1["type"] == "placeholder" and m2["type"] == "placeholder" - and m1["name"] == m2["name"]) + ( + m1["type"] == "placeholder" + and m2["type"] == "placeholder" + and m1["name"] == m2["name"] + ) for m1, m2 in zip(self.prompt, other.prompt) ) and self.config == other.config @@ -355,9 +368,9 @@ def __eq__(self, other): return False - def compileWithPlaceholders( + def compile_with_placeholders( self, - variables: Dict[str, Any], + variables: Dict[str, str], placeholders: Dict[str, List[ChatMessageDict]], ) -> List[ChatMessageDict]: """Compile chat prompt by first replacing placeholders, then expanding variables. 
@@ -374,7 +387,17 @@ def compileWithPlaceholders( # Subsitute the placeholders for their supplied ChatMessages for item in self.prompt: if item["type"] == "placeholder" and item["name"] in placeholders: - messages_with_placeholders_replaced.extend(placeholders[item["name"]]) + if ( + isinstance(placeholders[item["name"]], Iterable) + and len(placeholders[item["name"]]) > 0 + ): + messages_with_placeholders_replaced.extend( + placeholders[item["name"]] + ) + else: + raise ValueError( + f"The provided placeholder: {item['name']} is empty" + ) elif item["type"] == "message": messages_with_placeholders_replaced.append( ChatMessageDict( @@ -393,7 +416,6 @@ def compileWithPlaceholders( role=chat_message["role"], ) for chat_message in messages_with_placeholders_replaced - if "role" in chat_message and "content" in chat_message ] def get_langchain_prompt(self, **kwargs): diff --git a/tests/test_prompt.py b/tests/test_prompt.py index f8aa42243..047cb2196 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -109,7 +109,7 @@ def test_create_chat_prompt_with_placeholders(): second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") - messages = second_prompt_client.compileWithPlaceholders( + messages = second_prompt_client.compile_with_placeholders( variables={"role": "helpful", "task": "coding"}, placeholders={ "history": [ @@ -269,7 +269,7 @@ def test_get_prompt_with_placeholders(): def test_compile_with_placeholders( variables, placeholders, expected_len, expected_contents ): - """Test compileWithPlaceholders with different variable/placeholder combinations.""" + """Test compile_with_placeholders with different variable/placeholder combinations.""" from langfuse.api.resources.prompts import Prompt_Chat from langfuse.model import ChatPromptClient @@ -287,7 +287,7 @@ def test_compile_with_placeholders( ], ) - result = ChatPromptClient(mock_prompt).compileWithPlaceholders( + result = ChatPromptClient(mock_prompt).compile_with_placeholders( variables, 
placeholders ) From fc7a3637a95483d1c564409505da57baa6fd378b Mon Sep 17 00:00:00 2001 From: Nimar Date: Fri, 27 Jun 2025 14:34:17 +0200 Subject: [PATCH 13/28] feedback --- langfuse/model.py | 12 ++++++++---- tests/test_prompt.py | 43 +++++++++++++++++++++++++++++++------------ 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 927748bcb..6a030ff75 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -370,8 +370,8 @@ def __eq__(self, other): def compile_with_placeholders( self, - variables: Dict[str, str], placeholders: Dict[str, List[ChatMessageDict]], + variables: Optional[Dict[str, str]] = None, ) -> List[ChatMessageDict]: """Compile chat prompt by first replacing placeholders, then expanding variables. @@ -382,22 +382,26 @@ def compile_with_placeholders( Returns: List[ChatMessageDict]: Compiled chat messages """ + if variables is None: + variables = {} + messages_with_placeholders_replaced: List[ChatMessageDict] = [] # Subsitute the placeholders for their supplied ChatMessages for item in self.prompt: if item["type"] == "placeholder" and item["name"] in placeholders: if ( - isinstance(placeholders[item["name"]], Iterable) + isinstance(placeholders[item["name"]], List) and len(placeholders[item["name"]]) > 0 ): messages_with_placeholders_replaced.extend( placeholders[item["name"]] ) else: - raise ValueError( + empty_placeholder_error = ( f"The provided placeholder: {item['name']} is empty" ) + raise ValueError(empty_placeholder_error) elif item["type"] == "message": messages_with_placeholders_replaced.append( ChatMessageDict( @@ -437,7 +441,7 @@ def get_langchain_prompt(self, **kwargs): self._get_langchain_prompt_string( TemplateParser.compile_template(msg["content"], kwargs) if kwargs - else msg["content"] + else msg["content"], ), ) for msg in self.prompt diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 047cb2196..696617b70 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ 
-110,13 +110,13 @@ def test_create_chat_prompt_with_placeholders(): second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") messages = second_prompt_client.compile_with_placeholders( - variables={"role": "helpful", "task": "coding"}, placeholders={ "history": [ {"role": "user", "content": "Example: {{task}}"}, {"role": "assistant", "content": "Example response"}, ], }, + variables={"role": "helpful", "task": "coding"}, ) # Create a test generation using compiled messages @@ -205,7 +205,7 @@ def test_get_prompt_with_placeholders(): @pytest.mark.parametrize( - "variables,placeholders,expected_len,expected_contents", + ("variables", "placeholders", "expected_len", "expected_contents"), [ # Variables only, no placeholders ( @@ -223,7 +223,24 @@ def test_get_prompt_with_placeholders(): "examples": [ {"role": "user", "content": "Example question"}, {"role": "assistant", "content": "Example answer"}, - ] + ], + }, + 4, + [ + "You are a {{role}} assistant", + "Example question", + "Example answer", + "Help me with {{task}}", + ], + ), + # Placeholders only, variables None + ( + None, + { + "examples": [ + {"role": "user", "content": "Example question"}, + {"role": "assistant", "content": "Example answer"}, + ], }, 4, [ @@ -240,7 +257,7 @@ def test_get_prompt_with_placeholders(): "examples": [ {"role": "user", "content": "Show me {{task}}"}, {"role": "assistant", "content": "Here's {{task}}"}, - ] + ], }, 4, [ @@ -250,13 +267,14 @@ def test_get_prompt_with_placeholders(): "Help me with coding", ], ), - # Empty placeholder array - ( - {"role": "helpful", "task": "coding"}, - {"examples": []}, - 2, - ["You are a helpful assistant", "Help me with coding"], - ), + # # Empty placeholder array + # This is expected to fail! 
If the user provides a placeholder, it should contain an array + # ( + # {"role": "helpful", "task": "coding"}, + # {"examples": []}, + # 2, + # ["You are a helpful assistant", "Help me with coding"], + # ), # Unused placeholders ( {"role": "helpful", "task": "coding"}, @@ -288,7 +306,8 @@ def test_compile_with_placeholders( ) result = ChatPromptClient(mock_prompt).compile_with_placeholders( - variables, placeholders + placeholders, + variables, ) assert len(result) == expected_len From 9199be5094f89dbd37dff9433ca21ab7ff2f1c80 Mon Sep 17 00:00:00 2001 From: Nimar Date: Fri, 27 Jun 2025 14:49:18 +0200 Subject: [PATCH 14/28] fix for LC --- langfuse/model.py | 18 +++++++- tests/test_prompt_compilation.py | 70 ++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 2 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 6a030ff75..e799cebac 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -2,7 +2,6 @@ import re from abc import ABC, abstractmethod -from collections.abc import Iterable from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict, Union from langfuse.api.resources.commons.types.dataset import ( @@ -372,12 +371,14 @@ def compile_with_placeholders( self, placeholders: Dict[str, List[ChatMessageDict]], variables: Optional[Dict[str, str]] = None, + persist_compilation: bool = False, ) -> List[ChatMessageDict]: """Compile chat prompt by first replacing placeholders, then expanding variables. Args: variables: Dictionary of variable names to values for template substitution placeholders: Dictionary of placeholder names to lists of ChatMessage objects + persist_compilation: If True, saves the compiled output to the internal state. Useful if using the output for langchain prompts. Returns: List[ChatMessageDict]: Compiled chat messages @@ -411,7 +412,7 @@ def compile_with_placeholders( ) # Then, replace the variables in the ChatMessage content. 
- return [ + compiled_messages = [ ChatMessageDict( content=TemplateParser.compile_template( chat_message["content"], @@ -422,6 +423,19 @@ def compile_with_placeholders( for chat_message in messages_with_placeholders_replaced ] + # Mutate the internal prompt object if requested + if persist_compilation: + self.prompt = [ + ChatMessageWithPlaceholdersDict_Message( + type="message", + role=msg["role"], + content=msg["content"], + ) + for msg in compiled_messages + ] + + return compiled_messages + def get_langchain_prompt(self, **kwargs): """Convert Langfuse prompt into string compatible with Langchain ChatPromptTemplate. diff --git a/tests/test_prompt_compilation.py b/tests/test_prompt_compilation.py index c3bcc11aa..73c5db8a9 100644 --- a/tests/test_prompt_compilation.py +++ b/tests/test_prompt_compilation.py @@ -732,3 +732,73 @@ def test_chat_prompt_with_json_variables(self): assert len(formatted_messages) == 2 assert formatted_messages[0].content == expected_system assert formatted_messages[1].content == expected_user + + def test_chat_prompt_with_placeholders_langchain(self): + """Test that chat prompts with placeholders work correctly with Langchain.""" + from langfuse.api.resources.prompts import Prompt_Chat + + chat_messages = [ + ChatMessage( + role="system", + content="You are a {{role}} assistant with {{capability}} capabilities.", + ), + {"type": "placeholder", "name": "examples"}, + ChatMessage( + role="user", + content="Help me with {{task}}.", + ), + ] + + prompt_client = ChatPromptClient( + Prompt_Chat( + type="chat", + name="chat_placeholder_langchain_test", + version=1, + config={}, + tags=[], + labels=[], + prompt=chat_messages, + ), + ) + + placeholders = { + "examples": [ + {"role": "user", "content": "Example: What is 2+2?"}, + {"role": "assistant", "content": "2+2 equals 4."}, + ], + } + + # Test compile_with_placeholders with only placeholders (no variables) + compiled_messages = prompt_client.compile_with_placeholders( + 
placeholders=placeholders, + ) + + assert len(compiled_messages) == 4 + assert ( + compiled_messages[0]["content"] + == "You are a {{role}} assistant with {{capability}} capabilities." + ) + assert compiled_messages[1]["content"] == "Example: What is 2+2?" + assert compiled_messages[2]["content"] == "2+2 equals 4." + assert compiled_messages[3]["content"] == "Help me with {{task}}." + + compiled_messages = prompt_client.compile_with_placeholders( + placeholders=placeholders, persist_compilation=True, + ) + + langchain_messages = prompt_client.get_langchain_prompt( + role="helpful", + capability="math", + task="addition", + ) + langchain_prompt = ChatPromptTemplate.from_messages(langchain_messages) + formatted_messages = langchain_prompt.format_messages() + + assert len(formatted_messages) == 4 + assert ( + formatted_messages[0].content + == "You are a helpful assistant with math capabilities." + ) + assert formatted_messages[1].content == "Example: What is 2+2?" + assert formatted_messages[2].content == "2+2 equals 4." + assert formatted_messages[3].content == "Help me with addition." 
From e295821962442c233e4fdb6ca74f6258c172d104 Mon Sep 17 00:00:00 2001 From: Nimar Date: Sat, 28 Jun 2025 00:19:54 +0200 Subject: [PATCH 15/28] migrate to getter setter --- langfuse/model.py | 233 ++++++++++++++++++------------- tests/test_prompt.py | 113 +++++++++------ tests/test_prompt_compilation.py | 18 +-- 3 files changed, 209 insertions(+), 155 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index e799cebac..5f9811818 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -2,7 +2,8 @@ import re from abc import ABC, abstractmethod -from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict, Union +from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, TypedDict, Union +from langfuse.logger import langfuse_logger from langfuse.api.resources.commons.types.dataset import ( Dataset, # noqa: F401 @@ -37,6 +38,9 @@ CreateDatasetRequest, ) from langfuse.api.resources.prompts import ChatMessage, Prompt, Prompt_Chat, Prompt_Text +from langfuse.api.resources.prompts.types.chat_message_with_placeholders import ( + ChatMessageWithPlaceholders, +) class ModelUsage(TypedDict): @@ -54,6 +58,11 @@ class ChatMessageDict(TypedDict): content: str +class ChatMessagePlaceholderDict(TypedDict): + role: str + content: str + + class ChatMessageWithPlaceholdersDict_Message(TypedDict): type: Literal["message"] role: str @@ -293,18 +302,82 @@ def get_langchain_prompt(self, **kwargs) -> str: class ChatPromptClient(BasePromptClient): def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): super().__init__(prompt, is_fallback) - self.prompt: List[ChatMessageWithPlaceholdersDict] = [] + self.raw_prompt: List[ChatMessageWithPlaceholdersDict] = [] + self.placeholder_fillins: Dict[str, List[ChatMessageDict]] = {} + self.prompt = prompt.prompt + + @property + def prompt(self) -> List[Union[ChatMessageDict, ChatMessagePlaceholderDict]]: + """Returns the prompt with placeholders substituted for their values. 
+ If no placeholders are set and raw_prompt contains placeholders, returns only messages. + """ + compiled_messages = [] + has_unresolved_placeholders = False + + for chat_message in self.raw_prompt: + if chat_message["type"] == "message": + compiled_messages.append( + ChatMessageDict( + content=chat_message["content"], + role=chat_message["role"], + ), + ) + elif chat_message["type"] == "placeholder": + if chat_message["name"] in self.placeholder_fillins: + placeholder_messages = self.placeholder_fillins[ + chat_message["name"] + ] + if isinstance(placeholder_messages, List): + compiled_messages.extend(placeholder_messages) + else: + err_placeholder_not_list = f"Placeholder '{chat_message['name']}' must contain a list of chat messages, got {type(placeholder_messages)}" + raise ValueError(err_placeholder_not_list) + else: + compiled_messages.append( + { + "type": "placeholder", + "name": chat_message["name"], + }, + ) + has_unresolved_placeholders = True + if has_unresolved_placeholders and len(self.placeholder_fillins) == 0: + unresolved = [ + msg["name"] for msg in self.raw_prompt if msg["type"] == "placeholder" + ] + err_unresolved_placeholders = f"Placeholders {unresolved} have no values set. Use update() to set placeholder values." + langfuse_logger.warning(err_unresolved_placeholders) + # raise ValueError(err_unresolved_placeholders) + elif has_unresolved_placeholders: + unresolved = [ + msg["name"] + for msg in self.raw_prompt + if msg["type"] == "placeholder" + and msg["name"] not in self.placeholder_fillins + ] + err_unresolved_placeholders = f"Placeholders {unresolved} have no values set. Use update() to set placeholder values." 
+ langfuse_logger.warning(err_unresolved_placeholders) + # raise ValueError(err_unresolved_placeholders) + + return compiled_messages - for p in prompt.prompt: + @prompt.setter + def prompt( + self, + prompt: Sequence[ + Union[ChatMessageWithPlaceholdersDict, ChatMessageWithPlaceholders] + ], + ) -> None: + """Backward-compatible setter for raw prompt structure.""" + for p in prompt: if hasattr(p, "type") and hasattr(p, "name") and p.type == "placeholder": - self.prompt.append( + self.raw_prompt.append( ChatMessageWithPlaceholdersDict_Placeholder( type="placeholder", name=p.name, ) ) elif hasattr(p, "role") and hasattr(p, "content"): - self.prompt.append( + self.raw_prompt.append( ChatMessageWithPlaceholdersDict_Message( type="message", role=p.role, @@ -312,39 +385,72 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): ) ) + self.placeholder_fillins = {} # Clear because user expects old placeholders not to linger + def compile(self, **kwargs) -> List[ChatMessageDict]: - compiled_messages: List[ChatMessageDict] = [] - for chat_message in self.prompt: - if chat_message["type"] == "message": - compiled_messages.append( - ChatMessageDict( - content=TemplateParser.compile_template( - chat_message["content"], kwargs - ), - role=chat_message["role"], - ) - ) - elif chat_message["type"] == "placeholder": - placeholder_in_compile_error = f"Called compile on chat client with placeholder: {chat_message['name']}. Please use compile_with_placeholders instead." 
- raise ValueError(placeholder_in_compile_error) - return compiled_messages + # Compile skips placeholders which aren't resolved + return [ + ChatMessageDict( + content=TemplateParser.compile_template( + chat_message["content"], + kwargs, + ), + role=chat_message["role"], + ) + for chat_message in self.prompt + if "content" in chat_message and "role" in chat_message + ] + + def set(self, placeholders: Dict[str, List[ChatMessageDict]]) -> "ChatPromptClient": + """Sets the internal placeholders to the given dict + + Args: + placeholders: Dictionary mapping placeholder names to lists of chat messages + + Returns: + ChatPromptClient: Self for method chaining + """ + self.placeholder_fillins = placeholders.copy() + return self + + def update( + self, placeholders: Dict[str, List[ChatMessageDict]] + ) -> "ChatPromptClient": + """Updates the stored placeholder values. + + Only adds new placeholders or updates existing ones. Does not delete existing keys. + + Args: + placeholders: Dictionary mapping placeholder names to lists of chat messages + + Returns: + ChatPromptClient: Self for method chaining + """ + self.placeholder_fillins.update(placeholders) + return self @property def variables(self) -> List[str]: """Return all the variable names in the chat prompt template.""" - return [ - variable - for chat_message in self.prompt - if chat_message["type"] == "message" - for variable in TemplateParser.find_variable_names(chat_message["content"]) - ] + variables = [] + # Variables from raw prompt messages + for chat_message in self.raw_prompt: + if chat_message["type"] == "message": + variables.extend( + TemplateParser.find_variable_names(chat_message["content"]) + ) + # Variables from placeholder messages + for placeholder_messages in self.placeholder_fillins.values(): + for msg in placeholder_messages: + variables.extend(TemplateParser.find_variable_names(msg["content"])) + return variables def __eq__(self, other): if isinstance(self, other.__class__): return ( self.name == 
other.name and self.version == other.version - and len(self.prompt) == len(other.prompt) + and len(self.raw_prompt) == len(other.raw_prompt) and all( # chatmessage equality ( @@ -360,82 +466,14 @@ def __eq__(self, other): and m2["type"] == "placeholder" and m1["name"] == m2["name"] ) - for m1, m2 in zip(self.prompt, other.prompt) + for m1, m2 in zip(self.raw_prompt, other.raw_prompt) ) and self.config == other.config + and self.placeholder_fillins == other.placeholder_fillins ) return False - def compile_with_placeholders( - self, - placeholders: Dict[str, List[ChatMessageDict]], - variables: Optional[Dict[str, str]] = None, - persist_compilation: bool = False, - ) -> List[ChatMessageDict]: - """Compile chat prompt by first replacing placeholders, then expanding variables. - - Args: - variables: Dictionary of variable names to values for template substitution - placeholders: Dictionary of placeholder names to lists of ChatMessage objects - persist_compilation: If True, saves the compiled output to the internal state. Useful if using the output for langchain prompts. - - Returns: - List[ChatMessageDict]: Compiled chat messages - """ - if variables is None: - variables = {} - - messages_with_placeholders_replaced: List[ChatMessageDict] = [] - - # Subsitute the placeholders for their supplied ChatMessages - for item in self.prompt: - if item["type"] == "placeholder" and item["name"] in placeholders: - if ( - isinstance(placeholders[item["name"]], List) - and len(placeholders[item["name"]]) > 0 - ): - messages_with_placeholders_replaced.extend( - placeholders[item["name"]] - ) - else: - empty_placeholder_error = ( - f"The provided placeholder: {item['name']} is empty" - ) - raise ValueError(empty_placeholder_error) - elif item["type"] == "message": - messages_with_placeholders_replaced.append( - ChatMessageDict( - role=item["role"], - content=item["content"], - ) - ) - - # Then, replace the variables in the ChatMessage content. 
- compiled_messages = [ - ChatMessageDict( - content=TemplateParser.compile_template( - chat_message["content"], - variables, - ), - role=chat_message["role"], - ) - for chat_message in messages_with_placeholders_replaced - ] - - # Mutate the internal prompt object if requested - if persist_compilation: - self.prompt = [ - ChatMessageWithPlaceholdersDict_Message( - type="message", - role=msg["role"], - content=msg["content"], - ) - for msg in compiled_messages - ] - - return compiled_messages - def get_langchain_prompt(self, **kwargs): """Convert Langfuse prompt into string compatible with Langchain ChatPromptTemplate. @@ -459,7 +497,6 @@ def get_langchain_prompt(self, **kwargs): ), ) for msg in self.prompt - if msg["type"] == "message" ] diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 696617b70..4d61c2f41 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -108,16 +108,15 @@ def test_create_chat_prompt_with_placeholders(): ) second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") - - messages = second_prompt_client.compile_with_placeholders( - placeholders={ + second_prompt_client.update( + { "history": [ {"role": "user", "content": "Example: {{task}}"}, {"role": "assistant", "content": "Example response"}, ], }, - variables={"role": "helpful", "task": "coding"}, ) + messages = second_prompt_client.compile(role="helpful", task="coding") # Create a test generation using compiled messages completion = openai.OpenAI().chat.completions.create( @@ -159,16 +158,16 @@ def test_create_prompt_with_placeholders(): assert len(prompt_client.prompt) == 3 # First message - system - assert prompt_client.prompt[0]["type"] == "message" - assert prompt_client.prompt[0]["role"] == "system" - assert prompt_client.prompt[0]["content"] == "System message" + assert prompt_client.raw_prompt[0]["type"] == "message" + assert prompt_client.raw_prompt[0]["role"] == "system" + assert prompt_client.raw_prompt[0]["content"] == "System message" # 
Placeholder - assert prompt_client.prompt[1]["type"] == "placeholder" - assert prompt_client.prompt[1]["name"] == "context" + assert prompt_client.raw_prompt[1]["type"] == "placeholder" + assert prompt_client.raw_prompt[1]["name"] == "context" # Third message - user - assert prompt_client.prompt[2]["type"] == "message" - assert prompt_client.prompt[2]["role"] == "user" - assert prompt_client.prompt[2]["content"] == "User message" + assert prompt_client.raw_prompt[2]["type"] == "message" + assert prompt_client.raw_prompt[2]["role"] == "user" + assert prompt_client.raw_prompt[2]["content"] == "User message" def test_get_prompt_with_placeholders(): @@ -192,31 +191,32 @@ def test_get_prompt_with_placeholders(): assert len(prompt_client.prompt) == 3 # First message - system with variable - assert prompt_client.prompt[0]["type"] == "message" - assert prompt_client.prompt[0]["role"] == "system" - assert prompt_client.prompt[0]["content"] == "You are {{name}}" + assert prompt_client.raw_prompt[0]["type"] == "message" + assert prompt_client.raw_prompt[0]["role"] == "system" + assert prompt_client.raw_prompt[0]["content"] == "You are {{name}}" # Placeholder - assert prompt_client.prompt[1]["type"] == "placeholder" - assert prompt_client.prompt[1]["name"] == "history" + assert prompt_client.raw_prompt[1]["type"] == "placeholder" + assert prompt_client.raw_prompt[1]["name"] == "history" # Third message - user with variable - assert prompt_client.prompt[2]["type"] == "message" - assert prompt_client.prompt[2]["role"] == "user" - assert prompt_client.prompt[2]["content"] == "{{question}}" + assert prompt_client.raw_prompt[2]["type"] == "message" + assert prompt_client.raw_prompt[2]["role"] == "user" + assert prompt_client.raw_prompt[2]["content"] == "{{question}}" @pytest.mark.parametrize( ("variables", "placeholders", "expected_len", "expected_contents"), [ - # Variables only, no placeholders + # 0. Variables only, no placeholders. 
Expect verbatim message only + # Compile kills not filled placeholders ( {"role": "helpful", "task": "coding"}, {}, 2, ["You are a helpful assistant", "Help me with coding"], ), - # No variables, no placeholders + # 1. No variables, no placeholders. Expect verbatim message+placeholder output ({}, {}, 2, ["You are a {{role}} assistant", "Help me with {{task}}"]), - # Placeholders only, no variables + # 2. Placeholders only, empty variables. Expect output with placeholders filled in ( {}, { @@ -233,24 +233,7 @@ def test_get_prompt_with_placeholders(): "Help me with {{task}}", ], ), - # Placeholders only, variables None - ( - None, - { - "examples": [ - {"role": "user", "content": "Example question"}, - {"role": "assistant", "content": "Example answer"}, - ], - }, - 4, - [ - "You are a {{role}} assistant", - "Example question", - "Example answer", - "Help me with {{task}}", - ], - ), - # Both variables and placeholders + # 3. Both variables and placeholders. Expect fully compiled output ( {"role": "helpful", "task": "coding"}, { @@ -275,7 +258,7 @@ def test_get_prompt_with_placeholders(): # 2, # ["You are a helpful assistant", "Help me with coding"], # ), - # Unused placeholders + # 4. Unused placeholder fill ins. 
Expect verbatim message+placeholder output ( {"role": "helpful", "task": "coding"}, {"unused": [{"role": "user", "content": "Won't appear"}]}, @@ -305,14 +288,52 @@ def test_compile_with_placeholders( ], ) - result = ChatPromptClient(mock_prompt).compile_with_placeholders( - placeholders, - variables, + result = ( + ChatPromptClient(mock_prompt) + .update(placeholders) + .compile( + **variables, + ) ) assert len(result) == expected_len for i, expected_content in enumerate(expected_contents): - assert result[i]["content"] == expected_content + if "content" in result[i]: + assert result[i]["content"] == expected_content + elif "name" in result[i]: + # this is a placeholder + continue + else: + raise ValueError("Unexpected item in prompt compile output") + + +def test_warning_on_unresolved_placeholders(): + """Test that a warning is emitted when accessing prompt with unresolved placeholders.""" + from unittest.mock import patch + + langfuse = Langfuse() + prompt_name = create_uuid() + + langfuse.create_prompt( + name=prompt_name, + prompt=[ + {"role": "system", "content": "You are {{name}}"}, + {"type": "placeholder", "name": "history"}, + {"role": "user", "content": "{{question}}"}, + ], + type="chat", + ) + + prompt_client = langfuse.get_prompt(prompt_name, type="chat", version=1) + + # Test that warning is emitted when accessing prompt with unresolved placeholders + with patch("langfuse.logger.langfuse_logger.warning") as mock_warning: + _ = prompt_client.prompt # This should trigger the warning + + # Verify the warning was called with the expected message + mock_warning.assert_called_once() + warning_message = mock_warning.call_args[0][0] + assert "Placeholders ['history'] have no values set" in warning_message def test_compiling_chat_prompt(): diff --git a/tests/test_prompt_compilation.py b/tests/test_prompt_compilation.py index 73c5db8a9..c36a24f07 100644 --- a/tests/test_prompt_compilation.py +++ b/tests/test_prompt_compilation.py @@ -769,22 +769,18 @@ def 
test_chat_prompt_with_placeholders_langchain(self): } # Test compile_with_placeholders with only placeholders (no variables) - compiled_messages = prompt_client.compile_with_placeholders( + messages_with_placeholders = prompt_client.update( placeholders=placeholders, - ) + ).prompt - assert len(compiled_messages) == 4 + assert len(messages_with_placeholders) == 4 assert ( - compiled_messages[0]["content"] + messages_with_placeholders[0]["content"] == "You are a {{role}} assistant with {{capability}} capabilities." ) - assert compiled_messages[1]["content"] == "Example: What is 2+2?" - assert compiled_messages[2]["content"] == "2+2 equals 4." - assert compiled_messages[3]["content"] == "Help me with {{task}}." - - compiled_messages = prompt_client.compile_with_placeholders( - placeholders=placeholders, persist_compilation=True, - ) + assert messages_with_placeholders[1]["content"] == "Example: What is 2+2?" + assert messages_with_placeholders[2]["content"] == "2+2 equals 4." + assert messages_with_placeholders[3]["content"] == "Help me with {{task}}." langchain_messages = prompt_client.get_langchain_prompt( role="helpful", From 958914ba2abf9d3e7e6bd271739c0b07962b05eb Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 13:06:28 +0200 Subject: [PATCH 16/28] fix docstring --- langfuse/model.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 5f9811818..135ee1585 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -309,7 +309,8 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): @property def prompt(self) -> List[Union[ChatMessageDict, ChatMessagePlaceholderDict]]: """Returns the prompt with placeholders substituted for their values. - If no placeholders are set and raw_prompt contains placeholders, returns only messages. + If no placeholder fill-ins are set and raw_prompt contains placeholders, returns messages and placeholders + with a warning. 
""" compiled_messages = [] has_unresolved_placeholders = False @@ -345,8 +346,8 @@ def prompt(self) -> List[Union[ChatMessageDict, ChatMessagePlaceholderDict]]: msg["name"] for msg in self.raw_prompt if msg["type"] == "placeholder" ] err_unresolved_placeholders = f"Placeholders {unresolved} have no values set. Use update() to set placeholder values." + # Warning because users might want to further process placeholders as well langfuse_logger.warning(err_unresolved_placeholders) - # raise ValueError(err_unresolved_placeholders) elif has_unresolved_placeholders: unresolved = [ msg["name"] @@ -355,8 +356,8 @@ def prompt(self) -> List[Union[ChatMessageDict, ChatMessagePlaceholderDict]]: and msg["name"] not in self.placeholder_fillins ] err_unresolved_placeholders = f"Placeholders {unresolved} have no values set. Use update() to set placeholder values." + # Warning because users might want to further process placeholders as well langfuse_logger.warning(err_unresolved_placeholders) - # raise ValueError(err_unresolved_placeholders) return compiled_messages @@ -374,7 +375,7 @@ def prompt( ChatMessageWithPlaceholdersDict_Placeholder( type="placeholder", name=p.name, - ) + ), ) elif hasattr(p, "role") and hasattr(p, "content"): self.raw_prompt.append( @@ -382,7 +383,7 @@ def prompt( type="message", role=p.role, content=p.content, - ) + ), ) self.placeholder_fillins = {} # Clear because user expects old placeholders not to linger From 210021dee604853a7aa136448578ccfb2c84018a Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 13:18:59 +0200 Subject: [PATCH 17/28] remove superfluous set --- langfuse/model.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 135ee1585..9f1572720 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -402,18 +402,6 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: if "content" in chat_message and "role" in chat_message ] - def set(self, placeholders: Dict[str, 
List[ChatMessageDict]]) -> "ChatPromptClient": - """Sets the internal placeholders to the given dict - - Args: - placeholders: Dictionary mapping placeholder names to lists of chat messages - - Returns: - ChatPromptClient: Self for method chaining - """ - self.placeholder_fillins = placeholders.copy() - return self From 5d03dbaf2386eb149eddae62dad2b8f95b930a36 Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 13:26:44 +0200 Subject: [PATCH 18/28] update update --- langfuse/model.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 9f1572720..ef9cc42d2 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -405,9 +405,7 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: def update( self, placeholders: Dict[str, List[ChatMessageDict]] ) -> "ChatPromptClient": - """Sets the stored placeholder values to the provided ones.
Args: placeholders: Dictionary mapping placeholder names to lists of chat messages @@ -415,7 +413,7 @@ def update( Returns: ChatPromptClient: Self for method chaining """ - self.placeholder_fillins.update(placeholders) + self.placeholder_fillins = placeholders.copy() return self @property From e5c8272f9d4c7bf8c53d2c286681733b194a5bbe Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 13:54:46 +0200 Subject: [PATCH 19/28] update update again --- langfuse/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index ef9cc42d2..42e984f34 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -403,9 +403,9 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: ] def update( - self, placeholders: Dict[str, List[ChatMessageDict]] + self, *, placeholders: Dict[str, List[ChatMessageDict]] ) -> "ChatPromptClient": - """Sets the stored placeholder values to the provided ones. + """Updates the stored placeholder values with the provided ones. 
Args: placeholders: Dictionary mapping placeholder names to lists of chat messages @@ -413,7 +413,7 @@ def update( Returns: ChatPromptClient: Self for method chaining """ - self.placeholder_fillins = placeholders.copy() + self.placeholder_fillins.update(placeholders) return self @property From c0e261cbeed4fb82d2ee7c0aba3aff8ab7206f0b Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 15:32:05 +0200 Subject: [PATCH 20/28] fix test --- tests/test_prompt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 4d61c2f41..6d16fe638 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -290,7 +290,7 @@ def test_compile_with_placeholders( result = ( ChatPromptClient(mock_prompt) - .update(placeholders) + .update(placeholders=placeholders) .compile( **variables, ) From cbf226539af4fd86d282cf79d08e7a9f2afd64ca Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 16:27:31 +0200 Subject: [PATCH 21/28] fix test --- tests/test_prompt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 6d16fe638..280bd85ff 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -109,7 +109,7 @@ def test_create_chat_prompt_with_placeholders(): second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") second_prompt_client.update( - { + placeholders={ "history": [ {"role": "user", "content": "Example: {{task}}"}, {"role": "assistant", "content": "Example response"}, From c35d1b390d9ea23ff448139f4aca0c3be9748239 Mon Sep 17 00:00:00 2001 From: Nimar Date: Mon, 30 Jun 2025 22:14:45 +0200 Subject: [PATCH 22/28] use compile for all instead of update --- langfuse/model.py | 201 +++++++++++++++---------------- tests/test_prompt.py | 78 ++++++------ tests/test_prompt_compilation.py | 31 ++--- 3 files changed, 153 insertions(+), 157 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 42e984f34..16fbfda37 100644 --- 
a/langfuse/model.py +++ b/langfuse/model.py @@ -302,134 +302,121 @@ def get_langchain_prompt(self, **kwargs) -> str: class ChatPromptClient(BasePromptClient): def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): super().__init__(prompt, is_fallback) - self.raw_prompt: List[ChatMessageWithPlaceholdersDict] = [] - self.placeholder_fillins: Dict[str, List[ChatMessageDict]] = {} - self.prompt = prompt.prompt - - @property - def prompt(self) -> List[Union[ChatMessageDict, ChatMessagePlaceholderDict]]: - """Returns the prompt with placeholders substituted for their values. - If no placeholder fill-ins are set and raw_prompt contains placeholders, returns messages and placeholders - with a warning. - """ - compiled_messages = [] - has_unresolved_placeholders = False - - for chat_message in self.raw_prompt: - if chat_message["type"] == "message": - compiled_messages.append( - ChatMessageDict( - content=chat_message["content"], - role=chat_message["role"], - ), - ) - elif chat_message["type"] == "placeholder": - if chat_message["name"] in self.placeholder_fillins: - placeholder_messages = self.placeholder_fillins[ - chat_message["name"] - ] - if isinstance(placeholder_messages, List): - compiled_messages.extend(placeholder_messages) - else: - err_placeholder_not_list = f"Placeholder '{chat_message['name']}' must contain a list of chat messages, got {type(placeholder_messages)}" - raise ValueError(err_placeholder_not_list) - else: - compiled_messages.append( - { - "type": "placeholder", - "name": chat_message["name"], - }, - ) - has_unresolved_placeholders = True - if has_unresolved_placeholders and len(self.placeholder_fillins) == 0: - unresolved = [ - msg["name"] for msg in self.raw_prompt if msg["type"] == "placeholder" - ] - err_unresolved_placeholders = f"Placeholders {unresolved} have no values set. Use update() to set placeholder values." 
- # Warning because users might want to further process placeholders as well - langfuse_logger.warning(err_unresolved_placeholders) - elif has_unresolved_placeholders: - unresolved = [ - msg["name"] - for msg in self.raw_prompt - if msg["type"] == "placeholder" - and msg["name"] not in self.placeholder_fillins - ] - err_unresolved_placeholders = f"Placeholders {unresolved} have no values set. Use update() to set placeholder values." - # Warning because users might want to further process placeholders as well - langfuse_logger.warning(err_unresolved_placeholders) - return compiled_messages - - @prompt.setter - def prompt( - self, - prompt: Sequence[ - Union[ChatMessageWithPlaceholdersDict, ChatMessageWithPlaceholders] - ], - ) -> None: - """Backward-compatible setter for raw prompt structure.""" - for p in prompt: + # Convert and store the prompt directly + self.prompt = [] + for p in prompt.prompt: + # Handle objects with attributes (normal case) if hasattr(p, "type") and hasattr(p, "name") and p.type == "placeholder": - self.raw_prompt.append( + self.prompt.append( ChatMessageWithPlaceholdersDict_Placeholder( type="placeholder", name=p.name, ), ) elif hasattr(p, "role") and hasattr(p, "content"): - self.raw_prompt.append( + self.prompt.append( ChatMessageWithPlaceholdersDict_Message( type="message", role=p.role, content=p.content, ), ) - - self.placeholder_fillins = {} # Clear because user expects old placeholders not to linger + # Handle plain dictionaries (fallback case) + elif isinstance(p, dict): + if p.get("type") == "placeholder" and "name" in p: + self.prompt.append( + ChatMessageWithPlaceholdersDict_Placeholder( + type="placeholder", + name=p["name"], + ), + ) + elif "role" in p and "content" in p: + self.prompt.append( + ChatMessageWithPlaceholdersDict_Message( + type="message", + role=p["role"], + content=p["content"], + ), + ) def compile(self, **kwargs) -> List[ChatMessageDict]: - # Compile skips placeholders which aren't resolved - return [ - 
ChatMessageDict( - content=TemplateParser.compile_template( - chat_message["content"], - kwargs, - ), - role=chat_message["role"], - ) - for chat_message in self.prompt - if "content" in chat_message and "role" in chat_message - ] - - def update( - self, *, placeholders: Dict[str, List[ChatMessageDict]] - ) -> "ChatPromptClient": - """Updates the stored placeholder values with the provided ones. + """Compile the prompt with placeholders and variables. Args: - placeholders: Dictionary mapping placeholder names to lists of chat messages + **kwargs: Can contain both placeholder values (list of chat messages) and variable values. + Placeholders are resolved first, then variables are substituted. Returns: - ChatPromptClient: Self for method chaining + List of compiled chat messages as plain dictionaries. """ - self.placeholder_fillins.update(placeholders) - return self + compiled_messages = [] + unresolved_placeholders = [] + + for chat_message in self.prompt: + if chat_message["type"] == "message": + # For regular messages, compile variables and add to output + compiled_messages.append( + { + "role": chat_message["role"], + "content": TemplateParser.compile_template( + chat_message["content"], + kwargs, + ), + } + ) + elif chat_message["type"] == "placeholder": + # Check if placeholder value is provided in kwargs + placeholder_name = chat_message["name"] + if placeholder_name in kwargs: + placeholder_value = kwargs[placeholder_name] + if isinstance(placeholder_value, list): + # Add all messages from the placeholder + for msg in placeholder_value: + if ( + isinstance(msg, dict) + and "role" in msg + and "content" in msg + ): + # Compile variables in placeholder messages too + compiled_messages.append( + { + "role": msg["role"], + "content": TemplateParser.compile_template( + msg["content"], + kwargs, + ), + } + ) + else: + raise ValueError( + f"Placeholder '{placeholder_name}' must contain a list of chat messages with 'role' and 'content' fields" + ) + else: + raise 
ValueError( + f"Placeholder '{placeholder_name}' must contain a list of chat messages, got {type(placeholder_value)}" + ) + else: + # Placeholder not resolved - track it + unresolved_placeholders.append(placeholder_name) + + # Warn about unresolved placeholders + if unresolved_placeholders: + warning_msg = f"Placeholders {unresolved_placeholders} have not been resolved. Pass them as keyword arguments to compile()." + langfuse_logger.warning(warning_msg) + + return compiled_messages @property def variables(self) -> List[str]: """Return all the variable names in the chat prompt template.""" variables = [] - # Variables from raw prompt messages - for chat_message in self.raw_prompt: + # Variables from prompt messages + for chat_message in self.prompt: if chat_message["type"] == "message": variables.extend( TemplateParser.find_variable_names(chat_message["content"]) ) - # Variables from placeholder messages - for placeholder_messages in self.placeholder_fillins.values(): - for msg in placeholder_messages: - variables.extend(TemplateParser.find_variable_names(msg["content"])) return variables def __eq__(self, other): @@ -437,7 +424,7 @@ def __eq__(self, other): return ( self.name == other.name and self.version == other.version - and len(self.raw_prompt) == len(other.raw_prompt) + and len(self.prompt) == len(other.prompt) and all( # chatmessage equality ( @@ -453,10 +440,9 @@ def __eq__(self, other): and m2["type"] == "placeholder" and m1["name"] == m2["name"] ) - for m1, m2 in zip(self.raw_prompt, other.raw_prompt) + for m1, m2 in zip(self.prompt, other.prompt) ) and self.config == other.config - and self.placeholder_fillins == other.placeholder_fillins ) return False @@ -470,20 +456,21 @@ def get_langchain_prompt(self, **kwargs): kwargs: Optional keyword arguments to precompile the template string. Variables that match the provided keyword arguments will be precompiled. Remaining variables must then be handled by Langchain's prompt template. 
+ Can also contain placeholders (list of chat messages) which will be expanded. Returns: List of messages in the format expected by Langchain's ChatPromptTemplate: (role, content) tuple. """ + # First compile with placeholders and variables to get full message list + compiled_messages = self.compile(**kwargs) + + # Then convert to Langchain format return [ ( msg["role"], - self._get_langchain_prompt_string( - TemplateParser.compile_template(msg["content"], kwargs) - if kwargs - else msg["content"], - ), + self._get_langchain_prompt_string(msg["content"]), ) - for msg in self.prompt + for msg in compiled_messages ] diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 280bd85ff..8e8fea825 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -108,15 +108,14 @@ def test_create_chat_prompt_with_placeholders(): ) second_prompt_client = langfuse.get_prompt(prompt_name, type="chat") - second_prompt_client.update( - placeholders={ - "history": [ - {"role": "user", "content": "Example: {{task}}"}, - {"role": "assistant", "content": "Example response"}, - ], - }, + messages = second_prompt_client.compile( + role="helpful", + task="coding", + history=[ + {"role": "user", "content": "Example: {{task}}"}, + {"role": "assistant", "content": "Example response"}, + ], ) - messages = second_prompt_client.compile(role="helpful", task="coding") # Create a test generation using compiled messages completion = openai.OpenAI().chat.completions.create( @@ -158,16 +157,16 @@ def test_create_prompt_with_placeholders(): assert len(prompt_client.prompt) == 3 # First message - system - assert prompt_client.raw_prompt[0]["type"] == "message" - assert prompt_client.raw_prompt[0]["role"] == "system" - assert prompt_client.raw_prompt[0]["content"] == "System message" + assert prompt_client.prompt[0]["type"] == "message" + assert prompt_client.prompt[0]["role"] == "system" + assert prompt_client.prompt[0]["content"] == "System message" # Placeholder - assert 
prompt_client.raw_prompt[1]["type"] == "placeholder" - assert prompt_client.raw_prompt[1]["name"] == "context" + assert prompt_client.prompt[1]["type"] == "placeholder" + assert prompt_client.prompt[1]["name"] == "context" # Third message - user - assert prompt_client.raw_prompt[2]["type"] == "message" - assert prompt_client.raw_prompt[2]["role"] == "user" - assert prompt_client.raw_prompt[2]["content"] == "User message" + assert prompt_client.prompt[2]["type"] == "message" + assert prompt_client.prompt[2]["role"] == "user" + assert prompt_client.prompt[2]["content"] == "User message" def test_get_prompt_with_placeholders(): @@ -191,16 +190,16 @@ def test_get_prompt_with_placeholders(): assert len(prompt_client.prompt) == 3 # First message - system with variable - assert prompt_client.raw_prompt[0]["type"] == "message" - assert prompt_client.raw_prompt[0]["role"] == "system" - assert prompt_client.raw_prompt[0]["content"] == "You are {{name}}" + assert prompt_client.prompt[0]["type"] == "message" + assert prompt_client.prompt[0]["role"] == "system" + assert prompt_client.prompt[0]["content"] == "You are {{name}}" # Placeholder - assert prompt_client.raw_prompt[1]["type"] == "placeholder" - assert prompt_client.raw_prompt[1]["name"] == "history" + assert prompt_client.prompt[1]["type"] == "placeholder" + assert prompt_client.prompt[1]["name"] == "history" # Third message - user with variable - assert prompt_client.raw_prompt[2]["type"] == "message" - assert prompt_client.raw_prompt[2]["role"] == "user" - assert prompt_client.raw_prompt[2]["content"] == "{{question}}" + assert prompt_client.prompt[2]["type"] == "message" + assert prompt_client.prompt[2]["role"] == "user" + assert prompt_client.prompt[2]["content"] == "{{question}}" @pytest.mark.parametrize( @@ -288,13 +287,8 @@ def test_compile_with_placeholders( ], ) - result = ( - ChatPromptClient(mock_prompt) - .update(placeholders=placeholders) - .compile( - **variables, - ) - ) + compile_kwargs = 
{**placeholders, **variables} + result = ChatPromptClient(mock_prompt).compile(**compile_kwargs) assert len(result) == expected_len for i, expected_content in enumerate(expected_contents): @@ -308,7 +302,7 @@ def test_compile_with_placeholders( def test_warning_on_unresolved_placeholders(): - """Test that a warning is emitted when accessing prompt with unresolved placeholders.""" + """Test that a warning is emitted when compiling with unresolved placeholders.""" from unittest.mock import patch langfuse = Langfuse() @@ -326,14 +320,20 @@ def test_warning_on_unresolved_placeholders(): prompt_client = langfuse.get_prompt(prompt_name, type="chat", version=1) - # Test that warning is emitted when accessing prompt with unresolved placeholders + # Test that warning is emitted when compiling with unresolved placeholders with patch("langfuse.logger.langfuse_logger.warning") as mock_warning: - _ = prompt_client.prompt # This should trigger the warning + # Compile without providing the 'history' placeholder + result = prompt_client.compile(name="Assistant", question="What is 2+2?") # Verify the warning was called with the expected message mock_warning.assert_called_once() warning_message = mock_warning.call_args[0][0] - assert "Placeholders ['history'] have no values set" in warning_message + assert "Placeholders ['history'] have not been resolved" in warning_message + + # Verify the result only contains the resolved messages + assert len(result) == 2 + assert result[0]["content"] == "You are Assistant" + assert result[1]["content"] == "What is 2+2?" 
def test_compiling_chat_prompt(): @@ -1201,7 +1201,13 @@ def test_fallback_chat_prompt(): "nonexistent_chat_prompt", type="chat", fallback=fallback_chat_prompt ) - assert prompt.prompt == fallback_chat_prompt + # Check that the prompt structure contains the fallback data (allowing for internal formatting) + assert len(prompt.prompt) == len(fallback_chat_prompt) + assert all(msg["type"] == "message" for msg in prompt.prompt) + assert prompt.prompt[0]["role"] == "system" + assert prompt.prompt[0]["content"] == "fallback system" + assert prompt.prompt[1]["role"] == "user" + assert prompt.prompt[1]["content"] == "fallback user name {{name}}" assert prompt.compile(name="Jane") == [ {"role": "system", "content": "fallback system"}, {"role": "user", "content": "fallback user name Jane"}, diff --git a/tests/test_prompt_compilation.py b/tests/test_prompt_compilation.py index c36a24f07..158b1b705 100644 --- a/tests/test_prompt_compilation.py +++ b/tests/test_prompt_compilation.py @@ -761,31 +761,34 @@ def test_chat_prompt_with_placeholders_langchain(self): ), ) - placeholders = { - "examples": [ + # Test compile with placeholders and variables + compiled_messages = prompt_client.compile( + role="helpful", + capability="math", + task="addition", + examples=[ {"role": "user", "content": "Example: What is 2+2?"}, {"role": "assistant", "content": "2+2 equals 4."}, ], - } - - # Test compile_with_placeholders with only placeholders (no variables) - messages_with_placeholders = prompt_client.update( - placeholders=placeholders, - ).prompt + ) - assert len(messages_with_placeholders) == 4 + assert len(compiled_messages) == 4 assert ( - messages_with_placeholders[0]["content"] - == "You are a {{role}} assistant with {{capability}} capabilities." + compiled_messages[0]["content"] + == "You are a helpful assistant with math capabilities." ) - assert messages_with_placeholders[1]["content"] == "Example: What is 2+2?" - assert messages_with_placeholders[2]["content"] == "2+2 equals 4." 
- assert messages_with_placeholders[3]["content"] == "Help me with {{task}}." + assert compiled_messages[1]["content"] == "Example: What is 2+2?" + assert compiled_messages[2]["content"] == "2+2 equals 4." + assert compiled_messages[3]["content"] == "Help me with addition." langchain_messages = prompt_client.get_langchain_prompt( role="helpful", capability="math", task="addition", + examples=[ + {"role": "user", "content": "Example: What is 2+2?"}, + {"role": "assistant", "content": "2+2 equals 4."}, + ], ) langchain_prompt = ChatPromptTemplate.from_messages(langchain_messages) formatted_messages = langchain_prompt.format_messages() From 41c625227e58fea5bcf1ae636b5605f38c7096da Mon Sep 17 00:00:00 2001 From: Nimar Date: Tue, 1 Jul 2025 11:47:24 +0200 Subject: [PATCH 23/28] messagesplaceholder object returned --- langfuse/model.py | 74 +++++++++++++++++++------------- tests/test_prompt.py | 68 ++++++++++++++++++++++------- tests/test_prompt_compilation.py | 48 +++++++++++++++++++++ 3 files changed, 146 insertions(+), 44 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 16fbfda37..2f0162235 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -302,9 +302,8 @@ def get_langchain_prompt(self, **kwargs) -> str: class ChatPromptClient(BasePromptClient): def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): super().__init__(prompt, is_fallback) - - # Convert and store the prompt directly self.prompt = [] + for p in prompt.prompt: # Handle objects with attributes (normal case) if hasattr(p, "type") and hasattr(p, "name") and p.type == "placeholder": @@ -340,7 +339,9 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): ), ) - def compile(self, **kwargs) -> List[ChatMessageDict]: + def compile( + self, **kwargs + ) -> Sequence[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict_Placeholder]]: """Compile the prompt with placeholders and variables. 
Args: @@ -348,7 +349,7 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: Placeholders are resolved first, then variables are substituted. Returns: - List of compiled chat messages as plain dictionaries. + List of compiled chat messages as plain dictionaries, with unresolved placeholders kept as-is. """ compiled_messages = [] unresolved_placeholders = [] @@ -363,22 +364,19 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: chat_message["content"], kwargs, ), - } + }, ) elif chat_message["type"] == "placeholder": - # Check if placeholder value is provided in kwargs placeholder_name = chat_message["name"] if placeholder_name in kwargs: placeholder_value = kwargs[placeholder_name] if isinstance(placeholder_value, list): - # Add all messages from the placeholder for msg in placeholder_value: if ( isinstance(msg, dict) and "role" in msg and "content" in msg ): - # Compile variables in placeholder messages too compiled_messages.append( { "role": msg["role"], @@ -386,24 +384,28 @@ def compile(self, **kwargs) -> List[ChatMessageDict]: msg["content"], kwargs, ), - } + }, ) else: - raise ValueError( - f"Placeholder '{placeholder_name}' must contain a list of chat messages with 'role' and 'content' fields" + compiled_messages.append( + str(placeholder_value), ) + no_role_content_in_placeholder = f"Placeholder '{placeholder_name}' should contain a list of chat messages with 'role' and 'content' fields. Appended as string." 
+ langfuse_logger.warning(no_role_content_in_placeholder) else: - raise ValueError( - f"Placeholder '{placeholder_name}' must contain a list of chat messages, got {type(placeholder_value)}" + compiled_messages.append( + str(placeholder_value), ) + placeholder_not_a_list = f"Placeholder '{placeholder_name}' must contain a list of chat messages, got {type(placeholder_value)}" + langfuse_logger.warning(placeholder_not_a_list) else: - # Placeholder not resolved - track it + # Keep unresolved placeholder in the compiled messages + compiled_messages.append(chat_message) unresolved_placeholders.append(placeholder_name) - # Warn about unresolved placeholders if unresolved_placeholders: - warning_msg = f"Placeholders {unresolved_placeholders} have not been resolved. Pass them as keyword arguments to compile()." - langfuse_logger.warning(warning_msg) + unresolved_placeholders = f"Placeholders {unresolved_placeholders} have not been resolved. Pass them as keyword arguments to compile()." + langfuse_logger.warning(unresolved_placeholders) return compiled_messages @@ -415,7 +417,7 @@ def variables(self) -> List[str]: for chat_message in self.prompt: if chat_message["type"] == "message": variables.extend( - TemplateParser.find_variable_names(chat_message["content"]) + TemplateParser.find_variable_names(chat_message["content"]), ) return variables @@ -452,26 +454,40 @@ def get_langchain_prompt(self, **kwargs): It specifically adapts the mustache-style double curly braces {{variable}} used in Langfuse to the single curly brace {variable} format expected by Langchain. + Placeholders are filled-in from kwargs and unresolved placeholders are returned as Langchain MessagesPlaceholder. kwargs: Optional keyword arguments to precompile the template string. Variables that match the provided keyword arguments will be precompiled. Remaining variables must then be handled by Langchain's prompt template. - Can also contain placeholders (list of chat messages) which will be expanded. 
+ Can also contain placeholders (list of chat messages) which will be resolved prior to variable + compilation. Returns: - List of messages in the format expected by Langchain's ChatPromptTemplate: (role, content) tuple. + List of messages in the format expected by Langchain's ChatPromptTemplate: + (role, content) tuples for regular messages or MessagesPlaceholder objects for unresolved placeholders. """ - # First compile with placeholders and variables to get full message list compiled_messages = self.compile(**kwargs) + langchain_messages = [] - # Then convert to Langchain format - return [ - ( - msg["role"], - self._get_langchain_prompt_string(msg["content"]), - ) - for msg in compiled_messages - ] + for msg in compiled_messages: + if "type" in msg and msg["type"] == "placeholder": + # unresolved placeholder -> add LC MessagesPlaceholder + placeholder_name = msg["name"] + try: + from langchain_core.prompts.chat import MessagesPlaceholder # noqa: PLC0415, I001 + + langchain_messages.append( + MessagesPlaceholder(variable_name=placeholder_name), + ) + except ImportError as e: + import_error = "langchain_core is required to use get_langchain_prompt() with unresolved placeholders." + raise ImportError(import_error) from e + else: + langchain_messages.append( + (msg["role"], self._get_langchain_prompt_string(msg["content"])), + ) + + return langchain_messages PromptClient = Union[TextPromptClient, ChatPromptClient] diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 8e8fea825..3832f63b7 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -205,16 +205,24 @@ def test_get_prompt_with_placeholders(): @pytest.mark.parametrize( ("variables", "placeholders", "expected_len", "expected_contents"), [ - # 0. Variables only, no placeholders. Expect verbatim message only - # Compile kills not filled placeholders + # 0. Variables only, no placeholders. 
Unresolved placeholders kept in output ( {"role": "helpful", "task": "coding"}, {}, - 2, - ["You are a helpful assistant", "Help me with coding"], + 3, + [ + "You are a helpful assistant", + None, + "Help me with coding", + ], # None = placeholder ), # 1. No variables, no placeholders. Expect verbatim message+placeholder output - ({}, {}, 2, ["You are a {{role}} assistant", "Help me with {{task}}"]), + ( + {}, + {}, + 3, + ["You are a {{role}} assistant", None, "Help me with {{task}}"], + ), # None = placeholder # 2. Placeholders only, empty variables. Expect output with placeholders filled in ( {}, @@ -257,18 +265,45 @@ def test_get_prompt_with_placeholders(): # 2, # ["You are a helpful assistant", "Help me with coding"], # ), - # 4. Unused placeholder fill ins. Expect verbatim message+placeholder output + # 4. Unused placeholder fill ins. Unresolved placeholders kept in output ( {"role": "helpful", "task": "coding"}, {"unused": [{"role": "user", "content": "Won't appear"}]}, - 2, - ["You are a helpful assistant", "Help me with coding"], + 3, + [ + "You are a helpful assistant", + None, + "Help me with coding", + ], # None = placeholder + ), + # 5. Placeholder with non-list value (should log warning and create invalid dict) + ( + {"role": "helpful", "task": "coding"}, + {"examples": "not a list"}, + 3, + [ + "You are a helpful assistant", + "{'not a list'}", # Invalid dict becomes string when checked + "Help me with coding", + ], + ), + # 6. 
Placeholder with invalid message structure (should log warning and include both) + ( + {"role": "helpful", "task": "coding"}, + {"examples": ["invalid message", {"role": "user", "content": "valid message"}]}, + 4, + [ + "You are a helpful assistant", + "{\"['invalid message', {'role': 'user', 'content': 'valid message'}]\"}", # Invalid structure becomes string + "valid message", # Valid message processed normally + "Help me with coding", + ], ), ], ) def test_compile_with_placeholders( variables, placeholders, expected_len, expected_contents -): +) -> None: """Test compile_with_placeholders with different variable/placeholder combinations.""" from langfuse.api.resources.prompts import Prompt_Chat from langfuse.model import ChatPromptClient @@ -292,13 +327,16 @@ def test_compile_with_placeholders( assert len(result) == expected_len for i, expected_content in enumerate(expected_contents): - if "content" in result[i]: - assert result[i]["content"] == expected_content - elif "name" in result[i]: - # this is a placeholder - continue + if expected_content is None: + # This should be an unresolved placeholder + assert "type" in result[i] and result[i]["type"] == "placeholder" + elif expected_content.startswith("{") and expected_content.endswith("}"): + # This is an invalid dictionary that becomes a string representation + assert str(result[i]) == expected_content else: - raise ValueError("Unexpected item in prompt compile output") + # This should be a regular message + assert "content" in result[i] + assert result[i]["content"] == expected_content def test_warning_on_unresolved_placeholders(): diff --git a/tests/test_prompt_compilation.py b/tests/test_prompt_compilation.py index 158b1b705..bb019de49 100644 --- a/tests/test_prompt_compilation.py +++ b/tests/test_prompt_compilation.py @@ -801,3 +801,51 @@ def test_chat_prompt_with_placeholders_langchain(self): assert formatted_messages[1].content == "Example: What is 2+2?" 
assert formatted_messages[2].content == "2+2 equals 4." assert formatted_messages[3].content == "Help me with addition." + + def test_get_langchain_prompt_with_unresolved_placeholders(self): + """Test that unresolved placeholders become MessagesPlaceholder objects.""" + from langfuse.api.resources.prompts import Prompt_Chat + from langfuse.model import ChatPromptClient + + chat_messages = [ + {"role": "system", "content": "You are a {{role}} assistant"}, + {"type": "placeholder", "name": "examples"}, + {"role": "user", "content": "Help me with {{task}}"}, + ] + + prompt_client = ChatPromptClient( + Prompt_Chat( + type="chat", + name="test_unresolved_placeholder", + version=1, + config={}, + tags=[], + labels=[], + prompt=chat_messages, + ), + ) + + # Call get_langchain_prompt without resolving placeholder + langchain_messages = prompt_client.get_langchain_prompt( + role="helpful", task="coding", + ) + + # Should have 3 items: system message, MessagesPlaceholder, user message + assert len(langchain_messages) == 3 + + # First message should be the system message + assert langchain_messages[0] == ("system", "You are a helpful assistant") + + # Second should be a MessagesPlaceholder for the unresolved placeholder + placeholder_msg = langchain_messages[1] + try: + from langchain_core.prompts.chat import MessagesPlaceholder + + assert isinstance(placeholder_msg, MessagesPlaceholder) + assert placeholder_msg.variable_name == "examples" + except ImportError: + # Fallback case when langchain_core is not available + assert placeholder_msg == ("system", "{examples}") + + # Third message should be the user message + assert langchain_messages[2] == ("user", "Help me with coding") From 4845864862ad088051d5f3d01745ded027a0eec4 Mon Sep 17 00:00:00 2001 From: Nimar Date: Tue, 1 Jul 2025 11:56:48 +0200 Subject: [PATCH 24/28] lint it --- langfuse/model.py | 17 ----------------- tests/test_prompt.py | 19 ++++++++++++------- tests/test_prompt_compilation.py | 3 ++- 3 files changed, 14 
insertions(+), 25 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 2f0162235..405bd0287 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -321,23 +321,6 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): content=p.content, ), ) - # Handle plain dictionaries (fallback case) - elif isinstance(p, dict): - if p.get("type") == "placeholder" and "name" in p: - self.prompt.append( - ChatMessageWithPlaceholdersDict_Placeholder( - type="placeholder", - name=p["name"], - ), - ) - elif "role" in p and "content" in p: - self.prompt.append( - ChatMessageWithPlaceholdersDict_Message( - type="message", - role=p["role"], - content=p["content"], - ), - ) def compile( self, **kwargs diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 3832f63b7..864ce0aa4 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -276,25 +276,30 @@ def test_get_prompt_with_placeholders(): "Help me with coding", ], # None = placeholder ), - # 5. Placeholder with non-list value (should log warning and create invalid dict) + # 5. Placeholder with non-list value (should log warning and append as string) ( {"role": "helpful", "task": "coding"}, {"examples": "not a list"}, 3, [ "You are a helpful assistant", - "{'not a list'}", # Invalid dict becomes string when checked + "not a list", # String value appended directly "Help me with coding", ], ), # 6. 
Placeholder with invalid message structure (should log warning and include both) ( {"role": "helpful", "task": "coding"}, - {"examples": ["invalid message", {"role": "user", "content": "valid message"}]}, + { + "examples": [ + "invalid message", + {"role": "user", "content": "valid message"}, + ] + }, 4, [ "You are a helpful assistant", - "{\"['invalid message', {'role': 'user', 'content': 'valid message'}]\"}", # Invalid structure becomes string + "['invalid message', {'role': 'user', 'content': 'valid message'}]", # Invalid structure becomes string "valid message", # Valid message processed normally "Help me with coding", ], @@ -330,9 +335,9 @@ def test_compile_with_placeholders( if expected_content is None: # This should be an unresolved placeholder assert "type" in result[i] and result[i]["type"] == "placeholder" - elif expected_content.startswith("{") and expected_content.endswith("}"): - # This is an invalid dictionary that becomes a string representation - assert str(result[i]) == expected_content + elif isinstance(result[i], str): + # This is a string value from invalid placeholder + assert result[i] == expected_content else: # This should be a regular message assert "content" in result[i] diff --git a/tests/test_prompt_compilation.py b/tests/test_prompt_compilation.py index bb019de49..c8aa789dc 100644 --- a/tests/test_prompt_compilation.py +++ b/tests/test_prompt_compilation.py @@ -827,7 +827,8 @@ def test_get_langchain_prompt_with_unresolved_placeholders(self): # Call get_langchain_prompt without resolving placeholder langchain_messages = prompt_client.get_langchain_prompt( - role="helpful", task="coding", + role="helpful", + task="coding", ) # Should have 3 items: system message, MessagesPlaceholder, user message From 1bc57c93ef8deaa67ed7850b2768af7cc8f0d599 Mon Sep 17 00:00:00 2001 From: Nimar Date: Tue, 1 Jul 2025 11:59:54 +0200 Subject: [PATCH 25/28] fix lint --- langfuse/model.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 
deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 405bd0287..3062ba0dd 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -38,9 +38,6 @@ CreateDatasetRequest, ) from langfuse.api.resources.prompts import ChatMessage, Prompt, Prompt_Chat, Prompt_Text -from langfuse.api.resources.prompts.types.chat_message_with_placeholders import ( - ChatMessageWithPlaceholders, -) class ModelUsage(TypedDict): @@ -321,6 +318,23 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): content=p.content, ), ) + # Handle plain dictionaries (fallback case) + elif isinstance(p, dict): + if p.get("type") == "placeholder" and "name" in p: + self.prompt.append( + ChatMessageWithPlaceholdersDict_Placeholder( + type="placeholder", + name=p["name"], + ), + ) + elif "role" in p and "content" in p: + self.prompt.append( + ChatMessageWithPlaceholdersDict_Message( + type="message", + role=p["role"], + content=p["content"], + ), + ) def compile( self, **kwargs From acd9c6fe11423b9015d570713efc6ce795891a2c Mon Sep 17 00:00:00 2001 From: Nimar Date: Tue, 1 Jul 2025 12:00:43 +0200 Subject: [PATCH 26/28] remove unsused --- langfuse/model.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/langfuse/model.py b/langfuse/model.py index 3062ba0dd..521f9a82c 100644 --- a/langfuse/model.py +++ b/langfuse/model.py @@ -318,23 +318,6 @@ def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False): content=p.content, ), ) - # Handle plain dictionaries (fallback case) - elif isinstance(p, dict): - if p.get("type") == "placeholder" and "name" in p: - self.prompt.append( - ChatMessageWithPlaceholdersDict_Placeholder( - type="placeholder", - name=p["name"], - ), - ) - elif "role" in p and "content" in p: - self.prompt.append( - ChatMessageWithPlaceholdersDict_Message( - type="message", - role=p["role"], - content=p["content"], - ), - ) def compile( self, **kwargs From 8d53cd9dbcb1ed56958891ecc3699d905fd84e5c Mon Sep 17 00:00:00 2001 From: Nimar Date: 
Tue, 1 Jul 2025 13:16:11 +0200 Subject: [PATCH 27/28] fix test --- tests/test_prompt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 864ce0aa4..5e7553db1 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -374,7 +374,7 @@ def test_warning_on_unresolved_placeholders(): assert "Placeholders ['history'] have not been resolved" in warning_message # Verify the result only contains the resolved messages - assert len(result) == 2 + assert len(result) == 3 assert result[0]["content"] == "You are Assistant" assert result[1]["content"] == "What is 2+2?" From 1c093d29ee370a278c86868650a6269305bcde11 Mon Sep 17 00:00:00 2001 From: Nimar Date: Tue, 1 Jul 2025 13:38:55 +0200 Subject: [PATCH 28/28] fix test --- tests/test_prompt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 5e7553db1..d3c20d285 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -376,7 +376,8 @@ def test_warning_on_unresolved_placeholders(): # Verify the result only contains the resolved messages assert len(result) == 3 assert result[0]["content"] == "You are Assistant" - assert result[1]["content"] == "What is 2+2?" + assert result[1]["name"] == "history" + assert result[2]["content"] == "What is 2+2?" def test_compiling_chat_prompt():