diff --git a/langfuse/_client/client.py b/langfuse/_client/client.py
index 93ac91bd1..f6745b7d9 100644
--- a/langfuse/_client/client.py
+++ b/langfuse/_client/client.py
@@ -55,6 +55,7 @@
from langfuse.media import LangfuseMedia
from langfuse.model import (
ChatMessageDict,
+ ChatMessageWithPlaceholdersDict,
ChatPromptClient,
CreateDatasetItemRequest,
CreateDatasetRequest,
@@ -2133,7 +2134,7 @@ def create_prompt(
self,
*,
name: str,
- prompt: List[ChatMessageDict],
+ prompt: List[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict]],
labels: List[str] = [],
tags: Optional[List[str]] = None,
type: Optional[Literal["chat"]],
@@ -2158,7 +2159,9 @@ def create_prompt(
self,
*,
name: str,
- prompt: Union[str, List[ChatMessageDict]],
+ prompt: Union[
+ str, List[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict]]
+ ],
labels: List[str] = [],
tags: Optional[List[str]] = None,
type: Optional[Literal["chat", "text"]] = "text",
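Review note: the widened overloads above let callers pass placeholder entries alongside regular chat messages as plain dicts. A minimal usage sketch (prompt name and message contents are illustrative; assumes a configured `Langfuse` client):

```python
from langfuse import Langfuse

langfuse = Langfuse()  # credentials read from LANGFUSE_* environment variables

# A chat prompt mixing regular messages with a named placeholder slot.
langfuse.create_prompt(
    name="support-chat",  # illustrative name
    type="chat",
    prompt=[
        {"role": "system", "content": "You are a {{tone}} assistant"},
        {"type": "placeholder", "name": "history"},
        {"role": "user", "content": "{{question}}"},
    ],
    labels=["production"],
)
```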
diff --git a/langfuse/api/__init__.py b/langfuse/api/__init__.py
index bf6bdb508..2a274a811 100644
--- a/langfuse/api/__init__.py
+++ b/langfuse/api/__init__.py
@@ -21,6 +21,9 @@
CategoricalScore,
CategoricalScoreV1,
ChatMessage,
+ ChatMessageWithPlaceholders,
+ ChatMessageWithPlaceholders_Chatmessage,
+ ChatMessageWithPlaceholders_Placeholder,
ChatPrompt,
Comment,
CommentObjectType,
@@ -126,6 +129,7 @@
PaginatedModels,
PaginatedSessions,
PatchMediaBody,
+ PlaceholderMessage,
Project,
ProjectDeletionResponse,
Projects,
@@ -230,6 +234,9 @@
"CategoricalScore",
"CategoricalScoreV1",
"ChatMessage",
+ "ChatMessageWithPlaceholders",
+ "ChatMessageWithPlaceholders_Chatmessage",
+ "ChatMessageWithPlaceholders_Placeholder",
"ChatPrompt",
"Comment",
"CommentObjectType",
@@ -335,6 +342,7 @@
"PaginatedModels",
"PaginatedSessions",
"PatchMediaBody",
+ "PlaceholderMessage",
"Project",
"ProjectDeletionResponse",
"Projects",
diff --git a/langfuse/api/reference.md b/langfuse/api/reference.md
index e5595df8c..994933edc 100644
--- a/langfuse/api/reference.md
+++ b/langfuse/api/reference.md
@@ -4290,7 +4290,10 @@ Create a new version for the prompt with the given `name`
```python
-from langfuse import ChatMessage, CreatePromptRequest_Chat
+from langfuse import (
+ ChatMessageWithPlaceholders_Chatmessage,
+ CreatePromptRequest_Chat,
+)
from langfuse.client import FernLangfuse
client = FernLangfuse(
@@ -4305,11 +4308,11 @@ client.prompts.create(
request=CreatePromptRequest_Chat(
name="name",
prompt=[
- ChatMessage(
+ ChatMessageWithPlaceholders_Chatmessage(
role="role",
content="content",
),
- ChatMessage(
+ ChatMessageWithPlaceholders_Chatmessage(
role="role",
content="content",
),
diff --git a/langfuse/api/resources/__init__.py b/langfuse/api/resources/__init__.py
index 6ddea00eb..453774283 100644
--- a/langfuse/api/resources/__init__.py
+++ b/langfuse/api/resources/__init__.py
@@ -173,12 +173,16 @@
from .prompts import (
BasePrompt,
ChatMessage,
+ ChatMessageWithPlaceholders,
+ ChatMessageWithPlaceholders_Chatmessage,
+ ChatMessageWithPlaceholders_Placeholder,
ChatPrompt,
CreateChatPromptRequest,
CreatePromptRequest,
CreatePromptRequest_Chat,
CreatePromptRequest_Text,
CreateTextPromptRequest,
+ PlaceholderMessage,
Prompt,
PromptMeta,
PromptMetaListResponse,
@@ -242,6 +246,9 @@
"CategoricalScore",
"CategoricalScoreV1",
"ChatMessage",
+ "ChatMessageWithPlaceholders",
+ "ChatMessageWithPlaceholders_Chatmessage",
+ "ChatMessageWithPlaceholders_Placeholder",
"ChatPrompt",
"Comment",
"CommentObjectType",
@@ -347,6 +354,7 @@
"PaginatedModels",
"PaginatedSessions",
"PatchMediaBody",
+ "PlaceholderMessage",
"Project",
"ProjectDeletionResponse",
"Projects",
diff --git a/langfuse/api/resources/prompts/__init__.py b/langfuse/api/resources/prompts/__init__.py
index 73dcf38b0..77c27486d 100644
--- a/langfuse/api/resources/prompts/__init__.py
+++ b/langfuse/api/resources/prompts/__init__.py
@@ -3,12 +3,16 @@
from .types import (
BasePrompt,
ChatMessage,
+ ChatMessageWithPlaceholders,
+ ChatMessageWithPlaceholders_Chatmessage,
+ ChatMessageWithPlaceholders_Placeholder,
ChatPrompt,
CreateChatPromptRequest,
CreatePromptRequest,
CreatePromptRequest_Chat,
CreatePromptRequest_Text,
CreateTextPromptRequest,
+ PlaceholderMessage,
Prompt,
PromptMeta,
PromptMetaListResponse,
@@ -20,12 +24,16 @@
__all__ = [
"BasePrompt",
"ChatMessage",
+ "ChatMessageWithPlaceholders",
+ "ChatMessageWithPlaceholders_Chatmessage",
+ "ChatMessageWithPlaceholders_Placeholder",
"ChatPrompt",
"CreateChatPromptRequest",
"CreatePromptRequest",
"CreatePromptRequest_Chat",
"CreatePromptRequest_Text",
"CreateTextPromptRequest",
+ "PlaceholderMessage",
"Prompt",
"PromptMeta",
"PromptMetaListResponse",
diff --git a/langfuse/api/resources/prompts/client.py b/langfuse/api/resources/prompts/client.py
index b41ab5642..c38c20156 100644
--- a/langfuse/api/resources/prompts/client.py
+++ b/langfuse/api/resources/prompts/client.py
@@ -228,7 +228,10 @@ def create(
Examples
--------
- from langfuse import ChatMessage, CreatePromptRequest_Chat
+ from langfuse import (
+ ChatMessageWithPlaceholders_Chatmessage,
+ CreatePromptRequest_Chat,
+ )
from langfuse.client import FernLangfuse
client = FernLangfuse(
@@ -243,11 +246,11 @@ def create(
request=CreatePromptRequest_Chat(
name="name",
prompt=[
- ChatMessage(
+ ChatMessageWithPlaceholders_Chatmessage(
role="role",
content="content",
),
- ChatMessage(
+ ChatMessageWithPlaceholders_Chatmessage(
role="role",
content="content",
),
@@ -512,7 +515,10 @@ async def create(
--------
import asyncio
- from langfuse import ChatMessage, CreatePromptRequest_Chat
+ from langfuse import (
+ ChatMessageWithPlaceholders_Chatmessage,
+ CreatePromptRequest_Chat,
+ )
from langfuse.client import AsyncFernLangfuse
client = AsyncFernLangfuse(
@@ -530,11 +536,11 @@ async def main() -> None:
request=CreatePromptRequest_Chat(
name="name",
prompt=[
- ChatMessage(
+ ChatMessageWithPlaceholders_Chatmessage(
role="role",
content="content",
),
- ChatMessage(
+ ChatMessageWithPlaceholders_Chatmessage(
role="role",
content="content",
),
diff --git a/langfuse/api/resources/prompts/types/__init__.py b/langfuse/api/resources/prompts/types/__init__.py
index cb5ba920c..3067f9f04 100644
--- a/langfuse/api/resources/prompts/types/__init__.py
+++ b/langfuse/api/resources/prompts/types/__init__.py
@@ -2,6 +2,11 @@
from .base_prompt import BasePrompt
from .chat_message import ChatMessage
+from .chat_message_with_placeholders import (
+ ChatMessageWithPlaceholders,
+ ChatMessageWithPlaceholders_Chatmessage,
+ ChatMessageWithPlaceholders_Placeholder,
+)
from .chat_prompt import ChatPrompt
from .create_chat_prompt_request import CreateChatPromptRequest
from .create_prompt_request import (
@@ -10,6 +15,7 @@
CreatePromptRequest_Text,
)
from .create_text_prompt_request import CreateTextPromptRequest
+from .placeholder_message import PlaceholderMessage
from .prompt import Prompt, Prompt_Chat, Prompt_Text
from .prompt_meta import PromptMeta
from .prompt_meta_list_response import PromptMetaListResponse
@@ -18,12 +24,16 @@
__all__ = [
"BasePrompt",
"ChatMessage",
+ "ChatMessageWithPlaceholders",
+ "ChatMessageWithPlaceholders_Chatmessage",
+ "ChatMessageWithPlaceholders_Placeholder",
"ChatPrompt",
"CreateChatPromptRequest",
"CreatePromptRequest",
"CreatePromptRequest_Chat",
"CreatePromptRequest_Text",
"CreateTextPromptRequest",
+ "PlaceholderMessage",
"Prompt",
"PromptMeta",
"PromptMetaListResponse",
diff --git a/langfuse/api/resources/prompts/types/chat_message_with_placeholders.py b/langfuse/api/resources/prompts/types/chat_message_with_placeholders.py
new file mode 100644
index 000000000..dc12d5073
--- /dev/null
+++ b/langfuse/api/resources/prompts/types/chat_message_with_placeholders.py
@@ -0,0 +1,87 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ChatMessageWithPlaceholders_Chatmessage(pydantic_v1.BaseModel):
+ role: str
+ content: str
+ type: typing.Literal["chatmessage"] = "chatmessage"
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+class ChatMessageWithPlaceholders_Placeholder(pydantic_v1.BaseModel):
+ name: str
+ type: typing.Literal["placeholder"] = "placeholder"
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+ChatMessageWithPlaceholders = typing.Union[
+ ChatMessageWithPlaceholders_Chatmessage, ChatMessageWithPlaceholders_Placeholder
+]
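Review note: the two generated variants form a tagged union discriminated by the `type` literal. A quick sketch of how the discriminator survives serialization (field values are illustrative):

```python
from langfuse.api import (
    ChatMessageWithPlaceholders_Chatmessage,
    ChatMessageWithPlaceholders_Placeholder,
)

prompt = [
    ChatMessageWithPlaceholders_Chatmessage(role="system", content="You are terse."),
    ChatMessageWithPlaceholders_Placeholder(name="history"),
]

for item in prompt:
    # dict() merges an exclude_unset pass with an exclude_none pass, so the
    # defaulted `type` discriminator is retained in the output.
    print(item.dict())
# {'role': 'system', 'content': 'You are terse.', 'type': 'chatmessage'}
# {'name': 'history', 'type': 'placeholder'}
```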
diff --git a/langfuse/api/resources/prompts/types/chat_prompt.py b/langfuse/api/resources/prompts/types/chat_prompt.py
index 7699d288d..494449ea2 100644
--- a/langfuse/api/resources/prompts/types/chat_prompt.py
+++ b/langfuse/api/resources/prompts/types/chat_prompt.py
@@ -6,11 +6,11 @@
from ....core.datetime_utils import serialize_datetime
from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
from .base_prompt import BasePrompt
-from .chat_message import ChatMessage
+from .chat_message_with_placeholders import ChatMessageWithPlaceholders
class ChatPrompt(BasePrompt):
- prompt: typing.List[ChatMessage]
+ prompt: typing.List[ChatMessageWithPlaceholders]
def json(self, **kwargs: typing.Any) -> str:
kwargs_with_defaults: typing.Any = {
diff --git a/langfuse/api/resources/prompts/types/create_chat_prompt_request.py b/langfuse/api/resources/prompts/types/create_chat_prompt_request.py
index 95d55c88a..1442164a6 100644
--- a/langfuse/api/resources/prompts/types/create_chat_prompt_request.py
+++ b/langfuse/api/resources/prompts/types/create_chat_prompt_request.py
@@ -5,12 +5,12 @@
from ....core.datetime_utils import serialize_datetime
from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
-from .chat_message import ChatMessage
+from .chat_message_with_placeholders import ChatMessageWithPlaceholders
class CreateChatPromptRequest(pydantic_v1.BaseModel):
name: str
- prompt: typing.List[ChatMessage]
+ prompt: typing.List[ChatMessageWithPlaceholders]
config: typing.Optional[typing.Any] = None
labels: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
"""
diff --git a/langfuse/api/resources/prompts/types/create_prompt_request.py b/langfuse/api/resources/prompts/types/create_prompt_request.py
index 76cba7ff9..b9518a7c4 100644
--- a/langfuse/api/resources/prompts/types/create_prompt_request.py
+++ b/langfuse/api/resources/prompts/types/create_prompt_request.py
@@ -7,12 +7,12 @@
from ....core.datetime_utils import serialize_datetime
from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
-from .chat_message import ChatMessage
+from .chat_message_with_placeholders import ChatMessageWithPlaceholders
class CreatePromptRequest_Chat(pydantic_v1.BaseModel):
name: str
- prompt: typing.List[ChatMessage]
+ prompt: typing.List[ChatMessageWithPlaceholders]
config: typing.Optional[typing.Any] = None
labels: typing.Optional[typing.List[str]] = None
tags: typing.Optional[typing.List[str]] = None
diff --git a/langfuse/api/resources/prompts/types/placeholder_message.py b/langfuse/api/resources/prompts/types/placeholder_message.py
new file mode 100644
index 000000000..a3352b391
--- /dev/null
+++ b/langfuse/api/resources/prompts/types/placeholder_message.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class PlaceholderMessage(pydantic_v1.BaseModel):
+ name: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
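Review note: `PlaceholderMessage` is the bare API model (just `name`), while `ChatMessageWithPlaceholders_Placeholder` carries the `type` discriminator used inside prompt arrays. A small sketch of the distinction (values illustrative):

```python
from langfuse.api import (
    ChatMessageWithPlaceholders_Placeholder,
    PlaceholderMessage,
)

bare = PlaceholderMessage(name="history")
tagged = ChatMessageWithPlaceholders_Placeholder(name="history")

print(bare.dict())    # {'name': 'history'}
print(tagged.dict())  # {'name': 'history', 'type': 'placeholder'}
```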
diff --git a/langfuse/api/resources/prompts/types/prompt.py b/langfuse/api/resources/prompts/types/prompt.py
index 2fee54b70..1ad894879 100644
--- a/langfuse/api/resources/prompts/types/prompt.py
+++ b/langfuse/api/resources/prompts/types/prompt.py
@@ -7,11 +7,11 @@
from ....core.datetime_utils import serialize_datetime
from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
-from .chat_message import ChatMessage
+from .chat_message_with_placeholders import ChatMessageWithPlaceholders
class Prompt_Chat(pydantic_v1.BaseModel):
- prompt: typing.List[ChatMessage]
+ prompt: typing.List[ChatMessageWithPlaceholders]
name: str
version: int
config: typing.Any
diff --git a/langfuse/model.py b/langfuse/model.py
index f2072f400..521f9a82c 100644
--- a/langfuse/model.py
+++ b/langfuse/model.py
@@ -2,7 +2,8 @@
import re
from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union
+from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, TypedDict, Union
+from langfuse.logger import langfuse_logger
from langfuse.api.resources.commons.types.dataset import (
Dataset, # noqa: F401
@@ -54,6 +55,28 @@ class ChatMessageDict(TypedDict):
content: str
+class ChatMessagePlaceholderDict(TypedDict):
+    name: str
+
+
+class ChatMessageWithPlaceholdersDict_Message(TypedDict):
+ type: Literal["message"]
+ role: str
+ content: str
+
+
+class ChatMessageWithPlaceholdersDict_Placeholder(TypedDict):
+ type: Literal["placeholder"]
+ name: str
+
+
+ChatMessageWithPlaceholdersDict = Union[
+ ChatMessageWithPlaceholdersDict_Message,
+ ChatMessageWithPlaceholdersDict_Placeholder,
+]
+
+
class TemplateParser:
OPENING = "{{"
CLOSING = "}}"
@@ -276,37 +299,129 @@ def get_langchain_prompt(self, **kwargs) -> str:
class ChatPromptClient(BasePromptClient):
def __init__(self, prompt: Prompt_Chat, is_fallback: bool = False):
super().__init__(prompt, is_fallback)
- self.prompt = [
- ChatMessageDict(role=p.role, content=p.content) for p in prompt.prompt
- ]
-
- def compile(self, **kwargs) -> List[ChatMessageDict]:
- return [
- ChatMessageDict(
- content=TemplateParser.compile_template(
- chat_message["content"], kwargs
- ),
- role=chat_message["role"],
- )
- for chat_message in self.prompt
- ]
+ self.prompt = []
+
+ for p in prompt.prompt:
+ # Handle objects with attributes (normal case)
+ if hasattr(p, "type") and hasattr(p, "name") and p.type == "placeholder":
+ self.prompt.append(
+ ChatMessageWithPlaceholdersDict_Placeholder(
+ type="placeholder",
+ name=p.name,
+ ),
+ )
+ elif hasattr(p, "role") and hasattr(p, "content"):
+ self.prompt.append(
+ ChatMessageWithPlaceholdersDict_Message(
+ type="message",
+ role=p.role,
+ content=p.content,
+ ),
+ )
+
+ def compile(
+ self, **kwargs
+ ) -> Sequence[Union[ChatMessageDict, ChatMessageWithPlaceholdersDict_Placeholder]]:
+ """Compile the prompt with placeholders and variables.
+
+ Args:
+ **kwargs: Can contain both placeholder values (list of chat messages) and variable values.
+ Placeholders are resolved first, then variables are substituted.
+
+ Returns:
+ List of compiled chat messages as plain dictionaries, with unresolved placeholders kept as-is.
+ """
+ compiled_messages = []
+ unresolved_placeholders = []
+
+ for chat_message in self.prompt:
+ if chat_message["type"] == "message":
+ # For regular messages, compile variables and add to output
+ compiled_messages.append(
+ {
+ "role": chat_message["role"],
+ "content": TemplateParser.compile_template(
+ chat_message["content"],
+ kwargs,
+ ),
+ },
+ )
+ elif chat_message["type"] == "placeholder":
+ placeholder_name = chat_message["name"]
+ if placeholder_name in kwargs:
+ placeholder_value = kwargs[placeholder_name]
+ if isinstance(placeholder_value, list):
+ for msg in placeholder_value:
+ if (
+ isinstance(msg, dict)
+ and "role" in msg
+ and "content" in msg
+ ):
+ compiled_messages.append(
+ {
+ "role": msg["role"],
+ "content": TemplateParser.compile_template(
+ msg["content"],
+ kwargs,
+ ),
+ },
+ )
+ else:
+ compiled_messages.append(
+ str(placeholder_value),
+ )
+ no_role_content_in_placeholder = f"Placeholder '{placeholder_name}' should contain a list of chat messages with 'role' and 'content' fields. Appended as string."
+ langfuse_logger.warning(no_role_content_in_placeholder)
+ else:
+ compiled_messages.append(
+ str(placeholder_value),
+ )
+ placeholder_not_a_list = f"Placeholder '{placeholder_name}' must contain a list of chat messages, got {type(placeholder_value)}"
+ langfuse_logger.warning(placeholder_not_a_list)
+ else:
+ # Keep unresolved placeholder in the compiled messages
+ compiled_messages.append(chat_message)
+ unresolved_placeholders.append(placeholder_name)
+
+ if unresolved_placeholders:
+            unresolved_warning = f"Placeholders {unresolved_placeholders} have not been resolved. Pass them as keyword arguments to compile()."
+            langfuse_logger.warning(unresolved_warning)
+
+ return compiled_messages
@property
def variables(self) -> List[str]:
"""Return all the variable names in the chat prompt template."""
- return [
- variable
- for chat_message in self.prompt
- for variable in TemplateParser.find_variable_names(chat_message["content"])
- ]
+ variables = []
+ # Variables from prompt messages
+ for chat_message in self.prompt:
+ if chat_message["type"] == "message":
+ variables.extend(
+ TemplateParser.find_variable_names(chat_message["content"]),
+ )
+ return variables
def __eq__(self, other):
if isinstance(self, other.__class__):
return (
self.name == other.name
and self.version == other.version
+ and len(self.prompt) == len(other.prompt)
and all(
- m1["role"] == m2["role"] and m1["content"] == m2["content"]
+ # chatmessage equality
+ (
+ m1["type"] == "message"
+ and m2["type"] == "message"
+ and m1["role"] == m2["role"]
+ and m1["content"] == m2["content"]
+ )
+ or
+ # placeholder equality
+ (
+ m1["type"] == "placeholder"
+ and m2["type"] == "placeholder"
+ and m1["name"] == m2["name"]
+ )
for m1, m2 in zip(self.prompt, other.prompt)
)
and self.config == other.config
@@ -319,25 +434,40 @@ def get_langchain_prompt(self, **kwargs):
It specifically adapts the mustache-style double curly braces {{variable}} used in Langfuse
to the single curly brace {variable} format expected by Langchain.
+        Placeholders are filled in from kwargs; unresolved placeholders are returned as Langchain MessagesPlaceholder objects.
kwargs: Optional keyword arguments to precompile the template string. Variables that match
the provided keyword arguments will be precompiled. Remaining variables must then be
handled by Langchain's prompt template.
+ Can also contain placeholders (list of chat messages) which will be resolved prior to variable
+ compilation.
Returns:
- List of messages in the format expected by Langchain's ChatPromptTemplate: (role, content) tuple.
+ List of messages in the format expected by Langchain's ChatPromptTemplate:
+ (role, content) tuples for regular messages or MessagesPlaceholder objects for unresolved placeholders.
"""
- return [
- (
- msg["role"],
- self._get_langchain_prompt_string(
- TemplateParser.compile_template(msg["content"], kwargs)
- if kwargs
- else msg["content"]
- ),
- )
- for msg in self.prompt
- ]
+ compiled_messages = self.compile(**kwargs)
+ langchain_messages = []
+
+ for msg in compiled_messages:
+ if "type" in msg and msg["type"] == "placeholder":
+ # unresolved placeholder -> add LC MessagesPlaceholder
+ placeholder_name = msg["name"]
+ try:
+ from langchain_core.prompts.chat import MessagesPlaceholder # noqa: PLC0415, I001
+
+ langchain_messages.append(
+ MessagesPlaceholder(variable_name=placeholder_name),
+ )
+ except ImportError as e:
+ import_error = "langchain_core is required to use get_langchain_prompt() with unresolved placeholders."
+ raise ImportError(import_error) from e
+ else:
+ langchain_messages.append(
+ (msg["role"], self._get_langchain_prompt_string(msg["content"])),
+ )
+
+ return langchain_messages
PromptClient = Union[TextPromptClient, ChatPromptClient]
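Review note: end-to-end, placeholder fill-ins and template variables travel through the same `compile(**kwargs)` call. A sketch mirroring the tests below (the `Prompt_Chat` payload is hand-built purely for illustration):

```python
from langfuse.api.resources.prompts import Prompt_Chat
from langfuse.model import ChatPromptClient

client = ChatPromptClient(
    Prompt_Chat(
        type="chat",
        name="demo",
        version=1,
        config={},
        tags=[],
        labels=[],
        prompt=[
            {"role": "system", "content": "You are a {{role}} assistant"},
            {"type": "placeholder", "name": "history"},
            {"role": "user", "content": "{{question}}"},
        ],
    )
)

# Placeholder values are lists of chat messages; everything else is treated
# as a template variable.
messages = client.compile(
    role="helpful",
    question="What is 2+2?",
    history=[{"role": "user", "content": "Earlier context"}],
)
# -> [{'role': 'system', 'content': 'You are a helpful assistant'},
#     {'role': 'user', 'content': 'Earlier context'},
#     {'role': 'user', 'content': 'What is 2+2?'}]
```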
diff --git a/tests/test_prompt.py b/tests/test_prompt.py
index 5630bdd41..d3c20d285 100644
--- a/tests/test_prompt.py
+++ b/tests/test_prompt.py
@@ -90,6 +90,296 @@ def test_create_chat_prompt():
assert prompt_client.config == {}
+def test_create_chat_prompt_with_placeholders():
+ langfuse = Langfuse()
+ prompt_name = create_uuid()
+
+ prompt_client = langfuse.create_prompt(
+ name=prompt_name,
+ prompt=[
+ {"role": "system", "content": "You are a {{role}} assistant"},
+ {"type": "placeholder", "name": "history"},
+ {"role": "user", "content": "Help me with {{task}}"},
+ ],
+ labels=["production"],
+ tags=["test"],
+ type="chat",
+ commit_message="initial commit",
+ )
+
+ second_prompt_client = langfuse.get_prompt(prompt_name, type="chat")
+ messages = second_prompt_client.compile(
+ role="helpful",
+ task="coding",
+ history=[
+ {"role": "user", "content": "Example: {{task}}"},
+ {"role": "assistant", "content": "Example response"},
+ ],
+ )
+
+ # Create a test generation using compiled messages
+ completion = openai.OpenAI().chat.completions.create(
+ model="gpt-4",
+ messages=messages,
+ )
+
+ assert len(completion.choices) > 0
+ assert len(messages) == 4
+ assert messages[0]["content"] == "You are a helpful assistant"
+ assert messages[1]["content"] == "Example: coding"
+ assert messages[2]["content"] == "Example response"
+ assert messages[3]["content"] == "Help me with coding"
+
+ assert prompt_client.name == second_prompt_client.name
+ assert prompt_client.version == second_prompt_client.version
+ assert prompt_client.config == second_prompt_client.config
+ assert prompt_client.labels == ["production", "latest"]
+ assert prompt_client.tags == second_prompt_client.tags
+ assert prompt_client.commit_message == second_prompt_client.commit_message
+ assert prompt_client.config == {}
+
+
+def test_create_prompt_with_placeholders():
+ """Test creating a prompt with placeholder messages."""
+ langfuse = Langfuse()
+ prompt_name = create_uuid()
+ prompt_client = langfuse.create_prompt(
+ name=prompt_name,
+ prompt=[
+ {"role": "system", "content": "System message"},
+ {"type": "placeholder", "name": "context"},
+ {"role": "user", "content": "User message"},
+ ],
+ type="chat",
+ )
+
+ # Verify the full prompt structure with placeholders
+ assert len(prompt_client.prompt) == 3
+
+ # First message - system
+ assert prompt_client.prompt[0]["type"] == "message"
+ assert prompt_client.prompt[0]["role"] == "system"
+ assert prompt_client.prompt[0]["content"] == "System message"
+ # Placeholder
+ assert prompt_client.prompt[1]["type"] == "placeholder"
+ assert prompt_client.prompt[1]["name"] == "context"
+ # Third message - user
+ assert prompt_client.prompt[2]["type"] == "message"
+ assert prompt_client.prompt[2]["role"] == "user"
+ assert prompt_client.prompt[2]["content"] == "User message"
+
+
+def test_get_prompt_with_placeholders():
+ """Test retrieving a prompt with placeholders."""
+ langfuse = Langfuse()
+ prompt_name = create_uuid()
+
+ langfuse.create_prompt(
+ name=prompt_name,
+ prompt=[
+ {"role": "system", "content": "You are {{name}}"},
+ {"type": "placeholder", "name": "history"},
+ {"role": "user", "content": "{{question}}"},
+ ],
+ type="chat",
+ )
+
+ prompt_client = langfuse.get_prompt(prompt_name, type="chat", version=1)
+
+ # Verify placeholder structure is preserved
+ assert len(prompt_client.prompt) == 3
+
+ # First message - system with variable
+ assert prompt_client.prompt[0]["type"] == "message"
+ assert prompt_client.prompt[0]["role"] == "system"
+ assert prompt_client.prompt[0]["content"] == "You are {{name}}"
+ # Placeholder
+ assert prompt_client.prompt[1]["type"] == "placeholder"
+ assert prompt_client.prompt[1]["name"] == "history"
+ # Third message - user with variable
+ assert prompt_client.prompt[2]["type"] == "message"
+ assert prompt_client.prompt[2]["role"] == "user"
+ assert prompt_client.prompt[2]["content"] == "{{question}}"
+
+
+@pytest.mark.parametrize(
+ ("variables", "placeholders", "expected_len", "expected_contents"),
+ [
+ # 0. Variables only, no placeholders. Unresolved placeholders kept in output
+ (
+ {"role": "helpful", "task": "coding"},
+ {},
+ 3,
+ [
+ "You are a helpful assistant",
+ None,
+ "Help me with coding",
+ ], # None = placeholder
+ ),
+ # 1. No variables, no placeholders. Expect verbatim message+placeholder output
+ (
+ {},
+ {},
+ 3,
+ ["You are a {{role}} assistant", None, "Help me with {{task}}"],
+ ), # None = placeholder
+ # 2. Placeholders only, empty variables. Expect output with placeholders filled in
+ (
+ {},
+ {
+ "examples": [
+ {"role": "user", "content": "Example question"},
+ {"role": "assistant", "content": "Example answer"},
+ ],
+ },
+ 4,
+ [
+ "You are a {{role}} assistant",
+ "Example question",
+ "Example answer",
+ "Help me with {{task}}",
+ ],
+ ),
+ # 3. Both variables and placeholders. Expect fully compiled output
+ (
+ {"role": "helpful", "task": "coding"},
+ {
+ "examples": [
+ {"role": "user", "content": "Show me {{task}}"},
+ {"role": "assistant", "content": "Here's {{task}}"},
+ ],
+ },
+ 4,
+ [
+ "You are a helpful assistant",
+ "Show me coding",
+ "Here's coding",
+ "Help me with coding",
+ ],
+ ),
+        # Empty placeholder list: intentionally disabled. A provided placeholder
+        # is expected to contain a non-empty list of chat messages.
+ # (
+ # {"role": "helpful", "task": "coding"},
+ # {"examples": []},
+ # 2,
+ # ["You are a helpful assistant", "Help me with coding"],
+ # ),
+ # 4. Unused placeholder fill ins. Unresolved placeholders kept in output
+ (
+ {"role": "helpful", "task": "coding"},
+ {"unused": [{"role": "user", "content": "Won't appear"}]},
+ 3,
+ [
+ "You are a helpful assistant",
+ None,
+ "Help me with coding",
+ ], # None = placeholder
+ ),
+ # 5. Placeholder with non-list value (should log warning and append as string)
+ (
+ {"role": "helpful", "task": "coding"},
+ {"examples": "not a list"},
+ 3,
+ [
+ "You are a helpful assistant",
+ "not a list", # String value appended directly
+ "Help me with coding",
+ ],
+ ),
+ # 6. Placeholder with invalid message structure (should log warning and include both)
+ (
+ {"role": "helpful", "task": "coding"},
+ {
+ "examples": [
+ "invalid message",
+ {"role": "user", "content": "valid message"},
+ ]
+ },
+ 4,
+ [
+ "You are a helpful assistant",
+ "['invalid message', {'role': 'user', 'content': 'valid message'}]", # Invalid structure becomes string
+ "valid message", # Valid message processed normally
+ "Help me with coding",
+ ],
+ ),
+ ],
+)
+def test_compile_with_placeholders(
+ variables, placeholders, expected_len, expected_contents
+) -> None:
+ """Test compile_with_placeholders with different variable/placeholder combinations."""
+ from langfuse.api.resources.prompts import Prompt_Chat
+ from langfuse.model import ChatPromptClient
+
+ mock_prompt = Prompt_Chat(
+ name="test_prompt",
+ version=1,
+ type="chat",
+ config={},
+ tags=[],
+ labels=[],
+ prompt=[
+ {"role": "system", "content": "You are a {{role}} assistant"},
+ {"type": "placeholder", "name": "examples"},
+ {"role": "user", "content": "Help me with {{task}}"},
+ ],
+ )
+
+ compile_kwargs = {**placeholders, **variables}
+ result = ChatPromptClient(mock_prompt).compile(**compile_kwargs)
+
+ assert len(result) == expected_len
+ for i, expected_content in enumerate(expected_contents):
+ if expected_content is None:
+ # This should be an unresolved placeholder
+ assert "type" in result[i] and result[i]["type"] == "placeholder"
+ elif isinstance(result[i], str):
+ # This is a string value from invalid placeholder
+ assert result[i] == expected_content
+ else:
+ # This should be a regular message
+ assert "content" in result[i]
+ assert result[i]["content"] == expected_content
+
+
+def test_warning_on_unresolved_placeholders():
+ """Test that a warning is emitted when compiling with unresolved placeholders."""
+ from unittest.mock import patch
+
+ langfuse = Langfuse()
+ prompt_name = create_uuid()
+
+ langfuse.create_prompt(
+ name=prompt_name,
+ prompt=[
+ {"role": "system", "content": "You are {{name}}"},
+ {"type": "placeholder", "name": "history"},
+ {"role": "user", "content": "{{question}}"},
+ ],
+ type="chat",
+ )
+
+ prompt_client = langfuse.get_prompt(prompt_name, type="chat", version=1)
+
+ # Test that warning is emitted when compiling with unresolved placeholders
+ with patch("langfuse.logger.langfuse_logger.warning") as mock_warning:
+ # Compile without providing the 'history' placeholder
+ result = prompt_client.compile(name="Assistant", question="What is 2+2?")
+
+ # Verify the warning was called with the expected message
+ mock_warning.assert_called_once()
+ warning_message = mock_warning.call_args[0][0]
+ assert "Placeholders ['history'] have not been resolved" in warning_message
+
+ # Verify the result only contains the resolved messages
+ assert len(result) == 3
+ assert result[0]["content"] == "You are Assistant"
+ assert result[1]["name"] == "history"
+ assert result[2]["content"] == "What is 2+2?"
+
+
def test_compiling_chat_prompt():
langfuse = Langfuse()
prompt_name = create_uuid()
@@ -955,7 +1245,13 @@ def test_fallback_chat_prompt():
"nonexistent_chat_prompt", type="chat", fallback=fallback_chat_prompt
)
- assert prompt.prompt == fallback_chat_prompt
+ # Check that the prompt structure contains the fallback data (allowing for internal formatting)
+ assert len(prompt.prompt) == len(fallback_chat_prompt)
+ assert all(msg["type"] == "message" for msg in prompt.prompt)
+ assert prompt.prompt[0]["role"] == "system"
+ assert prompt.prompt[0]["content"] == "fallback system"
+ assert prompt.prompt[1]["role"] == "user"
+ assert prompt.prompt[1]["content"] == "fallback user name {{name}}"
assert prompt.compile(name="Jane") == [
{"role": "system", "content": "fallback system"},
{"role": "user", "content": "fallback user name Jane"},
diff --git a/tests/test_prompt_compilation.py b/tests/test_prompt_compilation.py
index c3bcc11aa..c8aa789dc 100644
--- a/tests/test_prompt_compilation.py
+++ b/tests/test_prompt_compilation.py
@@ -732,3 +732,121 @@ def test_chat_prompt_with_json_variables(self):
assert len(formatted_messages) == 2
assert formatted_messages[0].content == expected_system
assert formatted_messages[1].content == expected_user
+
+ def test_chat_prompt_with_placeholders_langchain(self):
+ """Test that chat prompts with placeholders work correctly with Langchain."""
+ from langfuse.api.resources.prompts import Prompt_Chat
+
+ chat_messages = [
+ ChatMessage(
+ role="system",
+ content="You are a {{role}} assistant with {{capability}} capabilities.",
+ ),
+ {"type": "placeholder", "name": "examples"},
+ ChatMessage(
+ role="user",
+ content="Help me with {{task}}.",
+ ),
+ ]
+
+ prompt_client = ChatPromptClient(
+ Prompt_Chat(
+ type="chat",
+ name="chat_placeholder_langchain_test",
+ version=1,
+ config={},
+ tags=[],
+ labels=[],
+ prompt=chat_messages,
+ ),
+ )
+
+ # Test compile with placeholders and variables
+ compiled_messages = prompt_client.compile(
+ role="helpful",
+ capability="math",
+ task="addition",
+ examples=[
+ {"role": "user", "content": "Example: What is 2+2?"},
+ {"role": "assistant", "content": "2+2 equals 4."},
+ ],
+ )
+
+ assert len(compiled_messages) == 4
+ assert (
+ compiled_messages[0]["content"]
+ == "You are a helpful assistant with math capabilities."
+ )
+ assert compiled_messages[1]["content"] == "Example: What is 2+2?"
+ assert compiled_messages[2]["content"] == "2+2 equals 4."
+ assert compiled_messages[3]["content"] == "Help me with addition."
+
+ langchain_messages = prompt_client.get_langchain_prompt(
+ role="helpful",
+ capability="math",
+ task="addition",
+ examples=[
+ {"role": "user", "content": "Example: What is 2+2?"},
+ {"role": "assistant", "content": "2+2 equals 4."},
+ ],
+ )
+ langchain_prompt = ChatPromptTemplate.from_messages(langchain_messages)
+ formatted_messages = langchain_prompt.format_messages()
+
+ assert len(formatted_messages) == 4
+ assert (
+ formatted_messages[0].content
+ == "You are a helpful assistant with math capabilities."
+ )
+ assert formatted_messages[1].content == "Example: What is 2+2?"
+ assert formatted_messages[2].content == "2+2 equals 4."
+ assert formatted_messages[3].content == "Help me with addition."
+
+ def test_get_langchain_prompt_with_unresolved_placeholders(self):
+ """Test that unresolved placeholders become MessagesPlaceholder objects."""
+ from langfuse.api.resources.prompts import Prompt_Chat
+ from langfuse.model import ChatPromptClient
+
+ chat_messages = [
+ {"role": "system", "content": "You are a {{role}} assistant"},
+ {"type": "placeholder", "name": "examples"},
+ {"role": "user", "content": "Help me with {{task}}"},
+ ]
+
+ prompt_client = ChatPromptClient(
+ Prompt_Chat(
+ type="chat",
+ name="test_unresolved_placeholder",
+ version=1,
+ config={},
+ tags=[],
+ labels=[],
+ prompt=chat_messages,
+ ),
+ )
+
+ # Call get_langchain_prompt without resolving placeholder
+ langchain_messages = prompt_client.get_langchain_prompt(
+ role="helpful",
+ task="coding",
+ )
+
+ # Should have 3 items: system message, MessagesPlaceholder, user message
+ assert len(langchain_messages) == 3
+
+ # First message should be the system message
+ assert langchain_messages[0] == ("system", "You are a helpful assistant")
+
+ # Second should be a MessagesPlaceholder for the unresolved placeholder
+ placeholder_msg = langchain_messages[1]
+ try:
+ from langchain_core.prompts.chat import MessagesPlaceholder
+
+ assert isinstance(placeholder_msg, MessagesPlaceholder)
+ assert placeholder_msg.variable_name == "examples"
+ except ImportError:
+            # Unreachable in practice: get_langchain_prompt itself raises
+            # ImportError when langchain_core is missing and a placeholder
+            # is unresolved, so surface the error instead of asserting.
+            raise
+
+ # Third message should be the user message
+ assert langchain_messages[2] == ("user", "Help me with coding")
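Review note: once `get_langchain_prompt` returns a `MessagesPlaceholder`, Langchain fills the slot at format time. A follow-on sketch to the test above (assumes a recent `langchain_core` that coerces `(role, content)` tuples into messages):

```python
from langchain_core.prompts import ChatPromptTemplate

# Leave `examples` unresolved so it becomes a MessagesPlaceholder, then let
# Langchain supply the messages when formatting.
langchain_messages = prompt_client.get_langchain_prompt(role="helpful", task="coding")
template = ChatPromptTemplate.from_messages(langchain_messages)

formatted = template.format_messages(
    examples=[
        ("user", "Example: What is 2+2?"),
        ("assistant", "2+2 equals 4."),
    ],
)
assert formatted[1].content == "Example: What is 2+2?"
```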