Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions dapr_agents/agents/durableagent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,10 +120,6 @@ def model_post_init(self, __context: Any) -> None:

self.register_agentic_system()

# Start the runtime if it's not already running
logger.info("Starting workflow runtime...")
self.start_runtime()

async def run(self, input_data: Union[str, Dict[str, Any]]) -> Any:
"""
Fire up the workflow, wait for it to complete, then return the final serialized_output.
Expand Down
55 changes: 42 additions & 13 deletions dapr_agents/llm/dapr/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
)

from pydantic import BaseModel, Field

from dapr_agents.llm.chat import ChatClientBase
from dapr_agents.llm.dapr.client import DaprInferenceClientBase
from dapr_agents.llm.utils import RequestHandler, ResponseHandler
Expand Down Expand Up @@ -80,6 +79,8 @@ class DaprChatClient(DaprInferenceClientBase, ChatClientBase):

component_name: Optional[str] = None

component_name: Optional[str] = None

# Only function_call–style structured output is supported
SUPPORTED_STRUCTURED_MODES: ClassVar[set[str]] = {"function_call"}

Expand All @@ -92,8 +93,8 @@ def model_post_init(self, __context: Any) -> None:
if not self._llm_component:
self._llm_component = os.environ.get("DAPR_LLM_COMPONENT_DEFAULT")
if not self._llm_component:
raise ValueError(
"You must provide a component_name or set DAPR_LLM_COMPONENT_DEFAULT in the environment."
logger.debug(
"No LLM component provided and no default component found in the environment. Will try to get it from the metadata at runtime."
)
super().model_post_init(__context)

Expand Down Expand Up @@ -327,16 +328,11 @@ def generate(
)
# get metadata information from the dapr client
metadata = self.client.dapr_client.get_metadata()
extended_metadata = metadata.extended_metadata
dapr_runtime_version = extended_metadata.get("daprRuntimeVersion", None)
if dapr_runtime_version is not None:
# Allow only versions >=1.16.0 and <2.0.0 for Alpha2 Chat Client
if not is_version_supported(
str(dapr_runtime_version), ">=1.16.0, edge, <2.0.0"
):
raise DaprRuntimeVersionNotSupportedError(
f"!!!!! Dapr Runtime Version {dapr_runtime_version} is not supported with Alpha2 Dapr Chat Client. Only Dapr runtime versions >=1.16.0, edge,and <2.0.0 are supported."
)
_check_dapr_runtime_support(metadata)

llm_component = llm_component or self._llm_component
if not llm_component:
llm_component = _get_llm_component(metadata)

raw = self.client.chat_completion_alpha2(
llm=llm_component or self._llm_component,
Expand Down Expand Up @@ -365,3 +361,36 @@ def generate(
structured_mode=structured_mode,
stream=False,
)


def _check_dapr_runtime_support(metadata: "GetMetadataResponse"): # noqa: F821
"""Check if the Dapr runtime version is supported for Alpha2 Chat Client."""
extended_metadata = metadata.extended_metadata
dapr_runtime_version = extended_metadata.get("daprRuntimeVersion", None)
if dapr_runtime_version is not None:
# Allow only versions >=1.16.0, edge, and <2.0.0 for Alpha2 Chat Client
if not is_version_supported(
str(dapr_runtime_version), ">=1.16.0, edge, <2.0.0"
):
raise DaprRuntimeVersionNotSupportedError(
f"!!!!! Dapr Runtime Version {dapr_runtime_version} is not supported with Alpha2 Dapr Chat Client. Only Dapr runtime versions >=1.16.0, edge, and <2.0.0 are supported."
)


def _get_llm_component(metadata: "GetMetadataResponse") -> str: # noqa: F821
"""Get the LLM component from the metadata."""
conversation_components = [
component
for component in metadata.registered_components
if component.type.startswith("conversation.")
]
if len(conversation_components) == 1:
return conversation_components[0].name
elif len(conversation_components) > 1:
raise ValueError(
"Multiple LLM components found in the metadata. Please provide the component name explicitly (e.g. llm = DaprChatClient(component_name='openai')) or environment variable DAPR_LLM_COMPONENT_DEFAULT."
)
else:
raise ValueError(
"No LLM component provided and no default component found in the metadata."
)
4 changes: 4 additions & 0 deletions dapr_agents/workflow/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import inspect
import json
import logging
import time
import sys
import uuid
from datetime import datetime, timezone
Expand Down Expand Up @@ -516,6 +517,9 @@ def start_runtime(self):
self.wf_runtime.start()
self.wf_runtime_is_running = True

logger.info("Sleeping for 5 seconds to ensure runtime is started.")
time.sleep(5)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

helps with actor runtime not found

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we rm since i have the open pr upstream and theyre planning to cut 1.16.1 next week

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure that will fix that, if there is no app port define won't you still need some time to have the runtime ready?


# Sync database state with Dapr workflow status after runtime starts
# This ensures our database reflects the actual state of resumed workflows
self._sync_workflow_state_after_startup()
Expand Down
19 changes: 13 additions & 6 deletions quickstarts/01-hello-world/03_durable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,10 @@
import asyncio
import logging


from typing import List
from pydantic import BaseModel, Field
from dapr_agents import tool, DurableAgent, DaprChatClient
from dapr_agents import tool, DurableAgent
from dapr_agents.memory import ConversationDaprStateMemory
from dotenv import load_dotenv

Expand All @@ -38,11 +39,17 @@ def search_flights(destination: str) -> List[FlightOption]:
]


# one can use the environment variable to set the default component name
# ----------------------------------------------------------------------------------------------------------------------
# There are three ways to set the LLM component with DaprChatClient:
#
# 1. Explicitly instantiate the DaprChatClient with the component name
# llm = DaprChatClient(component_name="openai")
#
# 2. Use the environment variable DAPR_LLM_COMPONENT_DEFAULT
# os.environ.setdefault("DAPR_LLM_COMPONENT_DEFAULT", "openai")

# or directly pass the component name
llm = DaprChatClient(component_name="openai")
#
# 3. If there is only one conversation component in the resources folder, it will be used by default
# ----------------------------------------------------------------------------------------------------------------------


async def main():
Expand All @@ -66,7 +73,7 @@ async def main():
memory=ConversationDaprStateMemory(
store_name="conversationstore", session_id="my-unique-id"
),
llm=llm,
# llm=llm, # if you don't set the llm attribute, it will be by default set to DaprChatClient()
)

await travel_planner.run("I want to find flights to Paris")
Expand Down
63 changes: 47 additions & 16 deletions quickstarts/01-hello-world/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,25 @@ This quickstart provides a hands-on introduction to Dapr Agents through simple e

## Prerequisites

- Python 3.10 (recommended)
- pip package manager
- Python 3.10+ (recommended)
- [uv package manager](https://docs.astral.sh/uv/getting-started/installation/)
- OpenAI API key (you can put it in a `.env` file or directly in the `openai.yaml` file, but we recommend the `.env` file, which is gitignored)

## Environment Setup

<details open>
<summary><strong>Option 1: Using uv (Recommended)</strong></summary>

<!-- We include setting up the venv as part of the first step to make sure the venv is created and activated before the examples are run.-->
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the first step was moved up to setup the environment, we probably should add an option in mechanical markdown for SETUP/TEARDOWN

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

added this issue in mm repo dapr/mechanical-markdown#38 that I'll try to implement


<!-- STEP
name: Run basic LLM example
expected_stdout_lines:
- "Got response:"
timeout_seconds: 30
output_match_mode: substring
-->

```bash
# Create and activate virtual environment
uv venv .venv
Expand All @@ -27,7 +37,7 @@ uv pip install -r requirements.txt
<details>
<summary><strong>Option 2: Using pip</strong></summary>

```bash
```a shell [not setting type to avoid mechanical markdown execution]
# Create a virtual environment
python3.10 -m venv .venv

Expand All @@ -39,17 +49,19 @@ source .venv/bin/activate

# Install dependencies
pip install -r requirements.txt

```

</details>


## Configuration
## OpenAI API Key

> **Warning**
> The examples will not work if you do not have an OpenAI API key exported in the environment.

<details open>
<summary><strong>Option 1: Using .env file</strong></summary>

Create a `.env` file in the project root and add your OpenAI API key:

```env
Expand All @@ -59,23 +71,32 @@ OPENAI_API_KEY=your_api_key_here
Replace `your_api_key_here` with your actual OpenAI API key.

Export the environment variables from the .env file to your shell:
```bash
export $(grep -v '^#' .env | xargs) # or if .env is in the root directory, you can just run `export $(grep -v '^#' ../../.env | xargs)`
```a shell [not setting type to avoid mechanical markdown execution]
export $(grep -v '^#' .env | xargs)

# or if .env is in the root directory of the repository,
# export $(grep -v '^#' ../../.env | xargs)
```

</details>

<details>
<summary><strong>Option 2: Exporting the OpenAI API Key directly to the shell</strong></summary>

```a shell [not setting type to avoid mechanical markdown execution]
export OPENAI_API_KEY=your_api_key_here
```

Replace `your_api_key_here` with your actual OpenAI API key.

</details>

## Examples

### 1. Basic LLM Usage

Run the basic LLM example to see how to interact with OpenAI's language models:

<!-- STEP
name: Run basic LLM example
expected_stdout_lines:
- "Got response:"
timeout_seconds: 30
output_match_mode: substring
-->
```bash
python 01_ask_llm.py
```
Expand Down Expand Up @@ -167,6 +188,9 @@ A stateful agent that uses Dapr Workflows to ensure durability and persistence o
We are using the Dapr ChatClient to interact with the OpenAI API. In the components folder, we have a `openai.yaml` file that contains the configuration for the OpenAI API.
You need to replace the `{YOUR_OPENAI_API_KEY}` with your actual OpenAI API key.


Make sure Dapr is initialized on your system:

```bash
Expand All @@ -180,14 +204,16 @@ name: Run basic LLM example
expected_stdout_lines:
- "I want to find flights to Paris"
- "TravelBuddy"
timeout_seconds: 30
timeout_seconds: 60
output_match_mode: substring
-->


We are using the `resolve_env_templates.py` script to resolve the environment variables in the components folder and substitute them with the actual values in your .env file, like the OpenAI API key.
We are using the `resolve_env_templates.py` script to resolve the environment variables in the components folder and substitute them with the actual values in your environment, like the OpenAI API key.

```bash
source .venv/bin/activate

dapr run --app-id stateful-llm --dapr-http-port 3500 --resources-path $(../resolve_env_templates.py ./components) -- python 03_durable_agent.py
```

Expand Down Expand Up @@ -321,6 +347,8 @@ expected_stdout_lines:
output_match_mode: substring
-->
```bash
source .venv/bin/activate

dapr run --app-id dapr-agent-wf --resources-path $(../resolve_env_templates.py ./components) -- python 04_chain_tasks.py
```
<!-- END_STEP -->
Expand Down Expand Up @@ -402,6 +430,8 @@ expected_stdout_lines:
output_match_mode: substring
-->
```bash
source .venv/bin/activate

python 05_agent_with_vectorstore.py
```
<!-- END_STEP -->
Expand Down Expand Up @@ -541,6 +571,7 @@ if __name__ == "__main__":

## Key Concepts

- **DaprChatClient**: The interface for interacting with Dapr's LLMs
- **OpenAIChatClient**: The interface for interacting with OpenAI's LLMs
- **Agent**: A class that combines an LLM with tools and instructions
- **@tool decorator**: A way to create tools that agents can use
Expand Down
Loading