From a04c6a638a0a322a3e180491f2ceb92499990d66 Mon Sep 17 00:00:00 2001 From: Ishu Kumar Date: Thu, 7 Aug 2025 20:35:37 +0530 Subject: [PATCH 01/28] Added MCP functionality with tool calls and tool call display --- src/client/content/chatbot.py | 250 +++++++++---- src/client/content/config/mcp_servers.py | 24 ++ src/client/content/config/settings.py | 47 ++- src/client/mcp/client.py | 446 +++++++++++++++++++++++ src/client/mcp/frontend.py | 94 +++++ src/client/utils/st_common.py | 18 + src/common/schema.py | 39 +- src/launch_client.py | 4 + src/launch_server.py | 120 +++++- src/server/api/core/bootstrap.py | 3 +- src/server/api/core/mcp.py | 31 ++ src/server/api/core/settings.py | 10 + src/server/api/v1/__init__.py | 2 +- src/server/api/v1/mcp.py | 147 ++++++++ src/server/api/v1/settings.py | 13 +- src/server/bootstrap/mcp.py | 89 +++++ src/server/mcp/server/archive_mcp.py | 182 +++++++++ src/server/mcp/server_config.json | 20 + 18 files changed, 1448 insertions(+), 91 deletions(-) create mode 100644 src/client/content/config/mcp_servers.py create mode 100644 src/client/mcp/client.py create mode 100644 src/client/mcp/frontend.py create mode 100644 src/server/api/core/mcp.py create mode 100644 src/server/api/v1/mcp.py create mode 100644 src/server/bootstrap/mcp.py create mode 100644 src/server/mcp/server/archive_mcp.py create mode 100644 src/server/mcp/server_config.json diff --git a/src/client/content/chatbot.py b/src/client/content/chatbot.py index d8382ecc..7ba3fe05 100644 --- a/src/client/content/chatbot.py +++ b/src/client/content/chatbot.py @@ -2,11 +2,11 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. -Session States Set: -- user_client: Stores the Client +This file merges the Streamlit Chatbot GUI with the MCPClient for a complete, +runnable example demonstrating their integration. """ -# spell-checker:ignore streamlit, oraclevs, selectai +# spell-checker:ignore streamlit, oraclevs, selectai, langgraph, prebuilt import asyncio import inspect import json @@ -21,8 +21,9 @@ import client.utils.api_call as api_call from client.utils.st_footer import render_chat_footer -import client.utils.client as client import common.logging_config as logging_config +from client.mcp.client import MCPClient +from pathlib import Path logger = logging_config.logging.getLogger("client.content.chatbot") @@ -67,95 +68,220 @@ async def main() -> None: ######################################################################### # Sidebar Settings ######################################################################### - # Get a list of available language models, if none, then stop ll_models_enabled = st_common.enabled_models_lookup("ll") if not ll_models_enabled: st.error("No language models are configured and/or enabled. Disabling Client.", icon="🛑") st.stop() - # the sidebars will set this to False if not everything is configured. 
state.enable_client = True st_common.tools_sidebar() st_common.history_sidebar() st_common.ll_sidebar() st_common.selectai_sidebar() st_common.vector_search_sidebar() - # Stop when sidebar configurations not set if not state.enable_client: st.stop() ######################################################################### # Chatty-Bot Centre ######################################################################### - # Establish the Client - if "user_client" not in state: - state.user_client = client.Client( - server=state.server, - settings=state.client_settings, - timeout=1200, - ) - user_client: client.Client = state.user_client - - history = await user_client.get_history() + + if "messages" not in state: + state.messages = [] + st.chat_message("ai").write("Hello, how can I help you?") - vector_search_refs = [] - for message in history or []: - if not message["content"]: + + for message in state.messages: + role = message.get("role") + display_role = "" + if role in ("human", "user"): + display_role = "human" + elif role in ("ai", "assistant"): + if not message.get("content") and not message.get("tool_trace"): + continue + display_role = "assistant" + else: continue - if message["role"] == "tool" and message["name"] == "oraclevs_tool": - vector_search_refs = json.loads(message["content"]) - if message["role"] in ("ai", "assistant"): - with st.chat_message("ai"): - st.markdown(message["content"]) - if vector_search_refs: - show_vector_search_refs(vector_search_refs) - vector_search_refs = [] - elif message["role"] in ("human", "user"): - with st.chat_message("human"): - content = message["content"] + + with st.chat_message(display_role): + if "tool_trace" in message and message["tool_trace"]: + for tool_call in message["tool_trace"]: + with st.expander(f"🛠️ **Tool Call:** `{tool_call['name']}`", expanded=False): + st.text("Arguments:") + st.code(json.dumps(tool_call.get('args', {}), indent=2), language="json") + if "error" in tool_call: + st.text("Error:") + st.error(tool_call['error']) + else: + st.text("Result:") + st.code(tool_call.get('result', ''), language="json") + if message.get("content"): + # Display file attachments if present + if "attachments" in message and message["attachments"]: + for file in message["attachments"]: + # Show appropriate icon based on file type + if file["type"].startswith("image/"): + st.image(file["preview"], use_container_width=True) + st.markdown(f"🖼️ **{file['name']}** ({file['size']//1024} KB)") + elif file["type"] == "application/pdf": + st.markdown(f"📄 **{file['name']}** ({file['size']//1024} KB)") + elif file["type"] in ("text/plain", "text/markdown"): + st.markdown(f"📝 **{file['name']}** ({file['size']//1024} KB)") + else: + st.markdown(f"📎 **{file['name']}** ({file['size']//1024} KB)") + + # Display message content - handle both string and list formats + content = message.get("content") if isinstance(content, list): - for part in content: - if part["type"] == "text": - st.write(part["text"]) - elif part["type"] == "image_url" and part["image_url"]["url"].startswith("data:image"): - st.image(part["image_url"]["url"]) + # Extract and display only text parts + text_parts = [part["text"] for part in content if part["type"] == "text"] + st.markdown("\n".join(text_parts)) else: - st.write(content) + st.markdown(content) sys_prompt = state.client_settings["prompts"]["sys"] render_chat_footer() + if human_request := st.chat_input( f"Ask your question here... 
(current prompt: {sys_prompt})", accept_file=True, - file_type=["jpg", "jpeg", "png"], + file_type=["jpg", "jpeg", "png", "pdf", "txt", "docx"], + key=f"chat_input_{len(state.messages)}", ): - st.chat_message("human").write(human_request.text) - file_b64 = None - if human_request["files"]: - file = human_request["files"][0] - file_bytes = file.read() - file_b64 = base64.b64encode(file_bytes).decode("utf-8") + # Process message with potential file attachments + message = {"role": "user", "content": human_request.text} + + # Handle file attachments + if hasattr(human_request, "files") and human_request.files: + # Store file information separately from content + message["attachments"] = [] + for file in human_request.files: + file_bytes = file.read() + file_b64 = base64.b64encode(file_bytes).decode("utf-8") + message["attachments"].append({ + "name": file.name, + "type": file.type, + "size": len(file_bytes), + "data": file_b64, + "preview": f"data:{file.type};base64,{file_b64}" if file.type.startswith("image/") else None + }) + + state.messages.append(message) + st.rerun() + if state.messages and state.messages[-1]["role"] == "user": try: - message_placeholder = st.chat_message("ai").empty() - full_answer = "" - async for chunk in user_client.stream(message=human_request.text, image_b64=file_b64): - full_answer += chunk - message_placeholder.markdown(full_answer) - # Stream until we hit the end then refresh to replace with history - st.rerun() - except Exception: - logger.error("Exception:", exc_info=1) - st.chat_message("ai").write( - """ - I'm sorry, something's gone wrong. Please try again. - If the problem persists, please raise an issue. - """ - ) - if st.button("Retry", key="reload_chatbot"): - st_common.clear_state_key("user_client") + with st.chat_message("ai"): + with st.spinner("Thinking..."): + client_settings_for_request = state.client_settings.copy() + model_id = client_settings_for_request.get('ll_model', {}).get('model') + if model_id: + all_model_configs = st_common.enabled_models_lookup("ll") + model_config = all_model_configs.get(model_id, {}) + if 'api_key' in model_config: + if 'll_model' not in client_settings_for_request: + client_settings_for_request['ll_model'] = {} + client_settings_for_request['ll_model']['api_key'] = model_config['api_key'] + + # Prepare message history for backend + message_history = [] + for msg in state.messages: + # Create a copy of the message + processed_msg = msg.copy() + + # If there are attachments, include them in the content + if "attachments" in msg and msg["attachments"]: + # Start with the text content + text_content = msg["content"] + + # Handle list content format (from OpenAI API) + if isinstance(text_content, list): + text_parts = [part["text"] for part in text_content if part["type"] == "text"] + text_content = "\n".join(text_parts) + + # Create a list to hold structured content parts + content_list = [{"type": "text", "text": text_content}] + + non_image_references = [] + for attachment in msg["attachments"]: + if attachment["type"].startswith("image/"): + # Only add image URLs for user messages + if msg["role"] in ("human", "user"): + # Normalize image MIME types for compatibility + mime_type = attachment["type"] + if mime_type == "image/jpg": + mime_type = "image/jpeg" + + content_list.append({ + "type": "image_url", + "image_url": { + "url": f"data:{mime_type};base64,{attachment['data']}", + "detail": "low" + } + }) + else: + # Handle non-image files as text references + non_image_references.append(f"\n[File: 
{attachment['name']} ({attachment['size']//1024} KB)]") + + # If there were non-image files, append their references to the main text part + if non_image_references: + content_list[0]['text'] += "".join(non_image_references) + + processed_msg["content"] = content_list + # Convert list content to string format + elif isinstance(msg.get("content"), list): + text_parts = [part["text"] for part in msg["content"] if part["type"] == "text"] + processed_msg["content"] = str("\n".join(text_parts)) + # Otherwise, ensure content is a string + else: + processed_msg["content"] = str(msg.get("content", "")) + + message_history.append(processed_msg) + + async with MCPClient(client_settings=client_settings_for_request) as mcp_client: + final_text, tool_trace, new_history = await mcp_client.invoke( + message_history=message_history + ) + + # Update the history for display. + # Keep the original message structure with attachments + for i in range(len(new_history) - 1, -1, -1): + if new_history[i].get("role") == "assistant": + # Preserve any attachments from the user message + user_message = state.messages[-1] + if "attachments" in user_message: + new_history[-1]["attachments"] = user_message["attachments"] + + new_history[i]["content"] = final_text + new_history[i]["tool_trace"] = tool_trace + break + + state.messages = new_history + st.rerun() + + except Exception as e: + logger.error("Exception during invoke call:", exc_info=True) + # Extract just the error message + error_msg = str(e) + + # Check if it's a file-related error + if "file" in error_msg.lower() or "image" in error_msg.lower() or "content" in error_msg.lower(): + st.error(f"Error: {error_msg}") + + # Add a button to remove files and retry + if st.button("Remove files and retry", key="remove_files_retry"): + # Remove attachments from the latest message + if state.messages and "attachments" in state.messages[-1]: + del state.messages[-1]["attachments"] + st.rerun() + else: + st.error(f"Error: {error_msg}") + + if st.button("Retry", key="reload_chatbot_error"): + if state.messages and state.messages[-1]["role"] == "user": + state.messages.pop() st.rerun() -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: +if __name__ == "__main__" or ("page" in inspect.stack()[1].filename if inspect.stack() else False): try: asyncio.run(main()) except ValueError as ex: diff --git a/src/client/content/config/mcp_servers.py b/src/client/content/config/mcp_servers.py new file mode 100644 index 00000000..5535227d --- /dev/null +++ b/src/client/content/config/mcp_servers.py @@ -0,0 +1,24 @@ +import inspect + +from client.mcp.frontend import display_commands_tab, display_ide_tab, get_fastapi_base_url, get_server_capabilities + +import streamlit as st + +def main(): + fastapi_base_url = get_fastapi_base_url() + tools, resources, prompts = get_server_capabilities(fastapi_base_url) + if "chat_history" not in st.session_state: + st.session_state.chat_history = [] + ide, commands = st.tabs(["🛠️ IDE", "📚 Available Commands"]) + + with ide: + # Display the IDE tab using the original AI Optimizer logic. + display_ide_tab() + with commands: + # Display the commands tab using the original AI Optimizer logic. 
+ display_commands_tab(tools, resources, prompts) + + + +if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: + main() diff --git a/src/client/content/config/settings.py b/src/client/content/config/settings.py index 399a141e..cdebe9dc 100644 --- a/src/client/content/config/settings.py +++ b/src/client/content/config/settings.py @@ -38,15 +38,32 @@ ############################################################################# def get_settings(include_sensitive: bool = False): """Get Server-Side Settings""" - settings = api_call.get( - endpoint="v1/settings", - params={ - "client": state.client_settings["client"], - "full_config": True, - "incl_sensitive": include_sensitive, - }, - ) - return settings + try: + settings = api_call.get( + endpoint="v1/settings", + params={ + "client": state.client_settings["client"], + "full_config": True, + "incl_sensitive": include_sensitive, + }, + ) + return settings + except api_call.ApiError as e: + if "not found" in str(e): + # If client settings not found, create them + logger.info("Client settings not found, creating new ones") + api_call.post(endpoint="v1/settings", params={"client": state.client_settings["client"]}) + settings = api_call.get( + endpoint="v1/settings", + params={ + "client": state.client_settings["client"], + "full_config": True, + "incl_sensitive": include_sensitive, + }, + ) + return settings + else: + raise def save_settings(settings): @@ -141,11 +158,11 @@ def apply_uploaded_settings(uploaded): def spring_ai_conf_check(ll_model, embed_model) -> str: """Check if configuration is valid for SpringAI package""" - if ll_model is None or embed_model is None: + if not ll_model or not embed_model: return "hybrid" - ll_api = ll_model["api"] - embed_api = embed_model["api"] + ll_api = ll_model.get("api", "") + embed_api = embed_model.get("api", "") if "OpenAI" in ll_api and "OpenAI" in embed_api: return "openai" @@ -287,9 +304,11 @@ def main(): st.header("SpringAI Settings", divider="red") # Merge the User Settings into the Model Config model_lookup = st_common.state_configs_lookup("model_configs", "id") - ll_config = model_lookup[state.client_settings["ll_model"]["model"]] | state.client_settings["ll_model"] + ll_model_id = state.client_settings["ll_model"].get("model") + ll_config = model_lookup.get(ll_model_id, {}) | state.client_settings["ll_model"] + embed_model_id = state.client_settings["vector_search"].get("model") embed_config = ( - model_lookup[state.client_settings["vector_search"]["model"]] | state.client_settings["vector_search"] + model_lookup.get(embed_model_id, {}) | state.client_settings["vector_search"] ) spring_ai_conf = spring_ai_conf_check(ll_config, embed_config) diff --git a/src/client/mcp/client.py b/src/client/mcp/client.py new file mode 100644 index 00000000..d4282828 --- /dev/null +++ b/src/client/mcp/client.py @@ -0,0 +1,446 @@ +import json +import os +import time +import asyncio +from dotenv import load_dotenv +from mcp import ClientSession, StdioServerParameters, types +from mcp.client.stdio import stdio_client +from typing import List, Dict, Optional, Tuple, Type, Any +from contextlib import AsyncExitStack + +# --- MODIFICATION: Import LangChain components --- +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage, BaseMessage +from langchain_core.language_models.base import BaseLanguageModel +from pydantic import create_model, BaseModel, Field +# Import the specific chat models you want to support +from langchain_openai import ChatOpenAI +from 
langchain_anthropic import ChatAnthropic +from langchain_google_genai import ChatGoogleGenerativeAI +from langchain_cohere import ChatCohere +from langchain_ollama import ChatOllama +from langchain_groq import ChatGroq +from langchain_mistralai import ChatMistralAI + +load_dotenv() + +if os.getenv("IS_STREAMLIT_CONTEXT"): + import nest_asyncio + nest_asyncio.apply() + +class MCPClient: + # MODIFICATION: Changed the constructor to accept client_settings + def __init__(self, client_settings: Dict): + """ + Initialize MCP Client using a settings dictionary from the Streamlit client. + + Args: + client_settings: The state.client_settings object. + """ + # 1. Validate the incoming settings dictionary + if not client_settings or 'll_model' not in client_settings: + raise ValueError("Client settings are incomplete. 'll_model' is required.") + + # 2. Store the settings and extract the model ID + self.model_settings = client_settings['ll_model'] + + # This is our new "Service Factory" using LangChain classes + # If no model is specified, we'll initialize with a default one + if 'model' not in self.model_settings or not self.model_settings['model']: + # Set a default model if none is specified + self.model_settings['model'] = 'llama3.1' + # Remove any OpenAI-specific parameters that might cause issues + self.model_settings.pop('openai_api_key', None) + + self.langchain_model = self._create_langchain_model(**self.model_settings) + + self.exit_stack = AsyncExitStack() + self.sessions: Dict[str, ClientSession] = {} + self.tool_to_session: Dict[str, Tuple[ClientSession, types.Tool]] = {} + self.available_prompts: Dict[str, types.Prompt] = {} + self.static_resources: Dict[str, str] = {} + self.dynamic_resources: List[str] = [] + self.resource_to_session: Dict[str, str] = {} + self.prompt_to_session: Dict[str, str] = {} + self.available_tools: List[Dict] = [] + self._stdio_generators: Dict[str, Any] = {} # To store stdio generators for cleanup + print(f"Initialized MCPClient with LangChain model: {self.langchain_model.__class__.__name__}") + + # --- FIX: Add __aenter__ and __aexit__ to make this a context manager --- + async def __aenter__(self): + """Enter the async context, connecting to all servers.""" + await self.connect_to_servers() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Exit the async context, ensuring all connections are cleaned up.""" + await self.cleanup() + + def _create_langchain_model(self, model: str, **kwargs) -> BaseLanguageModel: + """Factory to create and return a LangChain ChatModel instance.""" + # If no model is specified, default to llama3.1 which works with Ollama + if not model: + model = "llama3.1" + # Remove any OpenAI-specific parameters that might cause issues + kwargs.pop('openai_api_key', None) + + model_lower = model.lower() + + # Handle OpenAI models + if model_lower.startswith('gpt-'): + # Check if api_key is in kwargs and rename it to openai_api_key for ChatOpenAI + if 'api_key' in kwargs: + kwargs['openai_api_key'] = kwargs.pop('api_key') + # Remove parameters that shouldn't be passed to ChatOpenAI + kwargs.pop('context_length', None) + kwargs.pop('chat_history', None) + return ChatOpenAI(model=model, **kwargs) + + # Handle Anthropic models + elif model_lower.startswith('claude-'): + kwargs.pop('openai_api_key', None) + return ChatAnthropic(model=model, **kwargs) + + # Handle Google models + elif model_lower.startswith('gemini-'): + kwargs.pop('openai_api_key', None) + return ChatGoogleGenerativeAI(model=model, **kwargs) + + # Handle 
Mistral models + elif model_lower.startswith('mistral-'): + kwargs.pop('openai_api_key', None) + return ChatMistralAI(model=model, **kwargs) + + # Handle Cohere models + elif model_lower.startswith('cohere-'): + kwargs.pop('openai_api_key', None) + return ChatCohere(model=model, **kwargs) + + # Handle Groq models + elif model_lower.startswith('groq-'): + kwargs.pop('openai_api_key', None) + return ChatGroq(model=model, **kwargs) + + # Default to Ollama for any other model name + else: + return ChatOllama(model=model, **kwargs) + + def _convert_dict_to_langchain_messages(self, message_history: List[Dict]) -> List[BaseMessage]: + """Converts a list of message dictionaries to a list of LangChain message objects.""" + messages: List[BaseMessage] = [] + for msg in message_history: + role = msg.get("role") + content = msg.get("content", "") + if role == "user": + messages.append(HumanMessage(content=content)) # type: ignore + elif role == "assistant": + # AIMessage can handle tool calls directly from the dictionary format + tool_calls = msg.get("tool_calls") + messages.append(AIMessage(content=content, tool_calls=tool_calls or [])) # type: ignore + elif role == "system": + messages.append(SystemMessage(content=content)) # type: ignore + elif role == "tool": + messages.append(ToolMessage(content=content, tool_call_id=msg.get("tool_call_id", ""))) # type: ignore + return messages # type: ignore + + def _convert_langchain_messages_to_dict(self, langchain_messages: List[BaseMessage]) -> List[Dict]: + """Converts a list of LangChain message objects back to a list of dictionaries for session state.""" + dict_messages = [] + for msg in langchain_messages: + if isinstance(msg, HumanMessage): + dict_messages.append({"role": "user", "content": msg.content}) + elif isinstance(msg, AIMessage): + # Preserve tool calls in the dictionary format + dict_messages.append({"role": "assistant", "content": msg.content, "tool_calls": msg.tool_calls}) + elif isinstance(msg, SystemMessage): + dict_messages.append({"role": "system", "content": msg.content}) + elif isinstance(msg, ToolMessage): + dict_messages.append({"role": "tool", "content": msg.content, "tool_call_id": msg.tool_call_id}) + return dict_messages + + def _prepare_messages_for_service(self, message_history: List[Dict]) -> List[Dict]: + """ + FIX: Translates the rich message history from the GUI into a simple, + text-only format that AI services can understand. + """ + prepared_messages = [] + for msg in message_history: + content = msg.get("content") + # If content is a list (multimodal), extract only the text. + if isinstance(content, list): + text_content = " ".join( + part["text"] for part in content if part.get("type") == "text" + ) + prepared_messages.append({"role": msg["role"], "content": text_content}) + # Otherwise, use the content as is (assuming it's a string). 
+ else: + prepared_messages.append(msg) + return prepared_messages + + async def connect_to_servers(self): + try: + config_paths = ["server/mcp/server_config.json", os.path.join(os.path.dirname(__file__), "..", "..", "server", "mcp", "server_config.json")] + servers = {} + for config_path in config_paths: + try: + with open(config_path, "r") as file: + servers = json.load(file).get("mcpServers", {}) + print(f"Loaded MCP server configuration from: {config_path}") + print(f"Found servers: {list(servers.keys())}") + break + except FileNotFoundError: + print(f"MCP server config not found at: {config_path}") + continue + except Exception as e: + print(f"Error reading MCP server config from {config_path}: {e}") + continue + if not servers: + print("No MCP server configuration found!") + for name, config in servers.items(): + print(f"Connecting to MCP server: {name}") + await self.connect_to_server(name, config) + except Exception as e: print(f"Error loading server configuration: {e}") + + async def connect_to_server(self, server_name: str, server_config: dict): + try: + print(f"Connecting to server '{server_name}' with config: {server_config}") + server_params = StdioServerParameters(**server_config) + + # Create the stdio client connection using the exit stack for proper cleanup + try: + read, write = await self.exit_stack.enter_async_context(stdio_client(server_params)) + + # Create the client session using the exit stack for proper cleanup + session = await self.exit_stack.enter_async_context(ClientSession(read, write)) + + await session.initialize() + self.sessions[server_name] = session + + # Load tools, resources, and prompts from this server + await self._load_server_capabilities(session, server_name) + except RuntimeError as e: + # Handle runtime errors related to task context + if "cancel scope" not in str(e).lower(): + raise + print(f"Warning: Connection to '{server_name}' had context issues: {e}") + except Exception as e: + raise + except Exception as e: + print(f"Failed to connect to '{server_name}': {e}") + import traceback + traceback.print_exc() + + async def _run_async_generator(self, generator): + """Helper method to run an async generator in the current task context.""" + return await generator.__anext__() + + async def _load_server_capabilities(self, session: ClientSession, server_name: str): + """Load tools, resources, and prompts from a connected server.""" + try: + # List tools + tools_list = await session.list_tools() + print(f"Found {len(tools_list.tools)} tools from server '{server_name}'") + for tool in tools_list.tools: + self.tool_to_session[tool.name] = (session, tool) + print(f"Loaded tool '{tool.name}' from server '{server_name}'") + + # List resources + try: + resp = await session.list_resources() + if resp.resources: print(f" - Found Static Resources: {[r.name for r in resp.resources]}") + for resource in resp.resources: + uri = resource.uri.encoded_string() + self.resource_to_session[uri] = server_name + user_shortcut = uri.split('//')[-1] + self.static_resources[user_shortcut] = uri + if resource.name and resource.name != user_shortcut: + self.static_resources[resource.name] = uri + except Exception as e: + print(f"Failed to load resources from server '{server_name}': {e}") + + # Discover DYNAMIC resource templates + try: + # The response object for templates has a `.templates` attribute + resp = await session.list_resource_templates() + if resp.resourceTemplates: print(f" - Found Dynamic Resource Templates: {[t.name for t in resp.resourceTemplates]}") + for 
template in resp.resourceTemplates: + uri = template.uriTemplate + # The key for the session map MUST be the pattern itself. + self.resource_to_session[uri] = server_name + if uri not in self.dynamic_resources: + self.dynamic_resources.append(uri) + except Exception as e: + # This is also okay, some servers don't have dynamic resources. + print(f"Failed to load dynamic resources from server '{server_name}': {e}") + + + # List prompts + try: + prompts_list = await session.list_prompts() + print(f"Found {len(prompts_list.prompts)} prompts from server '{server_name}'") + for prompt in prompts_list.prompts: + self.available_prompts[prompt.name] = prompt + self.prompt_to_session[prompt.name] = server_name + print(f"Loaded prompt '{prompt.name}' from server '{server_name}'") + except Exception as e: + print(f"Failed to load prompts from server '{server_name}': {e}") + + except Exception as e: + print(f"Failed to load capabilities from server '{server_name}': {e}") + + async def _rebuild_mcp_tool_schemas(self): + """Rebuilds the list of tools from connected MCP servers in a LangChain-compatible format.""" + self.available_tools = [] + for _, (_, tool_object) in self.tool_to_session.items(): + # LangChain's .bind_tools can often work directly with this MCP schema + tool_schema = { + "name": tool_object.name, + "description": tool_object.description, + "args_schema": self.create_pydantic_model_from_schema(tool_object.name, tool_object.inputSchema) + } + self.available_tools.append(tool_schema) + print(f"Available tools after rebuild: {len(self.available_tools)}") + + def create_pydantic_model_from_schema(self, name: str, schema: dict) -> Type[BaseModel]: + """Dynamically creates a Pydantic model from a JSON schema for LangChain tool binding.""" + fields = {} + if schema and 'properties' in schema: + for prop_name, prop_details in schema['properties'].items(): + field_type = str # Default to string + # A more robust implementation would map JSON schema types to Python types + if prop_details.get('type') == 'integer': field_type = int + elif prop_details.get('type') == 'number': field_type = float + elif prop_details.get('type') == 'boolean': field_type = bool + + fields[prop_name] = (field_type, Field(..., description=prop_details.get('description'))) + + return create_model(name, **fields) # type: ignore + + async def execute_mcp_tool(self, tool_name: str, tool_args: Dict) -> str: + try: + session, _ = self.tool_to_session[tool_name] + result = await session.call_tool(tool_name, arguments=tool_args) + if not result.content: return "Tool executed successfully." + + # Handle different content types properly + if isinstance(result.content, list): + text_parts = [] + for item in result.content: + # Check if item has a text attribute + if hasattr(item, 'text'): + text_parts.append(str(item.text)) + else: + # Handle other content types + text_parts.append(str(item)) + return " | ".join(text_parts) + else: + return str(result.content) + except Exception as e: + # Check if it's a closed resource error + if "ClosedResourceError" in str(type(e)) or "closed" in str(e).lower(): + raise Exception("MCP session is closed. Please try again.") from e + else: + raise + + async def invoke(self, message_history: List[Dict]) -> Tuple[str, List[Dict], List[Dict]]: + """ + Main entry point. 
Returns a tuple of:
+        (final_text_response, tool_calls_trace, new_full_history)
+        """
+        max_retries = 3
+        for attempt in range(max_retries):
+            try:
+                langchain_messages = self._convert_dict_to_langchain_messages(message_history)
+
+                # Separate the final text response from the tool trace
+                final_text_response = ""
+                tool_calls_trace = []
+
+                max_iterations = 10
+                tool_execution_failed = False
+                for _ in range(max_iterations):
+                    await self._rebuild_mcp_tool_schemas()
+                    model_with_tools = self.langchain_model.bind_tools(self.available_tools)
+                    response_message: AIMessage = await model_with_tools.ainvoke(langchain_messages)
+                    langchain_messages.append(response_message)
+
+                    # Capture the final text response from the last message
+                    if response_message.content:
+                        final_text_response = response_message.content
+
+                    if not response_message.tool_calls:
+                        break
+
+                    for tool_call in response_message.tool_calls:
+                        tool_name = tool_call['name']
+                        tool_args = tool_call['args']
+
+                        try:
+                            result_content = await self.execute_mcp_tool(tool_name, tool_args)
+                            tool_calls_trace.append({
+                                "name": tool_name,
+                                "args": tool_args,
+                                "result": result_content
+                            })
+                        except Exception as e:
+                            if "MCP session is closed" in str(e) and attempt < max_retries - 1:
+                                print(f"MCP session closed, reinitializing (attempt {attempt + 1})")
+                                await self.cleanup()
+                                await self.connect_to_servers()
+                                await asyncio.sleep(0.1)
+                                tool_execution_failed = True
+                                break
+                            else:
+                                result_content = f"Error executing tool {tool_name}: {e}"
+                                tool_calls_trace.append({
+                                    "name": tool_name,
+                                    "args": tool_args,
+                                    "error": result_content
+                                })
+
+                        langchain_messages.append(ToolMessage(content=result_content, tool_call_id=tool_call['id']))
+
+                    if tool_execution_failed:
+                        break
+
+                if tool_execution_failed and attempt < max_retries - 1:
+                    continue
+
+                final_history_dict = self._convert_langchain_messages_to_dict(langchain_messages)
+
+                return final_text_response, tool_calls_trace, final_history_dict
+
+            except RuntimeError as e:
+                if "Event loop is closed" in str(e) and attempt < max_retries - 1:
+                    print(f"Event loop closed, reinitializing model (attempt {attempt + 1})")
+                    self.langchain_model = self._create_langchain_model(**self.model_settings)
+                    await asyncio.sleep(0.1)
+                    continue
+                else:
+                    raise Exception("Event loop closed. 
Please try again.") from e + except Exception as e: + if attempt >= max_retries - 1: raise + print(f"Invoke attempt {attempt + 1} failed, retrying: {e}") + await asyncio.sleep(0.1) + + raise Exception("Failed to invoke MCP client after all retries") + + async def cleanup(self): + """Clean up all resources properly.""" + try: + # Close all sessions using the exit stack to avoid context issues + await self.exit_stack.aclose() + except Exception as e: + # Suppress errors related to async context management as they don't affect functionality + if "cancel scope" not in str(e).lower() and "asyncio" not in str(e).lower(): + print(f"Error during cleanup: {e}") + + try: + # Clear sessions + self.sessions.clear() + + # Clear other data structures + self.tool_to_session.clear() + self.available_prompts.clear() + self.static_resources.clear() + self.dynamic_resources.clear() + self.resource_to_session.clear() + self.prompt_to_session.clear() + self.available_tools.clear() + + # Recreate the exit stack for future use + self.exit_stack = AsyncExitStack() + except Exception as e: + print(f"Error during cleanup: {e}") diff --git a/src/client/mcp/frontend.py b/src/client/mcp/frontend.py new file mode 100644 index 00000000..383bb07f --- /dev/null +++ b/src/client/mcp/frontend.py @@ -0,0 +1,94 @@ +import streamlit as st +import os +import requests +import json + +def set_page(): + st.set_page_config( + page_title="MCP Universal Chatbot", + page_icon="🤖", + layout="wide" + ) + +def get_fastapi_base_url(): + return os.getenv("FASTAPI_BASE_URL", "http://127.0.0.1:8000") + +@st.cache_data(show_spinner="Connecting to MCP Backend...", ttl=60) +def get_server_capabilities(fastapi_base_url): + """Fetches the lists of tools and resources from the FastAPI backend.""" + try: + # Get API key from environment or generate one + api_key = os.getenv("API_SERVER_KEY") + headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} + + # First check if MCP is enabled and initialized + status_response = requests.get(f"{fastapi_base_url}/v1/mcp/status", headers=headers) + if status_response.status_code == 200: + status = status_response.json() + if not status.get("enabled", False): + st.warning("MCP is not enabled. Please enable it in the configuration.") + return {"error": "MCP not enabled"}, {"error": "MCP not enabled"}, {"error": "MCP not enabled"} + if not status.get("initialized", False): + st.info("MCP is enabled but not yet initialized. Please select a model first.") + return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} + + tools_response = requests.get(f"{fastapi_base_url}/v1/mcp/tools", headers=headers) + tools_response.raise_for_status() + tools = tools_response.json() + + resources_response = requests.get(f"{fastapi_base_url}/v1/mcp/resources", headers=headers) + resources_response.raise_for_status() + resources = resources_response.json() + + prompts_response = requests.get(f"{fastapi_base_url}/v1/mcp/prompts", headers=headers) + prompts_response.raise_for_status() + prompts = prompts_response.json() + + return tools, resources, prompts + except requests.exceptions.RequestException as e: + st.error(f"Could not connect to the MCP backend at {fastapi_base_url}. Is it running? 
Error: {e}") + return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} + +def get_server_files(): + files = ["server/mcp/server_config.json"] + try: + with open("server/mcp/server_config.json", "r") as f: config = json.load(f) + for server in config.get("mcpServers", {}).values(): + script_path = server.get("args", [None])[0] + if script_path and os.path.exists(script_path): files.append(script_path) + except FileNotFoundError: st.sidebar.error("server_config.json not found!") + return list(set(files)) + +def display_ide_tab(): + st.header("🔧 Integrated MCP Server IDE") + st.info("Edit your server configuration or scripts. Restart the launcher for changes to take effect.") + server_files = get_server_files() + selected_file = st.selectbox("Select a file to edit", options=server_files) + if selected_file: + with open(selected_file, "r") as f: file_content = f.read() + from streamlit_ace import st_ace + new_content = st_ace(value=file_content, language="python" if selected_file.endswith(".py") else "json", theme="monokai", keybinding="vscode", height=500, auto_update=True) + if st.button("Save Changes"): + with open(selected_file, "w") as f: f.write(new_content) + st.success(f"Successfully saved {selected_file}!") + +def display_commands_tab(tools, resources, prompts): + st.header("📖 Discovered MCP Commands") + st.info("These commands were discovered from the MCP backend.") + + if tools: + with st.expander("🛠️ Available Tools (Used automatically by the AI)", expanded=True): + # Extract just the tool names from the tools response + if "tools" in tools and isinstance(tools["tools"], list): + tool_names = [tool.get("name", tool) if isinstance(tool, dict) else tool for tool in tools["tools"]] + st.write(tool_names) + else: + st.json(tools) + + if resources: + with st.expander("📦 Available Resources (Use with `@` or just ``)"): + st.json(resources) + + if prompts: + with st.expander("📝 Available Prompts (Use with `/prompt ` or select in chat)"): + st.json(prompts) diff --git a/src/client/utils/st_common.py b/src/client/utils/st_common.py index b1ec6d98..b386215d 100644 --- a/src/client/utils/st_common.py +++ b/src/client/utils/st_common.py @@ -17,6 +17,12 @@ import common.logging_config as logging_config from common.schema import PromptPromptType, PromptNameType, SelectAISettings, ClientIdType +# Import the MCP initialization function +try: + from launch_server import initialize_mcp_engine_with_model +except ImportError: + initialize_mcp_engine_with_model = None + logger = logging_config.logging.getLogger("client.utils.st_common") @@ -161,6 +167,8 @@ def ll_sidebar() -> None: selected_model = state.client_settings["ll_model"]["model"] ll_idx = list(ll_models_enabled.keys()).index(selected_model) if not state.client_settings["selectai"]["enabled"]: + # Store the previous model to detect changes + previous_model = selected_model selected_model = st.sidebar.selectbox( "Chat model:", options=list(ll_models_enabled.keys()), @@ -169,6 +177,16 @@ def ll_sidebar() -> None: on_change=update_client_settings("ll_model"), disabled=state.client_settings["selectai"]["enabled"], ) + + # If the model has changed, reinitialize the MCP engine + if selected_model != previous_model and initialize_mcp_engine_with_model: + try: + # Instead of creating a new event loop, we'll set a flag to indicate + # that the MCP engine needs to be reinitialized + state.mcp_needs_reinit = selected_model + logger.info(f"MCP engine marked for reinitialization with model: {selected_model}") + except Exception as e: + 
logger.error(f"Failed to mark MCP engine for reinitialization with model {selected_model}: {e}") # Temperature temperature = ll_models_enabled[selected_model]["temperature"] diff --git a/src/common/schema.py b/src/common/schema.py index ecd2fb98..da2d5ca3 100644 --- a/src/common/schema.py +++ b/src/common/schema.py @@ -4,8 +4,10 @@ """ # spell-checker:ignore ollama, hnsw, mult, ocid, testset, selectai, explainsql, showsql, vector_search, aioptimizer +from __future__ import annotations + import time -from typing import Optional, Literal, Union, get_args, Any +from typing import Optional, Literal, Union, get_args, Any, Dict, List from pydantic import BaseModel, Field, PrivateAttr, model_validator from langchain_core.messages import ChatMessage @@ -301,6 +303,7 @@ class Configuration(BaseModel): model_configs: Optional[list[Model]] = None oci_configs: Optional[list[OracleCloudSettings]] = None prompt_configs: Optional[list[Prompt]] = None + mcp_configs: Optional[list[MCPModelConfig]] = Field(default=None, description="List of MCP configurations") def model_dump_public(self, incl_sensitive: bool = False, incl_readonly: bool = False) -> dict: """Remove marked fields for FastAPI Response""" @@ -452,6 +455,37 @@ class EvaluationReport(Evaluation): html_report: str = Field(description="HTML Report") +##################################################### +# MCP +##################################################### +class MCPModelConfig(BaseModel): + """MCP Model Configuration""" + model_id: str = Field(..., description="Model identifier") + service_type: Literal["ollama", "openai"] = Field(..., description="AI service type") + base_url: str = Field(default="http://localhost:11434", description="Base URL for API") + api_key: Optional[str] = Field(default=None, description="API key", json_schema_extra={"sensitive": True}) + enabled: bool = Field(default=True, description="Model availability status") + streaming: bool = Field(default=False, description="Enable streaming responses") + temperature: float = Field(default=1.0, description="Model temperature") + max_tokens: int = Field(default=2048, description="Maximum tokens per response") + + +class MCPToolConfig(BaseModel): + """MCP Tool Configuration""" + name: str = Field(..., description="Tool name") + description: str = Field(..., description="Tool description") + parameters: Dict[str, Any] = Field(..., description="Tool parameters") + enabled: bool = Field(default=True, description="Tool availability status") + + +class MCPSettings(BaseModel): + """MCP Global Settings""" + models: List[MCPModelConfig] = Field(default_factory=list, description="Available MCP models") + tools: List[MCPToolConfig] = Field(default_factory=list, description="Available MCP tools") + default_model: Optional[str] = Field(default=None, description="Default model identifier") + enabled: bool = Field(default=True, description="Enable or disable MCP functionality") + + ##################################################### # Types ##################################################### @@ -469,3 +503,6 @@ class EvaluationReport(Evaluation): TestSetsIdType = TestSets.__annotations__["tid"] TestSetsNameType = TestSets.__annotations__["name"] TestSetDateType = TestSets.__annotations__["created"] +MCPModelIdType = MCPModelConfig.__annotations__["model_id"] +MCPServiceType = MCPModelConfig.__annotations__["service_type"] +MCPToolNameType = MCPToolConfig.__annotations__["name"] diff --git a/src/launch_client.py b/src/launch_client.py index 4e5e4797..39330c3d 100644 --- 
a/src/launch_client.py
+++ b/src/launch_client.py
@@ -128,6 +128,7 @@ def main() -> None:
     state.disabled["model_cfg"] = os.environ.get("DISABLE_MODEL_CFG", "false").lower() == "true"
     state.disabled["oci_cfg"] = os.environ.get("DISABLE_OCI_CFG", "false").lower() == "true"
     state.disabled["settings"] = os.environ.get("DISABLE_SETTINGS", "false").lower() == "true"
+    state.disabled["mcp_cfg"] = os.environ.get("DISABLE_MCP_CFG", "false").lower() == "true"
 
     # Left Hand Side - Navigation
     chatbot = st.Page("client/content/chatbot.py", title="ChatBot", icon="💬", default=True)
@@ -166,6 +167,9 @@ def main() -> None:
+    # Add the MCP Servers page before pruning, so the append cannot hit a deleted key
+    if not state.disabled["mcp_cfg"]:
+        mcp_config = st.Page("client/content/config/mcp_servers.py", title="MCP Servers", icon="💾")
+        navigation["Configuration"].append(mcp_config)
     # When we get here, if there's nothing in "Configuration" delete it
     if not navigation["Configuration"]:
         del navigation["Configuration"]
 
     pg = st.navigation(navigation, position="sidebar", expanded=False)
     pg.run()
diff --git a/src/launch_server.py b/src/launch_server.py
index b9c02194..fceea5c0 100644
--- a/src/launch_server.py
+++ b/src/launch_server.py
@@ -25,19 +25,25 @@
 import server.patches.litellm_patch  # pylint: disable=unused-import
 
 import argparse
+import json
 import queue
 import secrets
 import socket
 import subprocess
 import threading
-from typing import Annotated
+from typing import Annotated, Any, Dict, Optional
 from pathlib import Path
 
 import uvicorn
+from contextlib import asynccontextmanager
 import psutil
-from fastapi import FastAPI, HTTPException, Depends, status, APIRouter
+from client.mcp.client import MCPClient
+from fastapi import APIRouter, Depends, FastAPI, HTTPException, status
+from fastapi.openapi.utils import get_openapi
+from fastapi.routing import APIRoute
 from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+from pydantic import BaseModel
 
 # Logging
 import common.logging_config as logging_config
@@ -45,9 +51,50 @@
 # Configuration
 import server.bootstrap.configfile as configfile
+from server.bootstrap import mcp as mcp_bootstrap
 
 logger = logging_config.logging.getLogger("launch_server")
 
+mcp_engine: Optional[MCPClient] = None
+
+def get_mcp_engine() -> Optional[MCPClient]:
+    """Get the current MCP engine instance."""
+    logger.debug(f"get_mcp_engine() called, returning: {mcp_engine}")
+    return mcp_engine
+
+async def initialize_mcp_engine_with_model(model_name: str) -> Optional[MCPClient]:
+    """Initialize or reinitialize the MCP engine with a specific model."""
+    global mcp_engine
+
+    # Clean up existing engine if it exists
+    if mcp_engine:
+        try:
+            await mcp_engine.cleanup()
+        except Exception as e:
+            logger.error(f"Error cleaning up existing MCP engine: {e}")
+
+    # Initialize new engine with the specified model
+    try:
+        mcp_engine = MCPClient(client_settings={'ll_model': {'model': model_name}})
+        logger.info("MCP Client created with model %s, connecting to servers...", model_name)
+        await mcp_engine.connect_to_servers()
+        logger.info("MCP Engine initialized successfully with model %s", model_name)
+        return mcp_engine
+    except Exception as e:
+        logger.error(f"Failed to initialize MCP Engine with model {model_name}: {e}", exc_info=True)
+        
mcp_engine = None + return None +class McpToolCallRequest(BaseModel): + tool_name: str + tool_args: Dict[str, Any] ########################################## # Process Control @@ -97,8 +144,7 @@ def start_subprocess(port: int, logfile: bool) -> subprocess.Popen: return process port = port or find_available_port() - existing_pid = get_pid_using_port(port) - if existing_pid: + if existing_pid := get_pid_using_port(port): logger.info("API server already running on port: %i (PID: %i)", port, existing_pid) return existing_pid @@ -118,11 +164,10 @@ def stop_server(pid: int) -> None: proc = psutil.Process(pid) proc.terminate() proc.wait() + logger.info("API server stopped.") except (psutil.NoSuchProcess, psutil.AccessDenied) as ex: logger.error("Failed to terminate process with PID: %i - %s", pid, ex) - logger.info("API server stopped.") - ########################################## # Server App and API Key @@ -170,12 +215,72 @@ def register_endpoints(noauth: APIRouter, auth: APIRouter): auth.include_router(api_v1.selectai.auth, prefix="/v1/selectai", tags=["SelectAI"]) auth.include_router(api_v1.settings.auth, prefix="/v1/settings", tags=["Tools - Settings"]) auth.include_router(api_v1.testbed.auth, prefix="/v1/testbed", tags=["Tools - Testbed"]) + auth.include_router(api_v1.mcp.auth, prefix="/v1/mcp", tags=["Config - MCP Servers"]) ############################################################################# # APP FACTORY ############################################################################# -def create_app(config: str = None) -> FastAPI: +@asynccontextmanager +async def lifespan(app: FastAPI): + """FastAPI startup/shutdown lifecycle for the MCP Engine.""" + logger.info("Starting API Server...") + global mcp_engine + + # Define a single, authoritative path for the configuration file. + config_path = Path("server/etc/mcp_config.json") + + # 1. Handle the missing configuration file as a critical error. + if not config_path.exists(): + logger.error( + f"CRITICAL: MCP configuration file not found at '{config_path}'. " + "MCP Engine cannot be initialized." + ) + # Yield control to allow the server to run, but without the MCP engine. + yield + return + + # 2. Load the configuration and initialize the engine. + try: + logger.info(f"Loading MCP configuration from '{config_path}'...") + with open(config_path, encoding='utf-8') as f: + mcp_config = json.load(f) + + mcp_bootstrap.load_mcp_settings(mcp_config) + + # 3. Check if MCP is enabled in the loaded configuration. + if mcp_bootstrap.MCP_SETTINGS and mcp_bootstrap.MCP_SETTINGS.enabled: + logger.info("MCP is enabled. Initializing MCP Engine...") + + # This structure assumes MCPClient can be initialized with just the default model. + client_init_settings = { + 'll_model': {'model': mcp_bootstrap.MCP_SETTINGS.default_model} + } + mcp_engine = MCPClient(client_settings=client_init_settings) + + await mcp_engine.connect_to_servers() + logger.info("MCP Engine initialized successfully.") + else: + logger.warning("MCP is disabled in the configuration file. Skipping initialization.") + + except Exception as e: + logger.error(f"Failed to initialize MCP Engine from configuration: {e}", exc_info=True) + # Ensure the engine is not set if initialization fails. + mcp_engine = None + + # Yield control to the running application. + yield + + # Shutdown the engine if it was successfully initialized. 
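+    # MCPClient.cleanup() closes its AsyncExitStack; anyio "cancel scope" errors
+    # raised by the stdio transports during shutdown are suppressed inside cleanup().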
+ if mcp_engine: + logger.info("Shutting down MCP Engine...") + try: + await mcp_engine.cleanup() + logger.info("MCP Engine cleanup completed.") + except Exception as e: + logger.error(f"Error during MCP Engine cleanup: {e}") + +def create_app(config: str = "") -> FastAPI: """Create and configure the FastAPI app.""" if not config: config = configfile.config_file_path() @@ -187,6 +292,7 @@ def create_app(config: str = None) -> FastAPI: version=__version__, docs_url="/v1/docs", openapi_url="/v1/openapi.json", + lifespan=lifespan, license_info={ "name": "Universal Permissive License", "url": "http://oss.oracle.com/licenses/upl", diff --git a/src/server/api/core/bootstrap.py b/src/server/api/core/bootstrap.py index f4087446..db95de41 100644 --- a/src/server/api/core/bootstrap.py +++ b/src/server/api/core/bootstrap.py @@ -3,10 +3,11 @@ Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ -from server.bootstrap import databases, models, oci, prompts, settings +from server.bootstrap import databases, models, oci, prompts, settings, mcp DATABASE_OBJECTS = databases.main() MODEL_OBJECTS = models.main() OCI_OBJECTS = oci.main() PROMPT_OBJECTS = prompts.main() SETTINGS_OBJECTS = settings.main() +MCP_OBJECTS = mcp.main() diff --git a/src/server/api/core/mcp.py b/src/server/api/core/mcp.py new file mode 100644 index 00000000..751a8fc0 --- /dev/null +++ b/src/server/api/core/mcp.py @@ -0,0 +1,31 @@ +from typing import Optional, List, Dict, Any +from common.schema import MCPModelConfig, MCPToolConfig, MCPSettings +from server.bootstrap import mcp as mcp_bootstrap +import common.logging_config as logging_config + +logger = logging_config.logging.getLogger("api.core.mcp") + +def get_mcp_model(model_id: str) -> Optional[MCPModelConfig]: + """Get MCP model configuration by ID""" + for model in mcp_bootstrap.MCP_MODELS: + if model.model_id == model_id: + return model + return None + +def get_mcp_tool(tool_name: str) -> Optional[MCPToolConfig]: + """Get MCP tool configuration by name""" + for tool in mcp_bootstrap.MCP_TOOLS: + if tool.name == tool_name: + return tool + return None + +def update_mcp_settings(settings: Dict[str, Any]) -> MCPSettings: + """Update MCP settings""" + if not mcp_bootstrap.MCP_SETTINGS: + raise ValueError("MCP settings not initialized") + + for key, value in settings.items(): + if hasattr(mcp_bootstrap.MCP_SETTINGS, key): + setattr(mcp_bootstrap.MCP_SETTINGS, key, value) + + return mcp_bootstrap.MCP_SETTINGS \ No newline at end of file diff --git a/src/server/api/core/settings.py b/src/server/api/core/settings.py index 7013eb48..6121a52e 100644 --- a/src/server/api/core/settings.py +++ b/src/server/api/core/settings.py @@ -54,11 +54,16 @@ def get_server_config() -> schema.Configuration: prompt_objects = bootstrap.PROMPT_OBJECTS prompt_configs = [prompt for prompt in prompt_objects] + # Add MCP configs as a list (similar to other configs) + mcp_objects = bootstrap.mcp.MCP_MODELS # Get list of models from bootstrap + mcp_configs = [model for model in mcp_objects] # Convert to list like other configs + full_config = { "database_configs": database_configs, "model_configs": model_configs, "oci_configs": oci_configs, "prompt_configs": prompt_configs, + "mcp_configs": mcp_configs, # Now it's a list like other configs } return full_config @@ -91,6 +96,11 @@ def update_server_config(config_data: dict) -> None: if "prompt_configs" in config_data: bootstrap.PROMPT_OBJECTS = config.prompt_configs or [] + + # Add MCP config handling (similar to 
other configs) + if "mcp_configs" in config_data: + from server.bootstrap import mcp + mcp.MCP_MODELS = config.mcp_configs or [] # Store as list like other configs def load_config_from_json_data(config_data: dict, client: schema.ClientIdType = None) -> None: diff --git a/src/server/api/v1/__init__.py b/src/server/api/v1/__init__.py index fcd6743f..873ce855 100644 --- a/src/server/api/v1/__init__.py +++ b/src/server/api/v1/__init__.py @@ -3,4 +3,4 @@ Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ -from . import chat, databases, embed, models, oci, probes, prompts, testbed, settings, selectai +from . import chat, databases, embed, models, oci, probes, prompts, testbed, settings, selectai, mcp diff --git a/src/server/api/v1/mcp.py b/src/server/api/v1/mcp.py new file mode 100644 index 00000000..c1e008a4 --- /dev/null +++ b/src/server/api/v1/mcp.py @@ -0,0 +1,147 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. +This file is being used in APIs, and not the backend.py file. +""" + +from typing import Optional, Dict, Any +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel +from datetime import datetime + +import common.logging_config as logging_config + +logger = logging_config.logging.getLogger("endpoints.v1.mcp") + +auth = APIRouter() + +def mcp_engine_obj(): + """Check if the MCP engine is initialized.""" + try: + from launch_server import get_mcp_engine + mcp_engine = get_mcp_engine() + except ImportError: + return None + return mcp_engine + +class McpToolCallRequest(BaseModel): + tool_name: str + tool_args: Dict[str, Any] + +class ChatRequest(BaseModel): + query: str + prompt_name: Optional[str] = None + resource_uri: Optional[str] = None + message_history: Optional[list] = None + +@auth.get( + "/tools", + description="List available MCP tools", + response_model=dict +) +async def list_mcp_tools(): + # Import here to avoid circular imports + mcp_engine = mcp_engine_obj() + if not mcp_engine: + raise HTTPException(status_code=503, detail="MCP Engine not initialized.") + try: + await mcp_engine._rebuild_mcp_tool_schemas() + except Exception as e: + logger.error(f"Error rebuilding tool schemas: {e}") + + tools_info = [] + for tool_name, (session, tool_object) in mcp_engine.tool_to_session.items(): + tools_info.append({ + "name": tool_object.name, + "description": tool_object.description, + "input_schema": tool_object.inputSchema + }) + return {"tools": tools_info} + +@auth.post( + "/execute", + description="Execute an MCP tool", + response_model=dict +) +async def execute_mcp_tool(request: McpToolCallRequest): + # Import here to avoid circular imports + mcp_engine = mcp_engine_obj() + if not mcp_engine: + raise HTTPException(status_code=503, detail="MCP Engine not initialized.") + try: + result = await mcp_engine.execute_mcp_tool(request.tool_name, request.tool_args) + return {"result": result} + except Exception as e: + logger.error(f"Error executing MCP tool: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@auth.post( + "/chat", + description="Chat with MCP engine", + response_model=dict +) +async def chat_endpoint(request: ChatRequest): + mcp_engine = mcp_engine_obj() + if not mcp_engine: + raise HTTPException(status_code=503, detail="MCP Engine not initialized.") + try: + message_history = request.message_history or [{"role": "user", "content": request.query}] + response_text, 
_, _ = await mcp_engine.invoke(
+            message_history=message_history
+        )
+        return {"response": response_text}
+    except Exception as e:
+        logger.error(f"Error in MCP chat: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@auth.get(
+    "/resources",
+    description="List MCP resources",
+    response_model=dict
+)
+async def list_resources():
+    mcp_engine = mcp_engine_obj()
+    if not mcp_engine:
+        raise HTTPException(status_code=503, detail="MCP Engine not initialized.")
+
+    try:
+        # Refresh capabilities from the connected sessions
+        await mcp_engine._rebuild_mcp_tool_schemas()
+    except Exception as e:
+        logger.error(f"Error loading resources: {e}")
+
+    return {
+        "static": list(getattr(mcp_engine, "static_resources", {}).keys()),
+        "dynamic": getattr(mcp_engine, "dynamic_resources", [])
+    }
+
+@auth.get(
+    "/prompts",
+    description="List MCP prompts",
+    response_model=dict
+)
+async def list_prompts():
+    mcp_engine = mcp_engine_obj()
+    if not mcp_engine:
+        raise HTTPException(status_code=503, detail="MCP Engine not initialized.")
+    try:
+        # Refresh capabilities from the connected sessions
+        await mcp_engine._rebuild_mcp_tool_schemas()
+    except Exception as e:
+        logger.error(f"Error loading prompts: {e}")
+
+    return {
+        "prompts": list(getattr(mcp_engine, "available_prompts", {}).keys())
+    }
+
+@auth.get("/health", response_model=dict)
+async def health_check():
+    """Check MCP engine health status"""
+    actual_mcp_engine = mcp_engine_obj()
+    return {
+        "status": "initialized" if actual_mcp_engine else "not_initialized",
+        "engine_type": str(type(actual_mcp_engine)) if actual_mcp_engine else None,
+        "available_tools": len(getattr(actual_mcp_engine, "available_tools", [])) if actual_mcp_engine else 0,
+        "timestamp": datetime.now().isoformat()
+    }
diff --git a/src/server/api/v1/settings.py b/src/server/api/v1/settings.py
index 4bcd9817..528560d5 100644
--- a/src/server/api/v1/settings.py
+++ b/src/server/api/v1/settings.py
@@ -38,7 +38,7 @@ async def settings_get(
     full_config: bool = False,
     incl_sensitive: bool = Depends(_incl_sensitive_param),
     incl_readonly: bool = Depends(_incl_readonly_param),
-) -> Union[schema.Configuration, schema.Settings]:
+) -> Union[schema.Configuration, schema.Settings, JSONResponse]:
     """Get settings for a specific client by name"""
     try:
         client_settings = settings.get_client_settings(client)
@@ -55,8 +55,11 @@
             model_configs=config.get("model_configs"),
             oci_configs=config.get("oci_configs"),
             prompt_configs=config.get("prompt_configs"),
+            mcp_configs=config.get("mcp_configs"),
         )
-        return JSONResponse(content=response.model_dump_public(incl_sensitive=incl_sensitive, incl_readonly=incl_readonly))
+        if incl_sensitive or incl_readonly:
+            return JSONResponse(content=response.model_dump_public(incl_sensitive=incl_sensitive, incl_readonly=incl_readonly))
+        return response
 
 
 @auth.patch(
@@ -114,12 +117,12 @@ async def load_settings_from_file(
         pass
 
     try:
-        if not file.filename.endswith(".json"):
+        if not file.filename or not file.filename.endswith(".json"):
             raise HTTPException(status_code=400, detail="Settings: Only JSON files are supported.")
         contents = await file.read()
         config_data = json.loads(contents)
         settings.load_config_from_json_data(config_data, client)
-        return {"message": "Configuration loaded successfully."}
+        return JSONResponse(content={"message": "Configuration loaded successfully."})
     except json.JSONDecodeError as ex:
         raise HTTPException(status_code=400, detail="Settings: Invalid JSON 
file.") from ex except KeyError as ex: @@ -148,7 +151,7 @@ async def load_settings_from_json( try: settings.load_config_from_json_data(payload.model_dump(), client) - return {"message": "Configuration loaded successfully."} + return JSONResponse(content={"message": "Configuration loaded successfully."}) except json.JSONDecodeError as ex: raise HTTPException(status_code=400, detail="Settings: Invalid JSON file.") from ex except KeyError as ex: diff --git a/src/server/bootstrap/mcp.py b/src/server/bootstrap/mcp.py new file mode 100644 index 00000000..95e2e34a --- /dev/null +++ b/src/server/bootstrap/mcp.py @@ -0,0 +1,89 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. +""" +from typing import List, Optional +import os + +from server.bootstrap.configfile import ConfigStore +from common.schema import MCPSettings, MCPModelConfig, MCPToolConfig +import common.logging_config as logging_config + +logger = logging_config.logging.getLogger("bootstrap.mcp") + +# Global configuration holders +MCP_SETTINGS: Optional[MCPSettings] = None +MCP_MODELS: List[MCPModelConfig] = [] +MCP_TOOLS: List[MCPToolConfig] = [] + +def load_mcp_settings(config: dict) -> None: + """Load MCP configuration from config file""" + global MCP_SETTINGS, MCP_MODELS, MCP_TOOLS + + # Convert to settings object first + mcp_settings = MCPSettings( + models=[MCPModelConfig(**model) for model in config.get("models", [])], + tools=[MCPToolConfig(**tool) for tool in config.get("tools", [])], + default_model=config.get("default_model"), + enabled=config.get("enabled", True) + ) + + # Set globals + MCP_SETTINGS = mcp_settings + MCP_MODELS = mcp_settings.models + MCP_TOOLS = mcp_settings.tools + + logger.info("Loaded %i MCP Models and %i Tools", len(MCP_MODELS), len(MCP_TOOLS)) + +def main() -> MCPSettings: + """Bootstrap MCP Configuration""" + logger.debug("*** Bootstrapping MCP - Start") + + # Load from ConfigStore if available + configuration = ConfigStore.get() + if configuration and configuration.mcp_configs: + logger.debug("Using MCP configs from ConfigStore") + # Convert list of MCPModelConfig objects to MCPSettings + mcp_settings = MCPSettings( + models=configuration.mcp_configs, + tools=[], # No tools in the current schema + default_model=configuration.mcp_configs[0].model_id if configuration.mcp_configs else None, + enabled=True + ) + else: + # Default MCP configuration + mcp_settings = MCPSettings( + models=[ + MCPModelConfig( + model_id="llama3.1", + service_type="ollama", + base_url=os.environ.get("ON_PREM_OLLAMA_URL", "http://localhost:11434"), + enabled=True, + streaming=False, + temperature=1.0, + max_tokens=2048 + ) + ], + tools=[ + MCPToolConfig( + name="file_reader", + description="Read contents of files", + parameters={ + "path": "string", + "encoding": "string" + }, + enabled=True + ) + ], + default_model=None, + enabled=True + ) + + logger.info("Loaded %i MCP Models and %i Tools", len(mcp_settings.models), len(mcp_settings.tools)) + logger.debug("*** Bootstrapping MCP - End") + logger.info("MCP Settings: %s", mcp_settings.model_dump_json()) + return mcp_settings + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/server/mcp/server/archive_mcp.py b/src/server/mcp/server/archive_mcp.py new file mode 100644 index 00000000..d38a091f --- /dev/null +++ b/src/server/mcp/server/archive_mcp.py @@ -0,0 +1,182 @@ +import json +import os +from dotenv import load_dotenv +import 
arxiv
+from typing import List
+from mcp.server.fastmcp import FastMCP
+import textwrap
+
+# --- Configuration and Setup ---
+load_dotenv()
+PAPER_DIR = "papers"
+# Initialize FastMCP server with a name
+mcp = FastMCP("research")
+_paper_cache = {}
+
+# --- Tool Definitions ---
+
+@mcp.tool()
+def search_papers(topic: str, max_results: int = 5) -> List[str]:
+    """
+    Searches for papers on arXiv based on a topic and saves their metadata.
+
+    Args:
+        topic (str): The topic to search for.
+        max_results (int): Maximum number of results to retrieve.
+
+    Returns:
+        List[str]: A list of the paper IDs found and saved.
+    """
+    client_arxiv = arxiv.Client()
+    search = arxiv.Search(
+        query=topic,
+        max_results=max_results,
+        sort_by=arxiv.SortCriterion.Relevance
+    )
+    papers = list(client_arxiv.results(search))
+
+    if not papers:
+        # Server-side feedback for operators
+        print(f"Server: No papers found for topic '{topic}'")
+        return []
+
+    path = os.path.join(PAPER_DIR, topic.lower().replace(" ", "_"))
+    os.makedirs(path, exist_ok=True)
+    file_path = os.path.join(path, "papers_info.json")
+
+    try:
+        with open(file_path, "r") as json_file:
+            papers_info = json.load(json_file)
+    except (FileNotFoundError, json.JSONDecodeError):
+        papers_info = {}
+
+    paper_ids = []
+    for paper in papers:
+        paper_id = paper.get_short_id()
+        paper_ids.append(paper_id)
+        papers_info[paper_id] = {
+            'title': paper.title,
+            'authors': [author.name for author in paper.authors],
+            'summary': paper.summary,
+            'pdf_url': paper.pdf_url,
+            'published': str(paper.published.date())
+        }
+
+    with open(file_path, "w") as json_file:
+        json.dump(papers_info, json_file, indent=2)
+
+    print(f"Server: Saved {len(paper_ids)} papers to {file_path}")
+    return paper_ids
+
+@mcp.tool()
+def extract_info(paper_id: str) -> str:
+    """
+    Retrieves saved information for a specific paper ID from all topics.
+    Uses an in-memory cache for performance.
+
+    Args:
+        paper_id (str): The ID of the paper to look for.
+
+    Returns:
+        str: JSON string with paper information if found, else an error message.
+    """
+    # 1. First, check the cache for an exact match
+    if paper_id in _paper_cache:
+        return json.dumps(_paper_cache[paper_id], indent=2)
+
+    # 2. If not cached, fall back to the slower on-disk search across all topic folders
+    if not os.path.isdir(PAPER_DIR):
+        return f"Error: No saved information found for paper ID {paper_id}."
+    for item in os.listdir(PAPER_DIR):
+        item_path = os.path.join(PAPER_DIR, item)
+        if os.path.isdir(item_path):
+            file_path = os.path.join(item_path, "papers_info.json")
+            if os.path.isfile(file_path):
+                try:
+                    with open(file_path, "r") as json_file:
+                        papers_info = json.load(json_file)
+
+                    # Cache every paper from this file so each file is read at most once
+                    for key, value in papers_info.items():
+                        if key not in _paper_cache:
+                            _paper_cache[key] = value
+
+                except (FileNotFoundError, json.JSONDecodeError):
+                    continue
+
+    # 3. Now that the cache is populated from the saved files, check again.
+    # This also handles paper-version differences (e.g. 1234.5678v2 vs v1).
+    if paper_id in _paper_cache:
+        return json.dumps(_paper_cache[paper_id], indent=2)
+
+    base_id = paper_id.split('v')[0]
+    for key, value in _paper_cache.items():
+        if key.startswith(base_id):
+            return json.dumps(value, indent=2)
+
+    return f"Error: No saved information found for paper ID {paper_id}."
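As a sanity check, the two tools above can be driven end-to-end with the stdio client from the pinned mcp SDK. A minimal sketch, assuming the server is launched from the repo root with the same command line that server_config.json below uses for the "research" entry:

import asyncio
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

params = StdioServerParameters(command="python3", args=["server/mcp/server/archive_mcp.py"])

async def demo() -> None:
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # call_tool takes the tool name plus a dict of arguments
            result = await session.call_tool("search_papers", {"topic": "model context protocol", "max_results": 2})
            print(result.content)

asyncio.run(demo())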
+
+# --- Resource Definitions ---
+
+@mcp.resource("papers://folders")
+def get_available_folders() -> str:
+    """Lists all available topic folders that contain saved paper information."""
+    print(f"Server: Listing available topic folders in {PAPER_DIR}")
+    folders = []
+    if os.path.exists(PAPER_DIR):
+        for topic_dir in os.listdir(PAPER_DIR):
+            if os.path.isdir(os.path.join(PAPER_DIR, topic_dir)):
+                folders.append(topic_dir)
+
+    content = "# Available Research Topics\n\n"
+    if folders:
+        content += "You can retrieve info for any of these topics using `@`.\n\n"
+        for folder in folders:
+            content += f"- `{folder}`\n"
+    else:
+        content += "No topic folders found. Use `search_papers` to create one."
+    print(f"Server: Found {len(folders)} topic folders.")
+    return content
+
+@mcp.resource("papers://{topic}")
+def get_topic_papers(topic: str) -> str:
+    """Gets detailed information about all saved papers for a specific topic."""
+    print(f"Server: Retrieving papers for topic '{topic}'")
+    topic_dir = topic.lower().replace(" ", "_")
+    papers_file = os.path.join(PAPER_DIR, topic_dir, "papers_info.json")
+
+    if not os.path.exists(papers_file):
+        return f"# No papers found for topic: {topic}"
+
+    with open(papers_file, 'r') as f:
+        papers_data = json.load(f)
+
+    content = f"# Papers on {topic.replace('_', ' ').title()}\n\n"
+    for paper_id, info in papers_data.items():
+        content += f"## {info['title']} (`{paper_id}`)\n"
+        content += f"- **Authors**: {', '.join(info['authors'])}\n"
+        content += f"- **Summary**: {info['summary'][:200]}...\n---\n"
+    print(f"Server: Found {len(papers_data)} papers for topic '{topic}'")
+    return content
+
+# --- Prompt Definition ---
+
+@mcp.prompt()
+def generate_search_prompt(topic: str) -> str:
+    """Generates a system prompt to guide an AI in researching a topic."""
+    return textwrap.dedent(f"""
+    You are a research assistant. Your goal is to provide a comprehensive overview of a topic.
+    When asked about '{topic}', follow these steps:
+    1. Use the `search_papers` tool to find relevant papers.
+    2. For each paper ID returned, use the `extract_info` tool to get its details.
+    3. Synthesize the information from all papers into a cohesive summary.
+    4. Present the key findings, common themes, and any differing conclusions.
+    Do not present the raw JSON. Format the final output for readability.
+    """)
+
+# --- Main Execution Block ---
+
+if __name__ == "__main__":
+    # Run the server over stdio, the default transport for local MCP clients
+    print("Research MCP Server running on stdio...")
+    mcp.run(transport='stdio')
diff --git a/src/server/mcp/server_config.json b/src/server/mcp/server_config.json
new file mode 100644
index 00000000..3d8b0321
--- /dev/null
+++ b/src/server/mcp/server_config.json
@@ -0,0 +1,20 @@
+{
+    "mcpServers": {
+        "filesystem": {
+            "command": "npx",
+            "args": [
+                "-y",
+                "@modelcontextprotocol/server-filesystem",
+                "."
+ ] + }, + "research": { + "command": "python3", + "args": ["server/mcp/server/archive_mcp.py"] + }, + "fetch": { + "command": "python3", + "args": ["-m", "mcp_server_fetch"] + } + } +} From 16ef70aba387dcb4a2bcd045ce9cc97e20d09bbd Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Mon, 11 Aug 2025 14:46:25 +0100 Subject: [PATCH 02/28] Include additional langchain and MCP modules --- src/pyproject.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/pyproject.toml b/src/pyproject.toml index 7650f9da..b31fe996 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -28,9 +28,13 @@ server = [ "fastapi==0.116.1", "faiss-cpu==1.11.0.post1", "giskard==2.17.0", + "langchain-anthropic==0.3.18", "langchain-cohere==0.4.5", "langchain-community==0.3.27", + "langchain-google-genai==2.1.9", + "langchain-groq==0.3.7", "langchain-huggingface==0.3.1", + "langchain-mistralai==0.2.11", "langchain-ollama==0.3.6", "langchain-openai==0.3.29", "langgraph==0.6.4", @@ -38,6 +42,7 @@ server = [ "llama-index==0.13.1", "lxml==6.0.0", "matplotlib==3.10.5", + "mcp==1.12.4", "oci~=2.0", "psutil==7.0.0", "python-multipart==0.0.20", From 0c3dfd0a4c57dd37848f93994cc21f18f31085eb Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Mon, 11 Aug 2025 15:09:07 +0100 Subject: [PATCH 03/28] Re-Org Schema --- src/common/schema.py | 69 ++++++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/src/common/schema.py b/src/common/schema.py index ea7bbdcf..b5ffebd6 100644 --- a/src/common/schema.py +++ b/src/common/schema.py @@ -4,10 +4,8 @@ """ # spell-checker:ignore ollama hnsw mult ocid testset selectai explainsql showsql vector_search aioptimizer genai -from __future__ import annotations - import time -from typing import Optional, Literal, Union, get_args, Any, Dict, List +from typing import Optional, Literal, Union, get_args, Any from pydantic import BaseModel, Field, PrivateAttr, model_validator from langchain_core.messages import ChatMessage @@ -101,6 +99,40 @@ def set_connection(self, connection: oracledb.Connection) -> None: self._connection = connection +##################################################### +# MCP +##################################################### +class MCPModelConfig(BaseModel): + """MCP Model Configuration""" + + model_id: str = Field(..., description="Model identifier") + service_type: Literal["ollama", "openai"] = Field(..., description="AI service type") + base_url: str = Field(default="http://localhost:11434", description="Base URL for API") + api_key: Optional[str] = Field(default=None, description="API key", json_schema_extra={"sensitive": True}) + enabled: bool = Field(default=True, description="Model availability status") + streaming: bool = Field(default=False, description="Enable streaming responses") + temperature: float = Field(default=1.0, description="Model temperature") + max_tokens: int = Field(default=2048, description="Maximum tokens per response") + + +class MCPToolConfig(BaseModel): + """MCP Tool Configuration""" + + name: str = Field(..., description="Tool name") + description: str = Field(..., description="Tool description") + parameters: dict[str, Any] = Field(..., description="Tool parameters") + enabled: bool = Field(default=True, description="Tool availability status") + + +class MCPSettings(BaseModel): + """MCP Global Settings""" + + models: list[MCPModelConfig] = Field(default_factory=list, description="Available MCP models") + tools: list[MCPToolConfig] = Field(default_factory=list, description="Available MCP tools") 
+ default_model: Optional[str] = Field(default=None, description="Default model identifier") + enabled: bool = Field(default=True, description="Enable or disable MCP functionality") + + ##################################################### # Models ##################################################### @@ -474,37 +506,6 @@ class EvaluationReport(Evaluation): html_report: str = Field(description="HTML Report") -##################################################### -# MCP -##################################################### -class MCPModelConfig(BaseModel): - """MCP Model Configuration""" - model_id: str = Field(..., description="Model identifier") - service_type: Literal["ollama", "openai"] = Field(..., description="AI service type") - base_url: str = Field(default="http://localhost:11434", description="Base URL for API") - api_key: Optional[str] = Field(default=None, description="API key", json_schema_extra={"sensitive": True}) - enabled: bool = Field(default=True, description="Model availability status") - streaming: bool = Field(default=False, description="Enable streaming responses") - temperature: float = Field(default=1.0, description="Model temperature") - max_tokens: int = Field(default=2048, description="Maximum tokens per response") - - -class MCPToolConfig(BaseModel): - """MCP Tool Configuration""" - name: str = Field(..., description="Tool name") - description: str = Field(..., description="Tool description") - parameters: Dict[str, Any] = Field(..., description="Tool parameters") - enabled: bool = Field(default=True, description="Tool availability status") - - -class MCPSettings(BaseModel): - """MCP Global Settings""" - models: List[MCPModelConfig] = Field(default_factory=list, description="Available MCP models") - tools: List[MCPToolConfig] = Field(default_factory=list, description="Available MCP tools") - default_model: Optional[str] = Field(default=None, description="Default model identifier") - enabled: bool = Field(default=True, description="Enable or disable MCP functionality") - - ##################################################### # Types ##################################################### From 681087bc7cb82d534d388cac8c86533a56bfe0fb Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 13 Aug 2025 15:36:53 +0100 Subject: [PATCH 04/28] Using FastMCP --- src/client/content/config/settings.py | 4 +- src/common/schema.py | 16 +- src/launch_server.py | 176 ++++++----------- src/pyproject.toml | 10 +- src/server/Dockerfile | 2 +- src/server/api/v1/__init__.py | 1 + src/server/api/v1/mcp.py | 186 +++++++----------- src/server/api/v1/probes.py | 24 ++- src/server/mcp/__init__.py | 75 +++++++ src/server/mcp/clients/sqlcl.py | 31 +++ src/server/mcp/prompts/__init__.py | 0 src/server/mcp/proxies/__init__.py | 0 src/server/mcp/proxies/sqlcl.py | 21 ++ src/server/mcp/resources/__init__.py | 0 src/server/mcp/tools/__init__.py | 0 src/server/mcp/tools/say_hello.py | 6 + src/server/mcp_bak/register_mcp_servers.py | 36 ++++ .../{mcp => mcp_bak}/server/archive_mcp.py | 0 .../{mcp => mcp_bak}/server_config.json | 0 19 files changed, 340 insertions(+), 248 deletions(-) create mode 100644 src/server/mcp/__init__.py create mode 100644 src/server/mcp/clients/sqlcl.py create mode 100644 src/server/mcp/prompts/__init__.py create mode 100644 src/server/mcp/proxies/__init__.py create mode 100644 src/server/mcp/proxies/sqlcl.py create mode 100644 src/server/mcp/resources/__init__.py create mode 100644 src/server/mcp/tools/__init__.py create mode 100644 src/server/mcp/tools/say_hello.py create 
mode 100644 src/server/mcp_bak/register_mcp_servers.py rename src/server/{mcp => mcp_bak}/server/archive_mcp.py (100%) rename src/server/{mcp => mcp_bak}/server_config.json (100%) diff --git a/src/client/content/config/settings.py b/src/client/content/config/settings.py index 8865e11e..e28df28d 100644 --- a/src/client/content/config/settings.py +++ b/src/client/content/config/settings.py @@ -48,8 +48,8 @@ def get_settings(include_sensitive: bool = False): }, ) return settings - except api_call.ApiError as e: - if "not found" in str(e): + except api_call.ApiError as ex: + if "not found" in str(ex): # If client settings not found, create them logger.info("Client settings not found, creating new ones") api_call.post(endpoint="v1/settings", params={"client": state.client_settings["client"]}) diff --git a/src/common/schema.py b/src/common/schema.py index b5ffebd6..e756a480 100644 --- a/src/common/schema.py +++ b/src/common/schema.py @@ -44,7 +44,7 @@ class DatabaseVectorStorage(BaseModel): """Database Vector Storage Tables""" vector_store: Optional[str] = Field( - default=None, description="Vector Store Table Name (auto-generated, do not set)", readOnly=True + default=None, description="Vector Store Table Name (auto-generated, do not set)", json_schema_extra={"readOnly": True} ) alias: Optional[str] = Field(default=None, description="Identifiable Alias") model: Optional[str] = Field(default=None, description="Embedding Model") @@ -57,8 +57,8 @@ class DatabaseVectorStorage(BaseModel): class DatabaseSelectAIObjects(BaseModel): """Database SelectAI Objects""" - owner: Optional[str] = Field(default=None, description="Object Owner", readOnly=True) - name: Optional[str] = Field(default=None, description="Object Name", readOnly=True) + owner: Optional[str] = Field(default=None, description="Object Owner", json_schema_extra={"readOnly": True}) + name: Optional[str] = Field(default=None, description="Object Name", json_schema_extra={"readOnly": True}) enabled: bool = Field(default=False, description="SelectAI Enabled") @@ -80,12 +80,12 @@ class Database(DatabaseAuth): """Database Object""" name: str = Field(default="DEFAULT", description="Name of Database (Alias)") - connected: bool = Field(default=False, description="Connection Established", readOnly=True) + connected: bool = Field(default=False, description="Connection Established", json_schema_extra={"readOnly": True}) vector_stores: Optional[list[DatabaseVectorStorage]] = Field( - default=[], description="Vector Storage (read-only)", readOnly=True + default=[], description="Vector Storage (read-only)", json_schema_extra={"readOnly": True} ) selectai: bool = Field(default=False, description="SelectAI Possible") - selectai_profiles: Optional[list] = Field(default=[], description="SelectAI Profiles (read-only)", readOnly=True) + selectai_profiles: Optional[list] = Field(default=[], description="SelectAI Profiles (read-only)", json_schema_extra={"readOnly": True}) # Do not expose the connection to the endpoint _connection: oracledb.Connection = PrivateAttr(default=None) @@ -213,7 +213,7 @@ class OracleCloudSettings(BaseModel): """Store Oracle Cloud Infrastructure Settings""" auth_profile: str = Field(default="DEFAULT", description="Config File Profile") - namespace: Optional[str] = Field(default=None, description="Object Store Namespace", readOnly=True) + namespace: Optional[str] = Field(default=None, description="Object Store Namespace", json_schema_extra={"readOnly": True}) user: Optional[str] = Field( default=None, description="Optional if using Auth 
Token", @@ -411,7 +411,7 @@ class ChatChoices(BaseModel): """A list of chat completion choices.""" index: int = Field(description="The index of the choice in the list of choices.") - message: ChatMessage = Field(descriptions="A chat completion message generated by the model.") + message: ChatMessage = Field(description="A chat completion message generated by the model.") finish_reason: Literal["stop", "length", "content_filter", "tool_calls"] = Field( description=( "The reason the model stopped generating tokens. " diff --git a/src/launch_server.py b/src/launch_server.py index fceea5c0..ac9dc602 100644 --- a/src/launch_server.py +++ b/src/launch_server.py @@ -2,10 +2,11 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ -# spell-checker:ignore fastapi laddr checkpointer langgraph litellm +# spell-checker:ignore fastapi laddr checkpointer langgraph litellm fastmcp getpid procs # spell-checker:ignore noauth apiserver configfile selectai giskard ollama llms # pylint: disable=redefined-outer-name,wrong-import-position +from contextlib import asynccontextmanager import os # Set OS Environment (Don't move their position to reflect on imports) @@ -25,25 +26,25 @@ import server.patches.litellm_patch # pylint: disable=unused-import import argparse -import json + +# import json import queue import secrets import socket import subprocess import threading -from typing import Annotated, Any, Dict, Optional +from typing import Annotated from pathlib import Path import uvicorn -from contextlib import asynccontextmanager + import psutil -from client.mcp.client import MCPClient +# from client.mcp.client import MCPClient from fastapi import APIRouter, Depends, FastAPI, HTTPException, status -from fastapi.openapi.utils import get_openapi -from fastapi.routing import APIRoute from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer -from pydantic import BaseModel +from fastmcp import FastMCP, settings +from fastmcp.server.auth.providers.jwt import StaticTokenVerifier # Logging import common.logging_config as logging_config @@ -51,50 +52,10 @@ # Configuration import server.bootstrap.configfile as configfile -from server.bootstrap import mcp as mcp_bootstrap +# from server.bootstrap import mcp as mcp_bootstrap logger = logging_config.logging.getLogger("launch_server") -mcp_engine: Optional[MCPClient] = None - -def get_mcp_engine() -> Optional[MCPClient]: - """Get the current MCP engine instance.""" - global mcp_engine - logger.debug(f"get_mcp_engine() called, returning: {mcp_engine}") - # Additional debugging to check if the variable exists - if 'mcp_engine' in globals(): - print(f"DEBUG: mcp_engine in globals: {globals().get('mcp_engine')}") - else: - print("DEBUG: mcp_engine not in globals") - # Print the module name to see which module this is - print(f"DEBUG: This is module: {__name__}") - return mcp_engine - -async def initialize_mcp_engine_with_model(model_name: str) -> Optional[MCPClient]: - """Initialize or reinitialize the MCP engine with a specific model.""" - global mcp_engine - - # Clean up existing engine if it exists - if mcp_engine: - try: - await mcp_engine.cleanup() - except Exception as e: - logger.error(f"Error cleaning up existing MCP engine: {e}") - - # Initialize new engine with the specified model - try: - mcp_engine = MCPClient(client_settings={'ll_model': {'model': model_name}}) - logger.info("MCP Client created with model %s, connecting to servers...", 
model_name) - await mcp_engine.connect_to_servers() - logger.info("MCP Engine initialized successfully with model %s", model_name) - return mcp_engine - except Exception as e: - logger.error(f"Failed to initialize MCP Engine with model {model_name}: {e}", exc_info=True) - mcp_engine = None - return None -class McpToolCallRequest(BaseModel): - tool_name: str - tool_args: Dict[str, Any] ########################################## # Process Control @@ -123,7 +84,7 @@ def get_pid_using_port(port: int) -> int: return None def start_subprocess(port: int, logfile: bool) -> subprocess.Popen: - """Start the uvicorn server as a subprocess.""" + """Start the uvicorn server as a subprocess when started via the client.""" logger.info("API server starting on port: %i", port) log_file = open(f"apiserver_{port}.log", "a", encoding="utf-8") if logfile else None stdout = stderr = log_file if logfile else subprocess.PIPE @@ -159,7 +120,7 @@ def start_subprocess(port: int, logfile: bool) -> subprocess.Popen: def stop_server(pid: int) -> None: - """Stop the uvicorn server for FastAPI.""" + """Stop the uvicorn server for FastAPI when started via the client""" try: proc = psutil.Process(pid) proc.terminate() @@ -196,11 +157,13 @@ def verify_key( raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED) -def register_endpoints(noauth: APIRouter, auth: APIRouter): +def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): """Register API Endpoints - Imports to avoid bootstrapping before config file read New endpoints need to be registered in server.api.v1.__init__.py """ - import server.api.v1 as api_v1 # pylint: disable=import-outside-toplevel + # pylint: disable=import-outside-toplevel + import server.api.v1 as api_v1 + from server.mcp import register_all_mcp # No-Authentication (probes only) noauth.include_router(api_v1.probes.noauth, prefix="/v1", tags=["Probes"]) @@ -217,69 +180,15 @@ def register_endpoints(noauth: APIRouter, auth: APIRouter): auth.include_router(api_v1.testbed.auth, prefix="/v1/testbed", tags=["Tools - Testbed"]) auth.include_router(api_v1.mcp.auth, prefix="/v1/mcp", tags=["Config - MCP Servers"]) + # Auto-discover all MCP tools and register HTTP + MCP endpoints + mcp_router = APIRouter(prefix="/mcp", tags=["MCP Tools"]) + register_all_mcp(mcp, auth) + auth.include_router(mcp_router) + ############################################################################# # APP FACTORY ############################################################################# -@asynccontextmanager -async def lifespan(app: FastAPI): - """FastAPI startup/shutdown lifecycle for the MCP Engine.""" - logger.info("Starting API Server...") - global mcp_engine - - # Define a single, authoritative path for the configuration file. - config_path = Path("server/etc/mcp_config.json") - - # 1. Handle the missing configuration file as a critical error. - if not config_path.exists(): - logger.error( - f"CRITICAL: MCP configuration file not found at '{config_path}'. " - "MCP Engine cannot be initialized." - ) - # Yield control to allow the server to run, but without the MCP engine. - yield - return - - # 2. Load the configuration and initialize the engine. - try: - logger.info(f"Loading MCP configuration from '{config_path}'...") - with open(config_path, encoding='utf-8') as f: - mcp_config = json.load(f) - - mcp_bootstrap.load_mcp_settings(mcp_config) - - # 3. Check if MCP is enabled in the loaded configuration. 
- if mcp_bootstrap.MCP_SETTINGS and mcp_bootstrap.MCP_SETTINGS.enabled: - logger.info("MCP is enabled. Initializing MCP Engine...") - - # This structure assumes MCPClient can be initialized with just the default model. - client_init_settings = { - 'll_model': {'model': mcp_bootstrap.MCP_SETTINGS.default_model} - } - mcp_engine = MCPClient(client_settings=client_init_settings) - - await mcp_engine.connect_to_servers() - logger.info("MCP Engine initialized successfully.") - else: - logger.warning("MCP is disabled in the configuration file. Skipping initialization.") - - except Exception as e: - logger.error(f"Failed to initialize MCP Engine from configuration: {e}", exc_info=True) - # Ensure the engine is not set if initialization fails. - mcp_engine = None - - # Yield control to the running application. - yield - - # Shutdown the engine if it was successfully initialized. - if mcp_engine: - logger.info("Shutting down MCP Engine...") - try: - await mcp_engine.cleanup() - logger.info("MCP Engine cleanup completed.") - except Exception as e: - logger.error(f"Error during MCP Engine cleanup: {e}") - def create_app(config: str = "") -> FastAPI: """Create and configure the FastAPI app.""" if not config: @@ -287,23 +196,60 @@ def create_app(config: str = "") -> FastAPI: config_file = Path(os.getenv("CONFIG_FILE", config)) configfile.ConfigStore.load_from_file(config_file) + verifier = StaticTokenVerifier( + tokens={get_api_key(): {"client_id": "optimizer", "scopes": ["read:data", "write:data", "admin:users"]}}, + required_scopes=["read:data"], + ) + # MCP Server + settings.stateless_http = True + mcp = FastMCP(name="Optimizer MCP Server", auth=verifier) + mcp_app = mcp.http_app(path="/mcp") + + @asynccontextmanager + async def combined_lifespan(app): + async with mcp_app.lifespan(app): + yield + # Shutdown cleanup + logger.info("Cleaning up leftover processes...") + parent = psutil.Process(os.getpid()) + children = parent.children(recursive=True) + for p in children: + try: + p.terminate() + except psutil.NoSuchProcess: + continue + # Wait synchronously, outside the event loop + _, still_alive = psutil.wait_procs(children, timeout=3) + for p in still_alive: + try: + p.kill() + except psutil.NoSuchProcess: + continue + + # API Server app = FastAPI( title="Oracle AI Optimizer and Toolkit", version=__version__, docs_url="/v1/docs", openapi_url="/v1/openapi.json", - lifespan=lifespan, + lifespan=combined_lifespan, license_info={ "name": "Universal Permissive License", "url": "http://oss.oracle.com/licenses/upl", }, ) + # Store MCP in the app state + app.state.mcp = mcp + # Setup Routes and Register non-MCP endpoints noauth = APIRouter() auth = APIRouter(dependencies=[Depends(verify_key)]) - # Register Endpoints - register_endpoints(noauth, auth) + register_endpoints(mcp, auth, noauth) + + # Register MCP Server into FastAPI + app.mount("/mcp_tools", mcp_app) + app.include_router(noauth) app.include_router(auth) diff --git a/src/pyproject.toml b/src/pyproject.toml index b31fe996..4494ee4d 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ "langchain-core==0.3.74", "httpx==0.28.1", "oracledb~=3.1", - "plotly==6.2.0", + "plotly==6.3.0", ] [project.optional-dependencies] @@ -25,8 +25,9 @@ dependencies = [ server = [ "bokeh==3.7.3", "evaluate==0.4.5", + "faiss-cpu==1.12.0", "fastapi==0.116.1", - "faiss-cpu==1.11.0.post1", + "fastmcp==2.11.3", "giskard==2.17.0", "langchain-anthropic==0.3.18", "langchain-cohere==0.4.5", @@ -36,13 +37,12 @@ server = [ 
"langchain-huggingface==0.3.1", "langchain-mistralai==0.2.11", "langchain-ollama==0.3.6", - "langchain-openai==0.3.29", + "langchain-openai==0.3.30", "langgraph==0.6.4", - "litellm==1.75.3", + "litellm==1.75.5.post1", "llama-index==0.13.1", "lxml==6.0.0", "matplotlib==3.10.5", - "mcp==1.12.4", "oci~=2.0", "psutil==7.0.0", "python-multipart==0.0.20", diff --git a/src/server/Dockerfile b/src/server/Dockerfile index 70992359..c9113274 100644 --- a/src/server/Dockerfile +++ b/src/server/Dockerfile @@ -12,7 +12,7 @@ ENV RUNUSER=oracleai ENV PATH=/opt/.venv/bin:$PATH RUN microdnf --nodocs -y update && \ - microdnf --nodocs -y install python3.11 python3.11-pip && \ + microdnf --nodocs -y install python3.11 python3.11-pip sqlcl && \ microdnf clean all && \ python3.11 -m venv --symlinks --upgrade-deps /opt/.venv && \ groupadd $RUNUSER && \ diff --git a/src/server/api/v1/__init__.py b/src/server/api/v1/__init__.py index 873ce855..f9da75e0 100644 --- a/src/server/api/v1/__init__.py +++ b/src/server/api/v1/__init__.py @@ -2,5 +2,6 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# spell-checker:ignore selectai from . import chat, databases, embed, models, oci, probes, prompts, testbed, settings, selectai, mcp diff --git a/src/server/api/v1/mcp.py b/src/server/api/v1/mcp.py index c1e008a4..1b5fdb0d 100644 --- a/src/server/api/v1/mcp.py +++ b/src/server/api/v1/mcp.py @@ -4,144 +4,100 @@ This file is being used in APIs, and not the backend.py file. """ -from typing import Optional, Dict, Any -from fastapi import APIRouter, HTTPException -from pydantic import BaseModel -from datetime import datetime +# spell-checker:ignore noauth fastmcp healthz +from fastapi import APIRouter, Request, Depends +from fastmcp import FastMCP, Client import common.logging_config as logging_config -logger = logging_config.logging.getLogger("endpoints.v1.mcp") +logger = logging_config.logging.getLogger("api.v1.mcp") auth = APIRouter() -def mcp_engine_obj(): - """Check if the MCP engine is initialized.""" - try: - from launch_server import get_mcp_engine - mcp_engine = get_mcp_engine() - except ImportError: - return None - return mcp_engine - -class McpToolCallRequest(BaseModel): - tool_name: str - tool_args: Dict[str, Any] - -class ChatRequest(BaseModel): - query: str - prompt_name: Optional[str] = None - resource_uri: Optional[str] = None - message_history: Optional[list] = None + +def get_mcp(request: Request) -> FastMCP: + """Get the MCP engine from the app state""" + return request.app.state.mcp + @auth.get( "/tools", description="List available MCP tools", - response_model=dict + response_model=dict, ) -async def list_mcp_tools(): - # Import here to avoid circular imports - mcp_engine = mcp_engine_obj() - if not mcp_engine: - raise HTTPException(status_code=503, detail="MCP Engine not initialized.") - try: - await mcp_engine._rebuild_mcp_tool_schemas() - except Exception as e: - logger.error(f"Error rebuilding tool schemas: {e}") - +async def mcp_get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> dict: + """List MCP tools""" tools_info = [] - for tool_name, (session, tool_object) in mcp_engine.tool_to_session.items(): - tools_info.append({ - "name": tool_object.name, - "description": tool_object.description, - "input_schema": tool_object.inputSchema - }) + try: + client = Client(mcp_engine) + async with client: + tools = await client.list_tools() + logger.debug("MCP Tools: %s", tools) + for tool_object in 
tools: + tools_info.append( + { + "name": tool_object.name, + "description": tool_object.description, + "input_schema": getattr(tool_object, "inputSchema", None), + } + ) + finally: + await client.close() + return {"tools": tools_info} -@auth.post( - "/execute", - description="Execute an MCP tool", - response_model=dict -) -async def execute_mcp_tool(request: McpToolCallRequest): - # Import here to avoid circular imports - mcp_engine = mcp_engine_obj() - if not mcp_engine: - raise HTTPException(status_code=503, detail="MCP Engine not initialized.") - try: - result = await mcp_engine.execute_mcp_tool(request.tool_name, request.tool_args) - return {"result": result} - except Exception as e: - logger.error(f"Error executing MCP tool: {e}") - raise HTTPException(status_code=500, detail=str(e)) - -@auth.post( - "/chat", - description="Chat with MCP engine", - response_model=dict -) -async def chat_endpoint(request: ChatRequest): - mcp_engine = mcp_engine_obj() - if not mcp_engine: - raise HTTPException(status_code=503, detail="MCP Engine not initialized.") - try: - message_history = request.message_history or [{"role": "user", "content": request.query}] - response_text, _ = await mcp_engine.invoke( - message_history=message_history - ) - return {"response": response_text} - except Exception as e: - logger.error(f"Error in MCP chat: {e}") - raise HTTPException(status_code=500, detail=str(e)) @auth.get( "/resources", - description="List MCP resources", - response_model=dict + description="Get MCP resources", + response_model=dict, ) -async def list_resources(): - # Import here to avoid circular imports - mcp_engine = mcp_engine_obj() - if not mcp_engine: - raise HTTPException(status_code=503, detail="MCP Engine not initialized.") - - try: - # This will trigger loading if not already loaded - _ = await mcp_engine._rebuild_mcp_tool_schemas() - except Exception as e: - logger.error(f"Error loading resources: {e}") - +async def mcp_get_resources(mcp_engine: FastMCP = Depends(get_mcp)) -> dict: + """Get MCP Resources""" + resources = await mcp_engine.get_resources() + logger.debug("MCP Resources: %s", resources) return { "static": list(getattr(mcp_engine, "static_resources", {}).keys()), - "dynamic": getattr(mcp_engine, "dynamic_resources", []) + "dynamic": getattr(mcp_engine, "dynamic_resources", []), } + @auth.get( "/prompts", - description="List MCP prompts", - response_model=dict + description="Get MCP prompts", + response_model=dict, ) -async def list_prompts(): - mcp_engine = mcp_engine_obj() - if not mcp_engine: - raise HTTPException(status_code=503, detail="MCP Engine not initialized.") - try: - # This will trigger loading if not already loaded - _ = await mcp_engine._rebuild_mcp_tool_schemas() - except Exception as e: - logger.error(f"Error loading prompts: {e}") - - return { - "prompts": list(getattr(mcp_engine, "available_prompts", {}).keys()) - } +async def mcp_get_prompts(mcp_engine: FastMCP = Depends(get_mcp)) -> dict: + """Get MCP prompts""" + prompts = await mcp_engine.get_prompts() + logger.debug("MCP Prompts: %s", prompts) + return {"prompts": list(getattr(mcp_engine, "available_prompts", {}).keys())} -@auth.get("/health", response_model=dict) -async def health_check(): - """Check MCP engine health status""" - actual_mcp_engine = mcp_engine_obj() - return { - "status": "initialized" if actual_mcp_engine else "not_initialized", - "engine_type": str(type(actual_mcp_engine)) if actual_mcp_engine else None, - "available_tools": len(getattr(actual_mcp_engine, "available_tools", [])) if 
actual_mcp_engine else 0, - "timestamp": datetime.now().isoformat() - } + +# @auth.post("/execute", description="Execute an MCP tool", response_model=dict) +# async def mcp_execute_tool(request: McpToolCallRequest): +# """Execute MCP Tool""" +# mcp_engine = mcp_engine_obj() +# if not mcp_engine: +# raise HTTPException(status_code=503, detail="MCP Engine not initialized.") +# try: +# result = await mcp_engine.execute_mcp_tool(request.tool_name, request.tool_args) +# return {"result": result} +# except Exception as ex: +# logger.error("Error executing MCP tool: %s", ex) +# raise HTTPException(status_code=500, detail=str(ex)) from ex + + +# @auth.post("/chat", description="Chat with MCP engine", response_model=dict) +# async def chat_endpoint(request: ChatRequest): +# """Chat with MCP Engine""" +# mcp_engine = mcp_engine_obj() +# if not mcp_engine: +# raise HTTPException(status_code=503, detail="MCP Engine not initialized.") +# try: +# message_history = request.message_history or [{"role": "user", "content": request.query}] +# response_text, _ = await mcp_engine.invoke(message_history=message_history) +# return {"response": response_text} +# except Exception as ex: +# logger.error("Error in MCP chat: %s", ex) +# raise HTTPException(status_code=500, detail=str(ex)) from ex diff --git a/src/server/api/v1/probes.py b/src/server/api/v1/probes.py index 6dba7c3d..1e30b820 100644 --- a/src/server/api/v1/probes.py +++ b/src/server/api/v1/probes.py @@ -2,13 +2,20 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ -# spell-checker:ignore noauth -from fastapi import APIRouter +# spell-checker:ignore noauth fastmcp healthz +from datetime import datetime +from fastapi import APIRouter, Request, Depends +from fastmcp import FastMCP noauth = APIRouter() +def get_mcp(request: Request) -> FastMCP: + """Get the MCP engine from the app state""" + return request.app.state.mcp + + @noauth.get("/liveness") async def liveness_probe(): """Kubernetes liveness probe""" @@ -19,3 +26,16 @@ async def liveness_probe(): async def readiness_probe(): """Kubernetes readiness probe""" return {"status": "ready"} + + +@noauth.get("/mcp/healthz") +def mcp_healthz(mcp_engine: FastMCP = Depends(get_mcp)): + """Check if MCP server is ready.""" + if mcp_engine is None: + return {"status": "not ready"} + return { + "status": "ready", + "engine_type": str(type(mcp_engine)) if mcp_engine else None, + "available_tools": len(getattr(mcp_engine, "available_tools", [])) if mcp_engine else 0, + "timestamp": datetime.now().isoformat(), + } diff --git a/src/server/mcp/__init__.py b/src/server/mcp/__init__.py new file mode 100644 index 00000000..ea4a5b85 --- /dev/null +++ b/src/server/mcp/__init__.py @@ -0,0 +1,75 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
+""" +# spell-checker:ignore fastapi fastmcp + +import importlib +import pkgutil + +from fastapi import APIRouter +from fastmcp import FastMCP + +import common.logging_config as logging_config + +logger = logging_config.logging.getLogger("mcp.__init__.py") + + +def _discover_and_register( + package: str, + mcp: FastMCP = None, + auth: APIRouter = None, +): + """Import all modules in a package and call their register function.""" + try: + pkg = importlib.import_module(package) + except ImportError: + logger.warning("Package %s not found, skipping.", package) + return + + for module_info in pkgutil.walk_packages(pkg.__path__, prefix=f"{package}."): + if module_info.name.endswith("__init__"): + continue + + try: + module = importlib.import_module(module_info.name) + except Exception as ex: + logger.error("Failed to import %s: %s", module_info.name, ex) + continue + + # Decide what to register based on available functions + if hasattr(module, "register"): + logger.info("Registering via %s.register()", module_info.name) + if ".tools." in module.__name__: + module.register(mcp, auth) + if ".proxies." in module.__name__: + module.register(mcp) + # elif hasattr(module, "register_tool"): + # logger.info("Registering tool via %s.register_tool()", module_info.name) + # module.register_tool(mcp, auth) + # elif hasattr(module, "register_prompt"): + # logger.info("Registering prompt via %s.register_prompt()", module_info.name) + # module.register_prompt(mcp) + # elif hasattr(module, "register_resource"): + # logger.info("Registering resource via %s.register_resource()", module_info.name) + # module.register_resource(mcp) + # elif hasattr(module, "register_proxy"): + # logger.info("Registering proxy via %s.register_resource()", module_info.name) + # module.register_resource(mcp) + else: + logger.debug("No register function in %s, skipping.", module_info.name) + + +def register_all_mcp(mcp: FastMCP, auth: APIRouter): + """ + Auto-discover and register all MCP tools, prompts, resources, and proxies. 
+
+    Each discovered module must expose a register() entry point:
+    - tools modules:   register(mcp, auth)
+    - proxies modules: register(mcp)
+    """
+    _discover_and_register("server.mcp.tools", mcp=mcp, auth=auth)
+    _discover_and_register("server.mcp.proxies", mcp=mcp)
diff --git a/src/server/mcp/clients/sqlcl.py b/src/server/mcp/clients/sqlcl.py
new file mode 100644
index 00000000..9a57f9f3
--- /dev/null
+++ b/src/server/mcp/clients/sqlcl.py
@@ -0,0 +1,32 @@
+import asyncio
+from fastmcp import Client
+
+# Standalone smoke-test client for the SQLcl MCP server (stdio transport)
+config = {
+    "mcpServers": {
+        "sqlcl": {
+            "transport": "stdio",
+            "command": "sql",
+            "args": ["-mcp"]
+        }
+    }
+}
+
+client = Client(config)
+
+async def main():
+    async with client:
+        # Test connection
+        pong = await client.ping()
+        print("Ping response:", pong)
+
+        # List available tools on the sqlcl server
+        tools = await client.list_tools()
+        print("Available tools:", tools)
+
+        # Example: call a tool if you know its name
+        # result = await client.call_tool("your_tool_name", {"param": "value"})
+        # print("Tool result:", result)
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/src/server/mcp/prompts/__init__.py b/src/server/mcp/prompts/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/server/mcp/proxies/__init__.py b/src/server/mcp/proxies/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py
new file mode 100644
index 00000000..40ff054d
--- /dev/null
+++ b/src/server/mcp/proxies/sqlcl.py
@@ -0,0 +1,21 @@
+"""
+Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
+"""
+# spell-checker:ignore sqlcl fastmcp
+
+
+def register(mcp):
+    """Register the SQLcl MCP Server as Local (via Proxy)"""
+    config = {
+        "mcpServers": {
+            "sqlcl": {
+                "command": "sql",
+                "args": ["-mcp", "-daemon", "-thin", "-noupdates"],
+            }
+        }
+    }
+
+    # Create a proxy to the configured server (auto-creates ProxyClient)
+    mcp_proxy = mcp.as_proxy(config, name="SQLclProxy")
+    mcp.mount(mcp_proxy)
diff --git a/src/server/mcp/resources/__init__.py b/src/server/mcp/resources/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/server/mcp/tools/__init__.py b/src/server/mcp/tools/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/server/mcp/tools/say_hello.py b/src/server/mcp/tools/say_hello.py
new file mode 100644
index 00000000..f77888cb
--- /dev/null
+++ b/src/server/mcp/tools/say_hello.py
@@ -0,0 +1,6 @@
+def register(mcp, auth):
+    @mcp.tool()
+    @auth.get("/hello", operation_id="say_hello")
+    def greet(name: str = "World") -> str:
+        """Say hello to someone."""
+        return f"Hello, {name}!"
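Taken together, say_hello.register() exposes one function both as an MCP tool and as an authenticated REST route. A minimal sketch of driving the tool side with fastmcp's in-memory transport (the standalone FastMCP instance below is illustrative, not the one built in launch_server):

import asyncio
from fastmcp import Client, FastMCP

mcp = FastMCP(name="demo")

@mcp.tool()
def greet(name: str = "World") -> str:
    """Say hello to someone."""
    return f"Hello, {name}!"

async def demo() -> None:
    # Passing the FastMCP instance selects the in-memory transport: no HTTP hop
    async with Client(mcp) as client:
        result = await client.call_tool("greet", {"name": "Optimizer"})
        print(result)

asyncio.run(demo())

With the wiring in register_endpoints, the same greet() should also answer on the auth router (roughly GET /hello?name=... with the API_SERVER_KEY bearer token), since @auth.get() returns the undecorated function for @mcp.tool() to register.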
diff --git a/src/server/mcp_bak/register_mcp_servers.py b/src/server/mcp_bak/register_mcp_servers.py new file mode 100644 index 00000000..c08a59b2 --- /dev/null +++ b/src/server/mcp_bak/register_mcp_servers.py @@ -0,0 +1,36 @@ +from fastapi import FastAPI, APIRouter, Request +from fastapi.responses import JSONResponse, PlainTextResponse +from mcp.server import Server +import json + +def mount_mcp(router: APIRouter, prefix: str, mcp_server: Server): + @router.get(f"{prefix}/.well-known/mcp.json") + async def manifest(): + return JSONResponse(content=mcp_server.manifest.dict()) + + @router.post(f"{prefix}/mcp") + async def mcp_api(request: Request): + body = await request.body() + resp = mcp_server.handle_http(body) + try: + return JSONResponse(content=json.loads(resp)) + except Exception: + return PlainTextResponse(content=resp) + +def register_mcp_servers(app: FastAPI): + # Create routers for MCP endpoints + mcp_router = APIRouter() + + # Define MCP servers + mcp_sqlcl = Server(name="Built-in SQLcl MCP Server") + + # Example tools + @mcp.tool() + def greet(name: str) -> str: + return f"Hello from MCP Server One, {name}!" + + # Mount MCP servers into the router under prefixes + mount_mcp(app, "/mcp_sqlcl", mcp_sqlcl) + + # Include the MCP router into the main app + app.include_router(mcp_router) \ No newline at end of file diff --git a/src/server/mcp/server/archive_mcp.py b/src/server/mcp_bak/server/archive_mcp.py similarity index 100% rename from src/server/mcp/server/archive_mcp.py rename to src/server/mcp_bak/server/archive_mcp.py diff --git a/src/server/mcp/server_config.json b/src/server/mcp_bak/server_config.json similarity index 100% rename from src/server/mcp/server_config.json rename to src/server/mcp_bak/server_config.json From e3269af2bb12bba8f9f67abc46aef43b99641a83 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Thu, 14 Aug 2025 08:26:32 +0100 Subject: [PATCH 05/28] AuthN for MCP --- src/common/logging_config.py | 38 ++++++++++++++++++--- src/launch_server.py | 52 +++++++++++++++++----------- src/server/mcp/proxies/sqlcl.py | 60 ++++++++++++++++++++++++++++----- 3 files changed, 117 insertions(+), 33 deletions(-) diff --git a/src/common/logging_config.py b/src/common/logging_config.py index 30127a3d..d9e423d1 100644 --- a/src/common/logging_config.py +++ b/src/common/logging_config.py @@ -7,6 +7,7 @@ # spell-checker:ignore levelname inotify openai httpcore fsevents litellm import os +import asyncio import logging from logging.config import dictConfig from common._version import __version__ @@ -20,6 +21,30 @@ def filter(self, record): return True +class PrettifyCancelledError(logging.Filter): + """Filter that keeps the log but removes the traceback and replaces the message.""" + + def _contains_cancelled(self, exc: BaseException) -> bool: + if isinstance(exc, asyncio.CancelledError): + return True + if hasattr(exc, "exceptions") and isinstance(exc, BaseExceptionGroup): # type: ignore[name-defined] + return any(self._contains_cancelled(e) for e in exc.exceptions) # type: ignore[attr-defined] + return False + + def filter(self, record: logging.LogRecord) -> bool: + exc_info = record.__dict__.get("exc_info") + if not exc_info: + return True + _, exc, _ = exc_info + if exc and self._contains_cancelled(exc): + # Strip the traceback and make it pretty + record.exc_info = None + record.msg = "Shutdown cancelled — graceful timeout exceeded." 
+ record.levelno = logging.WARNING + record.levelname = logging.getLevelName(logging.WARNING) + return True + + # Standard formatter FORMATTER = { "format": "%(asctime)s (v%(__version__)s) - %(levelname)-8s - (%(name)s): %(message)s", @@ -33,9 +58,8 @@ def filter(self, record): "standard": FORMATTER, }, "filters": { - "version_filter": { - "()": VersionFilter, - }, + "version_filter": {"()": VersionFilter}, + "prettify_cancelled": {"()": PrettifyCancelledError}, }, "handlers": { "default": { @@ -56,13 +80,19 @@ def filter(self, record): "level": LOG_LEVEL, "handlers": ["default"], "propagate": False, + "filters": ["prettify_cancelled"], }, "uvicorn.access": { "level": LOG_LEVEL, "handlers": ["default"], "propagate": False, }, - "asyncio": {"level": LOG_LEVEL, "handlers": ["default"], "propagate": False}, + "asyncio": { + "level": LOG_LEVEL, + "handlers": ["default"], + "propagate": False, + "filters": ["prettify_cancelled"], + }, "watchdog.observers.inotify_buffer": {"level": "INFO", "handlers": ["default"], "propagate": False}, "PIL": {"level": "INFO", "handlers": ["default"], "propagate": False}, "fsevents": {"level": "INFO", "handlers": ["default"], "propagate": False}, diff --git a/src/launch_server.py b/src/launch_server.py index ac9dc602..aad1c95f 100644 --- a/src/launch_server.py +++ b/src/launch_server.py @@ -44,7 +44,7 @@ from fastapi import APIRouter, Depends, FastAPI, HTTPException, status from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer from fastmcp import FastMCP, settings -from fastmcp.server.auth.providers.jwt import StaticTokenVerifier +from fastmcp.server.auth import StaticTokenVerifier # Logging import common.logging_config as logging_config @@ -97,6 +97,8 @@ def start_subprocess(port: int, logfile: bool) -> subprocess.Popen: "0.0.0.0", "--port", str(port), + "--timeout-graceful-shutdown", + "5", ], stdout=stdout, stderr=stderr, @@ -146,17 +148,6 @@ def get_api_key() -> str: return os.getenv("API_SERVER_KEY") -def verify_key( - http_auth: Annotated[ - HTTPAuthorizationCredentials, - Depends(HTTPBearer(description="Please provide API_SERVER_KEY.")), - ], -) -> None: - """Verify that the provided API key is correct.""" - if http_auth.credentials != get_api_key(): - raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED) - - def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): """Register API Endpoints - Imports to avoid bootstrapping before config file read New endpoints need to be registered in server.api.v1.__init__.py @@ -191,19 +182,31 @@ def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): ############################################################################# def create_app(config: str = "") -> FastAPI: """Create and configure the FastAPI app.""" + + def fastapi_verify_key( + http_auth: Annotated[ + HTTPAuthorizationCredentials, + Depends(HTTPBearer(description="Please provide API_SERVER_KEY.")), + ], + ) -> None: + """FastAPI: Verify that the provided API key is correct.""" + if http_auth.credentials != get_api_key(): + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED) + + ### Start if not config: config = configfile.config_file_path() config_file = Path(os.getenv("CONFIG_FILE", config)) configfile.ConfigStore.load_from_file(config_file) - verifier = StaticTokenVerifier( - tokens={get_api_key(): {"client_id": "optimizer", "scopes": ["read:data", "write:data", "admin:users"]}}, - required_scopes=["read:data"], + fastmcp_verifier = StaticTokenVerifier( + tokens={get_api_key(): 
{"client_id": "optimizer", "scopes": ["read", "write"]}} ) + # MCP Server settings.stateless_http = True - mcp = FastMCP(name="Optimizer MCP Server", auth=verifier) - mcp_app = mcp.http_app(path="/mcp") + mcp = FastMCP(name="Optimizer MCP Server", auth=fastmcp_verifier) + mcp_app = mcp.http_app(path="/") @asynccontextmanager async def combined_lifespan(app): @@ -243,12 +246,12 @@ async def combined_lifespan(app): # Setup Routes and Register non-MCP endpoints noauth = APIRouter() - auth = APIRouter(dependencies=[Depends(verify_key)]) + auth = APIRouter(dependencies=[Depends(fastapi_verify_key)]) register_endpoints(mcp, auth, noauth) # Register MCP Server into FastAPI - app.mount("/mcp_tools", mcp_app) + app.mount("/mcp", mcp_app) app.include_router(noauth) app.include_router(auth) @@ -271,4 +274,13 @@ async def combined_lifespan(app): logger.info("API Server Using port: %i", PORT) app = create_app(args.config) - uvicorn.run(app, host="0.0.0.0", port=PORT, log_config=logging_config.LOGGING_CONFIG) + try: + uvicorn.run( + app, + host="0.0.0.0", + port=PORT, + timeout_graceful_shutdown=5, + log_config=logging_config.LOGGING_CONFIG, + ) + except Exception as ex: + logger.info("Forced Shutdown: %s", ex) diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py index 40ff054d..84dc122b 100644 --- a/src/server/mcp/proxies/sqlcl.py +++ b/src/server/mcp/proxies/sqlcl.py @@ -2,20 +2,62 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ + # spell-checker:ignore sqlcl fastmcp +import os +import shutil +import subprocess + +import server.api.core.databases as core_databases +import common.logging_config as logging_config + +logger = logging_config.logging.getLogger("mcp.proxies.sqlcl") def register(mcp): """Register the SQLcl MCP Server as Local (via Proxy)""" - config = { - "mcpServers": { - "sqlcl": { - "command": "sql", - "args": ["-mcp", "-daemon", "-thin", "-noupdates"], + sqlcl_binary = shutil.which("sql") + if sqlcl_binary: + config = { + "mcpServers": { + "sqlcl": { + "command": f"{sqlcl_binary}", + "args": ["-mcp", "-daemon", "-thin", "-noupdates"], + } } } - } + databases = core_databases.get_databases() + for database in databases: + env_vars = os.environ.copy() + if database.config_dir: + env_vars["TNS_ADMIN"] = database.config_dir + # Start sql in no-login mode + try: + proc = subprocess.Popen( + [sqlcl_binary, "/nolog"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + env=env_vars, + ) + + # Prepare commands: connect, then exit + commands = [ + f"connmgr delete -conn optimizer_{database.name}", + f"conn -savepwd -save optimizer_{database.name} -user {database.user} -password {database.password} -url {database.dsn}", + "exit", + ] - # Create a proxy to the configured server (auto-creates ProxyClient) - mcp_proxy = mcp.as_proxy(config, name="SQLclProxy") - mcp.mount(mcp_proxy) + # Send commands joined by newlines + proc.communicate("\n".join(commands) + "\n") + logger.info("Established Connection Store for: %s", database.name) + except subprocess.SubprocessError as ex: + logger.error("Failed to create connection store: %s", ex) + except Exception as ex: + logger.error("Unexpected error creating connection store: %s", ex) + # Create a proxy to the configured server (auto-creates ProxyClient) + mcp_proxy = mcp.as_proxy(config, name="SQLclProxy") + mcp.mount(mcp_proxy) + else: + logger.warning("Not enabling SQLcl MCP 
server, sqlcl not found in PATH.") From fb0f9f961b45ade33767b9009214aa3bfb91a21d Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Thu, 14 Aug 2025 11:12:40 +0100 Subject: [PATCH 06/28] Start server with TNS_ADMIN env --- src/server/mcp/proxies/sqlcl.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py index 84dc122b..ab292958 100644 --- a/src/server/mcp/proxies/sqlcl.py +++ b/src/server/mcp/proxies/sqlcl.py @@ -18,19 +18,20 @@ def register(mcp): """Register the SQLcl MCP Server as Local (via Proxy)""" sqlcl_binary = shutil.which("sql") if sqlcl_binary: + env_vars = os.environ.copy() + env_vars["TNS_ADMIN"] = os.getenv("TNS_ADMIN", "tns_admin") config = { "mcpServers": { "sqlcl": { "command": f"{sqlcl_binary}", "args": ["-mcp", "-daemon", "-thin", "-noupdates"], + "env": env_vars } } } + databases = core_databases.get_databases() for database in databases: - env_vars = os.environ.copy() - if database.config_dir: - env_vars["TNS_ADMIN"] = database.config_dir # Start sql in no-login mode try: proc = subprocess.Popen( @@ -44,8 +45,8 @@ def register(mcp): # Prepare commands: connect, then exit commands = [ - f"connmgr delete -conn optimizer_{database.name}", - f"conn -savepwd -save optimizer_{database.name} -user {database.user} -password {database.password} -url {database.dsn}", + f"connmgr delete -conn OPTIMIZER_{database.name}", + f"conn -savepwd -save OPTIMIZER_{database.name} -user {database.user} -password {database.password} -url {database.dsn}", "exit", ] From 0301c4c58c7931663ae6aacc37091ca8f1537afd Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Thu, 14 Aug 2025 13:46:15 +0100 Subject: [PATCH 07/28] Updating GUI --- src/client/content/api_server.py | 3 +- src/client/content/chatbot.py | 2 +- src/client/content/config/config.py | 64 +++++++++++++++++++ .../content/config/{ => tabs}/databases.py | 12 +--- src/client/content/config/tabs/mcp.py | 12 ++++ .../{mcp_servers.py => tabs/mcp_bak.py} | 14 ++-- .../content/config/{ => tabs}/models.py | 16 ++--- src/client/content/config/{ => tabs}/oci.py | 11 +--- .../content/config/{ => tabs}/settings.py | 11 +--- src/client/content/testbed.py | 5 +- .../content/tools/{ => tabs}/prompt_eng.py | 12 +--- .../content/tools/{ => tabs}/split_embed.py | 19 ++---- src/client/content/tools/tools.py | 25 ++++++++ src/client/utils/st_common.py | 14 ++++ src/launch_client.py | 45 +++---------- 15 files changed, 157 insertions(+), 108 deletions(-) create mode 100644 src/client/content/config/config.py rename src/client/content/config/{ => tabs}/databases.py (98%) create mode 100644 src/client/content/config/tabs/mcp.py rename src/client/content/config/{mcp_servers.py => tabs/mcp_bak.py} (77%) rename src/client/content/config/{ => tabs}/models.py (97%) rename src/client/content/config/{ => tabs}/oci.py (97%) rename src/client/content/config/{ => tabs}/settings.py (98%) rename src/client/content/tools/{ => tabs}/prompt_eng.py (94%) rename src/client/content/tools/{ => tabs}/split_embed.py (97%) create mode 100644 src/client/content/tools/tools.py diff --git a/src/client/content/api_server.py b/src/client/content/api_server.py index 16de7c38..3e8bd6b6 100644 --- a/src/client/content/api_server.py +++ b/src/client/content/api_server.py @@ -16,6 +16,7 @@ import client.utils.client as client import client.utils.api_call as api_call +from client.utils.st_common import style from client.utils.st_footer import remove_footer import common.logging_config as logging_config @@ 
-64,7 +65,7 @@ def server_restart() -> None: ##################################################### async def main() -> None: """Streamlit GUI""" - + style() remove_footer() st.header("API Server") st.write("Access with your own client.") diff --git a/src/client/content/chatbot.py b/src/client/content/chatbot.py index 7ba3fe05..18ca5010 100644 --- a/src/client/content/chatbot.py +++ b/src/client/content/chatbot.py @@ -15,7 +15,7 @@ import streamlit as st from streamlit import session_state as state -from client.content.config.models import get_models +from client.content.config.tabs.models import get_models import client.utils.st_common as st_common import client.utils.api_call as api_call diff --git a/src/client/content/config/config.py b/src/client/content/config/config.py new file mode 100644 index 00000000..5eb4551a --- /dev/null +++ b/src/client/content/config/config.py @@ -0,0 +1,64 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. +""" + +import inspect +import streamlit as st +from streamlit import session_state as state +from client.utils.st_common import style +from client.utils.st_footer import remove_footer + +from client.content.config.tabs.settings import display_settings +from client.content.config.tabs.oci import display_oci +from client.content.config.tabs.databases import display_databases +from client.content.config.tabs.models import display_models +from client.content.config.tabs.mcp import display_mcp + + +def main() -> None: + """Streamlit GUI""" + style() + remove_footer() + tabs_list = [] + if not state.disabled["settings"]: + tabs_list.append("💾 Settings") + if not state.disabled["db_cfg"]: + tabs_list.append("🗄️ Databases") + if not state.disabled["model_cfg"]: + tabs_list.append("🤖 Models") + if not state.disabled["oci_cfg"]: + tabs_list.append("☁️ OCI") + if not state.disabled["mcp_cfg"]: + tabs_list.append("🔗 MCP") + + # Only create tabs if there is at least one + tab_index = 0 + if tabs_list: + tabs = st.tabs(tabs_list) + + # Map tab objects to content conditionally + if not state.disabled["settings"]: + with tabs[tab_index]: + display_settings() + tab_index += 1 + if not state.disabled["db_cfg"]: + with tabs[tab_index]: + display_databases() + tab_index += 1 + if not state.disabled["model_cfg"]: + with tabs[tab_index]: + display_models() + tab_index += 1 + if not state.disabled["oci_cfg"]: + with tabs[tab_index]: + display_oci() + tab_index += 1 + if not state.disabled["mcp_cfg"]: + with tabs[tab_index]: + display_mcp() + tab_index += 1 + + +if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: + main() diff --git a/src/client/content/config/databases.py b/src/client/content/config/tabs/databases.py similarity index 98% rename from src/client/content/config/databases.py rename to src/client/content/config/tabs/databases.py index 67fa882c..850868a0 100644 --- a/src/client/content/config/databases.py +++ b/src/client/content/config/tabs/databases.py @@ -7,7 +7,6 @@ """ # spell-checker:ignore streamlit, selectbox, selectai -import inspect import json import pandas as pd @@ -16,10 +15,10 @@ import client.utils.api_call as api_call import client.utils.st_common as st_common + import common.logging_config as logging_config -from client.utils.st_footer import remove_footer -logger = logging_config.logging.getLogger("client.content.config.database") +logger = logging_config.logging.getLogger("client.content.config.tabs.database") 
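# A minimal sketch of the conditional-tab pattern used by config.py above:
# labels are appended and tab_index advances under the same disabled-flag
# checks, so tab labels and rendered content stay aligned. The flag values
# and section names below are hypothetical stand-ins for state.disabled.
import streamlit as st

disabled = {"settings": False, "db_cfg": False, "oci_cfg": True}
sections = [("settings", "💾 Settings"), ("db_cfg", "🗄️ Databases"), ("oci_cfg", "☁️ OCI")]
enabled = [(key, label) for key, label in sections if not disabled[key]]
if enabled:
    tabs = st.tabs([label for _, label in enabled])  # containers come back in label order
    for tab, (key, _) in zip(tabs, enabled):
        with tab:
            st.write(f"render the {key} section here")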
##################################################### @@ -106,9 +105,8 @@ def update_selectai(sai_new_df: pd.DataFrame, sai_old_df: pd.DataFrame) -> None: ##################################################### # MAIN ##################################################### -def main() -> None: +def display_databases() -> None: """Streamlit GUI""" - remove_footer() st.header("Database", divider="red") st.write("Configure the database used for Vector Storage and SelectAI.") try: @@ -237,7 +235,3 @@ def main() -> None: st.write("Unable to use SelectAI with Database.") elif len(selectai_profiles) == 0: st.write("No SelectAI Profiles Found.") - - -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() diff --git a/src/client/content/config/tabs/mcp.py b/src/client/content/config/tabs/mcp.py new file mode 100644 index 00000000..a066018c --- /dev/null +++ b/src/client/content/config/tabs/mcp.py @@ -0,0 +1,12 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. +""" + +import streamlit as st +from streamlit import session_state as state + + +def display_mcp() -> None: + """Streamlit GUI""" + st.header("Model Context Protocol", divider="red") diff --git a/src/client/content/config/mcp_servers.py b/src/client/content/config/tabs/mcp_bak.py similarity index 77% rename from src/client/content/config/mcp_servers.py rename to src/client/content/config/tabs/mcp_bak.py index 5535227d..89450432 100644 --- a/src/client/content/config/mcp_servers.py +++ b/src/client/content/config/tabs/mcp_bak.py @@ -1,14 +1,19 @@ -import inspect +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. +""" from client.mcp.frontend import display_commands_tab, display_ide_tab, get_fastapi_base_url, get_server_capabilities import streamlit as st -def main(): +def display_mcp(): fastapi_base_url = get_fastapi_base_url() tools, resources, prompts = get_server_capabilities(fastapi_base_url) if "chat_history" not in st.session_state: st.session_state.chat_history = [] + + ide, commands = st.tabs(["🛠️ IDE", "📚 Available Commands"]) with ide: @@ -17,8 +22,3 @@ def main(): with commands: # Display the commands tab using the original AI Optimizer logic. 
display_commands_tab(tools, resources, prompts) - - - -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() diff --git a/src/client/content/config/models.py b/src/client/content/config/tabs/models.py similarity index 97% rename from src/client/content/config/models.py rename to src/client/content/config/tabs/models.py index 62882177..704b40db 100644 --- a/src/client/content/config/models.py +++ b/src/client/content/config/tabs/models.py @@ -9,14 +9,12 @@ """ # spell-checker:ignore selectbox -import inspect from time import sleep from typing import Literal import urllib.parse import streamlit as st from streamlit import session_state as state -from client.utils.st_footer import render_models_footer import client.utils.api_call as api_call import client.utils.st_common as st_common @@ -24,7 +22,7 @@ import common.help_text as help_text import common.logging_config as logging_config -logger = logging_config.logging.getLogger("client.content.config.models") +logger = logging_config.logging.getLogger("client.content.config.tabs.models") ################################### @@ -123,7 +121,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = help=help_text.help_dict["model_api_url"], key="add_model_api_url", value=model.get("url", ""), - disabled=disable_for_oci + disabled=disable_for_oci, ) model["api_key"] = st.text_input( "API Key:", @@ -131,7 +129,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = key="add_model_api_key", type="password", value=model.get("api_key", ""), - disabled=disable_for_oci + disabled=disable_for_oci, ) if model_type == "ll": model["context_length"] = st.number_input( @@ -255,7 +253,7 @@ def render_model_rows(model_type): ############################################################################# # MAIN ############################################################################# -def main() -> None: +def display_models() -> None: """Streamlit GUI""" st.header("Models", divider="red") st.write("Update, Add, or Delete model configuration parameters.") @@ -273,9 +271,3 @@ def main() -> None: st.divider() st.subheader("Embedding Models") render_model_rows("embed") - - render_models_footer() - - -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() diff --git a/src/client/content/config/oci.py b/src/client/content/config/tabs/oci.py similarity index 97% rename from src/client/content/config/oci.py rename to src/client/content/config/tabs/oci.py index 63c30e1c..316efa2b 100644 --- a/src/client/content/config/oci.py +++ b/src/client/content/config/tabs/oci.py @@ -7,7 +7,6 @@ """ # spell-checker:ignore streamlit, ocid, selectbox, genai, oraclecloud -import inspect import pandas as pd import streamlit as st @@ -15,11 +14,10 @@ import client.utils.api_call as api_call import client.utils.st_common as st_common -from client.utils.st_footer import remove_footer import common.logging_config as logging_config -logger = logging_config.logging.getLogger("client.content.config.oci") +logger = logging_config.logging.getLogger("client.content.config.tabs.oci") ##################################################### @@ -79,9 +77,8 @@ def patch_oci(auth_profile: str, supplied: dict, namespace: str, toast: bool = T ##################################################### # MAIN ##################################################### -def main() -> None: +def display_oci() -> None: """Streamlit GUI""" - remove_footer() st.header("Oracle Cloud Infrastructure", divider="red") 
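# The recurring refactor in this commit: each page module's main() becomes a
# display_*() callable and the inspect-based entry-point guard is dropped, so
# a tab container can invoke it inline. A minimal consumer sketch; the import
# path matches this patch, while the single-tab layout is illustrative only.
import streamlit as st
from client.content.config.tabs.oci import display_oci

(oci_tab,) = st.tabs(["☁️ OCI"])
with oci_tab:
    display_oci()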
st.write("Configure OCI for Object Storage Access and OCI GenAI Services.") try: @@ -213,7 +210,3 @@ def main() -> None: _ = create_genai_models() st_common.clear_state_key("model_configs") st.success("Oracle GenAI models - Enabled.", icon="✅") - - -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() diff --git a/src/client/content/config/settings.py b/src/client/content/config/tabs/settings.py similarity index 98% rename from src/client/content/config/settings.py rename to src/client/content/config/tabs/settings.py index e28df28d..bce3ba47 100644 --- a/src/client/content/config/settings.py +++ b/src/client/content/config/tabs/settings.py @@ -6,7 +6,6 @@ """ # spell-checker:ignore streamlit, mvnw, obaas, ollama -import inspect import time import os import io @@ -26,11 +25,10 @@ # Utilities import client.utils.api_call as api_call import client.utils.st_common as st_common -from client.utils.st_footer import remove_footer import common.logging_config as logging_config -logger = logging_config.logging.getLogger("client.content.config.settings") +logger = logging_config.logging.getLogger("client.content.config.tabs.settings") ############################################################################# @@ -249,9 +247,8 @@ def spring_ai_zip(provider, ll_config, embed_config): ##################################################### # MAIN ##################################################### -def main(): +def display_settings(): """Streamlit GUI""" - remove_footer() st.header("Client Settings", divider="red") if "selected_sensitive_settings" not in state: state.selected_sensitive_settings = False @@ -331,7 +328,3 @@ def main(): mime="application/zip", # Mime type for zip file disabled=spring_ai_conf == "hybrid", ) - - -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() diff --git a/src/client/content/testbed.py b/src/client/content/testbed.py index 7696374c..426e5a61 100644 --- a/src/client/content/testbed.py +++ b/src/client/content/testbed.py @@ -15,7 +15,7 @@ import streamlit as st from streamlit import session_state as state -from client.content.config.models import get_models +from client.content.config.tabs.models import get_models import client.utils.st_common as st_common import client.utils.api_call as api_call @@ -237,8 +237,9 @@ def qa_update_gui(qa_testset: list) -> None: ############################################################################# # MAIN ############################################################################# -def main(): +def main() -> None: """Streamlit GUI""" + st_common.style() remove_footer() try: get_models() diff --git a/src/client/content/tools/prompt_eng.py b/src/client/content/tools/tabs/prompt_eng.py similarity index 94% rename from src/client/content/tools/prompt_eng.py rename to src/client/content/tools/tabs/prompt_eng.py index 7010e14b..d7b841a9 100644 --- a/src/client/content/tools/prompt_eng.py +++ b/src/client/content/tools/tabs/prompt_eng.py @@ -9,8 +9,6 @@ """ # spell-checker:ignore selectbox -import inspect - import streamlit as st from streamlit import session_state as state @@ -18,9 +16,8 @@ import client.utils.api_call as api_call import common.logging_config as logging_config -from client.utils.st_footer import remove_footer -logger = logging_config.logging.getLogger("client.tools.prompt_eng") +logger = logging_config.logging.getLogger("client.tools.tabs.prompt_eng") ##################################################### @@ -65,9 +62,8 @@ def patch_prompt(category: str, name: str, 
prompt: str) -> bool:
 #############################################################################
 # MAIN
 #############################################################################
-def main():
+def display_prompt_eng():
     """Streamlit GUI"""
-    remove_footer()
     st.header("Prompt Engineering")
     st.write("Select which prompts to use and their instructions. Currently selected prompts are used.")
     try:
@@ -114,7 +110,3 @@ def main():
         if st.button("Save Instructions", key="save_ctx_prompt"):
             if patch_prompt("ctx", selected_prompt_ctx_name, prompt_ctx_prompt):
                 st.rerun()
-
-
-if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename:
-    main()
diff --git a/src/client/content/tools/split_embed.py b/src/client/content/tools/tabs/split_embed.py
similarity index 97%
rename from src/client/content/tools/split_embed.py
rename to src/client/content/tools/tabs/split_embed.py
index 52c68e87..f38427a0 100644
--- a/src/client/content/tools/split_embed.py
+++ b/src/client/content/tools/tabs/split_embed.py
@@ -4,9 +4,8 @@
 
 This script is used for the splitting and chunking process
 using Streamlit (`st`).
 """
-# spell-checker:ignore selectbox, hnsw, ivf, ocids,iterrows
 
-import inspect
+# spell-checker:ignore selectbox, hnsw, ivf, ocids,iterrows
 import math
 import re
@@ -17,18 +16,17 @@
 import client.utils.api_call as api_call
 import client.utils.st_common as st_common
-from client.utils.st_footer import remove_footer
-from client.content.config.databases import get_databases
-from client.content.config.models import get_models
-from client.content.config.oci import get_oci
+from client.content.config.tabs.databases import get_databases
+from client.content.config.tabs.models import get_models
+from client.content.config.tabs.oci import get_oci
 
 from common.schema import DistanceMetrics, IndexTypes, DatabaseVectorStorage
 import common.functions as functions
 import common.help_text as help_text
 import common.logging_config as logging_config
 
-logger = logging_config.logging.getLogger("client.tools.split_embed")
+logger = logging_config.logging.getLogger("client.tools.tabs.split_embed")
 
 
 #####################################################
@@ -113,7 +111,7 @@ def update_chunk_size_input() -> None:
 #############################################################################
 # MAIN
 #############################################################################
-def main() -> None:
+def display_split_embed() -> None:
     """Streamlit GUI"""
     try:
         get_models()
@@ -122,7 +120,6 @@ def main() -> None:
     except api_call.ApiError:
         st.stop()
 
-    remove_footer()
     db_avail = st_common.is_db_configured()
     if not db_avail:
         logger.debug("Embedding Disabled (Database not configured)")
@@ -400,7 +397,3 @@ def main() -> None:
         get_databases(force="True")
     except api_call.ApiError as ex:
         st.error(ex, icon="🚨")
-
-
-if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename:
-    main()
diff --git a/src/client/content/tools/tools.py b/src/client/content/tools/tools.py
new file mode 100644
index 00000000..19614376
--- /dev/null
+++ b/src/client/content/tools/tools.py
@@ -0,0 +1,25 @@
+"""
+Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
+""" +import inspect +import streamlit as st +from client.utils.st_common import style +from client.utils.st_footer import remove_footer + +from client.content.tools.tabs.prompt_eng import display_prompt_eng +from client.content.tools.tabs.split_embed import display_split_embed + +def main() -> None: + """Streamlit GUI""" + style() + remove_footer() + prompt_eng, split_embed = st.tabs(["🎤 Prompts", "📚 Split/Embed"]) + + with prompt_eng: + display_prompt_eng() + with split_embed: + display_split_embed() + +if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: + main() \ No newline at end of file diff --git a/src/client/utils/st_common.py b/src/client/utils/st_common.py index 246c8d9b..4f0ffd9d 100644 --- a/src/client/utils/st_common.py +++ b/src/client/utils/st_common.py @@ -25,6 +25,20 @@ logger = logging_config.logging.getLogger("client.utils.st_common") +############################################################################# +# GUI Helper +############################################################################# +def style(): + """Beautify""" + return st.html( + """ + + """, + ) ############################################################################# # State Helpers diff --git a/src/launch_client.py b/src/launch_client.py index 39330c3d..945b68ae 100644 --- a/src/launch_client.py +++ b/src/launch_client.py @@ -88,7 +88,7 @@ def main() -> None: } .stAppHeader img[alt="Logo"] { width: 50%; - } + } """, ) @@ -132,48 +132,23 @@ def main() -> None: # Left Hand Side - Navigation chatbot = st.Page("client/content/chatbot.py", title="ChatBot", icon="💬", default=True) - navigation = { + sidebar_navigation = { "": [chatbot], } if not state.disabled["tests"]: testbed = st.Page("client/content/testbed.py", title="Testbed", icon="🧪") - navigation[""].append(testbed) + sidebar_navigation[""].append(testbed) if not state.disabled["api"]: api_server = st.Page("client/content/api_server.py", title="API Server", icon="📡") - navigation[""].append(api_server) - - # Tools - if not state.disabled["tools"]: - split_embed = st.Page("client/content/tools/split_embed.py", title="Split/Embed", icon="📚") - navigation["Tools"] = [split_embed] - prompt_eng = st.Page("client/content/tools/prompt_eng.py", title="Prompts", icon="🎤") - navigation["Tools"].append(prompt_eng) - - # Administration + sidebar_navigation[""].append(api_server) if not state.disabled["tools"]: - navigation["Configuration"] = [] - if not state.disabled["db_cfg"]: - db_config = st.Page("client/content/config/databases.py", title="Databases", icon="🗄️") - navigation["Configuration"].append(db_config) - if not state.disabled["model_cfg"]: - model_config = st.Page("client/content/config/models.py", title="Models", icon="🤖") - navigation["Configuration"].append(model_config) - if not state.disabled["oci_cfg"]: - oci_config = st.Page("client/content/config/oci.py", title="OCI", icon="☁️") - navigation["Configuration"].append(oci_config) - if not state.disabled["settings"]: - settings = st.Page("client/content/config/settings.py", title="Settings", icon="💾") - navigation["Configuration"].append(settings) - # When we get here, if there's nothing in "Configuration" delete it - if not navigation["Configuration"]: - del navigation["Configuration"] - if not state.disabled["mcp_cfg"]: - mcp_config = st.Page("client/content/config/mcp_servers.py", title="MCP Servers", icon="💾") - navigation["Configuration"].append(mcp_config) - - pg = st.navigation(navigation, position="sidebar", expanded=False) - pg.run() + tools = 
st.Page("client/content/tools/tools.py", title="Tools", icon="🧰") + sidebar_navigation[""].append(tools) + config = st.Page("client/content/config/config.py", title="Configuration", icon="⚙️") + sidebar_navigation[""].append(config) + pg_sidebar = st.navigation(sidebar_navigation, position="sidebar", expanded=False) + pg_sidebar.run() if __name__ == "__main__": # Start Server if not running From 4f4b7f31f9df16d840d1dddda504cd1bae25d76b Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Fri, 15 Aug 2025 06:55:45 +0100 Subject: [PATCH 08/28] GUI --- src/client/content/config/tabs/mcp.py | 77 +++++++++++++++++++++++++++ src/client/mcp/frontend.py | 34 ------------ src/launch_server.py | 17 +++--- src/server/api/v1/mcp.py | 17 +++--- src/server/api/v1/probes.py | 15 +++--- src/server/mcp/proxies/sqlcl.py | 8 +-- 6 files changed, 108 insertions(+), 60 deletions(-) diff --git a/src/client/content/config/tabs/mcp.py b/src/client/content/config/tabs/mcp.py index a066018c..eb76767e 100644 --- a/src/client/content/config/tabs/mcp.py +++ b/src/client/content/config/tabs/mcp.py @@ -6,7 +6,84 @@ import streamlit as st from streamlit import session_state as state +import client.utils.api_call as api_call +import common.logging_config as logging_config + +logger = logging_config.logging.getLogger("client.content.config.tabs.mcp") + + +################################### +# Functions +################################### +def get_mcp_status() -> dict: + """Get MCP Status""" + try: + logger.info("Checking MCP Status") + return api_call.get(endpoint="v1/mcp/healthz") + except api_call.ApiError as ex: + logger.error("Unable to get MCP Status: %s", ex) + return {} + +def get_mcp_tools(force: bool = False) -> list[dict]: + """Get MCP Tools from API Server""" + if force or "mcp_tools" not in state or not state.mcp_tools: + try: + logger.info("Refreshing state.mcp_tools") + state.mcp_tools = api_call.get(endpoint="v1/mcp/tools") + except api_call.ApiError as ex: + logger.error("Unable to populate state.mcp_tools: %s", ex) + state.mcp_tools = {} + + +# @st.cache_data(show_spinner="Connecting to MCP Backend...", ttl=60) +# def get_server_capabilities(fastapi_base_url): +# """Fetches the lists of tools and resources from the FastAPI backend.""" +# try: +# # Get API key from environment or generate one +# api_key = os.getenv("API_SERVER_KEY") +# headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} + +# # First check if MCP is enabled and initialized +# status_response = requests.get(f"{fastapi_base_url}/v1/mcp/status", headers=headers) +# if status_response.status_code == 200: +# status = status_response.json() +# if not status.get("enabled", False): +# st.warning("MCP is not enabled. Please enable it in the configuration.") +# return {"error": "MCP not enabled"}, {"error": "MCP not enabled"}, {"error": "MCP not enabled"} +# if not status.get("initialized", False): +# st.info("MCP is enabled but not yet initialized. 
Please select a model first.") +# return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} + +# tools_response = requests.get(f"{fastapi_base_url}/v1/mcp/tools", headers=headers) +# tools_response.raise_for_status() +# tools = tools_response.json() + +# resources_response = requests.get(f"{fastapi_base_url}/v1/mcp/resources", headers=headers) +# resources_response.raise_for_status() +# resources = resources_response.json() + +# prompts_response = requests.get(f"{fastapi_base_url}/v1/mcp/prompts", headers=headers) +# prompts_response.raise_for_status() +# prompts = prompts_response.json() + +# return tools, resources, prompts +# except requests.exceptions.RequestException as e: +# st.error(f"Could not connect to the MCP backend at {fastapi_base_url}. Is it running? Error: {e}") +# return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} + + +############################################################################# +# MAIN +############################################################################# def display_mcp() -> None: """Streamlit GUI""" st.header("Model Context Protocol", divider="red") + try: + get_mcp_tools() + except api_call.ApiError: + st.stop() + mcp_status = get_mcp_status() + if mcp_status.get("status") == "ready": + st.write(f"The {mcp_status['name']} is running. Version: {mcp_status['version']}") + st.write(state.mcp_tools) diff --git a/src/client/mcp/frontend.py b/src/client/mcp/frontend.py index 383bb07f..2c645129 100644 --- a/src/client/mcp/frontend.py +++ b/src/client/mcp/frontend.py @@ -13,41 +13,7 @@ def set_page(): def get_fastapi_base_url(): return os.getenv("FASTAPI_BASE_URL", "http://127.0.0.1:8000") -@st.cache_data(show_spinner="Connecting to MCP Backend...", ttl=60) -def get_server_capabilities(fastapi_base_url): - """Fetches the lists of tools and resources from the FastAPI backend.""" - try: - # Get API key from environment or generate one - api_key = os.getenv("API_SERVER_KEY") - headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - - # First check if MCP is enabled and initialized - status_response = requests.get(f"{fastapi_base_url}/v1/mcp/status", headers=headers) - if status_response.status_code == 200: - status = status_response.json() - if not status.get("enabled", False): - st.warning("MCP is not enabled. Please enable it in the configuration.") - return {"error": "MCP not enabled"}, {"error": "MCP not enabled"}, {"error": "MCP not enabled"} - if not status.get("initialized", False): - st.info("MCP is enabled but not yet initialized. Please select a model first.") - return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} - - tools_response = requests.get(f"{fastapi_base_url}/v1/mcp/tools", headers=headers) - tools_response.raise_for_status() - tools = tools_response.json() - - resources_response = requests.get(f"{fastapi_base_url}/v1/mcp/resources", headers=headers) - resources_response.raise_for_status() - resources = resources_response.json() - prompts_response = requests.get(f"{fastapi_base_url}/v1/mcp/prompts", headers=headers) - prompts_response.raise_for_status() - prompts = prompts_response.json() - - return tools, resources, prompts - except requests.exceptions.RequestException as e: - st.error(f"Could not connect to the MCP backend at {fastapi_base_url}. Is it running? 
Error: {e}") - return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} def get_server_files(): files = ["server/mcp/server_config.json"] diff --git a/src/launch_server.py b/src/launch_server.py index aad1c95f..aa99ca61 100644 --- a/src/launch_server.py +++ b/src/launch_server.py @@ -161,14 +161,14 @@ def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): # Authenticated auth.include_router(api_v1.chat.auth, prefix="/v1/chat", tags=["Chatbot"]) - auth.include_router(api_v1.databases.auth, prefix="/v1/databases", tags=["Config - Databases"]) auth.include_router(api_v1.embed.auth, prefix="/v1/embed", tags=["Embeddings"]) - auth.include_router(api_v1.models.auth, prefix="/v1/models", tags=["Config - Models"]) - auth.include_router(api_v1.oci.auth, prefix="/v1/oci", tags=["Config - Oracle Cloud Infrastructure"]) - auth.include_router(api_v1.prompts.auth, prefix="/v1/prompts", tags=["Tools - Prompts"]) auth.include_router(api_v1.selectai.auth, prefix="/v1/selectai", tags=["SelectAI"]) - auth.include_router(api_v1.settings.auth, prefix="/v1/settings", tags=["Tools - Settings"]) + auth.include_router(api_v1.prompts.auth, prefix="/v1/prompts", tags=["Tools - Prompts"]) auth.include_router(api_v1.testbed.auth, prefix="/v1/testbed", tags=["Tools - Testbed"]) + auth.include_router(api_v1.settings.auth, prefix="/v1/settings", tags=["Config - Settings"]) + auth.include_router(api_v1.databases.auth, prefix="/v1/databases", tags=["Config - Databases"]) + auth.include_router(api_v1.models.auth, prefix="/v1/models", tags=["Config - Models"]) + auth.include_router(api_v1.oci.auth, prefix="/v1/oci", tags=["Config - Oracle Cloud Infrastructure"]) auth.include_router(api_v1.mcp.auth, prefix="/v1/mcp", tags=["Config - MCP Servers"]) # Auto-discover all MCP tools and register HTTP + MCP endpoints @@ -205,7 +205,12 @@ def fastapi_verify_key( # MCP Server settings.stateless_http = True - mcp = FastMCP(name="Optimizer MCP Server", auth=fastmcp_verifier) + mcp = FastMCP( + name="Oracle AI Optimizer and Toolkit MCP Server", + version=__version__, + auth=fastmcp_verifier, + include_fastmcp_meta=False, + ) mcp_app = mcp.http_app(path="/") @asynccontextmanager diff --git a/src/server/api/v1/mcp.py b/src/server/api/v1/mcp.py index 1b5fdb0d..0344ff1f 100644 --- a/src/server/api/v1/mcp.py +++ b/src/server/api/v1/mcp.py @@ -23,28 +23,23 @@ def get_mcp(request: Request) -> FastMCP: @auth.get( "/tools", description="List available MCP tools", - response_model=dict, + response_model=list[dict], ) -async def mcp_get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> dict: +async def mcp_get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]: """List MCP tools""" tools_info = [] try: + print(await mcp_engine.get_tools()) client = Client(mcp_engine) async with client: - tools = await client.list_tools() + tools = await client.list_tools_mcp() logger.debug("MCP Tools: %s", tools) for tool_object in tools: - tools_info.append( - { - "name": tool_object.name, - "description": tool_object.description, - "input_schema": getattr(tool_object, "inputSchema", None), - } - ) + tools_info.append(tool_object.model_dump()) finally: await client.close() - return {"tools": tools_info} + return tools_info @auth.get( diff --git a/src/server/api/v1/probes.py b/src/server/api/v1/probes.py index 1e30b820..2eeb50e7 100644 --- a/src/server/api/v1/probes.py +++ b/src/server/api/v1/probes.py @@ -33,9 +33,12 @@ def mcp_healthz(mcp_engine: FastMCP = Depends(get_mcp)): """Check if MCP server is ready.""" if mcp_engine 
is None: return {"status": "not ready"} - return { - "status": "ready", - "engine_type": str(type(mcp_engine)) if mcp_engine else None, - "available_tools": len(getattr(mcp_engine, "available_tools", [])) if mcp_engine else 0, - "timestamp": datetime.now().isoformat(), - } + else: + server = mcp_engine.__dict__["_mcp_server"].__dict__ + return { + "status": "ready", + "name": server["name"], + "version": server["version"], + "available_tools": len(getattr(mcp_engine, "available_tools", [])) if mcp_engine else 0, + "timestamp": datetime.now().isoformat(), + } diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py index ab292958..2d857ce6 100644 --- a/src/server/mcp/proxies/sqlcl.py +++ b/src/server/mcp/proxies/sqlcl.py @@ -16,16 +16,18 @@ def register(mcp): """Register the SQLcl MCP Server as Local (via Proxy)""" + tool_name = "SQLclProxy" sqlcl_binary = shutil.which("sql") if sqlcl_binary: env_vars = os.environ.copy() env_vars["TNS_ADMIN"] = os.getenv("TNS_ADMIN", "tns_admin") config = { "mcpServers": { - "sqlcl": { + tool_name: { + "name": tool_name, "command": f"{sqlcl_binary}", "args": ["-mcp", "-daemon", "-thin", "-noupdates"], - "env": env_vars + "env": env_vars, } } } @@ -58,7 +60,7 @@ def register(mcp): except Exception as ex: logger.error("Unexpected error creating connection store: %s", ex) # Create a proxy to the configured server (auto-creates ProxyClient) - mcp_proxy = mcp.as_proxy(config, name="SQLclProxy") + mcp_proxy = mcp.as_proxy(config, name=tool_name) mcp.mount(mcp_proxy) else: logger.warning("Not enabling SQLcl MCP server, sqlcl not found in PATH.") From b70ffb750ad553425e6c0c72aad93e25e0d5dc47 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Tue, 19 Aug 2025 08:29:27 +0100 Subject: [PATCH 09/28] tests updated for tabs --- src/client/content/api_server.py | 4 - src/client/content/chatbot.py | 104 +++++----- src/client/content/config/config.py | 19 +- src/client/content/config/tabs/databases.py | 5 +- src/client/content/config/tabs/mcp.py | 5 + src/client/content/config/tabs/models.py | 6 +- src/client/content/config/tabs/oci.py | 4 + src/client/content/config/tabs/settings.py | 4 + src/client/content/testbed.py | 4 - src/client/content/tools/tabs/prompt_eng.py | 4 + src/client/content/tools/tabs/split_embed.py | 4 + src/client/content/tools/tools.py | 18 +- src/client/utils/st_common.py | 20 +- src/client/utils/st_footer.py | 39 +--- src/launch_client.py | 1 + src/launch_server.py | 178 ++++++++---------- src/pyproject.toml | 13 +- src/server/api/v1/mcp.py | 54 ++++-- src/server/api/v1/probes.py | 2 +- src/server/mcp/__init__.py | 24 ++- src/server/mcp/prompts/optimizer.py | 21 +++ src/server/mcp/proxies/sqlcl.py | 12 +- src/server/mcp/tools/say_hello.py | 2 +- src/server/patches/litellm_patch.py | 4 +- .../config/{ => tabs}/test_databases.py | 2 +- .../content/config/{ => tabs}/test_models.py | 2 +- .../content/config/{ => tabs}/test_oci.py | 2 +- .../config/{ => tabs}/test_settings.py | 4 +- tests/client/content/test_st_footer.py | 26 +-- .../tools/{ => tabs}/test_prompt_eng.py | 2 +- .../tools/{ => tabs}/test_split_embed.py | 7 +- tests/conftest.py | 3 +- 32 files changed, 300 insertions(+), 299 deletions(-) create mode 100644 src/server/mcp/prompts/optimizer.py rename tests/client/content/config/{ => tabs}/test_databases.py (99%) rename tests/client/content/config/{ => tabs}/test_models.py (95%) rename tests/client/content/config/{ => tabs}/test_oci.py (99%) rename tests/client/content/config/{ => tabs}/test_settings.py (97%) rename 
tests/client/content/tools/{ => tabs}/test_prompt_eng.py (97%) rename tests/client/content/tools/{ => tabs}/test_split_embed.py (97%) diff --git a/src/client/content/api_server.py b/src/client/content/api_server.py index 3e8bd6b6..8b7d720c 100644 --- a/src/client/content/api_server.py +++ b/src/client/content/api_server.py @@ -16,8 +16,6 @@ import client.utils.client as client import client.utils.api_call as api_call -from client.utils.st_common import style -from client.utils.st_footer import remove_footer import common.logging_config as logging_config logger = logging_config.logging.getLogger("client.content.api_server") @@ -65,8 +63,6 @@ def server_restart() -> None: ##################################################### async def main() -> None: """Streamlit GUI""" - style() - remove_footer() st.header("API Server") st.write("Access with your own client.") left, right = st.columns([0.2, 0.8]) diff --git a/src/client/content/chatbot.py b/src/client/content/chatbot.py index 18ca5010..c79146f7 100644 --- a/src/client/content/chatbot.py +++ b/src/client/content/chatbot.py @@ -84,7 +84,7 @@ async def main() -> None: ######################################################################### # Chatty-Bot Centre ######################################################################### - + if "messages" not in state: state.messages = [] @@ -101,19 +101,19 @@ async def main() -> None: display_role = "assistant" else: continue - + with st.chat_message(display_role): if "tool_trace" in message and message["tool_trace"]: for tool_call in message["tool_trace"]: with st.expander(f"🛠️ **Tool Call:** `{tool_call['name']}`", expanded=False): st.text("Arguments:") - st.code(json.dumps(tool_call.get('args', {}), indent=2), language="json") + st.code(json.dumps(tool_call.get("args", {}), indent=2), language="json") if "error" in tool_call: st.text("Error:") - st.error(tool_call['error']) + st.error(tool_call["error"]) else: st.text("Result:") - st.code(tool_call.get('result', ''), language="json") + st.code(tool_call.get("result", ""), language="json") if message.get("content"): # Display file attachments if present if "attachments" in message and message["attachments"]: @@ -121,14 +121,14 @@ async def main() -> None: # Show appropriate icon based on file type if file["type"].startswith("image/"): st.image(file["preview"], use_container_width=True) - st.markdown(f"🖼️ **{file['name']}** ({file['size']//1024} KB)") + st.markdown(f"🖼️ **{file['name']}** ({file['size'] // 1024} KB)") elif file["type"] == "application/pdf": - st.markdown(f"📄 **{file['name']}** ({file['size']//1024} KB)") + st.markdown(f"📄 **{file['name']}** ({file['size'] // 1024} KB)") elif file["type"] in ("text/plain", "text/markdown"): - st.markdown(f"📝 **{file['name']}** ({file['size']//1024} KB)") + st.markdown(f"📝 **{file['name']}** ({file['size'] // 1024} KB)") else: - st.markdown(f"📎 **{file['name']}** ({file['size']//1024} KB)") - + st.markdown(f"📎 **{file['name']}** ({file['size'] // 1024} KB)") + # Display message content - handle both string and list formats content = message.get("content") if isinstance(content, list): @@ -140,7 +140,7 @@ async def main() -> None: sys_prompt = state.client_settings["prompts"]["sys"] render_chat_footer() - + if human_request := st.chat_input( f"Ask your question here... 
(current prompt: {sys_prompt})", accept_file=True, @@ -149,7 +149,7 @@ async def main() -> None: ): # Process message with potential file attachments message = {"role": "user", "content": human_request.text} - + # Handle file attachments if hasattr(human_request, "files") and human_request.files: # Store file information separately from content @@ -157,14 +157,16 @@ async def main() -> None: for file in human_request.files: file_bytes = file.read() file_b64 = base64.b64encode(file_bytes).decode("utf-8") - message["attachments"].append({ - "name": file.name, - "type": file.type, - "size": len(file_bytes), - "data": file_b64, - "preview": f"data:{file.type};base64,{file_b64}" if file.type.startswith("image/") else None - }) - + message["attachments"].append( + { + "name": file.name, + "type": file.type, + "size": len(file_bytes), + "data": file_b64, + "preview": f"data:{file.type};base64,{file_b64}" if file.type.startswith("image/") else None, + } + ) + state.messages.append(message) st.rerun() if state.messages and state.messages[-1]["role"] == "user": @@ -172,34 +174,34 @@ async def main() -> None: with st.chat_message("ai"): with st.spinner("Thinking..."): client_settings_for_request = state.client_settings.copy() - model_id = client_settings_for_request.get('ll_model', {}).get('model') + model_id = client_settings_for_request.get("ll_model", {}).get("model") if model_id: all_model_configs = st_common.enabled_models_lookup("ll") model_config = all_model_configs.get(model_id, {}) - if 'api_key' in model_config: - if 'll_model' not in client_settings_for_request: - client_settings_for_request['ll_model'] = {} - client_settings_for_request['ll_model']['api_key'] = model_config['api_key'] + if "api_key" in model_config: + if "ll_model" not in client_settings_for_request: + client_settings_for_request["ll_model"] = {} + client_settings_for_request["ll_model"]["api_key"] = model_config["api_key"] # Prepare message history for backend message_history = [] for msg in state.messages: # Create a copy of the message processed_msg = msg.copy() - + # If there are attachments, include them in the content if "attachments" in msg and msg["attachments"]: # Start with the text content text_content = msg["content"] - + # Handle list content format (from OpenAI API) if isinstance(text_content, list): text_parts = [part["text"] for part in text_content if part["type"] == "text"] text_content = "\n".join(text_parts) - + # Create a list to hold structured content parts content_list = [{"type": "text", "text": text_content}] - + non_image_references = [] for attachment in msg["attachments"]: if attachment["type"].startswith("image/"): @@ -209,22 +211,26 @@ async def main() -> None: mime_type = attachment["type"] if mime_type == "image/jpg": mime_type = "image/jpeg" - - content_list.append({ - "type": "image_url", - "image_url": { - "url": f"data:{mime_type};base64,{attachment['data']}", - "detail": "low" + + content_list.append( + { + "type": "image_url", + "image_url": { + "url": f"data:{mime_type};base64,{attachment['data']}", + "detail": "low", + }, } - }) + ) else: # Handle non-image files as text references - non_image_references.append(f"\n[File: {attachment['name']} ({attachment['size']//1024} KB)]") - + non_image_references.append( + f"\n[File: {attachment['name']} ({attachment['size'] // 1024} KB)]" + ) + # If there were non-image files, append their references to the main text part if non_image_references: - content_list[0]['text'] += "".join(non_image_references) - + content_list[0]["text"] += 
"".join(non_image_references) + processed_msg["content"] = content_list # Convert list content to string format elif isinstance(msg.get("content"), list): @@ -233,14 +239,12 @@ async def main() -> None: # Otherwise, ensure content is a string else: processed_msg["content"] = str(msg.get("content", "")) - + message_history.append(processed_msg) async with MCPClient(client_settings=client_settings_for_request) as mcp_client: - final_text, tool_trace, new_history = await mcp_client.invoke( - message_history=message_history - ) - + final_text, tool_trace, new_history = await mcp_client.invoke(message_history=message_history) + # Update the history for display. # Keep the original message structure with attachments for i in range(len(new_history) - 1, -1, -1): @@ -249,11 +253,11 @@ async def main() -> None: user_message = state.messages[-1] if "attachments" in user_message: new_history[-1]["attachments"] = user_message["attachments"] - + new_history[i]["content"] = final_text new_history[i]["tool_trace"] = tool_trace break - + state.messages = new_history st.rerun() @@ -261,11 +265,11 @@ async def main() -> None: logger.error("Exception during invoke call:", exc_info=True) # Extract just the error message error_msg = str(e) - + # Check if it's a file-related error if "file" in error_msg.lower() or "image" in error_msg.lower() or "content" in error_msg.lower(): st.error(f"Error: {error_msg}") - + # Add a button to remove files and retry if st.button("Remove files and retry", key="remove_files_retry"): # Remove attachments from the latest message @@ -274,7 +278,7 @@ async def main() -> None: st.rerun() else: st.error(f"Error: {error_msg}") - + if st.button("Retry", key="reload_chatbot_error"): if state.messages and state.messages[-1]["role"] == "user": state.messages.pop() diff --git a/src/client/content/config/config.py b/src/client/content/config/config.py index 5eb4551a..ba954ebe 100644 --- a/src/client/content/config/config.py +++ b/src/client/content/config/config.py @@ -6,20 +6,16 @@ import inspect import streamlit as st from streamlit import session_state as state -from client.utils.st_common import style -from client.utils.st_footer import remove_footer -from client.content.config.tabs.settings import display_settings -from client.content.config.tabs.oci import display_oci -from client.content.config.tabs.databases import display_databases -from client.content.config.tabs.models import display_models -from client.content.config.tabs.mcp import display_mcp +from client.content.config.tabs.settings import get_settings, display_settings +from client.content.config.tabs.oci import get_oci, display_oci +from client.content.config.tabs.databases import get_databases, display_databases +from client.content.config.tabs.models import get_models, display_models +from client.content.config.tabs.mcp import get_mcp_tools, display_mcp def main() -> None: """Streamlit GUI""" - style() - remove_footer() tabs_list = [] if not state.disabled["settings"]: tabs_list.append("💾 Settings") @@ -39,22 +35,27 @@ def main() -> None: # Map tab objects to content conditionally if not state.disabled["settings"]: + get_settings() with tabs[tab_index]: display_settings() tab_index += 1 if not state.disabled["db_cfg"]: + get_databases() with tabs[tab_index]: display_databases() tab_index += 1 if not state.disabled["model_cfg"]: + get_models() with tabs[tab_index]: display_models() tab_index += 1 if not state.disabled["oci_cfg"]: + get_oci() with tabs[tab_index]: display_oci() tab_index += 1 if not 
state.disabled["mcp_cfg"]: + get_mcp_tools() with tabs[tab_index]: display_mcp() tab_index += 1 diff --git a/src/client/content/config/tabs/databases.py b/src/client/content/config/tabs/databases.py index 850868a0..93acdc1a 100644 --- a/src/client/content/config/tabs/databases.py +++ b/src/client/content/config/tabs/databases.py @@ -113,7 +113,6 @@ def display_databases() -> None: get_databases() except api_call.ApiError: st.stop() - st.subheader("Configuration") database_lookup = st_common.state_configs_lookup("database_configs", "name") # Get a list of database names, and allow user to select @@ -235,3 +234,7 @@ def display_databases() -> None: st.write("Unable to use SelectAI with Database.") elif len(selectai_profiles) == 0: st.write("No SelectAI Profiles Found.") + + +if __name__ == "__main__": + display_databases() diff --git a/src/client/content/config/tabs/mcp.py b/src/client/content/config/tabs/mcp.py index eb76767e..7858449c 100644 --- a/src/client/content/config/tabs/mcp.py +++ b/src/client/content/config/tabs/mcp.py @@ -25,6 +25,7 @@ def get_mcp_status() -> dict: logger.error("Unable to get MCP Status: %s", ex) return {} + def get_mcp_tools(force: bool = False) -> list[dict]: """Get MCP Tools from API Server""" if force or "mcp_tools" not in state or not state.mcp_tools: @@ -87,3 +88,7 @@ def display_mcp() -> None: if mcp_status.get("status") == "ready": st.write(f"The {mcp_status['name']} is running. Version: {mcp_status['version']}") st.write(state.mcp_tools) + + +if __name__ == "__main__": + display_mcp() diff --git a/src/client/content/config/tabs/models.py b/src/client/content/config/tabs/models.py index 704b40db..d2de9f2e 100644 --- a/src/client/content/config/tabs/models.py +++ b/src/client/content/config/tabs/models.py @@ -262,8 +262,6 @@ def display_models() -> None: except api_call.ApiError: st.stop() - # Table Dimensions - st.divider() st.subheader("Language Models") render_model_rows("ll") @@ -271,3 +269,7 @@ def display_models() -> None: st.divider() st.subheader("Embedding Models") render_model_rows("embed") + + +if __name__ == "__main__": + display_models() diff --git a/src/client/content/config/tabs/oci.py b/src/client/content/config/tabs/oci.py index 316efa2b..7ff89ef7 100644 --- a/src/client/content/config/tabs/oci.py +++ b/src/client/content/config/tabs/oci.py @@ -210,3 +210,7 @@ def display_oci() -> None: _ = create_genai_models() st_common.clear_state_key("model_configs") st.success("Oracle GenAI models - Enabled.", icon="✅") + + +if __name__ == "__main__": + display_oci() diff --git a/src/client/content/config/tabs/settings.py b/src/client/content/config/tabs/settings.py index bce3ba47..4890e389 100644 --- a/src/client/content/config/tabs/settings.py +++ b/src/client/content/config/tabs/settings.py @@ -328,3 +328,7 @@ def display_settings(): mime="application/zip", # Mime type for zip file disabled=spring_ai_conf == "hybrid", ) + + +if __name__ == "__main__": + display_settings() diff --git a/src/client/content/testbed.py b/src/client/content/testbed.py index 426e5a61..bb9fe54a 100644 --- a/src/client/content/testbed.py +++ b/src/client/content/testbed.py @@ -19,8 +19,6 @@ import client.utils.st_common as st_common import client.utils.api_call as api_call -from client.utils.st_footer import remove_footer - import common.logging_config as logging_config @@ -239,8 +237,6 @@ def qa_update_gui(qa_testset: list) -> None: ############################################################################# def main() -> None: """Streamlit GUI""" - st_common.style() - 
remove_footer() try: get_models() except api_call.ApiError: diff --git a/src/client/content/tools/tabs/prompt_eng.py b/src/client/content/tools/tabs/prompt_eng.py index d7b841a9..67cb9afb 100644 --- a/src/client/content/tools/tabs/prompt_eng.py +++ b/src/client/content/tools/tabs/prompt_eng.py @@ -110,3 +110,7 @@ def display_prompt_eng(): if st.button("Save Instructions", key="save_ctx_prompt"): if patch_prompt("ctx", selected_prompt_ctx_name, prompt_ctx_prompt): st.rerun() + + +if __name__ == "__main__": + display_prompt_eng() diff --git a/src/client/content/tools/tabs/split_embed.py b/src/client/content/tools/tabs/split_embed.py index f38427a0..264f5d8b 100644 --- a/src/client/content/tools/tabs/split_embed.py +++ b/src/client/content/tools/tabs/split_embed.py @@ -397,3 +397,7 @@ def display_split_embed() -> None: get_databases(force="True") except api_call.ApiError as ex: st.error(ex, icon="🚨") + + +if __name__ == "__main__": + display_split_embed() diff --git a/src/client/content/tools/tools.py b/src/client/content/tools/tools.py index 19614376..5fcfe3fd 100644 --- a/src/client/content/tools/tools.py +++ b/src/client/content/tools/tools.py @@ -2,24 +2,30 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ + import inspect import streamlit as st -from client.utils.st_common import style -from client.utils.st_footer import remove_footer -from client.content.tools.tabs.prompt_eng import display_prompt_eng +from client.content.tools.tabs.prompt_eng import get_prompts, display_prompt_eng from client.content.tools.tabs.split_embed import display_split_embed +from client.content.config.tabs.models import get_models +from client.content.config.tabs.databases import get_databases +from client.content.config.tabs.oci import get_oci + def main() -> None: """Streamlit GUI""" - style() - remove_footer() prompt_eng, split_embed = st.tabs(["🎤 Prompts", "📚 Split/Embed"]) with prompt_eng: + get_prompts() display_prompt_eng() with split_embed: + get_models() + get_databases() + get_oci() display_split_embed() + if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() \ No newline at end of file + main() diff --git a/src/client/utils/st_common.py b/src/client/utils/st_common.py index 4f0ffd9d..460d014c 100644 --- a/src/client/utils/st_common.py +++ b/src/client/utils/st_common.py @@ -25,21 +25,6 @@ logger = logging_config.logging.getLogger("client.utils.st_common") -############################################################################# -# GUI Helper -############################################################################# -def style(): - """Beautify""" - return st.html( - """ - - """, - ) - ############################################################################# # State Helpers ############################################################################# @@ -75,6 +60,8 @@ def enabled_models_lookup(model_type: str) -> dict[str, dict[str, Any]]: def bool_to_emoji(value): "Return an Emoji for Bools" return "✅" if value else "⚪" + + def local_file_payload(uploaded_files: Union[BytesIO, list[BytesIO]]) -> list: """Upload Single file from Streamlit to the Server""" # If it's a single file, convert it to a list for consistent processing @@ -194,7 +181,7 @@ def ll_sidebar() -> None: on_change=update_client_settings("ll_model"), disabled=state.client_settings["selectai"]["enabled"], ) - + # If the model has changed, reinitialize the MCP engine if selected_model != 
previous_model and initialize_mcp_engine_with_model: try: @@ -463,6 +450,7 @@ def vector_search_sidebar() -> None: database_lookup = state_configs_lookup("database_configs", "name") vs_df = pd.DataFrame(database_lookup[db_alias].get("vector_stores")) + def vs_reset() -> None: """Reset Vector Store Selections""" for key in state.client_settings["vector_search"]: diff --git a/src/client/utils/st_footer.py b/src/client/utils/st_footer.py index b8d5b643..8314171e 100644 --- a/src/client/utils/st_footer.py +++ b/src/client/utils/st_footer.py @@ -65,25 +65,7 @@ def _inject_footer(selector, insertion_method, footer_html, cleanup_styles=True) """ components.html(js_code, height=0) - -# --- FUNCTION 1: The Cleanup Crew --- -def remove_footer(): - """ - Injects simple JavaScript to find and remove any existing footer. - This MUST be called at the TOP of every page in your app. - """ - js_code = """ - - """ - components.html(js_code, height=0) - - -# --- FUNCTION 2: The Chat Page Footer --- +# --- The Chat Page Footer --- def render_chat_footer(): """ Standardized footer for chat pages. @@ -97,22 +79,3 @@ def render_chat_footer(): _inject_footer( selector='[data-testid="stBottomBlockContainer"]', insertion_method="afterend", footer_html=footer_html ) - - -# --- FUNCTION 3: The Models Page Footer --- -def render_models_footer(): - """ - Standardized footer for models pages. - """ - footer_html = f""" - {FOOTER_STYLE} - - """ - _inject_footer( - selector='[data-testid="stAppIframeResizerAnchor"]', - insertion_method="beforebegin", - footer_html=footer_html, - cleanup_styles=False, - ) diff --git a/src/launch_client.py b/src/launch_client.py index 945b68ae..0efd2806 100644 --- a/src/launch_client.py +++ b/src/launch_client.py @@ -7,6 +7,7 @@ """ # spell-checker:ignore streamlit, scriptrunner +import asyncio import os from uuid import uuid4 diff --git a/src/launch_server.py b/src/launch_server.py index aa99ca61..a1c55da0 100644 --- a/src/launch_server.py +++ b/src/launch_server.py @@ -2,14 +2,17 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" -# spell-checker:ignore fastapi laddr checkpointer langgraph litellm fastmcp getpid procs -# spell-checker:ignore noauth apiserver configfile selectai giskard ollama llms -# pylint: disable=redefined-outer-name,wrong-import-position +# spell-checker:ignore configfile fastmcp noauth selectai getpid procs litellm giskard ollama +# spell-checker:ignore dotenv apiserver laddr -from contextlib import asynccontextmanager -import os +# Patch litellm for Giskard/Ollama issue +import server.patches.litellm_patch # pylint: disable=unused-import, wrong-import-order +# Set OS Environment before importing other modules # Set OS Environment (Don't move their position to reflect on imports) +# pylint: disable=wrong-import-position +import os + os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True" os.environ["LITELLM_DISABLE_SPEND_LOGS"] = "True" os.environ["LITELLM_DISABLE_SPEND_UPDATES"] = "True" @@ -21,44 +24,38 @@ app_home = os.path.dirname(os.path.abspath(__file__)) if "TNS_ADMIN" not in os.environ: os.environ["TNS_ADMIN"] = os.path.join(app_home, "tns_admin") - -# Patch litellm for Giskard/Ollama issue -import server.patches.litellm_patch # pylint: disable=unused-import +# pylint: enable=wrong-import-position import argparse - -# import json -import queue +import asyncio +from contextlib import asynccontextmanager +from pathlib import Path import secrets import socket import subprocess -import threading +import sys from typing import Annotated -from pathlib import Path -import uvicorn - +# Third Party import psutil - -# from client.mcp.client import MCPClient -from fastapi import APIRouter, Depends, FastAPI, HTTPException, status +import uvicorn +from fastapi import FastAPI, APIRouter, Depends, HTTPException, status from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer from fastmcp import FastMCP, settings from fastmcp.server.auth import StaticTokenVerifier +# Configuration +import server.bootstrap.configfile as configfile # pylint: disable=ungrouped-imports + # Logging import common.logging_config as logging_config from common._version import __version__ -# Configuration -import server.bootstrap.configfile as configfile -# from server.bootstrap import mcp as mcp_bootstrap - logger = logging_config.logging.getLogger("launch_server") ########################################## -# Process Control +# Client Process Control ########################################## def start_server(port: int = 8000, logfile: bool = False) -> int: """Start the uvicorn server for FastAPI""" @@ -83,42 +80,21 @@ def get_pid_using_port(port: int) -> int: continue return None - def start_subprocess(port: int, logfile: bool) -> subprocess.Popen: - """Start the uvicorn server as a subprocess when started via the client.""" - logger.info("API server starting on port: %i", port) - log_file = open(f"apiserver_{port}.log", "a", encoding="utf-8") if logfile else None - stdout = stderr = log_file if logfile else subprocess.PIPE - process = subprocess.Popen( - [ - "uvicorn", - "launch_server:create_app", - "--factory", - "--host", - "0.0.0.0", - "--port", - str(port), - "--timeout-graceful-shutdown", - "5", - ], - stdout=stdout, - stderr=stderr, - ) - logger.info("API server started on Port: %i; PID: %i", port, process.pid) - return process - port = port or find_available_port() if existing_pid := get_pid_using_port(port): logger.info("API server already running on port: %i (PID: %i)", port, existing_pid) return existing_pid - popen_queue = queue.Queue() - thread = threading.Thread( - target=lambda: 
popen_queue.put(start_subprocess(port, logfile)), - daemon=True, - ) - thread.start() + client_args = [sys.executable, __file__, "--port", str(port)] + if logfile: + log_file = open(f"apiserver_{port}.log", "a", encoding="utf-8") + stdout = stderr = log_file + else: + stdout = stderr = subprocess.PIPE - return popen_queue.get().pid + process = subprocess.Popen(client_args, stdout=stdout, stderr=stderr) + logger.info("Server started on port %i with PID %i", port, process.pid) + return process.pid def stop_server(pid: int) -> None: @@ -148,10 +124,25 @@ def get_api_key() -> str: return os.getenv("API_SERVER_KEY") -def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): +def fastapi_verify_key( + http_auth: Annotated[ + HTTPAuthorizationCredentials, + Depends(HTTPBearer(description="Please provide API_SERVER_KEY.")), + ], +) -> None: + """FastAPI: Verify that the provided API key is correct.""" + if http_auth.credentials != get_api_key(): + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED) + + +########################################## +# Endpoint Registration +########################################## +async def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): """Register API Endpoints - Imports to avoid bootstrapping before config file read New endpoints need to be registered in server.api.v1.__init__.py """ + logger.debug("Starting Endpoint Registration") # pylint: disable=import-outside-toplevel import server.api.v1 as api_v1 from server.mcp import register_all_mcp @@ -173,49 +164,39 @@ def register_endpoints(mcp: FastMCP, auth: APIRouter, noauth: APIRouter): # Auto-discover all MCP tools and register HTTP + MCP endpoints mcp_router = APIRouter(prefix="/mcp", tags=["MCP Tools"]) - register_all_mcp(mcp, auth) + await register_all_mcp(mcp, auth) auth.include_router(mcp_router) + logger.debug("Finished Endpoint Registration") ############################################################################# # APP FACTORY ############################################################################# -def create_app(config: str = "") -> FastAPI: - """Create and configure the FastAPI app.""" - - def fastapi_verify_key( - http_auth: Annotated[ - HTTPAuthorizationCredentials, - Depends(HTTPBearer(description="Please provide API_SERVER_KEY.")), - ], - ) -> None: - """FastAPI: Verify that the provided API key is correct.""" - if http_auth.credentials != get_api_key(): - raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED) - - ### Start +async def create_app(config: str = "") -> FastAPI: + """FastAPI Application Factory""" + if not config: config = configfile.config_file_path() config_file = Path(os.getenv("CONFIG_FILE", config)) configfile.ConfigStore.load_from_file(config_file) + # FastMCP Server fastmcp_verifier = StaticTokenVerifier( tokens={get_api_key(): {"client_id": "optimizer", "scopes": ["read", "write"]}} ) - - # MCP Server settings.stateless_http = True - mcp = FastMCP( + fastmcp_app = FastMCP( name="Oracle AI Optimizer and Toolkit MCP Server", version=__version__, auth=fastmcp_verifier, include_fastmcp_meta=False, ) - mcp_app = mcp.http_app(path="/") + fastmcp_engine = fastmcp_app.http_app(path="/") @asynccontextmanager - async def combined_lifespan(app): - async with mcp_app.lifespan(app): + async def combined_lifespan(fastapi_app: FastAPI): + """Ensures all MCP Servers are cleaned up""" + async with fastmcp_engine.lifespan(fastapi_app): yield # Shutdown cleanup logger.info("Cleaning up leftover processes...") @@ -234,8 +215,8 @@ 
async def combined_lifespan(app): except psutil.NoSuchProcess: continue - # API Server - app = FastAPI( + # FastAPI Server + fastapi_app = FastAPI( title="Oracle AI Optimizer and Toolkit", version=__version__, docs_url="/v1/docs", @@ -247,21 +228,20 @@ async def combined_lifespan(app): }, ) # Store MCP in the app state - app.state.mcp = mcp + fastapi_app.state.fastmcp_app = fastmcp_app + # Register MCP Server into FastAPI + fastapi_app.mount("/mcp", fastmcp_engine) # Setup Routes and Register non-MCP endpoints noauth = APIRouter() auth = APIRouter(dependencies=[Depends(fastapi_verify_key)]) - register_endpoints(mcp, auth, noauth) - - # Register MCP Server into FastAPI - app.mount("/mcp", mcp_app) - - app.include_router(noauth) - app.include_router(auth) + # Register the endpoints + await register_endpoints(fastmcp_app, auth, noauth) + fastapi_app.include_router(noauth) + fastapi_app.include_router(auth) - return app + return fastapi_app if __name__ == "__main__": @@ -273,19 +253,23 @@ async def combined_lifespan(app): default=configfile.config_file_path(), help="Full path to configuration file (JSON)", ) + parser.add_argument( + "--port", + type=int, + default=8000, + help="Port to start server", + ) args = parser.parse_args() PORT = int(os.getenv("API_SERVER_PORT", "8000")) logger.info("API Server Using port: %i", PORT) - app = create_app(args.config) - try: - uvicorn.run( - app, - host="0.0.0.0", - port=PORT, - timeout_graceful_shutdown=5, - log_config=logging_config.LOGGING_CONFIG, - ) - except Exception as ex: - logger.info("Forced Shutdown: %s", ex) + # Sync entrypoint, but calls async factory before running Uvicorn + app = asyncio.run(create_app(args.config)) + uvicorn.run( + app, + host="0.0.0.0", + port=PORT, + timeout_graceful_shutdown=5, + log_config=logging_config.LOGGING_CONFIG, + ) diff --git a/src/pyproject.toml b/src/pyproject.toml index 4494ee4d..72710297 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -28,8 +28,8 @@ server = [ "faiss-cpu==1.12.0", "fastapi==0.116.1", "fastmcp==2.11.3", - "giskard==2.17.0", - "langchain-anthropic==0.3.18", + "giskard==2.18.0", + "langchain-anthropic==0.3.19", "langchain-cohere==0.4.5", "langchain-community==0.3.27", "langchain-google-genai==2.1.9", @@ -38,12 +38,13 @@ server = [ "langchain-mistralai==0.2.11", "langchain-ollama==0.3.6", "langchain-openai==0.3.30", - "langgraph==0.6.4", - "litellm==1.75.5.post1", - "llama-index==0.13.1", + "langgraph==0.6.5", + "litellm==1.75.8", + "llama-index==0.13.2", "lxml==6.0.0", "matplotlib==3.10.5", "oci~=2.0", + "openai==v1.99.9", # Remove after https://github.com/openai/openai-python/issues/2564 resolved "psutil==7.0.0", "python-multipart==0.0.20", "torch==2.8.0", @@ -53,7 +54,7 @@ server = [ # GUI component dependencies client = [ - "streamlit==1.48.0", + "streamlit==1.48.1", ] # Test dependencies diff --git a/src/server/api/v1/mcp.py b/src/server/api/v1/mcp.py index 0344ff1f..6a3f2fe9 100644 --- a/src/server/api/v1/mcp.py +++ b/src/server/api/v1/mcp.py @@ -17,7 +17,7 @@ def get_mcp(request: Request) -> FastMCP: """Get the MCP engine from the app state""" - return request.app.state.mcp + return request.app.state.fastmcp_app @auth.get( @@ -29,10 +29,9 @@ async def mcp_get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]: """List MCP tools""" tools_info = [] try: - print(await mcp_engine.get_tools()) client = Client(mcp_engine) async with client: - tools = await client.list_tools_mcp() + tools = await client.list_tools() logger.debug("MCP Tools: %s", tools) for tool_object in 
tools: tools_info.append(tool_object.model_dump()) @@ -44,29 +43,44 @@ async def mcp_get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]: @auth.get( "/resources", - description="Get MCP resources", - response_model=dict, + description="List MCP resources", + response_model=list[dict], ) -async def mcp_get_resources(mcp_engine: FastMCP = Depends(get_mcp)) -> dict: - """Get MCP Resources""" - resources = await mcp_engine.get_resources() - logger.debug("MCP Resources: %s", resources) - return { - "static": list(getattr(mcp_engine, "static_resources", {}).keys()), - "dynamic": getattr(mcp_engine, "dynamic_resources", []), - } +async def mcp_list_resources(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]: + """List MCP Resources""" + resources_info = [] + try: + client = Client(mcp_engine) + async with client: + resources = await client.list_resources() + logger.debug("MCP Resources: %s", resources) + for resources_object in resources: + resources_info.append(resources_object.model_dump()) + finally: + await client.close() + + return resources_info @auth.get( "/prompts", - description="Get MCP prompts", - response_model=dict, + description="List MCP prompts", + response_model=list[dict], ) -async def mcp_get_prompts(mcp_engine: FastMCP = Depends(get_mcp)) -> dict: - """Get MCP prompts""" - prompts = await mcp_engine.get_prompts() - logger.debug("MCP Prompts: %s", prompts) - return {"prompts": list(getattr(mcp_engine, "available_prompts", {}).keys())} +async def mcp_list_prompts(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]: + """List MCP Prompts""" + prompts_info = [] + try: + client = Client(mcp_engine) + async with client: + prompts = await client.list_prompts() + logger.debug("MCP Resources: %s", prompts) + for prompts_object in prompts: + prompts_info.append(prompts_object.model_dump()) + finally: + await client.close() + + return prompts_info # @auth.post("/execute", description="Execute an MCP tool", response_model=dict) diff --git a/src/server/api/v1/probes.py b/src/server/api/v1/probes.py index 2eeb50e7..4048328d 100644 --- a/src/server/api/v1/probes.py +++ b/src/server/api/v1/probes.py @@ -13,7 +13,7 @@ def get_mcp(request: Request) -> FastMCP: """Get the MCP engine from the app state""" - return request.app.state.mcp + return request.app.state.fastmcp_app @noauth.get("/liveness") diff --git a/src/server/mcp/__init__.py b/src/server/mcp/__init__.py index ea4a5b85..9538798d 100644 --- a/src/server/mcp/__init__.py +++ b/src/server/mcp/__init__.py @@ -15,7 +15,7 @@ logger = logging_config.logging.getLogger("mcp.__init__.py") -def _discover_and_register( +async def _discover_and_register( package: str, mcp: FastMCP = None, auth: APIRouter = None, @@ -41,9 +41,11 @@ def _discover_and_register( if hasattr(module, "register"): logger.info("Registering via %s.register()", module_info.name) if ".tools." in module.__name__: - module.register(mcp, auth) + await module.register(mcp, auth) if ".proxies." in module.__name__: - module.register(mcp) + await module.register(mcp) + if ".prompts." 
in module.__name__: + await module.register(mcp) # elif hasattr(module, "register_tool"): # logger.info("Registering tool via %s.register_tool()", module_info.name) # module.register_tool(mcp, auth) @@ -60,16 +62,12 @@ def _discover_and_register( logger.debug("No register function in %s, skipping.", module_info.name) -def register_all_mcp(mcp: FastMCP, auth: APIRouter): +async def register_all_mcp(mcp: FastMCP, auth: APIRouter): """ Auto-discover and register all MCP tools, prompts, resources, and proxies. - - Each module should have one of: - - register_tool(mcp, auth) - - register_prompt(mcp) - - register_resource(mcp) - - register_proxies(mcp, auth) - - register(mcp, auth) # generic """ - _discover_and_register("server.mcp.tools", mcp=mcp, auth=auth) - _discover_and_register("server.mcp.proxies", mcp=mcp) + logger.info("Starting Registering MCP Components") + #await _discover_and_register("server.mcp.tools", mcp=mcp, auth=auth) + await _discover_and_register("server.mcp.proxies", mcp=mcp) + # await _discover_and_register("server.mcp.prompts", mcp=mcp) + logger.info("Finished Registering MCP Components") \ No newline at end of file diff --git a/src/server/mcp/prompts/optimizer.py b/src/server/mcp/prompts/optimizer.py new file mode 100644 index 00000000..52e03226 --- /dev/null +++ b/src/server/mcp/prompts/optimizer.py @@ -0,0 +1,21 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. +""" + +# pylint: disable=unused-argument +# spell-checker:ignore fastmcp +from fastmcp.prompts.prompt import PromptMessage, TextContent + + +# Basic prompt returning a string (converted to user message automatically) +async def register(mcp): + """Register Out-of-Box Prompts""" + optimizer_tags = {"source", "optimizer"} + + @mcp.prompt(name="basic-example-chatbot", tags=optimizer_tags) + def basic_example() -> PromptMessage: + """Basic system prompt for chatbot.""" + + content = "You are a friendly, helpful assistant." + return PromptMessage(role="system", content=TextContent(type="text", text=content)) diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py index 2d857ce6..d587b4fc 100644 --- a/src/server/mcp/proxies/sqlcl.py +++ b/src/server/mcp/proxies/sqlcl.py @@ -2,8 +2,8 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" +# spell-checker:ignore sqlcl fastmcp connmgr noupdates savepwd -# spell-checker:ignore sqlcl fastmcp import os import shutil import subprocess @@ -14,7 +14,7 @@ logger = logging_config.logging.getLogger("mcp.proxies.sqlcl") -def register(mcp): +async def register(mcp): """Register the SQLcl MCP Server as Local (via Proxy)""" tool_name = "SQLclProxy" sqlcl_binary = shutil.which("sql") @@ -31,8 +31,7 @@ def register(mcp): } } } - - databases = core_databases.get_databases() + databases = core_databases.get_databases(validate=False) for database in databases: # Start sql in no-login mode try: @@ -59,8 +58,9 @@ def register(mcp): logger.error("Failed to create connection store: %s", ex) except Exception as ex: logger.error("Unexpected error creating connection store: %s", ex) + # Create a proxy to the configured server (auto-creates ProxyClient) - mcp_proxy = mcp.as_proxy(config, name=tool_name) - mcp.mount(mcp_proxy) + proxy = mcp.as_proxy(config, name=tool_name) + mcp.mount(proxy) else: logger.warning("Not enabling SQLcl MCP server, sqlcl not found in PATH.") diff --git a/src/server/mcp/tools/say_hello.py b/src/server/mcp/tools/say_hello.py index f77888cb..9305d26c 100644 --- a/src/server/mcp/tools/say_hello.py +++ b/src/server/mcp/tools/say_hello.py @@ -1,4 +1,4 @@ -def register(mcp, auth): +async def register(mcp, auth): @mcp.tool() @auth.get("/hello", operation_id="say_hello") def greet(name: str = "World") -> str: diff --git a/src/server/patches/litellm_patch.py b/src/server/patches/litellm_patch.py index 26cde838..89251046 100644 --- a/src/server/patches/litellm_patch.py +++ b/src/server/patches/litellm_patch.py @@ -40,7 +40,9 @@ def custom_transform_response( api_key: Optional[str] = None, json_mode: Optional[bool] = None, ): - """Custom transform response from .venv/lib/python3.11/site-packages/litellm/llms/ollama/completion/transformation.py""" + """Custom transform response from + .venv/lib/python3.11/site-packages/litellm/llms/ollama/completion/transformation.py + """ logger.info("Custom transform_response is running") response_json = raw_response.json() diff --git a/tests/client/content/config/test_databases.py b/tests/client/content/config/tabs/test_databases.py similarity index 99% rename from tests/client/content/config/test_databases.py rename to tests/client/content/config/tabs/test_databases.py index eafb1a24..f99f7cfd 100644 --- a/tests/client/content/config/test_databases.py +++ b/tests/client/content/config/tabs/test_databases.py @@ -18,7 +18,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/config/databases.py" + ST_FILE = "../src/client/content/config/tabs/databases.py" def test_missing_details(self, app_server, app_test): """Submits with missing required inputs""" diff --git a/tests/client/content/config/test_models.py b/tests/client/content/config/tabs/test_models.py similarity index 95% rename from tests/client/content/config/test_models.py rename to tests/client/content/config/tabs/test_models.py index 0f40f2c3..78be0545 100644 --- a/tests/client/content/config/test_models.py +++ b/tests/client/content/config/tabs/test_models.py @@ -12,7 +12,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/config/models.py" + ST_FILE = "../src/client/content/config/tabs/models.py" def test_model_tables(self, app_server, app_test): """Test that the model tables are setup""" diff --git a/tests/client/content/config/test_oci.py 
b/tests/client/content/config/tabs/test_oci.py similarity index 99% rename from tests/client/content/config/test_oci.py rename to tests/client/content/config/tabs/test_oci.py index aff3323f..aecf2d5c 100644 --- a/tests/client/content/config/test_oci.py +++ b/tests/client/content/config/tabs/test_oci.py @@ -46,7 +46,7 @@ def _mock_server_get_namespace(): class TestStreamlit: """Test the Streamlit UI""" - ST_FILE = "../src/client/content/config/oci.py" + ST_FILE = "../src/client/content/config/tabs/oci.py" def test_initialise_streamlit_no_env(self, app_server, app_test): """Initialisation of streamlit without any OCI environment""" diff --git a/tests/client/content/config/test_settings.py b/tests/client/content/config/tabs/test_settings.py similarity index 97% rename from tests/client/content/config/test_settings.py rename to tests/client/content/config/tabs/test_settings.py index a22c940b..dbce7da3 100644 --- a/tests/client/content/config/test_settings.py +++ b/tests/client/content/config/tabs/test_settings.py @@ -13,7 +13,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/config/settings.py" + ST_FILE = "../src/client/content/config/tabs/settings.py" def test_settings_display(self, app_server, app_test): """Test that settings are displayed correctly""" @@ -114,7 +114,7 @@ def test_compare_with_uploaded_json(self, app_server, app_test): } # Import the original function to test directly - from client.content.config.settings import compare_settings + from client.content.config.tabs.settings import compare_settings # Call the function directly differences = compare_settings(at.session_state, uploaded_settings) diff --git a/tests/client/content/test_st_footer.py b/tests/client/content/test_st_footer.py index 1e7e1a91..4bc0a790 100644 --- a/tests/client/content/test_st_footer.py +++ b/tests/client/content/test_st_footer.py @@ -5,8 +5,8 @@ # spell-checker: disable # pylint: disable=import-error -from client.utils.st_footer import render_chat_footer, render_models_footer import streamlit.components.v1 as components +from client.utils.st_footer import render_chat_footer ############################################################################# @@ -35,22 +35,22 @@ def mock_html(html, height): # Run the footer rendering render_chat_footer() - def test_models_page_disclaimer(self, app_server, app_test, monkeypatch): - """Verify disclaimer appears on models page""" - assert app_server is not None + # def test_models_page_disclaimer(self, app_server, app_test, monkeypatch): + # """Verify disclaimer appears on models page""" + # assert app_server is not None - # Mock components.html to capture rendered content - def mock_html(html, height): - assert "LLMs can make mistakes. Always verify important information." in html + # # Mock components.html to capture rendered content + # def mock_html(html, height): + # assert "LLMs can make mistakes. Always verify important information." 
in html - monkeypatch.setattr(components, "html", mock_html) + # monkeypatch.setattr(components, "html", mock_html) - # Initialize app_test and run component - at = app_test(self.ST_FILE) - at = at.run() + # # Initialize app_test and run component + # at = app_test(self.ST_FILE) + # at = at.run() - # Run the models footer rendering - render_models_footer() + # # Run the models footer rendering + # render_models_footer() def test_disclaimer_absence_on_other_pages(self, app_server, app_test, monkeypatch): """Verify disclaimer doesn't appear on non-chat/non-models pages""" diff --git a/tests/client/content/tools/test_prompt_eng.py b/tests/client/content/tools/tabs/test_prompt_eng.py similarity index 97% rename from tests/client/content/tools/test_prompt_eng.py rename to tests/client/content/tools/tabs/test_prompt_eng.py index d8b3929d..b6b8376c 100644 --- a/tests/client/content/tools/test_prompt_eng.py +++ b/tests/client/content/tools/tabs/test_prompt_eng.py @@ -13,7 +13,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/tools/prompt_eng.py" + ST_FILE = "../src/client/content/tools/tabs/prompt_eng.py" def test_change_sys(self, app_server, app_test): """Change the Current System Prompt""" diff --git a/tests/client/content/tools/test_split_embed.py b/tests/client/content/tools/tabs/test_split_embed.py similarity index 97% rename from tests/client/content/tools/test_split_embed.py rename to tests/client/content/tools/tabs/test_split_embed.py index dd96b9b2..2f1af5bc 100644 --- a/tests/client/content/tools/test_split_embed.py +++ b/tests/client/content/tools/tabs/test_split_embed.py @@ -7,7 +7,6 @@ from unittest.mock import patch import pandas as pd -from client.utils.st_common import state_configs_lookup ############################################################################# @@ -17,7 +16,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File path - ST_FILE = "../src/client/content/tools/split_embed.py" + ST_FILE = "../src/client/content/tools/tabs/split_embed.py" def test_initialization(self, app_server, app_test, monkeypatch): """Test initialization of the split_embed component""" @@ -324,10 +323,10 @@ def mock_files_data_frame(objects, process=False): data = {"File": objects, "Process": [process] * len(objects)} return pd.DataFrame(data) - monkeypatch.setattr("client.content.tools.split_embed.files_data_frame", mock_files_data_frame) + monkeypatch.setattr("client.content.tools.tabs.split_embed.files_data_frame", mock_files_data_frame) # Mock get_compartments function - monkeypatch.setattr("client.content.tools.split_embed.get_compartments", lambda: mock_compartments) + monkeypatch.setattr("client.content.tools.tabs.split_embed.get_compartments", lambda: mock_compartments) # Initialize app_test at = app_test(self.ST_FILE) diff --git a/tests/conftest.py b/tests/conftest.py index 3897c4a7..264e4450 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,6 +8,7 @@ # pylint: disable=import-outside-toplevel import os +import asyncio # This contains all the environment variables we consume on startup (add as required) # Used to clear testing environment from users env; Do before any additional imports @@ -72,7 +73,7 @@ def client(): # Lazy Load from launch_server import create_app - app = create_app() + app = asyncio.run(create_app()) return TestClient(app) From 1ca0c0adf8daa5b79809aea7a84c21e91cf0b8fa Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Tue, 19 Aug 2025 12:06:23 +0100 Subject: [PATCH 10/28] Initial MCP 
Settings Tab --- src/client/content/config/config.py | 4 +- src/client/content/config/tabs/mcp.py | 171 ++++++++++++++++------- src/client/content/config/tabs/models.py | 2 +- src/client/utils/st_common.py | 5 +- src/server/mcp/__init__.py | 2 +- src/server/mcp/clients/sqlcl.py | 31 ---- src/server/mcp/proxies/sqlcl.py | 5 +- src/server/mcp/tools/say_hello.py | 2 +- 8 files changed, 136 insertions(+), 86 deletions(-) delete mode 100644 src/server/mcp/clients/sqlcl.py diff --git a/src/client/content/config/config.py b/src/client/content/config/config.py index ba954ebe..4c7ccab1 100644 --- a/src/client/content/config/config.py +++ b/src/client/content/config/config.py @@ -11,7 +11,7 @@ from client.content.config.tabs.oci import get_oci, display_oci from client.content.config.tabs.databases import get_databases, display_databases from client.content.config.tabs.models import get_models, display_models -from client.content.config.tabs.mcp import get_mcp_tools, display_mcp +from client.content.config.tabs.mcp import get_mcp, display_mcp def main() -> None: @@ -55,7 +55,7 @@ def main() -> None: display_oci() tab_index += 1 if not state.disabled["mcp_cfg"]: - get_mcp_tools() + get_mcp() with tabs[tab_index]: display_mcp() tab_index += 1 diff --git a/src/client/content/config/tabs/mcp.py b/src/client/content/config/tabs/mcp.py index 7858449c..253097f6 100644 --- a/src/client/content/config/tabs/mcp.py +++ b/src/client/content/config/tabs/mcp.py @@ -2,11 +2,13 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# spell-checker:ignore selectbox healthz import streamlit as st from streamlit import session_state as state import client.utils.api_call as api_call +import client.utils.st_common as st_common import common.logging_config as logging_config @@ -26,52 +28,99 @@ def get_mcp_status() -> dict: return {} -def get_mcp_tools(force: bool = False) -> list[dict]: - """Get MCP Tools from API Server""" - if force or "mcp_tools" not in state or not state.mcp_tools: - try: - logger.info("Refreshing state.mcp_tools") - state.mcp_tools = api_call.get(endpoint="v1/mcp/tools") - except api_call.ApiError as ex: - logger.error("Unable to populate state.mcp_tools: %s", ex) - state.mcp_tools = {} - - -# @st.cache_data(show_spinner="Connecting to MCP Backend...", ttl=60) -# def get_server_capabilities(fastapi_base_url): -# """Fetches the lists of tools and resources from the FastAPI backend.""" -# try: -# # Get API key from environment or generate one -# api_key = os.getenv("API_SERVER_KEY") -# headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - -# # First check if MCP is enabled and initialized -# status_response = requests.get(f"{fastapi_base_url}/v1/mcp/status", headers=headers) -# if status_response.status_code == 200: -# status = status_response.json() -# if not status.get("enabled", False): -# st.warning("MCP is not enabled. Please enable it in the configuration.") -# return {"error": "MCP not enabled"}, {"error": "MCP not enabled"}, {"error": "MCP not enabled"} -# if not status.get("initialized", False): -# st.info("MCP is enabled but not yet initialized. 
Please select a model first.") -# return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} - -# tools_response = requests.get(f"{fastapi_base_url}/v1/mcp/tools", headers=headers) -# tools_response.raise_for_status() -# tools = tools_response.json() - -# resources_response = requests.get(f"{fastapi_base_url}/v1/mcp/resources", headers=headers) -# resources_response.raise_for_status() -# resources = resources_response.json() - -# prompts_response = requests.get(f"{fastapi_base_url}/v1/mcp/prompts", headers=headers) -# prompts_response.raise_for_status() -# prompts = prompts_response.json() - -# return tools, resources, prompts -# except requests.exceptions.RequestException as e: -# st.error(f"Could not connect to the MCP backend at {fastapi_base_url}. Is it running? Error: {e}") -# return {"tools": []}, {"static": [], "dynamic": []}, {"prompts": []} +def get_mcp(force: bool = False) -> list[dict]: + """Get MCP configs from API Server""" + if force or "mcp_configs" not in state or not state.mcp_configs: + logger.info("Refreshing state.mcp_configs") + endpoints = { + "tools": "v1/mcp/tools", + "prompts": "v1/mcp/prompts", + "resources": "v1/mcp/resources", + } + results = {} + + for key, endpoint in endpoints.items(): + try: + results[key] = api_call.get(endpoint=endpoint) + except api_call.ApiError as ex: + logger.error("Unable to get %s: %s", key, ex) + results[key] = {} + + state.mcp_configs = results + + +def extract_servers() -> list: + """Get a list of distinct MCP servers (by prefix)""" + prefixes = set() + + for _, items in state.mcp_configs.items(): + for item in items or []: # handle None safely + name = item.get("name") + if name and "_" in name: + prefix = name.split("_", 1)[0] + prefixes.add(prefix) + + mcp_servers = sorted(prefixes) + + if "optimizer" in mcp_servers: + mcp_servers.remove("optimizer") + mcp_servers.insert(0, "optimizer") + + return mcp_servers + + +@st.dialog(title="Details", width="large") +def mcp_details(mcp_server: str, mcp_type: str, mcp_name: str) -> None: + """MCP Dialog Box""" + st.header(f"{mcp_name} - MCP server: {mcp_server}") + config = next((t for t in state.mcp_configs[mcp_type] if t.get("name") == f"{mcp_server}_{mcp_name}"), None) + if config.get("description"): + st.code(config["description"], wrap_lines=True, height="content") + if config.get("inputSchema"): + st.subheader("inputSchema", divider="red") + properties = config["inputSchema"].get("properties", {}) + required_fields = set(config["inputSchema"].get("required", [])) + for name, prop in properties.items(): + req = '(required)' if name in required_fields else "" + html = f""" +

+                <div style="margin: 0 0 0.75em 1em">
+                    <strong>{name}</strong> {req}<br/>
+                    &nbsp;&nbsp;• Description: {prop.get("description", "")}<br/>
+                    &nbsp;&nbsp;• Type: {prop.get("type", "any")}<br/>
+                    &nbsp;&nbsp;• Default: {prop.get("default", "None")}<br/>
+                </div>
+ """ + st.html(html) + if config.get("outputSchema"): + st.subheader("outputSchema", divider="red") + if config.get("arguments"): + st.subheader("arguments", divider="red") + if config.get("annotations"): + st.subheader("annotations", divider="red") + if config.get("meta"): + st.subheader("meta", divider="red") + + +def render_configs(mcp_server: str, mcp_type: str, configs: list) -> None: + """Render rows of the MCP type""" + data_col_widths = [0.8, 0.2] + table_col_format = st.columns(data_col_widths, vertical_alignment="center") + col1, col2 = table_col_format + col1.markdown("Name", unsafe_allow_html=True) + col2.markdown("​") + for mcp_name in configs: + col1.text_input( + "Name", + value=mcp_name, + label_visibility="collapsed", + disabled=True, + ) + col2.button( + "Details", + on_click=mcp_details, + key=f"{mcp_server}_{mcp_name}_details", + kwargs=dict(mcp_server=mcp_server, mcp_type=mcp_type, mcp_name=mcp_name), + ) ############################################################################# @@ -81,13 +130,39 @@ def display_mcp() -> None: """Streamlit GUI""" st.header("Model Context Protocol", divider="red") try: - get_mcp_tools() + get_mcp() except api_call.ApiError: st.stop() mcp_status = get_mcp_status() if mcp_status.get("status") == "ready": st.write(f"The {mcp_status['name']} is running. Version: {mcp_status['version']}") - st.write(state.mcp_tools) + + selected_mcp_server = st.selectbox( + "MCP Server:", + options=extract_servers(), + # index=list(database_lookup.keys()).index(state.client_settings["database"]["alias"]), + key="selected_mcp_server", + # on_change=st_common.update_client_settings("database"), + ) + if state.mcp_configs["tools"]: + tools_lookup = st_common.state_configs_lookup("mcp_configs", "name", "tools") + mcp_tools = [key.split("_", 1)[1] for key in tools_lookup if key.startswith(f"{selected_mcp_server}_")] + if mcp_tools: + st.subheader("Tools", divider="red") + render_configs(selected_mcp_server, "tools", mcp_tools) + if state.mcp_configs["prompts"]: + prompts_lookup = st_common.state_configs_lookup("mcp_configs", "name", "prompts") + mcp_prompts = [key.split("_", 1)[1] for key in prompts_lookup if key.startswith(f"{selected_mcp_server}_")] + if mcp_prompts: + st.subheader("Prompts", divider="red") + render_configs(selected_mcp_server, "prompts", mcp_prompts) + if state.mcp_configs["resources"]: + st.subheader("Resources", divider="red") + resources_lookup = st_common.state_configs_lookup("mcp_configs", "name", "resources") + mcp_resources = [key.split("_", 1)[1] for key in resources_lookup if key.startswith(f"{selected_mcp_server}_")] + if mcp_resources: + st.subheader("Resources", divider="red") + render_configs(selected_mcp_server, "resources", mcp_resources) if __name__ == "__main__": diff --git a/src/client/content/config/tabs/models.py b/src/client/content/config/tabs/models.py index d2de9f2e..13a25769 100644 --- a/src/client/content/config/tabs/models.py +++ b/src/client/content/config/tabs/models.py @@ -200,7 +200,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = st.rerun() -def render_model_rows(model_type): +def render_model_rows(model_type: str) -> None: """Render rows of the models""" data_col_widths = [0.07, 0.23, 0.2, 0.28, 0.12] table_col_format = st.columns(data_col_widths, vertical_alignment="center") diff --git a/src/client/utils/st_common.py b/src/client/utils/st_common.py index 460d014c..e17fbb68 100644 --- a/src/client/utils/st_common.py +++ b/src/client/utils/st_common.py @@ -25,6 +25,7 @@ 
logger = logging_config.logging.getLogger("client.utils.st_common") + ############################################################################# # State Helpers ############################################################################# @@ -34,9 +35,11 @@ def clear_state_key(state_key: str) -> None: logger.debug("State cleared: %s", state_key) -def state_configs_lookup(state_configs_name: str, key: str) -> dict[str, dict[str, Any]]: +def state_configs_lookup(state_configs_name: str, key: str, section: str = None) -> dict[str, dict[str, Any]]: """Convert state. into a lookup based on key""" configs = getattr(state, state_configs_name) + if section: + configs = configs.get(section, []) return {config[key]: config for config in configs if key in config} diff --git a/src/server/mcp/__init__.py b/src/server/mcp/__init__.py index 9538798d..33f21577 100644 --- a/src/server/mcp/__init__.py +++ b/src/server/mcp/__init__.py @@ -67,7 +67,7 @@ async def register_all_mcp(mcp: FastMCP, auth: APIRouter): Auto-discover and register all MCP tools, prompts, resources, and proxies. """ logger.info("Starting Registering MCP Components") - #await _discover_and_register("server.mcp.tools", mcp=mcp, auth=auth) + await _discover_and_register("server.mcp.tools", mcp=mcp, auth=auth) await _discover_and_register("server.mcp.proxies", mcp=mcp) # await _discover_and_register("server.mcp.prompts", mcp=mcp) logger.info("Finished Registering MCP Components") \ No newline at end of file diff --git a/src/server/mcp/clients/sqlcl.py b/src/server/mcp/clients/sqlcl.py deleted file mode 100644 index 9a57f9f3..00000000 --- a/src/server/mcp/clients/sqlcl.py +++ /dev/null @@ -1,31 +0,0 @@ -import asyncio -from fastmcp import Client - -# Your configuration dictionary using multiple servers -config = { - "mcpServers": { - "sqlcl": { - "transport": "stdio", - "command": "sql", - "args": ["-mcp"] - } - } -} - -client = Client(config) - -async def main(): - async with client: - # Test connection - pong = await client.ping() - print("Ping response:", pong) - - # List available tools on the sqlcl server - tools = await client.list_tools() - print("Available tools:", tools) - - # Example: call a tool if you know its name - # result = await client.call_tool("your_tool_name", {"param": "value"}) - # print("Tool result:", result) - -asyncio.run(main()) diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py index d587b4fc..a8a67848 100644 --- a/src/server/mcp/proxies/sqlcl.py +++ b/src/server/mcp/proxies/sqlcl.py @@ -8,6 +8,8 @@ import shutil import subprocess +from fastmcp.server.proxy import ProxyToolManager + import server.api.core.databases as core_databases import common.logging_config as logging_config @@ -17,6 +19,7 @@ async def register(mcp): """Register the SQLcl MCP Server as Local (via Proxy)""" tool_name = "SQLclProxy" + sqlcl_binary = shutil.which("sql") if sqlcl_binary: env_vars = os.environ.copy() @@ -61,6 +64,6 @@ async def register(mcp): # Create a proxy to the configured server (auto-creates ProxyClient) proxy = mcp.as_proxy(config, name=tool_name) - mcp.mount(proxy) + mcp.mount(proxy, as_proxy=False, prefix="sqlcl") else: logger.warning("Not enabling SQLcl MCP server, sqlcl not found in PATH.") diff --git a/src/server/mcp/tools/say_hello.py b/src/server/mcp/tools/say_hello.py index 9305d26c..b581cd90 100644 --- a/src/server/mcp/tools/say_hello.py +++ b/src/server/mcp/tools/say_hello.py @@ -1,5 +1,5 @@ async def register(mcp, auth): - @mcp.tool() + @mcp.tool(name="optimizer_greet") 
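+    # NOTE: the "optimizer_" name prefix is load-bearing: the client's MCP config
+    # tab groups tools by the text before the first "_" (see extract_servers),
+    # and "optimizer" is pinned to the top of that server list.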
@auth.get("/hello", operation_id="say_hello") def greet(name: str = "World") -> str: """Say hello to someone.""" From e4d268d3d0239b46b9b497c8d0fc457f1e474bd2 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 15:21:58 +0100 Subject: [PATCH 11/28] Update support for models --- src/client/content/config/models.py | 44 +++--- src/common/help_text.py | 8 +- src/common/schema.py | 57 +++---- src/pyproject.toml | 10 ++ src/server/api/core/models.py | 2 +- src/server/api/utils/models.py | 170 ++++++++------------- src/server/api/utils/testbed.py | 6 +- src/server/api/v1/models.py | 17 +-- src/server/bootstrap/models.py | 37 ++--- tests/client/content/config/test_models.py | 2 +- tests/server/test_endpoints_models.py | 25 +-- 11 files changed, 162 insertions(+), 216 deletions(-) diff --git a/src/client/content/config/models.py b/src/client/content/config/models.py index 62882177..c5a0b02f 100644 --- a/src/client/content/config/models.py +++ b/src/client/content/config/models.py @@ -7,7 +7,7 @@ Session States Set: - model_configs: Stores all Model Configurations """ -# spell-checker:ignore selectbox +# spell-checker:ignore selectbox ocigenai import inspect from time import sleep @@ -55,9 +55,9 @@ def get_models(force: bool = False) -> None: @st.cache_data -def get_model_apis(model_type: str = None) -> list: - """Get list of valid APIs; function for Streamlit caching""" - response = api_call.get(endpoint="v1/models/api", params={"model_type": model_type}) +def get_model_providers() -> list: + """Get list of valid Providers; function for Streamlit caching""" + response = api_call.get(endpoint="v1/models/provider") return response @@ -93,7 +93,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = model_id = urllib.parse.quote(model_id, safe="") model = api_call.get(endpoint=f"v1/models/{model_id}") else: - model = {"id": "unset", "type": model_type, "api": "unset", "status": "CUSTOM"} + model = {"id": "unset", "type": model_type, "provider": "unset", "status": "CUSTOM"} with st.form("edit_model"): if action == "add": model["enabled"] = True # Server will update based on API URL Accessibility @@ -106,22 +106,22 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = key="add_model_id", disabled=action == "edit", ) - api_values = get_model_apis(model_type) - api_index = next((i for i, item in enumerate(api_values) if item == model["api"]), None) - disable_for_oci = model["api"] in ["ChatOCIGenAI", "OCIGenAIEmbeddings"] + providers = get_model_providers() + provider_index = next((i for i, item in enumerate(providers) if item == model["provider"]), None) + disable_for_oci = model["provider"] == "ocigenai" model["api"] = st.selectbox( - "API (Required):", - help=help_text.help_dict["model_api"], - placeholder="-- Choose the Model's API --", - index=api_index, - options=api_values, - key="add_model_api", + "Provider (Required):", + help=help_text.help_dict["model_provider"], + placeholder="-- Choose the Model's Provider --", + index=provider_index, + options=providers, + key="add_model_provider", disabled=action == "edit", ) model["url"] = st.text_input( - "API URL:", - help=help_text.help_dict["model_api_url"], - key="add_model_api_url", + "Provider URL:", + help=help_text.help_dict["model_url"], + key="add_model_url", value=model.get("url", ""), disabled=disable_for_oci ) @@ -209,8 +209,8 @@ def render_model_rows(model_type): col1, col2, col3, col4, col5 = table_col_format col1.markdown("​", help="Active", unsafe_allow_html=True) 
col2.markdown("**Model ID**", unsafe_allow_html=True) - col3.markdown("**API**", unsafe_allow_html=True) - col4.markdown("**API Server**", unsafe_allow_html=True) + col3.markdown("**Provider**", unsafe_allow_html=True) + col4.markdown("**Provider URL**", unsafe_allow_html=True) col5.markdown("​") for model in [m for m in state.model_configs if m.get("type") == model_type]: model_id = model["id"] @@ -228,9 +228,9 @@ def render_model_rows(model_type): disabled=True, ) col3.text_input( - "API", - value=model["api"], - key=f"{model_type}_{model_id}_api", + "Provider", + value=model["provider"], + key=f"{model_type}_{model_id}_provider", label_visibility="collapsed", disabled=True, ) diff --git a/src/common/help_text.py b/src/common/help_text.py index b08b1d41..6bab2a62 100644 --- a/src/common/help_text.py +++ b/src/common/help_text.py @@ -97,11 +97,11 @@ The official name of the model as per the model card. Misconfigured names will result in an error. """, - "model_api": """ - API of the model. If the API of the model is not listed here, try an CompatOpenAI one. - Open an issue if you'd like to request support for an unlisted model. + "model_provider": """ + Provider of the model. If the Provider of the model is not listed here, try openai_compat. + Open an issue if you'd like to request support for an unlisted model provider. """, - "model_api_url": """ + "model_url": """ API URL for accessing the model. """, "model_api_key": """ diff --git a/src/common/schema.py b/src/common/schema.py index b43bbf63..2cd6320b 100644 --- a/src/common/schema.py +++ b/src/common/schema.py @@ -3,6 +3,7 @@ Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ # spell-checker:ignore ollama hnsw mult ocid testset selectai explainsql showsql vector_search aioptimizer genai +# spell-checker:ignore deepseek groq huggingface mistralai ocigenai vertexai import time from typing import Optional, Literal, Union, get_args, Any @@ -18,22 +19,27 @@ DistanceMetrics = Literal["COSINE", "EUCLIDEAN_DISTANCE", "DOT_PRODUCT"] IndexTypes = Literal["HNSW", "IVF"] -# ModelAPIs -EmbedAPI = Literal[ - "OllamaEmbeddings", - "OCIGenAIEmbeddings", - "CompatOpenAIEmbeddings", - "OpenAIEmbeddings", - "CohereEmbeddings", - "HuggingFaceEndpointEmbeddings", -] -LlAPI = Literal[ - "ChatOllama", - "ChatOCIGenAI", - "CompatOpenAI", - "Perplexity", - "OpenAI", - "Cohere", +# Model Providers +ModelProviders = Literal[ + "oci", + "openai", + "openai_compatible", + "anthropic", + "azure_openai", + "azure_ai", + "google_vertexai", + "google_genai", + "bedrock", + "bedrock_converse", + "cohere", + "mistralai", + "huggingface", + "groq", + "ollama", + "google_anthropic_vertex", + "deepseek", + "xai", + "perplexity", ] @@ -145,24 +151,19 @@ class Model(ModelAccess, LanguageModelParameters, EmbeddingModelParameters): description="OpenAI Compatible Only", ) type: Literal["ll", "embed", "re-rank"] = Field(..., description="Type of Model.") - api: str = Field( - ..., min_length=1, description="API for Model.", examples=["ChatOllama", "OpenAI", "OpenAIEmbeddings"] - ) + provider: str = Field(..., min_length=1, description="Model Provider.", examples=["openai", "anthropic", "ollama"]) openai_compat: bool = Field(default=True, description="Is the API OpenAI compatible?") @model_validator(mode="after") - def check_api_matches_type(self): - """Validate valid API""" - ll_apis = get_args(LlAPI) - embed_apis = get_args(EmbedAPI) + def check_provider(self): + """Validate valid provider""" + providers = 
get_args(ModelProviders) - if not self.api or self.api == "unset": + if not self.provider or self.provider == "unset": return self - if self.type == "ll" and self.api not in ll_apis: - raise ValueError(f"API '{self.api}' is not valid for type 'll'. Must be one of: {ll_apis}") - if self.type == "embed" and self.api not in embed_apis: - raise ValueError(f"API '{self.api}' is not valid for type 'embed'. Must be one of: {embed_apis}") + if self.provider not in providers: + raise ValueError(f"Provider '{self.provider}' is not valid. Must be one of: {providers}") return self diff --git a/src/pyproject.toml b/src/pyproject.toml index 7650f9da..3b40af3d 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -28,11 +28,21 @@ server = [ "fastapi==0.116.1", "faiss-cpu==1.11.0.post1", "giskard==2.17.0", + "langchain-anthropic==0.3.19", + "langchain-azure-ai==0.1.5", + "langchain-aws==0.2.31", "langchain-cohere==0.4.5", "langchain-community==0.3.27", + "langchain-deepseek==0.1.4", + "langchain-google-genai==2.1.9", + "langchain-google-vertexai==2.0.28", + "langchain-groq==0.3.7", "langchain-huggingface==0.3.1", + "langchain-mistralai==0.2.11", "langchain-ollama==0.3.6", "langchain-openai==0.3.29", + "langchain-perplexity==0.1.2", + "langchain-xai==0.2.5", "langgraph==0.6.4", "litellm==1.75.3", "llama-index==0.13.1", diff --git a/src/server/api/core/models.py b/src/server/api/core/models.py index ee0d341b..5f289c56 100644 --- a/src/server/api/core/models.py +++ b/src/server/api/core/models.py @@ -75,7 +75,7 @@ def create_model(model: Model, check_url: bool = True) -> Model: if not model.openai_compat: openai_compat = next( - (model_config.openai_compat for model_config in model_objects if model_config.api == model.api), + (model_config.openai_compat for model_config in model_objects if model_config.provider == model.provider), False, ) model.openai_compat = openai_compat diff --git a/src/server/api/utils/models.py b/src/server/api/utils/models.py index 8c8b90a2..abf7d6fe 100644 --- a/src/server/api/utils/models.py +++ b/src/server/api/utils/models.py @@ -2,15 +2,14 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" -# spell-checker:ignore ollama pplx huggingface genai giskard litellm +# spell-checker:ignore ollama pplx huggingface genai giskard litellm ocigenai from openai import OpenAI from langchain_core.language_models.chat_models import BaseChatModel -from langchain_cohere import ChatCohere, CohereEmbeddings -from langchain_ollama import ChatOllama, OllamaEmbeddings -from langchain_openai import ChatOpenAI, OpenAIEmbeddings -from langchain_huggingface import HuggingFaceEndpointEmbeddings +from langchain.chat_models import init_chat_model +from langchain.embeddings import init_embeddings + from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI from langchain_community.embeddings.oci_generative_ai import OCIGenAIEmbeddings @@ -50,19 +49,18 @@ def create_genai_models(config: schema.OracleCloudSettings) -> list[schema.Model # Delete previously configured GenAI Models all_models = core_models.get_model() for model in all_models: - if any(x in model.api for x in ("ChatOCIGenAI", "OCIGenAIEmbeddings")): + if model.provider == "oci": core_models.delete_model(model.id) genai_models = [] for model in region_models: model_dict = {} + model_dict["provider"] = "oci" if "CHAT" in model["capabilities"]: model_dict["type"] = "ll" - model_dict["api"] = "ChatOCIGenAI" model_dict["context_length"] = 131072 elif "TEXT_EMBEDDINGS" in model["capabilities"]: model_dict["type"] = "embed" - model_dict["api"] = "OCIGenAIEmbeddings" model_dict["max_chunk_size"] = 8192 else: continue @@ -84,112 +82,64 @@ def create_genai_models(config: schema.OracleCloudSettings) -> list[schema.Model def get_client(model_config: dict, oci_config: schema.OracleCloudSettings, giskard: bool = False) -> BaseChatModel: """Retrieve model configuration""" - - def get_key_value( - models: list[schema.ModelAccess], - model_id: schema.ModelIdType, - model_key: str, - ) -> str: - """Return a models key value of its configuration""" - for model in models: - if model.id == model_id: - return getattr(model, model_key, None) + logger.debug("Model Client: %s; OCI Config: %s; Giskard: %s", model_config, oci_config, giskard) + try: + defined_model = core_models.get_model( + model_id=model_config["model"], + include_disabled=False, + ).model_dump() + except core_models.UnknownModelError: return None - logger.debug("Model Config: %s; OCI Config: %s; Giskard: %s", model_config, oci_config, giskard) - all_models = core_models.get_model() - - model_id = model_config["model"] - model_api = get_key_value(all_models, model_id, "api") - model_api_key = get_key_value(all_models, model_id, "api_key") - model_url = get_key_value(all_models, model_id, "url") - - # Determine if configuring an embedding model - try: - embedding = model_config["enabled"] - except (AttributeError, KeyError): - embedding = False - - # schema.Model Classes - model_classes = {} - if not embedding: - logger.debug("Configuring LL Model") - ll_common_params = {} - for key in [ - "temperature", - "top_p", - "frequency_penalty", - "presence_penalty", - "max_completion_tokens", - "streaming", - ]: - try: - logger.debug("--> Setting: %s; was sent %s", key, model_config[key]) - ll_common_params[key] = model_config[key] or get_key_value(all_models, model_id, key) - except KeyError: - # Mainly for embeddings - continue - logger.debug("LL Model Parameters: %s", ll_common_params) - model_classes = { - "OpenAI": lambda: ChatOpenAI(model=model_id, api_key=model_api_key, **ll_common_params), - "CompatOpenAI": lambda: ChatOpenAI( - model=model_id, base_url=model_url, 
api_key=model_api_key or "api_compat", **ll_common_params - ), - "Cohere": lambda: ChatCohere(model=model_id, cohere_api_key=model_api_key, **ll_common_params), - "ChatOllama": lambda: ChatOllama( - model=model_id, - base_url=model_url, - **ll_common_params, - num_predict=ll_common_params["max_completion_tokens"], - ), - "Perplexity": lambda: ChatOpenAI( - model=model_id, base_url=model_url, api_key=model_api_key, **ll_common_params - ), - "ChatOCIGenAI": lambda oci_cfg=oci_config: ChatOCIGenAI( - model_id=model_id, - client=util_oci.init_genai_client(oci_cfg), - compartment_id=oci_cfg.genai_compartment_id, + full_model_config = {**defined_model, **{k: v for k, v in model_config.items() if v is not None}} + client = None + provider = full_model_config["provider"] + if full_model_config["type"] == "ll" and not giskard: + common_params = { + k: full_model_config.get(k) for k in ["frequency_penalty", "presence_penalty", "top_p", "streaming"] + } + if provider != "oci": + client = init_chat_model( + model_provider="openai" if provider == "openai_compatible" else provider, + model=full_model_config["id"], + base_url=full_model_config["url"], + api_key=full_model_config["api_key"] or "not_required", + temperature=full_model_config["temperature"], + max_tokens=full_model_config["max_completion_tokens"], + **common_params, + ) + else: + client = ChatOCIGenAI( + model_id=full_model_config["id"], + client=util_oci.init_genai_client(oci_config), + compartment_id=oci_config.genai_compartment_id, model_kwargs={ (k if k != "max_completion_tokens" else "max_tokens"): v - for k, v in ll_common_params.items() + for k, v in common_params.items() if k not in {"streaming"} }, - ), - } - if embedding: - logger.debug("Configuring Embed Model") - model_classes = { - "OpenAIEmbeddings": lambda: OpenAIEmbeddings(model=model_id, api_key=model_api_key), - "CompatOpenAIEmbeddings": lambda: OpenAIEmbeddings( - model=model_id, - base_url=model_url, - api_key=model_api_key or "api_compat", - check_embedding_ctx_length=False, - ), - "CohereEmbeddings": lambda: CohereEmbeddings(model=model_id, cohere_api_key=model_api_key), - "OllamaEmbeddings": lambda: OllamaEmbeddings(model=model_id, base_url=model_url), - "HuggingFaceEndpointEmbeddings": lambda: HuggingFaceEndpointEmbeddings(model=model_url), - "OCIGenAIEmbeddings": lambda oci_cfg=oci_config: OCIGenAIEmbeddings( - model_id=model_id, - client=util_oci.init_genai_client(oci_cfg), - compartment=oci_cfg.compartment, - ), - } - - try: - if giskard: - logger.debug("Creating Giskard Client for %s in %s", model_api, model_classes) - giskard_key = model_api_key or "giskard" - if giskard_key == "giskard" and model_api == "CompatOpenAI": - _client = OpenAI(api_key=giskard_key, base_url=f"{model_url}") - else: - _client = OpenAI(api_key=giskard_key, base_url=f"{model_url}/v1") - client = OpenAIClient(model=model_id, client=_client) + ) + + if full_model_config["type"] == "embed" and not giskard: + if provider != "oci": + client = init_embeddings( + provider="openai" if provider == "openai_compatible" else provider, + model=full_model_config["id"], + base_url=full_model_config["url"], + api_key=full_model_config["api_key"] or "not_required", + ) else: - logger.debug("Searching for %s in %s", model_api, model_classes) - client = model_classes[model_api]() - logger.debug("Model Client: %s", client) - return client - except (UnboundLocalError, KeyError): - logger.error("Unable to find client; expect trouble!") - return None + client = OCIGenAIEmbeddings( + 
model_id=full_model_config["id"], + client=util_oci.init_genai_client(oci_config), + compartment_id=oci_config.genai_compartment_id, + ) + + if giskard: + logger.debug("Creating Giskard Client") + giskard_key = full_model_config["api_key"] or "giskard" + _client = OpenAI(api_key=giskard_key, base_url=full_model_config["url"]) + client = OpenAIClient(model=full_model_config["id"], client=_client) + + logger.debug("Configured Client: %s", vars(client)) + return client diff --git a/src/server/api/utils/testbed.py b/src/server/api/utils/testbed.py index 41341216..539da87c 100644 --- a/src/server/api/utils/testbed.py +++ b/src/server/api/utils/testbed.py @@ -242,18 +242,18 @@ def build_knowledge_base( def configure_and_set_model(client_model): """Configure and set Model for TestSet Generation (uses litellm)""" model_id, disable_structured_output, params = None, False, None - if client_model.api in ("CompatOpenAI", "CompatOpenAIEmbeddings"): + if client_model.provider == "openai_compatible": model_id, params = ( f"openai/{client_model.id}", {"api_base": client_model.url, "api_key": client_model.api_key or "api_compat"}, ) - elif client_model.api in ("ChatOllama", "OllamaEmbeddings"): + elif client_model.provider == "ollama": model_id, disable_structured_output, params = ( f"ollama/{client_model.id}", True, {"api_base": client_model.url}, ) - elif client_model.api == "Perplexity": + elif client_model.provider == "perplexity": model_id, params = f"perplexity/{client_model.id}", {"api_key": client_model.api_key} else: model_id, params = f"openai/{client_model.id}", {"api_key": client_model.api_key} diff --git a/src/server/api/v1/models.py b/src/server/api/v1/models.py index d7104ebe..fa6fd77f 100644 --- a/src/server/api/v1/models.py +++ b/src/server/api/v1/models.py @@ -20,21 +20,14 @@ @auth.get( - "/api", - description="Get support model APIs", + "/provider", + description="Get support model providers", response_model=list, ) -async def models_list_api( - model_type: Optional[schema.ModelTypeType] = Query(None), -) -> list[schema.Model]: +async def models_list_provider() -> list[schema.Model]: """List all models APIs after applying filters if specified""" - logger.debug("Received models_list_api - type: %s", model_type) - if model_type == "ll": - return list(get_args(schema.LlAPI)) - elif model_type == "embed": - return list(get_args(schema.EmbedAPI)) - else: - return list() + logger.debug("Received models_list_provider") + return list(get_args(schema.ModelProviders)) @auth.get( diff --git a/src/server/bootstrap/models.py b/src/server/bootstrap/models.py index 96dfc7b1..e135b66e 100644 --- a/src/server/bootstrap/models.py +++ b/src/server/bootstrap/models.py @@ -6,6 +6,7 @@ added via the APIs """ # spell-checker:ignore configfile genai ollama pplx docos mxbai nomic thenlper +# spell-checker:ignore huggingface import os @@ -26,7 +27,7 @@ def main() -> list[Model]: "id": "command-r", "enabled": os.getenv("COHERE_API_KEY") is not None, "type": "ll", - "api": "Cohere", + "provider": "cohere", "api_key": os.environ.get("COHERE_API_KEY", default=""), "openai_compat": False, "url": "https://api.cohere.ai", @@ -39,10 +40,10 @@ def main() -> list[Model]: "id": "gpt-4o-mini", "enabled": os.getenv("OPENAI_API_KEY") is not None, "type": "ll", - "api": "OpenAI", + "provider": "openai", "api_key": os.environ.get("OPENAI_API_KEY", default=""), "openai_compat": True, - "url": "https://api.openai.com", + "url": "https://api.openai.com/v1", "context_length": 127072, "temperature": 1.0, "max_completion_tokens": 
4096, @@ -52,7 +53,7 @@ def main() -> list[Model]: "id": "sonar", "enabled": os.getenv("PPLX_API_KEY") is not None, "type": "ll", - "api": "Perplexity", + "provider": "perplexity", "api_key": os.environ.get("PPLX_API_KEY", default=""), "openai_compat": True, "url": "https://api.perplexity.ai", @@ -65,7 +66,7 @@ def main() -> list[Model]: "id": "phi-4", "enabled": False, "type": "ll", - "api": "CompatOpenAI", + "provider": "openai_compatible", "api_key": "", "openai_compat": True, "url": "http://localhost:1234/v1", @@ -78,7 +79,7 @@ def main() -> list[Model]: "id": "gpt-oss:20b", "enabled": os.getenv("ON_PREM_OLLAMA_URL") is not None, "type": "ll", - "api": "ChatOllama", + "provider": "ollama", "api_key": "", "openai_compat": True, "url": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"), @@ -92,7 +93,7 @@ def main() -> list[Model]: "id": "llama3.1", "enabled": os.getenv("ON_PREM_OLLAMA_URL") is not None, "type": "ll", - "api": "ChatOllama", + "provider": "ollama", "api_key": "", "openai_compat": True, "url": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"), @@ -105,7 +106,7 @@ def main() -> list[Model]: "id": "thenlper/gte-base", "enabled": os.getenv("ON_PREM_HF_URL") is not None, "type": "embed", - "api": "HuggingFaceEndpointEmbeddings", + "provider": "huggingface", "url": os.environ.get("ON_PREM_HF_URL", default="http://127.0.0.1:8080"), "api_key": "", "openai_compat": True, @@ -115,8 +116,8 @@ def main() -> list[Model]: "id": "text-embedding-3-small", "enabled": os.getenv("OPENAI_API_KEY") is not None, "type": "embed", - "api": "OpenAIEmbeddings", - "url": "https://api.openai.com", + "provider": "openai_compatible", + "url": "https://api.openai.com/v1", "api_key": os.environ.get("OPENAI_API_KEY", default=""), "openai_compat": True, "max_chunk_size": 8191, @@ -125,7 +126,7 @@ def main() -> list[Model]: "id": "embed-english-light-v3.0", "enabled": os.getenv("COHERE_API_KEY") is not None, "type": "embed", - "api": "CohereEmbeddings", + "provider": "cohere", "url": "https://api.cohere.ai", "api_key": os.environ.get("COHERE_API_KEY", default=""), "openai_compat": False, @@ -135,7 +136,7 @@ def main() -> list[Model]: "id": "text-embedding-nomic-embed-text-v1.5", "enabled": False, "type": "embed", - "api": "CompatOpenAIEmbeddings", + "provider": "openai_compatible", "url": "http://localhost:1234/v1", "api_key": "", "openai_compat": True, @@ -146,7 +147,7 @@ def main() -> list[Model]: "id": "mxbai-embed-large", "enabled": os.getenv("ON_PREM_OLLAMA_URL") is not None, "type": "embed", - "api": "OllamaEmbeddings", + "provider": "ollama", "url": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"), "api_key": "", "openai_compat": True, @@ -200,11 +201,11 @@ def values_differ(a, b): # Override with OS env vars (by API type) for model in models_list: - api = model.get("api", "") + provider = model.get("provider", "") model_id = model.get("id", "") overridden = False - if api == "Cohere" and os.getenv("COHERE_API_KEY"): + if provider == "cohere" and os.getenv("COHERE_API_KEY"): old_api_key = model.get("api_key", "") new_api_key = os.environ["COHERE_API_KEY"] if old_api_key != new_api_key: @@ -214,7 +215,7 @@ def values_differ(a, b): overridden = True model["enabled"] = True - elif api == "ChatOCIGenAI" and os.getenv("OCI_GENAI_SERVICE_ENDPOINT"): + elif provider == "oci" and os.getenv("OCI_GENAI_SERVICE_ENDPOINT"): old_url = model.get("url", "") new_url = os.environ["OCI_GENAI_SERVICE_ENDPOINT"] if old_url != new_url: @@ -225,7 +226,7 @@ def 
values_differ(a, b): overridden = True model["enabled"] = True - elif api == "ChatOllama" and os.getenv("ON_PREM_OLLAMA_URL"): + elif provider == "ollama" and os.getenv("ON_PREM_OLLAMA_URL"): old_url = model.get("url", "") new_url = os.environ["ON_PREM_OLLAMA_URL"] if old_url != new_url: @@ -234,7 +235,7 @@ def values_differ(a, b): overridden = True model["enabled"] = True - elif api == "HuggingFaceEndpointEmbeddings" and os.getenv("ON_PREM_HF_URL"): + elif provider == "huggingface" and os.getenv("ON_PREM_HF_URL"): old_url = model.get("url", "") new_url = os.environ["ON_PREM_HF_URL"] if old_url != new_url: diff --git a/tests/client/content/config/test_models.py b/tests/client/content/config/test_models.py index 0f40f2c3..22496099 100644 --- a/tests/client/content/config/test_models.py +++ b/tests/client/content/config/test_models.py @@ -21,7 +21,7 @@ def test_model_tables(self, app_server, app_test): assert at.session_state.model_configs is not None for model in at.session_state.model_configs: assert at.text_input(key=f"{model['type']}_{model['id']}_enabled").value == "⚪" - assert at.text_input(key=f"{model['type']}_{model['id']}_api").value == model["api"] + assert at.text_input(key=f"{model['type']}_{model['id']}_provider").value == model["provider"] assert at.text_input(key=f"{model['type']}_{model['id']}_server").value == model["url"] assert at.button(key=f"{model['type']}_{model['id']}_edit") is not None diff --git a/tests/server/test_endpoints_models.py b/tests/server/test_endpoints_models.py index a2b2703a..8bc12ade 100644 --- a/tests/server/test_endpoints_models.py +++ b/tests/server/test_endpoints_models.py @@ -6,7 +6,7 @@ from typing import get_args import pytest -from common.schema import LlAPI, EmbedAPI +from common.schema import ModelProviders ############################################################################# @@ -47,20 +47,11 @@ def test_endpoints(self, client, auth_headers, endpoint, api_method, auth_type, class TestEndpoints: """Test Endpoints""" - @pytest.mark.parametrize( - "model_type,models_list", - [ - pytest.param("ll", list(get_args(LlAPI)), id="list_ll_models"), - pytest.param("embed", list(get_args(EmbedAPI)), id="list_embed_models"), - pytest.param(None, [], id="list_no_models"), - ], - ) - def test_models_list_api(self, client, auth_headers, model_type, models_list): + def test_models_list_api(self, client, auth_headers): """Get a list of model APIs to use with tests""" - params = {"model_type": model_type} if model_type else {} - response = client.get("/v1/models/api", headers=auth_headers["valid_auth"], params=params) + response = client.get("/v1/models/provider", headers=auth_headers["valid_auth"]) assert response.status_code == 200 - assert sorted(response.json()) == sorted(models_list) + assert sorted(response.json()) == sorted(list(get_args(ModelProviders))) def test_models_get_before(self, client, auth_headers): """Retrieve each individual model""" @@ -114,10 +105,10 @@ def test_models_add_dupl(self, client, auth_headers): "id": "valid_ll_model", "enabled": True, "type": "ll", - "api": "OpenAI", + "provider": "openai", "api_key": "test-key", "openai_compat": True, - "url": "https://api.openai.com", + "url": "https://api.openai.com/v1", "context_length": 127072, "temperature": 1.0, "max_completion_tokens": 4096, @@ -141,7 +132,7 @@ def test_models_add_dupl(self, client, auth_headers): "id": "test_embed_model", "enabled": False, "type": "embed", - "api": "HuggingFaceEndpointEmbeddings", + "provider": "huggingface", "url": "http://127.0.0.1:8080", 
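                # Mirrors the huggingface bootstrap default (ON_PREM_HF_URL -> http://127.0.0.1:8080)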
"api_key": "", "openai_compat": True, @@ -156,7 +147,7 @@ def test_models_add_dupl(self, client, auth_headers): "id": "unreachable_url_model", "enabled": True, "type": "embed", - "api": "HuggingFaceEndpointEmbeddings", + "provider": "huggingface", "url": "http://127.0.0.1:112233", "api_key": "", "openai_compat": True, From 6378459924500b3f6cc670698ef26c13a12771ae Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 15:31:01 +0100 Subject: [PATCH 12/28] Update api to provider --- src/client/content/config/models.py | 2 +- src/client/content/config/settings.py | 8 +- src/client/mcp/rag/optimizer_utils/config.py | 90 +++++++++----------- 3 files changed, 46 insertions(+), 54 deletions(-) diff --git a/src/client/content/config/models.py b/src/client/content/config/models.py index c5a0b02f..01c91129 100644 --- a/src/client/content/config/models.py +++ b/src/client/content/config/models.py @@ -109,7 +109,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = providers = get_model_providers() provider_index = next((i for i, item in enumerate(providers) if item == model["provider"]), None) disable_for_oci = model["provider"] == "ocigenai" - model["api"] = st.selectbox( + model["provider"] = st.selectbox( "Provider (Required):", help=help_text.help_dict["model_provider"], placeholder="-- Choose the Model's Provider --", diff --git a/src/client/content/config/settings.py b/src/client/content/config/settings.py index d4402f05..538ab718 100644 --- a/src/client/content/config/settings.py +++ b/src/client/content/config/settings.py @@ -144,12 +144,12 @@ def spring_ai_conf_check(ll_model: dict, embed_model: dict) -> str: if not ll_model or not embed_model: return "hybrid" - ll_api = ll_model["api"] - embed_api = embed_model["api"] + ll_provider = ll_model["provider"] + embed_provider = embed_model["provider"] - if "OpenAI" in ll_api and "OpenAI" in embed_api: + if "openai" in ll_provider and "openai" in ll_provider: return "openai" - elif ll_api == "ChatOllama" and "Ollama" in embed_api: + elif ll_provider == "ollama" and "ollama" in embed_provider: return "ollama" return "hybrid" diff --git a/src/client/mcp/rag/optimizer_utils/config.py b/src/client/mcp/rag/optimizer_utils/config.py index 223fe6bf..ea6ca93b 100644 --- a/src/client/mcp/rag/optimizer_utils/config.py +++ b/src/client/mcp/rag/optimizer_utils/config.py @@ -16,73 +16,65 @@ import oracledb import logging + logging.basicConfig(level=logging.INFO) + def get_llm(data): logging.info("llm data:") logging.info(data["user_settings"]["ll_model"]["model"]) - llm={} + llm = {} llm_config = data["ll_model_config"][data["user_settings"]["ll_model"]["model"]] - api=llm_config["api"] - url=llm_config["url"] - api_key=llm_config["api_key"] - model=data["user_settings"]["ll_model"]["model"] - logging.info(f"CHAT_MODEL: {model} {api} {url} {api_key}") - if api == "ChatOllama": + provider = llm_config["provider"] + url = llm_config["url"] + api_key = llm_config["api_key"] + model = data["user_settings"]["ll_model"]["model"] + logging.info(f"CHAT_MODEL: {model} {provider} {url} {api_key}") + if provider == "ollama": # Initialize the LLM - llm = OllamaLLM( - model=model, - base_url=url - ) - elif api == "OpenAI": - - llm=llm = ChatOpenAI( - model=model, - api_key=api_key - ) + llm = OllamaLLM(model=model, base_url=url) + elif provider == "openai": + llm = llm = ChatOpenAI(model=model, api_key=api_key) return llm + def get_embeddings(data): - embeddings={} - model=data["user_settings"]["vector_search"]["model"] - 
api=data["embed_model_config"][model]["api"] - url=data["embed_model_config"][model]["url"] - api_key=data["embed_model_config"][model]["api_key"] - logging.info(f"EMBEDDINGS: {model} {api} {url} {api_key}") embeddings = {} - if api=="OllamaEmbeddings": - embeddings=OllamaEmbeddings( - model=model, - base_url=url) - elif api == "OpenAIEmbeddings": - logging.info("BEFORE create embbedding") - embeddings = OpenAIEmbeddings( - model=model, - api_key=api_key - ) - logging.info("AFTER create emebdding") + model = data["user_settings"]["vector_search"]["model"] + provider = data["embed_model_config"][model]["provider"] + url = data["embed_model_config"][model]["url"] + api_key = data["embed_model_config"][model]["api_key"] + logging.info(f"EMBEDDINGS: {model} {provider} {url} {api_key}") + embeddings = {} + if provider == "ollama": + embeddings = OllamaEmbeddings(model=model, base_url=url) + elif provider == "openai": + logging.info("BEFORE create embbedding") + embeddings = OpenAIEmbeddings(model=model, api_key=api_key) + logging.info("AFTER create emebdding") return embeddings -def get_vectorstore(data,embeddings): - - config=data["database_config"][data["user_settings"]["database"]["alias"]] + +def get_vectorstore(data, embeddings): + config = data["database_config"][data["user_settings"]["database"]["alias"]] logging.info(config) - conn23c = oracledb.connect(user=config["user"], - password=config["password"], dsn=config["dsn"]) - + conn23c = oracledb.connect(user=config["user"], password=config["password"], dsn=config["dsn"]) + logging.info("DB Connection successful!") - metric=data["user_settings"]["vector_search"]["distance_metric"] - - dist_strategy=DistanceStrategy.COSINE - if metric=="COSINE": - dist_strategy=DistanceStrategy.COSINE + metric = data["user_settings"]["vector_search"]["distance_metric"] + + dist_strategy = DistanceStrategy.COSINE + if metric == "COSINE": + dist_strategy = DistanceStrategy.COSINE elif metric == "EUCLIDEAN": - dist_strategy=DistanceStrategy.EUCLIDEAN - - a=data["user_settings"]["vector_search"]["vector_store"] + dist_strategy = DistanceStrategy.EUCLIDEAN + + a = data["user_settings"]["vector_search"]["vector_store"] logging.info(f"{a}") logging.info(f"BEFORE KNOWLEDGE BASE") logging.info(embeddings) - knowledge_base = OracleVS(conn23c, embeddings, data["user_settings"]["vector_search"]["vector_store"], dist_strategy) + knowledge_base = OracleVS( + conn23c, embeddings, data["user_settings"]["vector_search"]["vector_store"], dist_strategy + ) return knowledge_base From 4fac5a6d3f2fd4b04b3bd7f42c2177dccb4022a8 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 15:41:02 +0100 Subject: [PATCH 13/28] Update Client --- src/client/content/config/config.py | 12 +++++++----- src/client/content/config/tabs/databases.py | 2 +- src/client/content/config/tabs/models.py | 2 +- src/client/content/testbed.py | 2 +- src/client/content/tools/tabs/split_embed.py | 2 +- 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/client/content/config/config.py b/src/client/content/config/config.py index 4c7ccab1..98a1a8f1 100644 --- a/src/client/content/config/config.py +++ b/src/client/content/config/config.py @@ -16,6 +16,13 @@ def main() -> None: """Streamlit GUI""" + # Ensure all our configs exist + get_settings() + get_databases() + get_models() + get_oci() + get_mcp() + tabs_list = [] if not state.disabled["settings"]: tabs_list.append("💾 Settings") @@ -35,27 +42,22 @@ def main() -> None: # Map tab objects to content conditionally if not 
state.disabled["settings"]: - get_settings() with tabs[tab_index]: display_settings() tab_index += 1 if not state.disabled["db_cfg"]: - get_databases() with tabs[tab_index]: display_databases() tab_index += 1 if not state.disabled["model_cfg"]: - get_models() with tabs[tab_index]: display_models() tab_index += 1 if not state.disabled["oci_cfg"]: - get_oci() with tabs[tab_index]: display_oci() tab_index += 1 if not state.disabled["mcp_cfg"]: - get_mcp() with tabs[tab_index]: display_mcp() tab_index += 1 diff --git a/src/client/content/config/tabs/databases.py b/src/client/content/config/tabs/databases.py index 93acdc1a..2ce5ce70 100644 --- a/src/client/content/config/tabs/databases.py +++ b/src/client/content/config/tabs/databases.py @@ -73,7 +73,7 @@ def select_ai_profile() -> None: selectai_df.clear() -@st.cache_data +@st.cache_data(show_spinner="Retrieving SelectAI Objects") def selectai_df(profile): """Get SelectAI Object List and produce Dataframe""" logger.info("Retrieving objects from SelectAI Profile: %s", profile) diff --git a/src/client/content/config/tabs/models.py b/src/client/content/config/tabs/models.py index 13a25769..24a001be 100644 --- a/src/client/content/config/tabs/models.py +++ b/src/client/content/config/tabs/models.py @@ -52,7 +52,7 @@ def get_models(force: bool = False) -> None: state.model_configs = {} -@st.cache_data +@st.cache_data(show_spinner="Retrieving Model APIs") def get_model_apis(model_type: str = None) -> list: """Get list of valid APIs; function for Streamlit caching""" response = api_call.get(endpoint="v1/models/api", params={"model_type": model_type}) diff --git a/src/client/content/testbed.py b/src/client/content/testbed.py index bb9fe54a..8e0c5d66 100644 --- a/src/client/content/testbed.py +++ b/src/client/content/testbed.py @@ -129,7 +129,7 @@ def create_gauge(value): # download_file("Download Report", report["html_report"], "evaluation_report.html", "text/html") #CDB -@st.cache_data +@st.cache_data(show_spinner="Retrieving TestSets") def get_testbed_db_testsets() -> dict: """Get Database TestSets; this is cached""" return api_call.get(endpoint="v1/testbed/testsets") diff --git a/src/client/content/tools/tabs/split_embed.py b/src/client/content/tools/tabs/split_embed.py index 264f5d8b..cfa9a47e 100644 --- a/src/client/content/tools/tabs/split_embed.py +++ b/src/client/content/tools/tabs/split_embed.py @@ -32,7 +32,7 @@ ##################################################### # Functions ##################################################### -@st.cache_data +@st.cache_data(show_spinner="Retrieving OCI Compartments") def get_compartments() -> dict: """Get OCI Compartments; function for Streamlit caching""" response = api_call.get(endpoint=f"v1/oci/compartments/{state.client_settings['oci']['auth_profile']}") From 4d6e311aa40b7ea463afe5be6d273e36e588b270 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 16:13:14 +0100 Subject: [PATCH 14/28] GUI ReOrg --- src/client/content/api_server.py | 3 - src/client/content/chatbot.py | 2 +- src/client/content/config/config.py | 59 ++++++++++++ .../content/config/{ => tabs}/databases.py | 15 ++-- .../content/config/{ => tabs}/models.py | 22 ++--- src/client/content/config/{ => tabs}/oci.py | 11 +-- .../content/config/{ => tabs}/settings.py | 90 +++++++++++-------- src/client/content/testbed.py | 9 +- .../content/tools/{ => tabs}/prompt_eng.py | 12 +-- .../content/tools/{ => tabs}/split_embed.py | 21 ++--- src/client/content/tools/tools.py | 31 +++++++ src/launch_client.py | 40 +++------ 12 files changed, 
187 insertions(+), 128 deletions(-) create mode 100644 src/client/content/config/config.py rename src/client/content/config/{ => tabs}/databases.py (97%) rename src/client/content/config/{ => tabs}/models.py (96%) rename src/client/content/config/{ => tabs}/oci.py (97%) rename src/client/content/config/{ => tabs}/settings.py (85%) rename src/client/content/tools/{ => tabs}/prompt_eng.py (94%) rename src/client/content/tools/{ => tabs}/split_embed.py (97%) create mode 100644 src/client/content/tools/tools.py diff --git a/src/client/content/api_server.py b/src/client/content/api_server.py index 16de7c38..8b7d720c 100644 --- a/src/client/content/api_server.py +++ b/src/client/content/api_server.py @@ -16,7 +16,6 @@ import client.utils.client as client import client.utils.api_call as api_call -from client.utils.st_footer import remove_footer import common.logging_config as logging_config logger = logging_config.logging.getLogger("client.content.api_server") @@ -64,8 +63,6 @@ def server_restart() -> None: ##################################################### async def main() -> None: """Streamlit GUI""" - - remove_footer() st.header("API Server") st.write("Access with your own client.") left, right = st.columns([0.2, 0.8]) diff --git a/src/client/content/chatbot.py b/src/client/content/chatbot.py index d8382ecc..ec276133 100644 --- a/src/client/content/chatbot.py +++ b/src/client/content/chatbot.py @@ -15,7 +15,7 @@ import streamlit as st from streamlit import session_state as state -from client.content.config.models import get_models +from client.content.config.tabs.models import get_models import client.utils.st_common as st_common import client.utils.api_call as api_call diff --git a/src/client/content/config/config.py b/src/client/content/config/config.py new file mode 100644 index 00000000..84a664ba --- /dev/null +++ b/src/client/content/config/config.py @@ -0,0 +1,59 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
+""" + +import inspect +import streamlit as st +from streamlit import session_state as state + +from client.content.config.tabs.settings import get_settings, display_settings +from client.content.config.tabs.oci import get_oci, display_oci +from client.content.config.tabs.databases import get_databases, display_databases +from client.content.config.tabs.models import get_models, display_models + + +def main() -> None: + """Streamlit GUI""" + # Ensure all our configs exist + get_settings() + get_databases() + get_models() + get_oci() + + tabs_list = [] + if not state.disabled["settings"]: + tabs_list.append("💾 Settings") + if not state.disabled["db_cfg"]: + tabs_list.append("🗄️ Databases") + if not state.disabled["model_cfg"]: + tabs_list.append("🤖 Models") + if not state.disabled["oci_cfg"]: + tabs_list.append("☁️ OCI") + + # Only create tabs if there is at least one + tab_index = 0 + if tabs_list: + tabs = st.tabs(tabs_list) + + # Map tab objects to content conditionally + if not state.disabled["settings"]: + with tabs[tab_index]: + display_settings() + tab_index += 1 + if not state.disabled["db_cfg"]: + with tabs[tab_index]: + display_databases() + tab_index += 1 + if not state.disabled["model_cfg"]: + with tabs[tab_index]: + display_models() + tab_index += 1 + if not state.disabled["oci_cfg"]: + with tabs[tab_index]: + display_oci() + tab_index += 1 + + +if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: + main() diff --git a/src/client/content/config/databases.py b/src/client/content/config/tabs/databases.py similarity index 97% rename from src/client/content/config/databases.py rename to src/client/content/config/tabs/databases.py index 67fa882c..2ce5ce70 100644 --- a/src/client/content/config/databases.py +++ b/src/client/content/config/tabs/databases.py @@ -7,7 +7,6 @@ """ # spell-checker:ignore streamlit, selectbox, selectai -import inspect import json import pandas as pd @@ -16,10 +15,10 @@ import client.utils.api_call as api_call import client.utils.st_common as st_common + import common.logging_config as logging_config -from client.utils.st_footer import remove_footer -logger = logging_config.logging.getLogger("client.content.config.database") +logger = logging_config.logging.getLogger("client.content.config.tabs.database") ##################################################### @@ -74,7 +73,7 @@ def select_ai_profile() -> None: selectai_df.clear() -@st.cache_data +@st.cache_data(show_spinner="Retrieving SelectAI Objects") def selectai_df(profile): """Get SelectAI Object List and produce Dataframe""" logger.info("Retrieving objects from SelectAI Profile: %s", profile) @@ -106,16 +105,14 @@ def update_selectai(sai_new_df: pd.DataFrame, sai_old_df: pd.DataFrame) -> None: ##################################################### # MAIN ##################################################### -def main() -> None: +def display_databases() -> None: """Streamlit GUI""" - remove_footer() st.header("Database", divider="red") st.write("Configure the database used for Vector Storage and SelectAI.") try: get_databases() except api_call.ApiError: st.stop() - st.subheader("Configuration") database_lookup = st_common.state_configs_lookup("database_configs", "name") # Get a list of database names, and allow user to select @@ -239,5 +236,5 @@ def main() -> None: st.write("No SelectAI Profiles Found.") -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() +if __name__ == "__main__": + display_databases() diff --git a/src/client/content/config/models.py 
b/src/client/content/config/tabs/models.py similarity index 96% rename from src/client/content/config/models.py rename to src/client/content/config/tabs/models.py index 01c91129..6f625f3d 100644 --- a/src/client/content/config/models.py +++ b/src/client/content/config/tabs/models.py @@ -7,16 +7,14 @@ Session States Set: - model_configs: Stores all Model Configurations """ -# spell-checker:ignore selectbox ocigenai +# spell-checker:ignore selectbox -import inspect from time import sleep from typing import Literal import urllib.parse import streamlit as st from streamlit import session_state as state -from client.utils.st_footer import render_models_footer import client.utils.api_call as api_call import client.utils.st_common as st_common @@ -24,7 +22,7 @@ import common.help_text as help_text import common.logging_config as logging_config -logger = logging_config.logging.getLogger("client.content.config.models") +logger = logging_config.logging.getLogger("client.content.config.tabs.models") ################################### @@ -54,7 +52,7 @@ def get_models(force: bool = False) -> None: state.model_configs = {} -@st.cache_data +@st.cache_data(show_spinner="Retrieving Model Providers") def get_model_providers() -> list: """Get list of valid Providers; function for Streamlit caching""" response = api_call.get(endpoint="v1/models/provider") @@ -131,7 +129,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = key="add_model_api_key", type="password", value=model.get("api_key", ""), - disabled=disable_for_oci + disabled=disable_for_oci, ) if model_type == "ll": model["context_length"] = st.number_input( @@ -202,7 +200,7 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = st.rerun() -def render_model_rows(model_type): +def render_model_rows(model_type: str) -> None: """Render rows of the models""" data_col_widths = [0.07, 0.23, 0.2, 0.28, 0.12] table_col_format = st.columns(data_col_widths, vertical_alignment="center") @@ -255,7 +253,7 @@ def render_model_rows(model_type): ############################################################################# # MAIN ############################################################################# -def main() -> None: +def display_models() -> None: """Streamlit GUI""" st.header("Models", divider="red") st.write("Update, Add, or Delete model configuration parameters.") @@ -264,8 +262,6 @@ def main() -> None: except api_call.ApiError: st.stop() - # Table Dimensions - st.divider() st.subheader("Language Models") render_model_rows("ll") @@ -274,8 +270,6 @@ def main() -> None: st.subheader("Embedding Models") render_model_rows("embed") - render_models_footer() - -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() +if __name__ == "__main__": + display_models() diff --git a/src/client/content/config/oci.py b/src/client/content/config/tabs/oci.py similarity index 97% rename from src/client/content/config/oci.py rename to src/client/content/config/tabs/oci.py index 63c30e1c..7ff89ef7 100644 --- a/src/client/content/config/oci.py +++ b/src/client/content/config/tabs/oci.py @@ -7,7 +7,6 @@ """ # spell-checker:ignore streamlit, ocid, selectbox, genai, oraclecloud -import inspect import pandas as pd import streamlit as st @@ -15,11 +14,10 @@ import client.utils.api_call as api_call import client.utils.st_common as st_common -from client.utils.st_footer import remove_footer import common.logging_config as logging_config -logger = 
logging_config.logging.getLogger("client.content.config.oci") +logger = logging_config.logging.getLogger("client.content.config.tabs.oci") ##################################################### @@ -79,9 +77,8 @@ def patch_oci(auth_profile: str, supplied: dict, namespace: str, toast: bool = T ##################################################### # MAIN ##################################################### -def main() -> None: +def display_oci() -> None: """Streamlit GUI""" - remove_footer() st.header("Oracle Cloud Infrastructure", divider="red") st.write("Configure OCI for Object Storage Access and OCI GenAI Services.") try: @@ -215,5 +212,5 @@ def main() -> None: st.success("Oracle GenAI models - Enabled.", icon="✅") -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() +if __name__ == "__main__": + display_oci() diff --git a/src/client/content/config/settings.py b/src/client/content/config/tabs/settings.py similarity index 85% rename from src/client/content/config/settings.py rename to src/client/content/config/tabs/settings.py index 538ab718..cc635074 100644 --- a/src/client/content/config/settings.py +++ b/src/client/content/config/tabs/settings.py @@ -6,7 +6,6 @@ """ # spell-checker:ignore streamlit, mvnw, obaas, ollama -import inspect import time import os import io @@ -26,11 +25,10 @@ # Utilities import client.utils.api_call as api_call import client.utils.st_common as st_common -from client.utils.st_footer import remove_footer import common.logging_config as logging_config -logger = logging_config.logging.getLogger("client.content.config.settings") +logger = logging_config.logging.getLogger("client.content.config.tabs.settings") ############################################################################# @@ -38,15 +36,32 @@ ############################################################################# def get_settings(include_sensitive: bool = False): """Get Server-Side Settings""" - settings = api_call.get( - endpoint="v1/settings", - params={ - "client": state.client_settings["client"], - "full_config": True, - "incl_sensitive": include_sensitive, - }, - ) - return settings + try: + settings = api_call.get( + endpoint="v1/settings", + params={ + "client": state.client_settings["client"], + "full_config": True, + "incl_sensitive": include_sensitive, + }, + ) + return settings + except api_call.ApiError as ex: + if "not found" in str(ex): + # If client settings not found, create them + logger.info("Client settings not found, creating new ones") + api_call.post(endpoint="v1/settings", params={"client": state.client_settings["client"]}) + settings = api_call.get( + endpoint="v1/settings", + params={ + "client": state.client_settings["client"], + "full_config": True, + "incl_sensitive": include_sensitive, + }, + ) + return settings + else: + raise def save_settings(settings): @@ -144,12 +159,12 @@ def spring_ai_conf_check(ll_model: dict, embed_model: dict) -> str: if not ll_model or not embed_model: return "hybrid" - ll_provider = ll_model["provider"] - embed_provider = embed_model["provider"] + ll_provider = ll_model.get("provider", "") + embed_provider = embed_model.get("provider", "") - if "openai" in ll_provider and "openai" in ll_provider: + if all("openai" in p for p in (ll_provider, embed_provider)): return "openai" - elif ll_provider == "ollama" and "ollama" in embed_provider: + if all("ollama" in p for p in (ll_provider, embed_provider)): return "ollama" return "hybrid" @@ -202,7 +217,7 @@ def spring_ai_zip(provider, ll_config, embed_config): # Source 
directory that you want to copy files = ["mvnw", "mvnw.cmd", "pom.xml", "README.md"] - src_dir = Path(__file__).resolve().parents[2] / "spring_ai" + src_dir = Path(__file__).resolve().parents[3] / "spring_ai" # Using TemporaryDirectory with tempfile.TemporaryDirectory() as temp_dir: @@ -228,11 +243,12 @@ def spring_ai_zip(provider, ll_config, embed_config): zip_buffer.seek(0) return zip_buffer + def langchain_mcp_zip(settings): """Create LangChain MCP Zip File""" # Source directory that you want to copy - src_dir = Path(__file__).resolve().parents[2] / "mcp/rag" + src_dir = Path(__file__).resolve().parents[3] / "mcp/rag" # Using TemporaryDirectory with tempfile.TemporaryDirectory() as temp_dir: @@ -241,7 +257,7 @@ def langchain_mcp_zip(settings): shutil.copytree(src_dir, dst_dir) - data=save_settings(settings) + data = save_settings(settings) settings_path = os.path.join(dst_dir, "optimizer_settings.json") with open(settings_path, "w") as f: f.write(data) @@ -258,13 +274,11 @@ def langchain_mcp_zip(settings): return zip_buffer - ##################################################### # MAIN ##################################################### -def main(): +def display_settings(): """Streamlit GUI""" - remove_footer() st.header("Client Settings", divider="red") if "selected_sensitive_settings" not in state: state.selected_sensitive_settings = False @@ -314,7 +328,7 @@ def main(): else: st.info("Please upload a Settings file.") - st.header("Export source code templates", divider="red") + st.header("Source Code Templates", divider="red") # Merge the User Settings into the Model Config model_lookup = st_common.state_configs_lookup("model_configs", "id") try: @@ -338,23 +352,23 @@ def main(): """) else: col_left, col_centre, _ = st.columns([3, 4, 3]) - with col_left: + with col_left: st.download_button( - label="Download SpringAI", - data=spring_ai_zip(spring_ai_conf, ll_config, embed_config), # Generate zip on the fly - file_name="spring_ai.zip", # Zip file name - mime="application/zip", # Mime type for zip file - disabled=spring_ai_conf == "hybrid", - ) + label="Download SpringAI", + data=spring_ai_zip(spring_ai_conf, ll_config, embed_config), # Generate zip on the fly + file_name="spring_ai.zip", # Zip file name + mime="application/zip", # Mime type for zip file + disabled=spring_ai_conf == "hybrid", + ) with col_centre: st.download_button( - label="Download LangchainMCP", - data=langchain_mcp_zip(settings), # Generate zip on the fly - file_name="langchain_mcp.zip", # Zip file name - mime="application/zip", # Mime type for zip file - disabled=spring_ai_conf == "hybrid", - ) + label="Download LangchainMCP", + data=langchain_mcp_zip(settings), # Generate zip on the fly + file_name="langchain_mcp.zip", # Zip file name + mime="application/zip", # Mime type for zip file + disabled=spring_ai_conf == "hybrid", + ) -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() +if __name__ == "__main__": + display_settings() diff --git a/src/client/content/testbed.py b/src/client/content/testbed.py index 7696374c..8e0c5d66 100644 --- a/src/client/content/testbed.py +++ b/src/client/content/testbed.py @@ -15,12 +15,10 @@ import streamlit as st from streamlit import session_state as state -from client.content.config.models import get_models +from client.content.config.tabs.models import get_models import client.utils.st_common as st_common import client.utils.api_call as api_call -from client.utils.st_footer import remove_footer - import common.logging_config as logging_config @@ 
-131,7 +129,7 @@ def create_gauge(value): # download_file("Download Report", report["html_report"], "evaluation_report.html", "text/html") #CDB -@st.cache_data +@st.cache_data(show_spinner="Retrieving TestSets") def get_testbed_db_testsets() -> dict: """Get Database TestSets; this is cached""" return api_call.get(endpoint="v1/testbed/testsets") @@ -237,9 +235,8 @@ def qa_update_gui(qa_testset: list) -> None: ############################################################################# # MAIN ############################################################################# -def main(): +def main() -> None: """Streamlit GUI""" - remove_footer() try: get_models() except api_call.ApiError: diff --git a/src/client/content/tools/prompt_eng.py b/src/client/content/tools/tabs/prompt_eng.py similarity index 94% rename from src/client/content/tools/prompt_eng.py rename to src/client/content/tools/tabs/prompt_eng.py index 7010e14b..67cb9afb 100644 --- a/src/client/content/tools/prompt_eng.py +++ b/src/client/content/tools/tabs/prompt_eng.py @@ -9,8 +9,6 @@ """ # spell-checker:ignore selectbox -import inspect - import streamlit as st from streamlit import session_state as state @@ -18,9 +16,8 @@ import client.utils.api_call as api_call import common.logging_config as logging_config -from client.utils.st_footer import remove_footer -logger = logging_config.logging.getLogger("client.tools.prompt_eng") +logger = logging_config.logging.getLogger("client.tools.tabs.prompt_eng") ##################################################### @@ -65,9 +62,8 @@ def patch_prompt(category: str, name: str, prompt: str) -> bool: ############################################################################# # MAIN ############################################################################# -def main(): +def display_prompt_eng(): """Streamlit GUI""" - remove_footer() st.header("Prompt Engineering") st.write("Select which prompts to use and their instructions. Currently selected prompts are used.") try: @@ -116,5 +112,5 @@ def main(): st.rerun() -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() +if __name__ == "__main__": + display_prompt_eng() diff --git a/src/client/content/tools/split_embed.py b/src/client/content/tools/tabs/split_embed.py similarity index 97% rename from src/client/content/tools/split_embed.py rename to src/client/content/tools/tabs/split_embed.py index 52c68e87..cfa9a47e 100644 --- a/src/client/content/tools/split_embed.py +++ b/src/client/content/tools/tabs/split_embed.py @@ -4,9 +4,8 @@ This script initializes is used for the splitting and chunking process using Streamlit (`st`). 
""" -# spell-checker:ignore selectbox, hnsw, ivf, ocids,iterrows -import inspect +# spell-checker:ignore selectbox, hnsw, ivf, ocids,iterrows import math import re @@ -17,24 +16,23 @@ import client.utils.api_call as api_call import client.utils.st_common as st_common -from client.utils.st_footer import remove_footer -from client.content.config.databases import get_databases -from client.content.config.models import get_models -from client.content.config.oci import get_oci +from client.content.config.tabs.databases import get_databases +from client.content.config.tabs.models import get_models +from client.content.config.tabs.oci import get_oci from common.schema import DistanceMetrics, IndexTypes, DatabaseVectorStorage import common.functions as functions import common.help_text as help_text import common.logging_config as logging_config -logger = logging_config.logging.getLogger("client.tools.split_embed") +logger = logging_config.logging.getLogger("client.tools.tabs.split_embed") ##################################################### # Functions ##################################################### -@st.cache_data +@st.cache_data(show_spinner="Retrieving OCI Compartments") def get_compartments() -> dict: """Get OCI Compartments; function for Streamlit caching""" response = api_call.get(endpoint=f"v1/oci/compartments/{state.client_settings['oci']['auth_profile']}") @@ -113,7 +111,7 @@ def update_chunk_size_input() -> None: ############################################################################# # MAIN ############################################################################# -def main() -> None: +def display_split_embed() -> None: """Streamlit GUI""" try: get_models() @@ -122,7 +120,6 @@ def main() -> None: except api_call.ApiError: st.stop() - remove_footer() db_avail = st_common.is_db_configured() if not db_avail: logger.debug("Embedding Disabled (Database not configured)") @@ -402,5 +399,5 @@ def main() -> None: st.error(ex, icon="🚨") -if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: - main() +if __name__ == "__main__": + display_split_embed() diff --git a/src/client/content/tools/tools.py b/src/client/content/tools/tools.py new file mode 100644 index 00000000..5fcfe3fd --- /dev/null +++ b/src/client/content/tools/tools.py @@ -0,0 +1,31 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
+""" + +import inspect +import streamlit as st + +from client.content.tools.tabs.prompt_eng import get_prompts, display_prompt_eng +from client.content.tools.tabs.split_embed import display_split_embed +from client.content.config.tabs.models import get_models +from client.content.config.tabs.databases import get_databases +from client.content.config.tabs.oci import get_oci + + +def main() -> None: + """Streamlit GUI""" + prompt_eng, split_embed = st.tabs(["🎤 Prompts", "📚 Split/Embed"]) + + with prompt_eng: + get_prompts() + display_prompt_eng() + with split_embed: + get_models() + get_databases() + get_oci() + display_split_embed() + + +if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: + main() diff --git a/src/launch_client.py b/src/launch_client.py index 4e5e4797..1ec2b1a3 100644 --- a/src/launch_client.py +++ b/src/launch_client.py @@ -7,6 +7,7 @@ """ # spell-checker:ignore streamlit, scriptrunner +import asyncio import os from uuid import uuid4 @@ -131,44 +132,23 @@ def main() -> None: # Left Hand Side - Navigation chatbot = st.Page("client/content/chatbot.py", title="ChatBot", icon="💬", default=True) - navigation = { + sidebar_navigation = { "": [chatbot], } if not state.disabled["tests"]: testbed = st.Page("client/content/testbed.py", title="Testbed", icon="🧪") - navigation[""].append(testbed) + sidebar_navigation[""].append(testbed) if not state.disabled["api"]: api_server = st.Page("client/content/api_server.py", title="API Server", icon="📡") - navigation[""].append(api_server) - - # Tools + sidebar_navigation[""].append(api_server) if not state.disabled["tools"]: - split_embed = st.Page("client/content/tools/split_embed.py", title="Split/Embed", icon="📚") - navigation["Tools"] = [split_embed] - prompt_eng = st.Page("client/content/tools/prompt_eng.py", title="Prompts", icon="🎤") - navigation["Tools"].append(prompt_eng) + tools = st.Page("client/content/tools/tools.py", title="Tools", icon="🧰") + sidebar_navigation[""].append(tools) + config = st.Page("client/content/config/config.py", title="Configuration", icon="⚙️") + sidebar_navigation[""].append(config) - # Administration - if not state.disabled["tools"]: - navigation["Configuration"] = [] - if not state.disabled["db_cfg"]: - db_config = st.Page("client/content/config/databases.py", title="Databases", icon="🗄️") - navigation["Configuration"].append(db_config) - if not state.disabled["model_cfg"]: - model_config = st.Page("client/content/config/models.py", title="Models", icon="🤖") - navigation["Configuration"].append(model_config) - if not state.disabled["oci_cfg"]: - oci_config = st.Page("client/content/config/oci.py", title="OCI", icon="☁️") - navigation["Configuration"].append(oci_config) - if not state.disabled["settings"]: - settings = st.Page("client/content/config/settings.py", title="Settings", icon="💾") - navigation["Configuration"].append(settings) - # When we get here, if there's nothing in "Configuration" delete it - if not navigation["Configuration"]: - del navigation["Configuration"] - - pg = st.navigation(navigation, position="sidebar", expanded=False) - pg.run() + pg_sidebar = st.navigation(sidebar_navigation, position="sidebar", expanded=False) + pg_sidebar.run() if __name__ == "__main__": From aec458e3cfbf773f552b586e5ce2f7970149c1d8 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 18:45:39 +0100 Subject: [PATCH 15/28] Fix tests --- .../config/{ => tabs}/test_databases.py | 2 +- .../content/config/{ => tabs}/test_models.py | 2 +- .../content/config/{ => tabs}/test_oci.py | 
2 +- .../config/{ => tabs}/test_settings.py | 8 +++--- tests/client/content/test_st_footer.py | 26 +++++++++---------- .../tools/{ => tabs}/test_prompt_eng.py | 2 +- .../tools/{ => tabs}/test_split_embed.py | 7 +++-- 7 files changed, 25 insertions(+), 24 deletions(-) rename tests/client/content/config/{ => tabs}/test_databases.py (99%) rename tests/client/content/config/{ => tabs}/test_models.py (95%) rename tests/client/content/config/{ => tabs}/test_oci.py (99%) rename tests/client/content/config/{ => tabs}/test_settings.py (95%) rename tests/client/content/tools/{ => tabs}/test_prompt_eng.py (97%) rename tests/client/content/tools/{ => tabs}/test_split_embed.py (97%) diff --git a/tests/client/content/config/test_databases.py b/tests/client/content/config/tabs/test_databases.py similarity index 99% rename from tests/client/content/config/test_databases.py rename to tests/client/content/config/tabs/test_databases.py index eafb1a24..f99f7cfd 100644 --- a/tests/client/content/config/test_databases.py +++ b/tests/client/content/config/tabs/test_databases.py @@ -18,7 +18,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/config/databases.py" + ST_FILE = "../src/client/content/config/tabs/databases.py" def test_missing_details(self, app_server, app_test): """Submits with missing required inputs""" diff --git a/tests/client/content/config/test_models.py b/tests/client/content/config/tabs/test_models.py similarity index 95% rename from tests/client/content/config/test_models.py rename to tests/client/content/config/tabs/test_models.py index 22496099..2833efb9 100644 --- a/tests/client/content/config/test_models.py +++ b/tests/client/content/config/tabs/test_models.py @@ -12,7 +12,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/config/models.py" + ST_FILE = "../src/client/content/config/tabs/models.py" def test_model_tables(self, app_server, app_test): """Test that the model tables are setup""" diff --git a/tests/client/content/config/test_oci.py b/tests/client/content/config/tabs/test_oci.py similarity index 99% rename from tests/client/content/config/test_oci.py rename to tests/client/content/config/tabs/test_oci.py index aff3323f..aecf2d5c 100644 --- a/tests/client/content/config/test_oci.py +++ b/tests/client/content/config/tabs/test_oci.py @@ -46,7 +46,7 @@ def _mock_server_get_namespace(): class TestStreamlit: """Test the Streamlit UI""" - ST_FILE = "../src/client/content/config/oci.py" + ST_FILE = "../src/client/content/config/tabs/oci.py" def test_initialise_streamlit_no_env(self, app_server, app_test): """Initialisation of streamlit without any OCI environment""" diff --git a/tests/client/content/config/test_settings.py b/tests/client/content/config/tabs/test_settings.py similarity index 95% rename from tests/client/content/config/test_settings.py rename to tests/client/content/config/tabs/test_settings.py index 17479ac2..1a4909f6 100644 --- a/tests/client/content/config/test_settings.py +++ b/tests/client/content/config/tabs/test_settings.py @@ -13,7 +13,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/config/settings.py" + ST_FILE = "../src/client/content/config/tabs/settings.py" def test_settings_display(self, app_server, app_test): """Test that settings are displayed correctly""" @@ -86,7 +86,9 @@ def test_spring_ai_section_exists(self, app_server, app_test): page_text.append(div.label) # Assert that Export source 
code templates is mentioned somewhere in the page - assert any("Export source code templates" in text for text in page_text), "Export source code templates section not found in page" + assert any("Source Code Templates" in text for text in page_text), ( + "Export source code templates section not found in page" + ) def test_compare_with_uploaded_json(self, app_server, app_test): """Test the compare_with_uploaded_json function for finding differences in settings""" @@ -114,7 +116,7 @@ def test_compare_with_uploaded_json(self, app_server, app_test): } # Import the original function to test directly - from client.content.config.settings import compare_settings + from client.content.config.tabs.settings import compare_settings # Call the function directly differences = compare_settings(at.session_state, uploaded_settings) diff --git a/tests/client/content/test_st_footer.py b/tests/client/content/test_st_footer.py index 1e7e1a91..4bc0a790 100644 --- a/tests/client/content/test_st_footer.py +++ b/tests/client/content/test_st_footer.py @@ -5,8 +5,8 @@ # spell-checker: disable # pylint: disable=import-error -from client.utils.st_footer import render_chat_footer, render_models_footer import streamlit.components.v1 as components +from client.utils.st_footer import render_chat_footer ############################################################################# @@ -35,22 +35,22 @@ def mock_html(html, height): # Run the footer rendering render_chat_footer() - def test_models_page_disclaimer(self, app_server, app_test, monkeypatch): - """Verify disclaimer appears on models page""" - assert app_server is not None + # def test_models_page_disclaimer(self, app_server, app_test, monkeypatch): + # """Verify disclaimer appears on models page""" + # assert app_server is not None - # Mock components.html to capture rendered content - def mock_html(html, height): - assert "LLMs can make mistakes. Always verify important information." in html + # # Mock components.html to capture rendered content + # def mock_html(html, height): + # assert "LLMs can make mistakes. Always verify important information." 
in html - monkeypatch.setattr(components, "html", mock_html) + # monkeypatch.setattr(components, "html", mock_html) - # Initialize app_test and run component - at = app_test(self.ST_FILE) - at = at.run() + # # Initialize app_test and run component + # at = app_test(self.ST_FILE) + # at = at.run() - # Run the models footer rendering - render_models_footer() + # # Run the models footer rendering + # render_models_footer() def test_disclaimer_absence_on_other_pages(self, app_server, app_test, monkeypatch): """Verify disclaimer doesn't appear on non-chat/non-models pages""" diff --git a/tests/client/content/tools/test_prompt_eng.py b/tests/client/content/tools/tabs/test_prompt_eng.py similarity index 97% rename from tests/client/content/tools/test_prompt_eng.py rename to tests/client/content/tools/tabs/test_prompt_eng.py index d8b3929d..b6b8376c 100644 --- a/tests/client/content/tools/test_prompt_eng.py +++ b/tests/client/content/tools/tabs/test_prompt_eng.py @@ -13,7 +13,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File - ST_FILE = "../src/client/content/tools/prompt_eng.py" + ST_FILE = "../src/client/content/tools/tabs/prompt_eng.py" def test_change_sys(self, app_server, app_test): """Change the Current System Prompt""" diff --git a/tests/client/content/tools/test_split_embed.py b/tests/client/content/tools/tabs/test_split_embed.py similarity index 97% rename from tests/client/content/tools/test_split_embed.py rename to tests/client/content/tools/tabs/test_split_embed.py index dd96b9b2..2f1af5bc 100644 --- a/tests/client/content/tools/test_split_embed.py +++ b/tests/client/content/tools/tabs/test_split_embed.py @@ -7,7 +7,6 @@ from unittest.mock import patch import pandas as pd -from client.utils.st_common import state_configs_lookup ############################################################################# @@ -17,7 +16,7 @@ class TestStreamlit: """Test the Streamlit UI""" # Streamlit File path - ST_FILE = "../src/client/content/tools/split_embed.py" + ST_FILE = "../src/client/content/tools/tabs/split_embed.py" def test_initialization(self, app_server, app_test, monkeypatch): """Test initialization of the split_embed component""" @@ -324,10 +323,10 @@ def mock_files_data_frame(objects, process=False): data = {"File": objects, "Process": [process] * len(objects)} return pd.DataFrame(data) - monkeypatch.setattr("client.content.tools.split_embed.files_data_frame", mock_files_data_frame) + monkeypatch.setattr("client.content.tools.tabs.split_embed.files_data_frame", mock_files_data_frame) # Mock get_compartments function - monkeypatch.setattr("client.content.tools.split_embed.get_compartments", lambda: mock_compartments) + monkeypatch.setattr("client.content.tools.tabs.split_embed.get_compartments", lambda: mock_compartments) # Initialize app_test at = app_test(self.ST_FILE) From 54d984247152fd8882d844d8b0d50a9c590fa54b Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 18:46:44 +0100 Subject: [PATCH 16/28] ensure random_pet is < 12 chars --- opentofu/locals.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/opentofu/locals.tf b/opentofu/locals.tf index f2db6183..1bf57a97 100644 --- a/opentofu/locals.tf +++ b/opentofu/locals.tf @@ -5,9 +5,10 @@ // Housekeeping locals { compartment_ocid = var.compartment_ocid != "" ? var.compartment_ocid : var.tenancy_ocid - label_prefix = var.label_prefix != "" ? lower(var.label_prefix) : lower(random_pet.label.id) + label_prefix = var.label_prefix != "" ? 
lower(var.label_prefix) : lower(substr(random_pet.label.id, 0, 12))
 }

+
 // Availability Domains
 locals {
   ads = data.oci_identity_availability_domains.all.availability_domains

From c853648e9924ed2c6404fd4456163c94698ef5e3 Mon Sep 17 00:00:00 2001
From: gotsysdba
Date: Wed, 20 Aug 2025 18:51:07 +0100
Subject: [PATCH 17/28] Update reqs

---
 src/common/schema.py | 50 +++++++++++++++++++++++++++++++++++++++++---
 src/pyproject.toml   |  1 +
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/src/common/schema.py b/src/common/schema.py
index e756a480..0172c8fd 100644
--- a/src/common/schema.py
+++ b/src/common/schema.py
@@ -19,6 +19,32 @@
 IndexTypes = Literal["HNSW", "IVF"]

 # ModelAPIs
+# https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html
+ModelProviders = Literal[
+    "openai",
+    "anthropic",
+    "azure_openai",
+    "azure_ai",
+    "google_vertexai",
+    "google_genai",
+    "bedrock",
+    "bedrock_converse",
+    "cohere",
+    "fireworks",
+    "together",
+    "mistralai",
+    "huggingface",
+    "groq",
+    "ollama",
+    "google_anthropic_vertex",
+    "deepseek",
+    "ibm",
+    "nvidia",
+    "xai",
+    "perplexity",
+]
+
+
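The provider strings above are the registry keys for LangChain's init_chat_model, linked in the comment at the top of the literal. A minimal sketch of how a bootstrap-style model row could be turned into a chat client with those keys; the helper, the sample row, and the base_url pass-through are illustrative assumptions rather than part of this patch, and the matching provider package (langchain-ollama here) must be installed:

from langchain.chat_models import init_chat_model

def chat_client(entry: dict):
    # entry mirrors a bootstrap model row: {"id": ..., "provider": ..., "url": ...}
    kwargs = {"base_url": entry["url"]} if entry.get("url") else {}
    return init_chat_model(entry["id"], model_provider=entry["provider"], **kwargs)

llm = chat_client({"id": "llama3.1", "provider": "ollama", "url": "http://127.0.0.1:11434"})
print(llm.invoke("Hello").content)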
Must be one of: {providers}") + return self + ##################################################### # Oracle Cloud Infrastructure @@ -213,7 +254,9 @@ class OracleCloudSettings(BaseModel): """Store Oracle Cloud Infrastructure Settings""" auth_profile: str = Field(default="DEFAULT", description="Config File Profile") - namespace: Optional[str] = Field(default=None, description="Object Store Namespace", json_schema_extra={"readOnly": True}) + namespace: Optional[str] = Field( + default=None, description="Object Store Namespace", json_schema_extra={"readOnly": True} + ) user: Optional[str] = Field( default=None, description="Optional if using Auth Token", diff --git a/src/pyproject.toml b/src/pyproject.toml index 72710297..04327dfe 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -36,6 +36,7 @@ server = [ "langchain-groq==0.3.7", "langchain-huggingface==0.3.1", "langchain-mistralai==0.2.11", + "langchain-mcp-adapters==0.1.9", "langchain-ollama==0.3.6", "langchain-openai==0.3.30", "langgraph==0.6.5", From 4d0faa0bbcb8d4c6e3928a615f6af0a19e69b229 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 20 Aug 2025 21:20:23 +0100 Subject: [PATCH 18/28] small updates --- opentofu/locals.tf | 2 +- src/pyproject.toml | 5 ++--- tests/conftest.py | 8 ++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/opentofu/locals.tf b/opentofu/locals.tf index 1bf57a97..e6ecb45e 100644 --- a/opentofu/locals.tf +++ b/opentofu/locals.tf @@ -5,7 +5,7 @@ // Housekeeping locals { compartment_ocid = var.compartment_ocid != "" ? var.compartment_ocid : var.tenancy_ocid - label_prefix = var.label_prefix != "" ? lower(var.label_prefix) : lower(substr(random_pet.label.id, 0, 12)) + label_prefix = var.label_prefix != "" ? substr(lower(var.label_prefix), 0, 12) : substr(lower(random_pet.label.id), 0, 12) } diff --git a/src/pyproject.toml b/src/pyproject.toml index 91bfc44f..a9a7b970 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -45,13 +45,12 @@ server = [ "langchain-openai==0.3.30", "langchain-perplexity==0.1.2", "langchain-xai==0.2.5", - "langgraph==0.6.5", - "litellm==1.75.8", + "langgraph==0.6.6", + "litellm==1.75.9", "llama-index==0.13.2", "lxml==6.0.0", "matplotlib==3.10.5", "oci~=2.0", - "openai==v1.99.9", # Remove after https://github.com/openai/openai-python/issues/2564 resolved "psutil==7.0.0", "python-multipart==0.0.20", "torch==2.8.0", diff --git a/tests/conftest.py b/tests/conftest.py index 264e4450..eccb92de 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -111,14 +111,14 @@ def is_port_in_use(port): server_process = subprocess.Popen(cmd, cwd="src") - # Wait for server to be ready (up to 30 seconds) - max_wait = 30 + # Wait for server to be ready + max_wait = 60 start_time = time.time() while not is_port_in_use(8015): if time.time() - start_time > max_wait: server_process.terminate() server_process.wait() - raise TimeoutError("Server failed to start within 30 seconds") + raise TimeoutError(f"Server failed to start within {max_wait} seconds") time.sleep(0.5) yield server_process @@ -133,7 +133,7 @@ def app_test(auth_headers): """Establish Streamlit State for Client to Operate""" def _app_test(page): - at = AppTest.from_file(page, default_timeout=30) + at = AppTest.from_file(page, default_timeout=60) at.session_state.server = { "key": os.environ.get("API_SERVER_KEY"), "url": os.environ.get("API_SERVER_URL"), From 04495047e33d829143173d0e37c0bb7dc2638374 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Thu, 21 Aug 2025 08:22:29 +0100 Subject: [PATCH 19/28] removed 
From 04495047e33d829143173d0e37c0bb7dc2638374 Mon Sep 17 00:00:00 2001
From: gotsysdba
Date: Thu, 21 Aug 2025 08:22:29 +0100
Subject: [PATCH 19/28] removed commented out code

---
 tests/client/content/test_st_footer.py | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/tests/client/content/test_st_footer.py b/tests/client/content/test_st_footer.py
index 4bc0a790..2489f9df 100644
--- a/tests/client/content/test_st_footer.py
+++ b/tests/client/content/test_st_footer.py
@@ -35,23 +35,6 @@ def mock_html(html, height):
         # Run the footer rendering
         render_chat_footer()

-    # def test_models_page_disclaimer(self, app_server, app_test, monkeypatch):
-    #     """Verify disclaimer appears on models page"""
-    #     assert app_server is not None
-
-    #     # Mock components.html to capture rendered content
-    #     def mock_html(html, height):
-    #         assert "LLMs can make mistakes. Always verify important information." in html
-
-    #     monkeypatch.setattr(components, "html", mock_html)
-
-    #     # Initialize app_test and run component
-    #     at = app_test(self.ST_FILE)
-    #     at = at.run()
-
-    #     # Run the models footer rendering
-    #     render_models_footer()

     def test_disclaimer_absence_on_other_pages(self, app_server, app_test, monkeypatch):
         """Verify disclaimer doesn't appear on non-chat/non-models pages"""
         assert app_server is not None

From 88552ba2b95a6edf89ae567a3708186f0d1a3ffd Mon Sep 17 00:00:00 2001
From: gotsysdba
Date: Thu, 21 Aug 2025 08:50:18 +0100
Subject: [PATCH 20/28] remove asyncio from launch_client.py

---
 src/launch_client.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/launch_client.py b/src/launch_client.py
index b1fdefae..76630e7f 100644
--- a/src/launch_client.py
+++ b/src/launch_client.py
@@ -7,7 +7,6 @@
 """
 # spell-checker:ignore streamlit, scriptrunner

-import asyncio
 import os
 from uuid import uuid4

From d41d4befd1f0991a0244f8419f53b9dcbf6a6195 Mon Sep 17 00:00:00 2001
From: gotsysdba
Date: Thu, 21 Aug 2025 13:20:41 +0100
Subject: [PATCH 21/28] MCP GUI updates

---
 src/client/content/config/tabs/mcp.py |  23 ++++-
 src/server/api/core/mcp.py            |  82 ++++++++++------
 src/server/api/utils/mcp.py           | 135 ++++++++++++++++++++++++++
 3 files changed, 210 insertions(+), 30 deletions(-)
 create mode 100644 src/server/api/utils/mcp.py

diff --git a/src/client/content/config/tabs/mcp.py b/src/client/content/config/tabs/mcp.py
index 253097f6..51d9a446 100644
--- a/src/client/content/config/tabs/mcp.py
+++ b/src/client/content/config/tabs/mcp.py
@@ -3,6 +3,7 @@
 Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
 """
 # spell-checker:ignore selectbox healthz
+import json

 import streamlit as st
 from streamlit import session_state as state
@@ -21,12 +22,20 @@
 def get_mcp_status() -> dict:
     """Get MCP Status"""
     try:
-        logger.info("Checking MCP Status")
         return api_call.get(endpoint="v1/mcp/healthz")
     except api_call.ApiError as ex:
         logger.error("Unable to get MCP Status: %s", ex)
         return {}

+def get_mcp_client() -> str:
+    """Get MCP Client Configuration"""
+    try:
+        params = {"server": state.server["url"], "port": state.server["port"]}
+        mcp_client = api_call.get(endpoint="v1/mcp/client", params=params)
+        return json.dumps(mcp_client, indent=2)
+    except api_call.ApiError as ex:
+        logger.error("Unable to get MCP Client Configuration: %s", ex)
+        return "{}"

 def get_mcp(force: bool = False) -> list[dict]:
     """Get MCP configs from API Server"""
@@ -135,10 +144,18 @@ def display_mcp() -> None:
         st.stop()
     mcp_status = get_mcp_status()
     if mcp_status.get("status") == "ready":
-        st.write(f"The {mcp_status['name']} is running. Version: {mcp_status['version']}")
+        st.markdown(f"""
+            The {mcp_status['name']} is running.
+            **Version**: {mcp_status['version']}
+        """)
+        with st.expander("Client Configuration"):
+            st.code(get_mcp_client(), language="json")
+    else:
+        st.error("MCP Server is not running!", icon="🛑")
+        st.stop()

     selected_mcp_server = st.selectbox(
-        "MCP Server:",
+        "Configured MCP Server(s):",
         options=extract_servers(),
         # index=list(database_lookup.keys()).index(state.client_settings["database"]["alias"]),
         key="selected_mcp_server",
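The tab above reaches both new endpoints through api_call. A hedged sketch of exercising them directly; the environment variable names, the default port, and whether healthz enforces the same bearer key are assumptions pieced together from elsewhere in this series:

import os
import requests

server = os.environ.get("API_SERVER_URL", "http://127.0.0.1")  # assumed variable name
port = os.environ.get("API_SERVER_PORT", "8000")  # assumed variable name
headers = {"Authorization": f"Bearer {os.environ.get('API_SERVER_KEY', '')}"}

health = requests.get(f"{server}:{port}/v1/mcp/healthz", headers=headers, timeout=10).json()
if health.get("status") == "ready":
    config = requests.get(
        f"{server}:{port}/v1/mcp/client",
        params={"server": server, "port": port},
        headers=headers,
        timeout=10,
    ).json()
    print(config)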
diff --git a/src/server/api/core/mcp.py b/src/server/api/core/mcp.py
index 751a8fc0..3154fc30 100644
--- a/src/server/api/core/mcp.py
+++ b/src/server/api/core/mcp.py
@@ -1,31 +1,59 @@
-from typing import Optional, List, Dict, Any
-from common.schema import MCPModelConfig, MCPToolConfig, MCPSettings
-from server.bootstrap import mcp as mcp_bootstrap
+"""
+Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
+"""
+
+# spell-checker:ignore streamable
+import os
+
+# from langchain_mcp_adapters.client import MultiServerMCPClient
+# from typing import Optional, List, Dict, Any
+# from common.schema import MCPModelConfig, MCPToolConfig, MCPSettings
+# from server.bootstrap import mcp as mcp_bootstrap

 import common.logging_config as logging_config

 logger = logging_config.logging.getLogger("api.core.mcp")

-def get_mcp_model(model_id: str) -> Optional[MCPModelConfig]:
-    """Get MCP model configuration by ID"""
-    for model in mcp_bootstrap.MCP_MODELS:
-        if model.model_id == model_id:
-            return model
-    return None
-
-def get_mcp_tool(tool_name: str) -> Optional[MCPToolConfig]:
-    """Get MCP tool configuration by name"""
-    for tool in mcp_bootstrap.MCP_TOOLS:
-        if tool.name == tool_name:
-            return tool
-    return None
-
-def update_mcp_settings(settings: Dict[str, Any]) -> MCPSettings:
-    """Update MCP settings"""
-    if not mcp_bootstrap.MCP_SETTINGS:
-        raise ValueError("MCP settings not initialized")
-
-    for key, value in settings.items():
-        if hasattr(mcp_bootstrap.MCP_SETTINGS, key):
-            setattr(mcp_bootstrap.MCP_SETTINGS, key, value)
-
-    return mcp_bootstrap.MCP_SETTINGS
\ No newline at end of file
+def get_client(server: str = "http://127.0.0.1", port: int = 8000) -> dict:
+    """Get the MCP Client Configuration"""
+    mcp_client = {
+        "mcpServers": {
+            "optimizer": {
+                "type": "streamableHttp",
+                "transport": "streamable_http",
+                "url": f"{server}:{port}/mcp/",
+                "headers": {"Authorization": f"Bearer {os.getenv('API_SERVER_KEY')}"},
+            }
+        }
+    }
+
+    return mcp_client
+
+
+# def get_mcp_model(model_id: str) -> Optional[MCPModelConfig]:
+#     """Get MCP model configuration by ID"""
+#     for model in mcp_bootstrap.MCP_MODELS:
+#         if model.model_id == model_id:
+#             return model
+#     return None
+
+
+# def get_mcp_tool(tool_name: str) -> Optional[MCPToolConfig]:
+#     """Get MCP tool configuration by name"""
+#     for tool in mcp_bootstrap.MCP_TOOLS:
+#         if tool.name == tool_name:
+#             return tool
+#     return None
+
+
+# def update_mcp_settings(settings: Dict[str, Any]) -> MCPSettings:
+#     """Update MCP settings"""
+#     if not mcp_bootstrap.MCP_SETTINGS:
+#         raise ValueError("MCP settings not initialized")
+
+#     for key, value in settings.items():
+#         if hasattr(mcp_bootstrap.MCP_SETTINGS, key):
+#             setattr(mcp_bootstrap.MCP_SETTINGS, key, value)
+
+#     return mcp_bootstrap.MCP_SETTINGS
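The connection block returned by get_client() is shaped for langchain-mcp-adapters, which this series pins at 0.1.9. A sketch of the intended consumption, assuming get_client is importable from the module above; the extra "type" key appears aimed at other MCP hosts and is dropped here before handing the dict to the adapter:

import asyncio
from langchain_mcp_adapters.client import MultiServerMCPClient

async def optimizer_tool_names() -> list[str]:
    connection = get_client("http://127.0.0.1", 8000)["mcpServers"]["optimizer"]
    connection = {key: value for key, value in connection.items() if key != "type"}
    client = MultiServerMCPClient({"optimizer": connection})
    tools = await client.get_tools()
    return [tool.name for tool in tools]

print(asyncio.run(optimizer_tool_names()))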
its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
+"""
+# spell-checker:ignore astream selectai
+
+import time
+from typing import Literal, AsyncGenerator
+
+from langchain_core.messages import HumanMessage
+from langchain_core.runnables import RunnableConfig
+from langchain_mcp_adapters.client import MultiServerMCPClient
+from langgraph.graph.state import CompiledStateGraph
+from langgraph.prebuilt import ToolNode
+
+import server.api.core.settings as core_settings
+import server.api.core.oci as core_oci
+import server.api.core.prompts as core_prompts
+import server.api.utils.models as util_models
+import server.api.utils.databases as util_databases
+import server.api.utils.selectai as util_selectai
+import server.api.core.mcp as core_mcp
+import server.mcp.graph as graph
+
+import common.schema as schema
+import common.logging_config as logging_config
+
+logger = logging_config.logging.getLogger("api.utils.mcp")
+
+
+def error_response(message: str, model: str) -> dict:
+    """Return the error as an OpenAI-style chat.completion response"""
+    response = {
+        "id": "error",
+        "choices": [{"message": {"role": "assistant", "content": message}, "index": 0, "finish_reason": "stop"}],
+        "created": int(time.time()),
+        "model": model,
+        "object": "chat.completion",
+    }
+    return response
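+
+# A sketch of how a caller is expected to drive completion_generator below. This is
+# an assumption based on the sentinel strings used in this module, not on endpoint
+# code included in this patch:
+#
+#     async for chunk in completion_generator(client_id, request, call="streams"):
+#         if chunk == "[stream_finished]":  # sentinel that ends the streaming loop
+#             break
+#         ...  # forward the chunk to the HTTP response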
+
+
+async def completion_generator(
+    client: schema.ClientIdType, request: schema.ChatRequest, call: Literal["completions", "streams"]
+) -> AsyncGenerator[str, None]:
+    """MCP Completion Requests"""
+    client_settings = core_settings.get_client_settings(client)
+    model = request.model_dump()
+    logger.debug("Settings: %s", client_settings)
+    logger.debug("Request: %s", model)
+
+    # Establish LL Model Params (use the request's model if given; otherwise fall back to client settings)
+    if not model["model"]:
+        model = client_settings.ll_model.model_dump()
+
+    # Get OCI Settings
+    oci_config = core_oci.get_oci(client=client)
+
+    # Setup Language Model
+    ll_model = util_models.get_client(model, oci_config)
+    if not ll_model:
+        yield error_response("I'm unable to initialise the Language Model. Please refresh the application.", model)
+        return
+
+    # Setup MCP and bind tools
+    mcp_client = MultiServerMCPClient({"optimizer": core_mcp.get_client()["mcpServers"]["optimizer"]})
+    tools = await mcp_client.get_tools()
+    ll_model_with_tools = ll_model.bind_tools(tools)
+
+    # Build our Graph
+    graph.set_node("tools_node", ToolNode(tools))
+    agent: CompiledStateGraph = graph.mcp_graph
+
+    kwargs = {
+        "input": {"messages": [HumanMessage(content=request.messages[0].content)]},
+        "config": RunnableConfig(
+            configurable={"thread_id": client, "ll_model": ll_model_with_tools, "tools": tools},
+            metadata={"use_history": client_settings.ll_model.chat_history},
+        ),
+    }
+
+    yield "End"  # Placeholder until the streaming loop below is re-enabled
+
+    # # Get Prompts
+    # try:
+    #     user_sys_prompt = getattr(client_settings.prompts, "sys", "Basic Example")
+    #     sys_prompt = core_prompts.get_prompts(category="sys", name=user_sys_prompt)
+    # except AttributeError as ex:
+    #     # schema.Settings not on server-side
+    #     logger.error("A settings exception occurred: %s", ex)
+    #     raise
+
+    # db_conn = None
+    # # Setup selectai
+    # if client_settings.selectai.enabled:
+    #     db_conn = util_databases.get_client_db(client).connection
+    #     util_selectai.set_profile(db_conn, client_settings.selectai.profile, "temperature", model["temperature"])
+    #     util_selectai.set_profile(
+    #         db_conn, client_settings.selectai.profile, "max_tokens", model["max_completion_tokens"]
+    #     )
+
+    # # Setup vector_search
+    # embed_client, ctx_prompt = None, None
+    # if client_settings.vector_search.enabled:
+    #     db_conn = util_databases.get_client_db(client).connection
+    #     embed_client = util_models.get_client(client_settings.vector_search.model_dump(), oci_config)
+
+    #     user_ctx_prompt = getattr(client_settings.prompts, "ctx", "Basic Example")
+    #     ctx_prompt = core_prompts.get_prompts(category="ctx", name=user_ctx_prompt)
+
+    # try:
+    #     async for chunk in agent.astream_events(**kwargs, version="v2"):
+    #         # The below will produce A LOT of output; uncomment when desperate
+    #         # logger.debug("Streamed Chunk: %s", chunk)
+    #         if chunk["event"] == "on_chat_model_stream":
+    #             if "tools_condition" in str(chunk["metadata"]["langgraph_triggers"]):
+    #                 continue  # Skip Tool Call messages
+    #             if "vs_retrieve" in str(chunk["metadata"]["langgraph_node"]):
+    #                 continue  # Skip Fake-Tool Call messages
+    #             content = chunk["data"]["chunk"].content
+    #             if content != "" and call == "streams":
+    #                 yield content.encode("utf-8")
+    #         last_response = chunk["data"]
+    #     if call == "streams":
+    #         yield "[stream_finished]"  # This will break the Chatbot loop
+    #     elif call == "completions":
+    #         final_response = last_response["output"]["final_response"]
+    #         yield final_response  # This will be captured for ChatResponse
+    # except Exception as ex:
+    #     logger.error("An invoke exception occurred: %s", ex)
+    #     # yield f"I'm sorry; {ex}"
+    #     # TODO(gotsysdba) - If a message is returned;
+    #     # format and return (this should be done in the agent)
+    #     raise

From eaca1d43f1811b7c3892b32cfddffa699715d7e6 Mon Sep 17 00:00:00 2001
From: gotsysdba
Date: Thu, 21 Aug 2025 13:55:45 +0100
Subject: [PATCH 22/28] Refactor get_client to clarify api_key handling in kwargs

---
 src/server/api/utils/models.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/server/api/utils/models.py b/src/server/api/utils/models.py
index 0b0958d4..96bf8143 100644
--- a/src/server/api/utils/models.py
+++ b/src/server/api/utils/models.py
@@ -107,8 +107,8 @@ def get_client(model_config: dict, oci_config: schema.OracleCloudSettings, giska
             "max_tokens": 
full_model_config["max_completion_tokens"], **common_params, } - - if full_model_config.get("api_key"): # only add if present + # Only add the api_key if it is set + if full_model_config.get("api_key"): kwargs["api_key"] = full_model_config["api_key"] client = init_chat_model(**kwargs) @@ -131,8 +131,10 @@ def get_client(model_config: dict, oci_config: schema.OracleCloudSettings, giska "model": full_model_config["id"], "base_url": full_model_config["url"], } - if full_model_config.get("api_key"): # only add if set + # Only add the api_key if it is set + if full_model_config.get("api_key"): kwargs["api_key"] = full_model_config["api_key"] + client = init_embeddings(**kwargs) else: client = OCIGenAIEmbeddings( From 1370bfb2d684bbb430566839e7e11a9009e88bd9 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 27 Aug 2025 12:54:05 +0100 Subject: [PATCH 23/28] re-org tests --- .../client/content/config/tabs/test_databases.py | 0 tests/{ => integration}/client/content/config/tabs/test_models.py | 0 tests/{ => integration}/client/content/config/tabs/test_oci.py | 0 .../{ => integration}/client/content/config/tabs/test_settings.py | 0 tests/{ => integration}/client/content/test_api_server.py | 0 tests/{ => integration}/client/content/test_chatbot.py | 0 tests/{ => integration}/client/content/test_st_footer.py | 0 tests/{ => integration}/client/content/test_testbed.py | 0 .../client/content/tools/tabs/test_prompt_eng.py | 0 .../client/content/tools/tabs/test_split_embed.py | 0 tests/{ => integration}/server/test_endpoints_chat.py | 0 tests/{ => integration}/server/test_endpoints_databases.py | 0 tests/{ => integration}/server/test_endpoints_embed.py | 0 tests/{ => integration}/server/test_endpoints_health.py | 0 tests/{ => integration}/server/test_endpoints_models.py | 0 tests/{ => integration}/server/test_endpoints_oci.py | 0 tests/{ => integration}/server/test_endpoints_prompts.py | 0 tests/{ => integration}/server/test_endpoints_settings.py | 0 tests/{ => integration}/server/test_endpoints_testbed.py | 0 19 files changed, 0 insertions(+), 0 deletions(-) rename tests/{ => integration}/client/content/config/tabs/test_databases.py (100%) rename tests/{ => integration}/client/content/config/tabs/test_models.py (100%) rename tests/{ => integration}/client/content/config/tabs/test_oci.py (100%) rename tests/{ => integration}/client/content/config/tabs/test_settings.py (100%) rename tests/{ => integration}/client/content/test_api_server.py (100%) rename tests/{ => integration}/client/content/test_chatbot.py (100%) rename tests/{ => integration}/client/content/test_st_footer.py (100%) rename tests/{ => integration}/client/content/test_testbed.py (100%) rename tests/{ => integration}/client/content/tools/tabs/test_prompt_eng.py (100%) rename tests/{ => integration}/client/content/tools/tabs/test_split_embed.py (100%) rename tests/{ => integration}/server/test_endpoints_chat.py (100%) rename tests/{ => integration}/server/test_endpoints_databases.py (100%) rename tests/{ => integration}/server/test_endpoints_embed.py (100%) rename tests/{ => integration}/server/test_endpoints_health.py (100%) rename tests/{ => integration}/server/test_endpoints_models.py (100%) rename tests/{ => integration}/server/test_endpoints_oci.py (100%) rename tests/{ => integration}/server/test_endpoints_prompts.py (100%) rename tests/{ => integration}/server/test_endpoints_settings.py (100%) rename tests/{ => integration}/server/test_endpoints_testbed.py (100%) diff --git a/tests/client/content/config/tabs/test_databases.py 
b/tests/integration/client/content/config/tabs/test_databases.py similarity index 100% rename from tests/client/content/config/tabs/test_databases.py rename to tests/integration/client/content/config/tabs/test_databases.py diff --git a/tests/client/content/config/tabs/test_models.py b/tests/integration/client/content/config/tabs/test_models.py similarity index 100% rename from tests/client/content/config/tabs/test_models.py rename to tests/integration/client/content/config/tabs/test_models.py diff --git a/tests/client/content/config/tabs/test_oci.py b/tests/integration/client/content/config/tabs/test_oci.py similarity index 100% rename from tests/client/content/config/tabs/test_oci.py rename to tests/integration/client/content/config/tabs/test_oci.py diff --git a/tests/client/content/config/tabs/test_settings.py b/tests/integration/client/content/config/tabs/test_settings.py similarity index 100% rename from tests/client/content/config/tabs/test_settings.py rename to tests/integration/client/content/config/tabs/test_settings.py diff --git a/tests/client/content/test_api_server.py b/tests/integration/client/content/test_api_server.py similarity index 100% rename from tests/client/content/test_api_server.py rename to tests/integration/client/content/test_api_server.py diff --git a/tests/client/content/test_chatbot.py b/tests/integration/client/content/test_chatbot.py similarity index 100% rename from tests/client/content/test_chatbot.py rename to tests/integration/client/content/test_chatbot.py diff --git a/tests/client/content/test_st_footer.py b/tests/integration/client/content/test_st_footer.py similarity index 100% rename from tests/client/content/test_st_footer.py rename to tests/integration/client/content/test_st_footer.py diff --git a/tests/client/content/test_testbed.py b/tests/integration/client/content/test_testbed.py similarity index 100% rename from tests/client/content/test_testbed.py rename to tests/integration/client/content/test_testbed.py diff --git a/tests/client/content/tools/tabs/test_prompt_eng.py b/tests/integration/client/content/tools/tabs/test_prompt_eng.py similarity index 100% rename from tests/client/content/tools/tabs/test_prompt_eng.py rename to tests/integration/client/content/tools/tabs/test_prompt_eng.py diff --git a/tests/client/content/tools/tabs/test_split_embed.py b/tests/integration/client/content/tools/tabs/test_split_embed.py similarity index 100% rename from tests/client/content/tools/tabs/test_split_embed.py rename to tests/integration/client/content/tools/tabs/test_split_embed.py diff --git a/tests/server/test_endpoints_chat.py b/tests/integration/server/test_endpoints_chat.py similarity index 100% rename from tests/server/test_endpoints_chat.py rename to tests/integration/server/test_endpoints_chat.py diff --git a/tests/server/test_endpoints_databases.py b/tests/integration/server/test_endpoints_databases.py similarity index 100% rename from tests/server/test_endpoints_databases.py rename to tests/integration/server/test_endpoints_databases.py diff --git a/tests/server/test_endpoints_embed.py b/tests/integration/server/test_endpoints_embed.py similarity index 100% rename from tests/server/test_endpoints_embed.py rename to tests/integration/server/test_endpoints_embed.py diff --git a/tests/server/test_endpoints_health.py b/tests/integration/server/test_endpoints_health.py similarity index 100% rename from tests/server/test_endpoints_health.py rename to tests/integration/server/test_endpoints_health.py diff --git 
a/tests/server/test_endpoints_models.py b/tests/integration/server/test_endpoints_models.py similarity index 100% rename from tests/server/test_endpoints_models.py rename to tests/integration/server/test_endpoints_models.py diff --git a/tests/server/test_endpoints_oci.py b/tests/integration/server/test_endpoints_oci.py similarity index 100% rename from tests/server/test_endpoints_oci.py rename to tests/integration/server/test_endpoints_oci.py diff --git a/tests/server/test_endpoints_prompts.py b/tests/integration/server/test_endpoints_prompts.py similarity index 100% rename from tests/server/test_endpoints_prompts.py rename to tests/integration/server/test_endpoints_prompts.py diff --git a/tests/server/test_endpoints_settings.py b/tests/integration/server/test_endpoints_settings.py similarity index 100% rename from tests/server/test_endpoints_settings.py rename to tests/integration/server/test_endpoints_settings.py diff --git a/tests/server/test_endpoints_testbed.py b/tests/integration/server/test_endpoints_testbed.py similarity index 100% rename from tests/server/test_endpoints_testbed.py rename to tests/integration/server/test_endpoints_testbed.py From adf25e7a8293931e2f48b9c7901541f4e6917f24 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 27 Aug 2025 12:55:12 +0100 Subject: [PATCH 24/28] Ignore .delete files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 57f6a824..9fa693f4 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ **/THIRD_PARTY_LICENSES.txt sbin/** **/*.bak +**/*.delete **/tmp/** **/temp/** **/chatbot_graph.png From cfe3e3ec17b587092d2c0f4c8a2df18165a8c39a Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 3 Sep 2025 08:20:29 +0100 Subject: [PATCH 25/28] Merge tests from origin/main --- tests/conftest.py | 1 + .../content/config/tabs/test_databases.py | 12 ++-- .../client/content/config/tabs/test_models.py | 27 +++++++-- .../content/tools/tabs/test_split_embed.py | 24 ++++---- .../integration/server/test_endpoints_chat.py | 29 ++++++---- .../server/test_endpoints_databases.py | 13 +++-- .../server/test_endpoints_embed.py | 20 +++---- .../server/test_endpoints_health.py | 1 + .../server/test_endpoints_models.py | 56 +++++++++++-------- .../integration/server/test_endpoints_oci.py | 1 + .../server/test_endpoints_prompts.py | 1 + .../server/test_endpoints_settings.py | 1 + .../server/test_endpoints_testbed.py | 2 +- 13 files changed, 116 insertions(+), 72 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index eccb92de..ec1b5bf9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -138,6 +138,7 @@ def _app_test(page): "key": os.environ.get("API_SERVER_KEY"), "url": os.environ.get("API_SERVER_URL"), "port": int(os.environ.get("API_SERVER_PORT")), + "control": True } response = requests.get( url=f"{at.session_state.server['url']}:{at.session_state.server['port']}/v1/settings", diff --git a/tests/integration/client/content/config/tabs/test_databases.py b/tests/integration/client/content/config/tabs/test_databases.py index f99f7cfd..263d6426 100644 --- a/tests/integration/client/content/config/tabs/test_databases.py +++ b/tests/integration/client/content/config/tabs/test_databases.py @@ -60,7 +60,7 @@ def test_wrong_details(self, app_server, app_test): at.button(key="save_database").click().run() assert at.error[0].value == "Current Status: Disconnected" - assert at.error[1].value == "Update Failed - Database: DEFAULT unable to connect." 
and at.error[1].icon == "🚨" + assert "cannot connect to database" in at.error[1].value and at.error[1].icon == "🚨" def test_connected(self, app_server, app_test, db_container): """Sumbits with good DSN""" @@ -99,7 +99,7 @@ def test_connected(self, app_server, app_test, db_container): "username": "ADMIN", "password": TEST_CONFIG["db_password"], "dsn": TEST_CONFIG["db_dsn"], - "expected": "Update Failed - Database: DEFAULT invalid credentials.", + "expected": "invalid credential or not authorized", }, id="bad_user", ), @@ -109,7 +109,7 @@ def test_connected(self, app_server, app_test, db_container): "username": TEST_CONFIG["db_username"], "password": "Wr0ng_P4ssW0rd", "dsn": TEST_CONFIG["db_dsn"], - "expected": "Update Failed - Database: DEFAULT invalid credentials.", + "expected": "invalid credential or not authorized", }, id="bad_password", ), @@ -119,7 +119,7 @@ def test_connected(self, app_server, app_test, db_container): "username": TEST_CONFIG["db_username"], "password": TEST_CONFIG["db_password"], "dsn": "//localhost:1521/WRONG_TP", - "expected": "Update Failed - Database: DEFAULT unable to connect.", + "expected": "cannot connect to database", }, id="bad_dsn_easy", ), @@ -129,7 +129,7 @@ def test_connected(self, app_server, app_test, db_container): "username": TEST_CONFIG["db_username"], "password": TEST_CONFIG["db_password"], "dsn": "WRONG_TP", - "expected": "Update Failed - Database: DEFAULT DPY-*", + "expected": "DPY-4026", }, id="bad_dsn", ), @@ -147,7 +147,7 @@ def test_disconnected(self, app_server, app_test, db_container, test_case): at.text_input(key="database_dsn").set_value(test_case["dsn"]).run() at.button(key="save_database").click().run() assert at.error[0].value == "Current Status: Disconnected" - assert re.match(test_case["expected"], at.error[1].value) and at.error[1].icon == "🚨" + assert test_case["expected"] in at.error[1].value and at.error[1].icon == "🚨" # Due to the connection error, the settings should NOT be updated and be set # to previous successful test connection; connected will be False for error handling assert at.session_state.database_configs[0]["name"] == "DEFAULT" diff --git a/tests/integration/client/content/config/tabs/test_models.py b/tests/integration/client/content/config/tabs/test_models.py index 2833efb9..a4b7a16f 100644 --- a/tests/integration/client/content/config/tabs/test_models.py +++ b/tests/integration/client/content/config/tabs/test_models.py @@ -5,6 +5,7 @@ # spell-checker: disable # pylint: disable=import-error + ############################################################################# # Test Streamlit UI ############################################################################# @@ -14,16 +15,34 @@ class TestStreamlit: # Streamlit File ST_FILE = "../src/client/content/config/tabs/models.py" + def test_model_page(self, app_server, app_test): + """Test basic page layout""" + assert app_server is not None + at = app_test(self.ST_FILE).run() + + titles = at.get("title") + assert any("Models" in t.value for t in titles) + + headers = at.get("header") + assert any("Language Models" in h.value for h in headers) + assert any("Embedding Models" in h.value for h in headers) + def test_model_tables(self, app_server, app_test): """Test that the model tables are setup""" assert app_server is not None at = app_test(self.ST_FILE).run() assert at.session_state.model_configs is not None for model in at.session_state.model_configs: - assert at.text_input(key=f"{model['type']}_{model['id']}_enabled").value == "⚪" - assert 
at.text_input(key=f"{model['type']}_{model['id']}_provider").value == model["provider"] - assert at.text_input(key=f"{model['type']}_{model['id']}_server").value == model["url"] - assert at.button(key=f"{model['type']}_{model['id']}_edit") is not None + assert at.text_input(key=f"{model['type']}_{model['provider']}_{model['id']}_enabled").value == "⚪" + assert ( + at.text_input(key=f"{model['type']}_{model['provider']}_{model['id']}").value + == f"{model['provider']}/{model['id']}" + ) + assert ( + at.text_input(key=f"{model['type']}_{model['provider']}_{model['id']}_api_base").value + == model["api_base"] + ) + assert at.button(key=f"{model['type']}_{model['provider']}_{model['id']}_edit") is not None for model_type in {item["type"] for item in at.session_state.model_configs}: assert at.button(key=f"add_{model_type}_model") is not None diff --git a/tests/integration/client/content/tools/tabs/test_split_embed.py b/tests/integration/client/content/tools/tabs/test_split_embed.py index 2f1af5bc..eecb360d 100644 --- a/tests/integration/client/content/tools/tabs/test_split_embed.py +++ b/tests/integration/client/content/tools/tabs/test_split_embed.py @@ -30,7 +30,7 @@ def mock_get(endpoint=None, **kwargs): "id": "test-model", "type": "embed", "enabled": True, - "url": "http://test.url", + "api_base": "http://test.url", "max_chunk_size": 1000, } ] @@ -42,7 +42,7 @@ def mock_get(endpoint=None, **kwargs): at = app_test(self.ST_FILE) # Mock functions that make external calls to avoid failures - monkeypatch.setattr("common.functions.is_url_accessible", lambda url: (True, "")) + monkeypatch.setattr("common.functions.is_url_accessible", lambda api_base: (True, "")) monkeypatch.setattr("client.utils.st_common.is_db_configured", lambda: True) # Run the app - this is critical to initialize all widgets! 
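All of these hunks update the same mocking pattern: the embed-model config key "url"
is now "api_base", and the stub for common.functions.is_url_accessible renames its
parameter to match. A minimal sketch of the pattern, assuming pytest's monkeypatch
fixture and the module paths shown in the surrounding hunks:

    def test_embed_model_config(monkeypatch):
        def mock_get(endpoint=None, **kwargs):
            # Embed model configs now expose "api_base" rather than "url"
            if endpoint == "v1/models":
                return [{"id": "test-model", "type": "embed", "enabled": True,
                         "api_base": "http://test.url", "max_chunk_size": 1000}]
            return {}

        monkeypatch.setattr("client.utils.api_call.get", mock_get)
        # The stub's parameter name mirrors the patched signature: api_base, not url
        monkeypatch.setattr("common.functions.is_url_accessible", lambda api_base: (True, ""))
        monkeypatch.setattr("client.utils.st_common.is_db_configured", lambda: True)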
@@ -90,7 +90,7 @@ def mock_get(endpoint=None, **kwargs): "id": "test-model", "type": "embed", "enabled": True, - "url": "http://test.url", + "api_base": "http://test.url", "max_chunk_size": 1000, } ] @@ -99,7 +99,7 @@ def mock_get(endpoint=None, **kwargs): monkeypatch.setattr("client.utils.api_call.get", mock_get) # Mock functions that make external calls - monkeypatch.setattr("common.functions.is_url_accessible", lambda url: (True, "")) + monkeypatch.setattr("common.functions.is_url_accessible", lambda api_base: (True, "")) monkeypatch.setattr("client.utils.st_common.is_db_configured", lambda: True) # Initialize app_test @@ -136,7 +136,7 @@ def mock_get(endpoint=None, **kwargs): "id": "test-model", "type": "embed", "enabled": True, - "url": "http://test.url", + "api_base": "http://test.url", "max_chunk_size": 1000, } ] @@ -145,7 +145,7 @@ def mock_get(endpoint=None, **kwargs): monkeypatch.setattr("client.utils.api_call.get", mock_get) # Mock functions that make external calls - monkeypatch.setattr("common.functions.is_url_accessible", lambda url: (True, "")) + monkeypatch.setattr("common.functions.is_url_accessible", lambda api_base: (True, "")) monkeypatch.setattr("client.utils.st_common.is_db_configured", lambda: True) # Initialize app_test @@ -181,7 +181,7 @@ def mock_get(endpoint=None, **kwargs): # Test successful assert True - def test_web_url_validation(self, app_server, app_test, monkeypatch): + def test_web_api_base_validation(self, app_server, app_test, monkeypatch): """Test web URL validation""" assert app_server is not None @@ -193,7 +193,7 @@ def mock_get(endpoint=None, **kwargs): "id": "test-model", "type": "embed", "enabled": True, - "url": "http://test.url", + "api_base": "http://test.url", "max_chunk_size": 1000, } ] @@ -202,7 +202,7 @@ def mock_get(endpoint=None, **kwargs): monkeypatch.setattr("client.utils.api_call.get", mock_get) # Mock functions that make external calls - monkeypatch.setattr("common.functions.is_url_accessible", lambda url: (True, "")) + monkeypatch.setattr("common.functions.is_url_accessible", lambda api_base: (True, "")) monkeypatch.setattr("client.utils.st_common.is_db_configured", lambda: True) # Initialize app_test @@ -237,7 +237,7 @@ def mock_get(endpoint=None, **kwargs): "id": "test-model", "type": "embed", "enabled": True, - "url": "http://test.url", + "api_base": "http://test.url", "max_chunk_size": 1000, } ] @@ -246,7 +246,7 @@ def mock_get(endpoint=None, **kwargs): monkeypatch.setattr("client.utils.api_call.get", mock_get) # Mock functions that make external calls - monkeypatch.setattr("common.functions.is_url_accessible", lambda url: (True, "")) + monkeypatch.setattr("common.functions.is_url_accessible", lambda api_base: (True, "")) monkeypatch.setattr("client.utils.st_common.is_db_configured", lambda: True) # Initialize app_test @@ -307,7 +307,7 @@ def mock_get_response(endpoint=None, **kwargs): "id": "test-model", "type": "embed", "enabled": True, - "url": "http://test.url", + "api_base": "http://test.url", "max_chunk_size": 1000, } ] diff --git a/tests/integration/server/test_endpoints_chat.py b/tests/integration/server/test_endpoints_chat.py index 172849d1..b322cc84 100644 --- a/tests/integration/server/test_endpoints_chat.py +++ b/tests/integration/server/test_endpoints_chat.py @@ -2,10 +2,12 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods, import-error # spell-checker: disable -# pylint: disable=import-error from unittest.mock import patch, MagicMock +import warnings + import pytest from langchain_core.messages import ChatMessage from common.schema import ChatRequest @@ -47,14 +49,19 @@ class TestEndpoints: def test_chat_completion_no_model(self, client, auth_headers): """Test no model chat completion request""" - request = ChatRequest( - messages=[ChatMessage(content="Hello", role="user")], - model="test-model", - temperature=1.0, - max_completion_tokens=256, - ) + with warnings.catch_warnings(): + # Enable the catch_warnings context + warnings.simplefilter("ignore", category=UserWarning) + request = ChatRequest( + messages=[ChatMessage(content="Hello", role="user")], + model="test-provider/test-model", + temperature=1.0, + max_completion_tokens=256, + ) + response = client.post( + "/v1/chat/completions", headers=auth_headers["valid_auth"], json=request.model_dump() + ) - response = client.post("/v1/chat/completions", headers=auth_headers["valid_auth"], json=request.model_dump()) assert response.status_code == 200 assert "choices" in response.json() assert ( @@ -75,7 +82,7 @@ def test_chat_completion_valid_mock(self, client, auth_headers): } ], "created": 1234567890, - "model": "test-model", + "model": "test-provider/test-model", "object": "chat.completion", "usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}, } @@ -90,7 +97,7 @@ def test_chat_completion_valid_mock(self, client, auth_headers): request = ChatRequest( messages=[ChatMessage(content="Hello", role="user")], - model="test-model", + model="test-provider/test-model", temperature=1.0, max_completion_tokens=256, ) @@ -115,7 +122,7 @@ def test_chat_stream_valid_mock(self, client, auth_headers): request = ChatRequest( messages=[ChatMessage(content="Hello", role="user")], - model="test-model", + model="test-provider/test-model", temperature=1.0, max_completion_tokens=256, streaming=True, diff --git a/tests/integration/server/test_endpoints_databases.py b/tests/integration/server/test_endpoints_databases.py index 76a4feed..3b341afe 100644 --- a/tests/integration/server/test_endpoints_databases.py +++ b/tests/integration/server/test_endpoints_databases.py @@ -2,8 +2,8 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods, import-error # spell-checker: disable -# pylint: disable=import-error import pytest from conftest import TEST_CONFIG @@ -84,7 +84,7 @@ def test_databases_update_db_down(self, client, auth_headers): } response = client.patch("/v1/databases/DEFAULT", headers=auth_headers["valid_auth"], json=payload) assert response.status_code == 503 - assert response.json() == {"detail": "Database: DEFAULT unable to connect."} + assert "cannot connect to database" in response.json().get("detail", "") test_cases = [ pytest.param( @@ -125,7 +125,7 @@ def test_databases_update_db_down(self, client, auth_headers): "DEFAULT", 503, {"user": "user", "password": "password", "dsn": "//localhost:1521/dsn"}, - {"detail": "Database: DEFAULT unable to connect."}, + {"detail": "cannot connect to database"}, id="invalid_connection", ), pytest.param( @@ -136,7 +136,7 @@ def test_databases_update_db_down(self, client, auth_headers): "password": "Wr0ng_P4sswOrd", "dsn": TEST_CONFIG["db_dsn"], }, - {"detail": "Database: DEFAULT invalid credentials."}, + {"detail": "invalid credential or not authorized"}, id="wrong_password", ), pytest.param( @@ -174,7 +174,10 @@ def test_databases_update_cases( assert response.status_code == status_code if response.status_code != 200: - assert response.json() == expected + if response.status_code == 422: + assert response.json() == expected + else: + assert expected["detail"] in response.json().get("detail", "") else: data = response.json() data.pop("config_dir", None) # Remove config_dir as it's environment-specific diff --git a/tests/integration/server/test_endpoints_embed.py b/tests/integration/server/test_endpoints_embed.py index 5ec59a7b..102972ab 100644 --- a/tests/integration/server/test_endpoints_embed.py +++ b/tests/integration/server/test_endpoints_embed.py @@ -2,8 +2,8 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods, import-error # spell-checker: disable -# pylint: disable=import-error from io import BytesIO from pathlib import Path @@ -109,13 +109,13 @@ def embed_strings(self, texts): return self.embed_documents(texts) def setup_mock_embeddings(self, mock_embedding_model): - """Create mock embeddings and get_client function""" + """Create mock embeddings and get_client_embed function""" mock_embeddings = self.MockEmbeddings(mock_embedding_model) - def mock_get_client(model_config=None, oci_config=None, giskard=False): + def mock_get_client_embed(model_config=None, oci_config=None, giskard=False): return mock_embeddings - return mock_get_client + return mock_get_client_embed def create_embed_params(self, alias): """Create embedding parameters with the given alias""" @@ -364,12 +364,12 @@ def test_split_embed_with_different_file_types(self, client, auth_headers, db_co ) # Setup mock embeddings - mock_get_client = self.setup_mock_embeddings(mock_embedding_model) + mock_get_client_embed = self.setup_mock_embeddings(mock_embedding_model) # Test data test_data = self.create_embed_params("test_mixed_files") - with patch("server.api.utils.models.get_client", side_effect=mock_get_client): + with patch("server.api.utils.models.get_client_embed", side_effect=mock_get_client_embed): # Make request to the split_embed endpoint response = client.post("/v1/embed", headers=auth_headers["valid_auth"], json=test_data) @@ -392,7 +392,7 @@ def test_vector_store_creation_and_deletion(self, client, auth_headers, db_conta self.create_test_file() # Setup mock embeddings - mock_get_client = self.setup_mock_embeddings(mock_embedding_model) + mock_get_client_embed = self.setup_mock_embeddings(mock_embedding_model) # Test data for embedding alias = "test_lifecycle" @@ -401,7 +401,7 @@ def test_vector_store_creation_and_deletion(self, client, auth_headers, db_conta # Calculate the expected vector store name expected_vector_store_name = self.get_vector_store_name(alias) - with patch("server.api.utils.models.get_client", side_effect=mock_get_client): + with patch("server.api.utils.models.get_client_embed", side_effect=mock_get_client_embed): # Step 1: Create the vector store by embedding documents response = client.post("/v1/embed", headers=auth_headers["valid_auth"], json=test_data) assert response.status_code == 200 @@ -428,12 +428,12 @@ def test_multiple_vector_stores(self, client, auth_headers, db_container, mock_e aliases = ["test_vs_1", "test_vs_2", "test_vs_3"] # Setup mock embeddings - mock_get_client = self.setup_mock_embeddings(mock_embedding_model) + mock_get_client_embed = self.setup_mock_embeddings(mock_embedding_model) # Calculate expected vector store names expected_vector_store_names = [self.get_vector_store_name(alias) for alias in aliases] - with patch("server.api.utils.models.get_client", side_effect=mock_get_client): + with patch("server.api.utils.models.get_client_embed", side_effect=mock_get_client_embed): # Create multiple vector stores with different aliases for alias in aliases: # Create a test file for each request (since previous ones were cleaned up) diff --git a/tests/integration/server/test_endpoints_health.py b/tests/integration/server/test_endpoints_health.py index 716cb69d..27658ee0 100644 --- a/tests/integration/server/test_endpoints_health.py +++ b/tests/integration/server/test_endpoints_health.py @@ -2,6 +2,7 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods # spell-checker: disable import pytest diff --git a/tests/integration/server/test_endpoints_models.py b/tests/integration/server/test_endpoints_models.py index 8bc12ade..a0a5f695 100644 --- a/tests/integration/server/test_endpoints_models.py +++ b/tests/integration/server/test_endpoints_models.py @@ -2,6 +2,7 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods # spell-checker: disable from typing import get_args @@ -29,10 +30,10 @@ class TestInvalidAuthEndpoints: [ pytest.param("/v1/models/api", "get", id="models_list_api"), pytest.param("/v1/models", "get", id="models_list"), - pytest.param("/v1/models/model_id", "get", id="models_get"), - pytest.param("/v1/models/model_id", "patch", id="models_update"), + pytest.param("/v1/models/model_provider/model_id", "get", id="models_get"), + pytest.param("/v1/models/model_provider/model_id", "patch", id="models_update"), pytest.param("/v1/models", "post", id="models_create"), - pytest.param("/v1/models/model_id", "delete", id="models_delete"), + pytest.param("/v1/models/model_provider/model_id", "delete", id="models_delete"), ], ) def test_endpoints(self, client, auth_headers, endpoint, api_method, auth_type, status_code): @@ -58,7 +59,7 @@ def test_models_get_before(self, client, auth_headers): all_models = client.get("/v1/models?include_disabled=true", headers=auth_headers["valid_auth"]) assert len(all_models.json()) > 0 for model in all_models.json(): - response = client.get(f"/v1/models/{model['id']}", headers=auth_headers["valid_auth"]) + response = client.get(f"/v1/models/{model['provider']}/{model['id']}", headers=auth_headers["valid_auth"]) assert response.status_code == 200 def test_models_delete_add(self, client, auth_headers): @@ -68,17 +69,19 @@ def test_models_delete_add(self, client, auth_headers): # Delete all models for model in all_models.json(): - response = client.delete(f"/v1/models/{model['id']}", headers=auth_headers["valid_auth"]) + response = client.delete( + f"/v1/models/{model['provider']}/{model['id']}", headers=auth_headers["valid_auth"] + ) assert response.status_code == 200 - assert response.json() == {"message": f"Model: {model['id']} deleted."} + assert response.json() == {"message": f"Model: {model['provider']}/{model['id']} deleted."} # Check that no models exists deleted_models = client.get("/v1/models?include_disabled=true", headers=auth_headers["valid_auth"]) assert len(deleted_models.json()) == 0 # Delete a non-existent model - response = client.delete("/v1/models/test_model", headers=auth_headers["valid_auth"]) + response = client.delete("/v1/models/test_provider/test_model", headers=auth_headers["valid_auth"]) assert response.status_code == 200 - assert response.json() == {"message": "Model: test_model deleted."} + assert response.json() == {"message": "Model: test_provider/test_model deleted."} # Add all models back for model in all_models.json(): @@ -97,18 +100,17 @@ def test_models_add_dupl(self, client, auth_headers): payload = model response = client.post("/v1/models", headers=auth_headers["valid_auth"], json=payload) assert response.status_code == 409 - assert response.json() == {"detail": f"Model: 
{model['id']} already exists."} + assert response.json() == {"detail": f"Model: {model['provider']}/{model['id']} already exists."} test_cases = [ pytest.param( { - "id": "valid_ll_model", + "id": "gpt-3.5-turbo", "enabled": True, "type": "ll", "provider": "openai", "api_key": "test-key", - "openai_compat": True, - "url": "https://api.openai.com/v1", + "api_base": "https://api.openai.com/v1", "context_length": 127072, "temperature": 1.0, "max_completion_tokens": 4096, @@ -121,6 +123,7 @@ def test_models_add_dupl(self, client, auth_headers): pytest.param( { "id": "invalid_ll_model", + "provider": "invalid_ll_model", "enabled": False, }, 422, @@ -133,9 +136,8 @@ def test_models_add_dupl(self, client, auth_headers): "enabled": False, "type": "embed", "provider": "huggingface", - "url": "http://127.0.0.1:8080", + "api_base": "http://127.0.0.1:8080", "api_key": "", - "openai_compat": True, "max_chunk_size": 512, }, 201, @@ -144,18 +146,17 @@ def test_models_add_dupl(self, client, auth_headers): ), pytest.param( { - "id": "unreachable_url_model", + "id": "unreachable_api_base_model", "enabled": True, "type": "embed", "provider": "huggingface", - "url": "http://127.0.0.1:112233", + "api_base": "http://127.0.0.1:112233", "api_key": "", - "openai_compat": True, "max_chunk_size": 512, }, 201, 422, - id="unreachable_url_model", + id="unreachable_api_base_model", ), ] @@ -165,16 +166,21 @@ def test_model_create(self, client, auth_headers, payload, add_status_code, _, r response = client.post("/v1/models", headers=auth_headers["valid_auth"], json=payload) assert response.status_code == add_status_code if add_status_code == 201: - if request.node.callspec.id == "unreachable_url_model": + if request.node.callspec.id == "unreachable_api_base_model": assert response.json()["enabled"] is False else: + print(response.json()) assert all(item in response.json().items() for item in payload.items()) # Model was added, should get 200 back - response = client.get(f"/v1/models/{payload['id']}", headers=auth_headers["valid_auth"]) + response = client.get( + f"/v1/models/{payload['provider']}/{payload['id']}", headers=auth_headers["valid_auth"] + ) assert response.status_code == 200 else: # Model wasn't added, should get a 404 back - response = client.get(f"/v1/models/{payload['id']}", headers=auth_headers["valid_auth"]) + response = client.get( + f"/v1/models/{payload['provider']}/{payload['id']}", headers=auth_headers["valid_auth"] + ) assert response.status_code == 404 @pytest.mark.parametrize("payload, add_status_code, update_status_code", test_cases) @@ -183,12 +189,16 @@ def test_model_update(self, client, auth_headers, payload, add_status_code, upda if add_status_code == 201: # Create the model when we know it will succeed _ = client.post("/v1/models", headers=auth_headers["valid_auth"], json=payload) - response = client.get(f"/v1/models/{payload['id']}", headers=auth_headers["valid_auth"]) + response = client.get( + f"/v1/models/{payload['provider']}/{payload['id']}", headers=auth_headers["valid_auth"] + ) old_enabled = response.json()["enabled"] # Switch up the enabled for the update payload["enabled"] = not old_enabled - response = client.patch(f"/v1/models/{payload['id']}", headers=auth_headers["valid_auth"], json=payload) + response = client.patch( + f"/v1/models/{payload['provider']}/{payload['id']}", headers=auth_headers["valid_auth"], json=payload + ) assert response.status_code == update_status_code if update_status_code == 200: new_enabled = response.json()["enabled"] diff --git 
a/tests/integration/server/test_endpoints_oci.py b/tests/integration/server/test_endpoints_oci.py index 1b3fd762..0c8f6ceb 100644 --- a/tests/integration/server/test_endpoints_oci.py +++ b/tests/integration/server/test_endpoints_oci.py @@ -2,6 +2,7 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods # spell-checker: disable from unittest.mock import patch, MagicMock diff --git a/tests/integration/server/test_endpoints_prompts.py b/tests/integration/server/test_endpoints_prompts.py index dd569ac4..f2de4fed 100644 --- a/tests/integration/server/test_endpoints_prompts.py +++ b/tests/integration/server/test_endpoints_prompts.py @@ -2,6 +2,7 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods # spell-checker: disable import pytest diff --git a/tests/integration/server/test_endpoints_settings.py b/tests/integration/server/test_endpoints_settings.py index a652c95a..3de3263c 100644 --- a/tests/integration/server/test_endpoints_settings.py +++ b/tests/integration/server/test_endpoints_settings.py @@ -2,6 +2,7 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods # spell-checker: disable import pytest diff --git a/tests/integration/server/test_endpoints_testbed.py b/tests/integration/server/test_endpoints_testbed.py index 54b72ebe..86de334f 100644 --- a/tests/integration/server/test_endpoints_testbed.py +++ b/tests/integration/server/test_endpoints_testbed.py @@ -2,8 +2,8 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" +# pylint: disable=too-many-arguments,too-many-positional-arguments,too-few-public-methods, import-error # spell-checker: disable -# pylint: disable=import-error import json import io From 1b646510b148e83cbcb206523065317cbff908b0 Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 3 Sep 2025 08:40:52 +0100 Subject: [PATCH 26/28] Merged from origin/main --- src/client/content/api_server.py | 15 +- src/client/content/config/tabs/databases.py | 17 +- src/client/content/config/tabs/models.py | 96 ++++---- src/client/content/config/tabs/oci.py | 99 +++++--- src/client/content/config/tabs/settings.py | 36 +-- src/client/content/testbed.py | 30 ++- src/client/content/tools/tabs/prompt_eng.py | 6 +- src/client/content/tools/tabs/split_embed.py | 13 +- src/client/mcp/rag/README.md | 182 +++++++------- src/client/mcp/rag/images/export.png | Bin 132035 -> 78555 bytes src/client/mcp/rag/optimizer_utils/config.py | 100 +++++--- src/client/mcp/rag/optimizer_utils/rag.py | 68 ++--- .../mcp/rag/rag_base_optimizer_config.py | 18 +- .../mcp/rag/rag_base_optimizer_config_mcp.py | 15 +- src/client/spring_ai/templates/obaas.yaml | 2 +- src/client/spring_ai/templates/start.sh | 4 +- src/client/utils/api_call.py | 4 +- src/client/utils/client.py | 2 +- src/client/utils/st_common.py | 67 ++--- src/common/functions.py | 2 +- src/common/schema.py | 146 ++++++----- src/launch_client.py | 15 +- src/launch_server.py | 8 +- src/server/agents/tools/oraclevs_retriever.py | 2 +- src/server/agents/tools/selectai.py | 5 +- src/server/api/core/bootstrap.py | 3 +- src/server/api/core/databases.py | 206 ++-------------- src/server/api/core/models.py | 28 +-- src/server/api/core/oci.py | 15 +- src/server/api/core/prompts.py | 2 +- src/server/api/core/settings.py | 2 +- src/server/api/utils/README.md | 2 +- src/server/api/utils/chat.py | 148 +++++------ src/server/api/utils/databases.py | 233 ++++++++++++++++-- src/server/api/utils/embed.py | 26 +- src/server/api/utils/models.py | 170 +++++++------ src/server/api/utils/oci.py | 74 +++--- src/server/api/utils/selectai.py | 8 +- src/server/api/utils/testbed.py | 70 ++---- src/server/api/v1/chat.py | 12 +- src/server/api/v1/databases.py | 46 ++-- src/server/api/v1/embed.py | 33 ++- src/server/api/v1/models.py | 34 ++- src/server/api/v1/oci.py | 51 ++-- src/server/api/v1/probes.py | 18 +- src/server/api/v1/prompts.py | 3 +- src/server/api/v1/selectai.py | 17 +- src/server/api/v1/settings.py | 3 +- src/server/api/v1/testbed.py | 78 +++--- src/server/bootstrap/configfile.py | 2 +- src/server/bootstrap/databases.py | 30 +-- src/server/bootstrap/models.py | 157 +++++------- src/server/bootstrap/oci.py | 19 +- src/server/bootstrap/prompts.py | 2 +- src/server/bootstrap/settings.py | 2 +- src/server/patches/litellm_patch.py | 5 +- src/server/wip/settings.py | 2 +- 57 files changed, 1266 insertions(+), 1187 deletions(-) diff --git a/src/client/content/api_server.py b/src/client/content/api_server.py index 8b7d720c..76b0fe40 100644 --- a/src/client/content/api_server.py +++ b/src/client/content/api_server.py @@ -14,18 +14,15 @@ import streamlit as st from streamlit import session_state as state -import client.utils.client as client -import client.utils.api_call as api_call -import common.logging_config as logging_config +from client.utils import client, api_call +from common import logging_config logger = logging_config.logging.getLogger("client.content.api_server") try: import launch_server - - REMOTE_SERVER = False except ImportError: - REMOTE_SERVER = True + pass 
##################################################### @@ -72,16 +69,16 @@ async def main() -> None: key="user_server_port", min_value=1, max_value=65535, - disabled=REMOTE_SERVER, + disabled=not state.server["control"], ) right.text_input( "API Server Key:", value=state.server["key"], key="user_server_key", type="password", - disabled=REMOTE_SERVER, + disabled=not state.server["control"], ) - if not REMOTE_SERVER: + if state.server["control"]: st.button("Restart Server", type="primary", on_click=server_restart) st.header("Server Settings", divider="red") diff --git a/src/client/content/config/tabs/databases.py b/src/client/content/config/tabs/databases.py index 2ce5ce70..6d393b0a 100644 --- a/src/client/content/config/tabs/databases.py +++ b/src/client/content/config/tabs/databases.py @@ -13,10 +13,8 @@ import streamlit as st from streamlit import session_state as state -import client.utils.api_call as api_call -import client.utils.st_common as st_common - -import common.logging_config as logging_config +from client.utils import api_call, st_common +from common import logging_config logger = logging_config.logging.getLogger("client.content.config.tabs.database") @@ -24,11 +22,16 @@ ##################################################### # Functions ##################################################### -def get_databases(force: bool = False) -> None: +def get_databases(validate: bool = False, force: bool = False) -> None: """Get Databases from API Server""" if force or "database_configs" not in state or not state.database_configs: try: logger.info("Refreshing state.database_configs") + # Validation will be done on currently configured client database + # validation includes new vector_stores, etc. + if validate: + client_database = state.client_settings.get("database", {}).get("alias", {}) + _ = api_call.get(endpoint=f"v1/databases/{client_database}") state.database_configs = api_call.get(endpoint="v1/databases") except api_call.ApiError as ex: logger.error("Unable to populate state.database_configs: %s", ex) @@ -63,7 +66,7 @@ def patch_database(name: str, supplied: dict, connected: bool) -> bool: def drop_vs(vs: dict) -> None: """Drop a Vector Storage Table""" api_call.delete(endpoint=f"v1/embed/{vs['vector_store']}") - get_databases(force=True) + get_databases(validate=True, force=True) def select_ai_profile() -> None: @@ -221,7 +224,7 @@ def display_databases() -> None: column_config={ "enabled": st.column_config.CheckboxColumn(label="Enabled", help="Toggle to enable or disable") }, - use_container_width=True, + width="stretch", hide_index=True, ) if st.button("Apply SelectAI Changes", type="secondary"): diff --git a/src/client/content/config/tabs/models.py b/src/client/content/config/tabs/models.py index 7eae9fff..24f75d3f 100644 --- a/src/client/content/config/tabs/models.py +++ b/src/client/content/config/tabs/models.py @@ -16,11 +16,8 @@ import streamlit as st from streamlit import session_state as state -import client.utils.api_call as api_call -import client.utils.st_common as st_common - -import common.help_text as help_text -import common.logging_config as logging_config +from client.utils import api_call, st_common +from common import logging_config, help_text logger = logging_config.logging.getLogger("client.content.config.tabs.models") @@ -28,7 +25,7 @@ ################################### # Functions ################################### -def clear_client_models(model_id: str) -> None: +def clear_client_models(model_provider: str, model_id: str) -> None: """Clear selected models 
from client settings if modified""" model_keys = [ ("ll_model", "model"), @@ -37,7 +34,7 @@ def clear_client_models(model_id: str) -> None: ("testbed", "qa_embed_model"), ] for section, key in model_keys: - if state.client_settings[section][key] == model_id: + if state.client_settings[section][key] == f"{model_provider}/{model_id}": state.client_settings[section][key] = None @@ -61,35 +58,37 @@ def get_model_providers() -> list: def create_model(model: dict) -> None: """Add either Language Model or Embed Model""" - _ = api_call.post(endpoint="v1/models", params={"id": model["id"]}, payload={"json": model}) - st.success(f"Model created: {model['id']}") + _ = api_call.post(endpoint="v1/models", payload={"json": model}) + st.success(f"Model created: {model['provider']}/{model['id']}") def patch_model(model: dict) -> None: """Update Model Configuration for either Language Models or Embed Models""" - _ = api_call.patch(endpoint=f"v1/models/{model['id']}", payload={"json": model}) + _ = api_call.patch(endpoint=f"v1/models/{model['provider']}/{model['id']}", payload={"json": model}) st.success(f"Model updated: {model['id']}") # If updated model is the set model and not enabled: unset the user settings if not model["enabled"]: - clear_client_models(model["id"]) + clear_client_models(model["provider"], model["id"]) -def delete_model(model_id: str) -> None: +def delete_model(model_provider: str, model_id: str) -> None: """Update Model Configuration for either Language Models or Embed Models""" - api_call.delete(endpoint=f"v1/models/{model_id}") - st.success(f"Model deleted: {model_id}") + api_call.delete(endpoint=f"v1/models/{model_provider}/{model_id}") + st.success(f"Model deleted: {model_provider}/{model_id}") sleep(1) # If deleted model is the set model; unset the user settings - clear_client_models(model_id) + clear_client_models(model_provider, model_id) @st.dialog("Model Configuration", width="large") -def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = None) -> None: +def edit_model( + model_type: str, action: Literal["add", "edit"], model_id: str = None, model_provider: str = None +) -> None: """Model Edit Dialog Box""" # Initialize our model request if action == "edit": model_id = urllib.parse.quote(model_id, safe="") - model = api_call.get(endpoint=f"v1/models/{model_id}") + model = api_call.get(endpoint=f"v1/models/{model_provider}/{model_id}") else: model = {"id": "unset", "type": model_type, "provider": "unset", "status": "CUSTOM"} with st.form("edit_model"): @@ -116,11 +115,11 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = key="add_model_provider", disabled=action == "edit", ) - model["url"] = st.text_input( + model["api_base"] = st.text_input( "Provider URL:", help=help_text.help_dict["model_url"], key="add_model_url", - value=model.get("url", ""), + value=model.get("api_base", ""), disabled=disable_for_oci, ) model["api_key"] = st.text_input( @@ -174,20 +173,14 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = button_col_format = st.columns([1.2, 1.4, 6, 1.4]) action_button, delete_button, _, cancel_button = button_col_format try: - if action == "add" and action_button.form_submit_button( - label="Add", type="primary", use_container_width=True - ): + if action == "add" and action_button.form_submit_button(label="Add", type="primary", width="stretch"): create_model(model=model) submit = True - if action == "edit" and action_button.form_submit_button( - label="Save", type="primary", 
use_container_width=True - ): + if action == "edit" and action_button.form_submit_button(label="Save", type="primary", width="stretch"): patch_model(model=model) submit = True - if action != "add" and delete_button.form_submit_button( - label="Delete", type="secondary", use_container_width=True - ): - delete_model(model_id=model["id"]) + if action != "add" and delete_button.form_submit_button(label="Delete", type="secondary", width="stretch"): + delete_model(model_provider=model["provider"], model_id=model["id"]) submit = True if submit: sleep(1) @@ -202,48 +195,47 @@ def edit_model(model_type: str, action: Literal["add", "edit"], model_id: str = def render_model_rows(model_type: str) -> None: """Render rows of the models""" - data_col_widths = [0.07, 0.23, 0.2, 0.28, 0.12] + data_col_widths = [0.08, 0.42, 0.28, 0.12] table_col_format = st.columns(data_col_widths, vertical_alignment="center") - col1, col2, col3, col4, col5 = table_col_format + col1, col2, col3, col4 = table_col_format col1.markdown("​", help="Active", unsafe_allow_html=True) - col2.markdown("**Model ID**", unsafe_allow_html=True) - col3.markdown("**Provider**", unsafe_allow_html=True) - col4.markdown("**Provider URL**", unsafe_allow_html=True) - col5.markdown("​") + col2.markdown("**Model**", unsafe_allow_html=True) + col3.markdown("**Provider URL**", unsafe_allow_html=True) + col4.markdown("​") for model in [m for m in state.model_configs if m.get("type") == model_type]: model_id = model["id"] + model_provider = model["provider"] col1.text_input( "Enabled", value=st_common.bool_to_emoji(model["enabled"]), - key=f"{model_type}_{model_id}_enabled", + key=f"{model_type}_{model_provider}_{model_id}_enabled", label_visibility="collapsed", disabled=True, ) col2.text_input( "Model", - value=model_id, + value=f"{model_provider}/{model_id}", + key=f"{model_type}_{model_provider}_{model_id}", label_visibility="collapsed", disabled=True, ) col3.text_input( - "Provider", - value=model["provider"], - key=f"{model_type}_{model_id}_provider", - label_visibility="collapsed", - disabled=True, - ) - col4.text_input( "Server", - value=model["url"], - key=f"{model_type}_{model_id}_server", + value=model["api_base"], + key=f"{model_type}_{model_provider}_{model_id}_api_base", label_visibility="collapsed", disabled=True, ) - col5.button( + col4.button( "Edit", on_click=edit_model, - key=f"{model_type}_{model_id}_edit", - kwargs=dict(model_type=model_type, action="edit", model_id=model_id), + key=f"{model_type}_{model_provider}_{model_id}_edit", + kwargs={ + "model_type": model_type, + "action": "edit", + "model_id": model_id, + "model_provider": model_provider, + }, ) if st.button(label="Add", type="primary", key=f"add_{model_type}_model"): @@ -255,7 +247,7 @@ def render_model_rows(model_type: str) -> None: ############################################################################# def display_models() -> None: """Streamlit GUI""" - st.header("Models", divider="red") + st.title("Models") st.write("Update, Add, or Delete model configuration parameters.") try: get_models() @@ -263,11 +255,11 @@ def display_models() -> None: st.stop() st.divider() - st.subheader("Language Models") + st.header("Language Models") render_model_rows("ll") st.divider() - st.subheader("Embedding Models") + st.header("Embedding Models") render_model_rows("embed") diff --git a/src/client/content/config/tabs/oci.py b/src/client/content/config/tabs/oci.py index ed2203ac..20db63c2 100644 --- a/src/client/content/config/tabs/oci.py +++ 
b/src/client/content/config/tabs/oci.py @@ -12,10 +12,8 @@ import streamlit as st from streamlit import session_state as state -import client.utils.api_call as api_call -import client.utils.st_common as st_common - -import common.logging_config as logging_config +from client.utils import api_call, st_common +from common import logging_config logger = logging_config.logging.getLogger("client.content.config.tabs.oci") @@ -57,7 +55,14 @@ def patch_oci(auth_profile: str, supplied: dict, namespace: str, toast: bool = T if differences or not namespace: rerun = True try: - if supplied["security_token_file"]: + if ( + supplied.get("authentication") + not in ( + "instance_principal", + "oke_workload_identity", + ) + and supplied["security_token_file"] + ): supplied["authentication"] = "security_token" with st.spinner(text="Updating OCI Profile...", show_time=True): @@ -87,7 +92,21 @@ def display_oci() -> None: st.stop() st.subheader("Configuration") + # Store supplied values in dictionary + supplied = {} + + disable_config = False oci_lookup = st_common.state_configs_lookup("oci_configs", "auth_profile") + # Handle instance_principal and oke_workload_identity + oci_auth = state.oci_configs[0].get("authentication") + if len(oci_lookup) == 1 and oci_auth in ( + "instance_principal", + "oke_workload_identity", + ): + st.info("Using OCI Authentication Principals", icon="ℹ️") + supplied["authentication"] = oci_auth + supplied["tenancy"] = state.oci_configs[0]["tenancy"] + disable_config = True if len(oci_lookup) > 0: selected_oci_auth_profile = st.selectbox( "Profile:", @@ -95,52 +114,49 @@ def display_oci() -> None: index=list(oci_lookup.keys()).index(state.client_settings["oci"]["auth_profile"]), key="selected_oci", on_change=st_common.update_client_settings("oci"), + disabled=disable_config, ) else: selected_oci_auth_profile = "DEFAULT" - token_auth = st.checkbox( - "Use token authentication?", - key="oci_token_auth", - value=False, - ) + token_auth = st.checkbox("Use token authentication?", key="oci_token_auth", value=False, disabled=disable_config) namespace = oci_lookup[selected_oci_auth_profile]["namespace"] - # Store supplied values in dictionary - supplied = {} with st.container(border=True): - supplied["user"] = st.text_input( - "User OCID:", - value=oci_lookup[selected_oci_auth_profile]["user"], - disabled=token_auth, - key="oci_user", - ) - supplied["security_token_file"] = st.text_input( - "Security Token File:", - value=oci_lookup[selected_oci_auth_profile]["security_token_file"], - disabled=not token_auth, - key="oci_security_token_file", - ) - supplied["fingerprint"] = st.text_input( - "Fingerprint:", - value=oci_lookup[selected_oci_auth_profile]["fingerprint"], - key="oci_fingerprint", - ) - supplied["tenancy"] = st.text_input( - "Tenancy OCID:", - value=oci_lookup[selected_oci_auth_profile]["tenancy"], - key="oci_tenancy", - ) + if not disable_config: + supplied["user"] = st.text_input( + "User OCID:", + value=oci_lookup[selected_oci_auth_profile]["user"], + disabled=token_auth, + key="oci_user", + ) + supplied["security_token_file"] = st.text_input( + "Security Token File:", + value=oci_lookup[selected_oci_auth_profile]["security_token_file"], + disabled=not token_auth, + key="oci_security_token_file", + ) + supplied["key_file"] = st.text_input( + "Key File:", + value=oci_lookup[selected_oci_auth_profile]["key_file"], + key="oci_key_file", + ) + supplied["fingerprint"] = st.text_input( + "Fingerprint:", + value=oci_lookup[selected_oci_auth_profile]["fingerprint"], + 
key="oci_fingerprint", + ) + supplied["tenancy"] = st.text_input( + "Tenancy OCID:", + value=oci_lookup[selected_oci_auth_profile]["tenancy"], + key="oci_tenancy", + ) supplied["region"] = st.text_input( "Region:", value=oci_lookup[selected_oci_auth_profile]["region"], help="Region of Source Bucket", key="oci_region", ) - supplied["key_file"] = st.text_input( - "Key File:", - value=oci_lookup[selected_oci_auth_profile]["key_file"], - key="oci_key_file", - ) + if namespace: st.success(f"Current Status: Validated - Namespace: {namespace}") else: @@ -150,8 +166,9 @@ def display_oci() -> None: if st.button("Save Configuration", key="save_oci"): # Modify based on token usage - supplied["security_token_file"] = None if not token_auth else supplied["security_token_file"] - supplied["user"] = None if token_auth else supplied["user"] + if not disable_config: + supplied["security_token_file"] = None if not token_auth else supplied["security_token_file"] + supplied["user"] = None if token_auth else supplied["user"] if patch_oci(selected_oci_auth_profile, supplied, namespace): st.rerun() diff --git a/src/client/content/config/tabs/settings.py b/src/client/content/config/tabs/settings.py index 3924c4d3..61aac2ae 100644 --- a/src/client/content/config/tabs/settings.py +++ b/src/client/content/config/tabs/settings.py @@ -23,10 +23,9 @@ from streamlit import session_state as state # Utilities -import client.utils.api_call as api_call -import client.utils.st_common as st_common +from client.utils import api_call, st_common -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("client.content.config.tabs.settings") @@ -60,8 +59,7 @@ def get_settings(include_sensitive: bool = False): }, ) return settings - else: - raise + raise def save_settings(settings): @@ -161,7 +159,9 @@ def spring_ai_conf_check(ll_model: dict, embed_model: dict) -> str: ll_provider = ll_model.get("provider", "") embed_provider = embed_model.get("provider", "") - + logger.info("llm chat: %s - embeddings: %s", ll_provider, embed_provider) + if all("hosted_vllm" in p for p in (ll_provider, embed_provider)): + return "hosted_vllm" if all("openai" in p for p in (ll_provider, embed_provider)): return "openai" if all("ollama" in p for p in (ll_provider, embed_provider)): @@ -330,12 +330,13 @@ def display_settings(): st.header("Source Code Templates", divider="red") # Merge the User Settings into the Model Config - model_lookup = st_common.state_configs_lookup("model_configs", "id") try: + model_lookup = st_common.enabled_models_lookup(model_type="ll") ll_config = model_lookup[state.client_settings["ll_model"]["model"]] | state.client_settings["ll_model"] except KeyError: ll_config = {} try: + model_lookup = st_common.enabled_models_lookup(model_type="embed") embed_config = ( model_lookup[state.client_settings["vector_search"]["model"]] | state.client_settings["vector_search"] ) @@ -343,24 +344,18 @@ def display_settings(): embed_config = {} spring_ai_conf = spring_ai_conf_check(ll_config, embed_config) + logger.info("config found: %s", spring_ai_conf) + if spring_ai_conf == "hybrid": st.markdown(f""" The current configuration combination of embedding and language models - is currently **not supported** for SpringAI. + is currently **not supported** for Spring AI and LangChain MCP templates. 
- Language Model: **{ll_config.get("model", "Unset")}** - Embedding Model: **{embed_config.get("model", "Unset")}** """) else: col_left, col_centre, _ = st.columns([3, 4, 3]) with col_left: - st.download_button( - label="Download SpringAI", - data=spring_ai_zip(spring_ai_conf, ll_config, embed_config), # Generate zip on the fly - file_name="spring_ai.zip", # Zip file name - mime="application/zip", # Mime type for zip file - disabled=spring_ai_conf == "hybrid", - ) - with col_centre: st.download_button( label="Download LangchainMCP", data=langchain_mcp_zip(settings), # Generate zip on the fly @@ -368,6 +363,15 @@ def display_settings(): mime="application/zip", # Mime type for zip file disabled=spring_ai_conf == "hybrid", ) + with col_centre: + if spring_ai_conf != "hosted_vllm": + st.download_button( + label="Download SpringAI", + data=spring_ai_zip(spring_ai_conf, ll_config, embed_config), # Generate zip on the fly + file_name="spring_ai.zip", # Zip file name + mime="application/zip", # Mime type for zip file + disabled=spring_ai_conf == "hybrid", + ) if __name__ == "__main__": diff --git a/src/client/content/testbed.py b/src/client/content/testbed.py index 8e0c5d66..b8b0cc28 100644 --- a/src/client/content/testbed.py +++ b/src/client/content/testbed.py @@ -17,10 +17,9 @@ from client.content.config.tabs.models import get_models -import client.utils.st_common as st_common -import client.utils.api_call as api_call +from client.utils import st_common, api_call -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("client.content.testbed") @@ -195,14 +194,14 @@ def qa_update_gui(qa_testset: list) -> None: prev_col.button( "← Previous", disabled=prev_disabled, - use_container_width=True, + width="stretch", on_click=update_record, kwargs={"direction": -1}, ) next_col.button( "Next →", disabled=next_disabled, - use_container_width=True, + width="stretch", on_click=update_record, kwargs={"direction": 1}, ) @@ -210,7 +209,7 @@ def qa_update_gui(qa_testset: list) -> None: "⚠ Delete Q&A", type="tertiary", disabled=delete_disabled, - use_container_width=True, + width="stretch", on_click=delete_record, ) st.text_area( @@ -248,7 +247,7 @@ def main() -> None: # If there is no eligible (OpenAI Compat.) LL Models; then disable ALL functionality ll_models_enabled = st_common.enabled_models_lookup("ll") - available_ll_models = [key for key, value in ll_models_enabled.items() if value.get("openai_compat")] + available_ll_models = [key for key, value in ll_models_enabled.items()] if not available_ll_models: st.error( "No OpenAI compatible language models are configured and/or enabled. Disabling Testing Framework.", @@ -261,7 +260,12 @@ def main() -> None: # If there is no eligible (OpenAI Compat.) Embedding Model; disable Generate Test Set gen_testset_disabled = False embed_models_enabled = st_common.enabled_models_lookup("embed") - available_embed_models = [key for key, value in embed_models_enabled.items() if value.get("openai_compat")] + # Remove oci/cohere* models as not supported by LiteLLM + available_embed_models = [ + key + for key, value in embed_models_enabled.items() + if not (value.get("provider") == "oci" and "cohere" in value.get("id", "")) + ] if not available_embed_models: st.warning( "No OpenAI compatible embedding models are configured and/or enabled. 
Disabling Test Set Generation.", @@ -404,7 +408,7 @@ def main() -> None: state.running = True # Load TestSets (and Evaluations if from DB) - if col_left.button(button_text, key="load_tests", use_container_width=True, disabled=state.running): + if col_left.button(button_text, key="load_tests", width="stretch", disabled=state.running): with st.spinner("Processing Q&A... please be patient.", show_time=True): if testset_source != "Database": api_params["name"] = (state.testbed["testset_name"],) @@ -454,7 +458,7 @@ def main() -> None: "Reset", key="reset_test_framework", type="primary", - use_container_width=True, + width="stretch", on_click=reset_testset, kwargs={"cache": True}, ) @@ -462,7 +466,7 @@ def main() -> None: "⚠ Delete Test Set", key="delete_test_set", type="tertiary", - use_container_width=True, + width="stretch", disabled=not state.testbed["testset_id"], on_click=qa_delete, ) @@ -515,7 +519,7 @@ def main() -> None: view.button( "View", type="primary", - use_container_width=True, + width="stretch", on_click=evaluation_report, kwargs={"eid": evaluation_eid}, disabled=evaluation_eid is None, @@ -528,7 +532,7 @@ def main() -> None: st_common.selectai_sidebar() st_common.vector_search_sidebar() st.write("Choose a model to judge the correctness of the chatbot answer, then start evaluation.") - col_left, col_center, _ = st.columns([3, 3, 4]) + col_left, col_center, _ = st.columns([4, 3, 3]) if state.client_settings["testbed"].get("judge_model") is None: state.client_settings["testbed"]["judge_model"] = available_ll_models[0] selected_judge = state.client_settings["testbed"]["judge_model"] diff --git a/src/client/content/tools/tabs/prompt_eng.py b/src/client/content/tools/tabs/prompt_eng.py index 67cb9afb..866cc3d1 100644 --- a/src/client/content/tools/tabs/prompt_eng.py +++ b/src/client/content/tools/tabs/prompt_eng.py @@ -12,10 +12,8 @@ import streamlit as st from streamlit import session_state as state -import client.utils.st_common as st_common -import client.utils.api_call as api_call - -import common.logging_config as logging_config +from client.utils import st_common, api_call +from common import logging_config logger = logging_config.logging.getLogger("client.tools.tabs.prompt_eng") diff --git a/src/client/content/tools/tabs/split_embed.py b/src/client/content/tools/tabs/split_embed.py index cfa9a47e..d984841b 100644 --- a/src/client/content/tools/tabs/split_embed.py +++ b/src/client/content/tools/tabs/split_embed.py @@ -14,17 +14,14 @@ import streamlit as st from streamlit import session_state as state -import client.utils.api_call as api_call -import client.utils.st_common as st_common +from client.utils import api_call, st_common from client.content.config.tabs.databases import get_databases from client.content.config.tabs.models import get_models from client.content.config.tabs.oci import get_oci from common.schema import DistanceMetrics, IndexTypes, DatabaseVectorStorage -import common.functions as functions -import common.help_text as help_text -import common.logging_config as logging_config +from common import logging_config, help_text, functions logger = logging_config.logging.getLogger("client.tools.tabs.split_embed") @@ -75,7 +72,7 @@ def files_data_editor(files, key): return st.data_editor( files, key=key, - use_container_width=True, + width="stretch", column_config={ "to process": st.column_config.CheckboxColumn( "in", @@ -154,7 +151,7 @@ def display_split_embed() -> None: index=0, key="selected_embed_model", ) - embed_url = 
embed_models_enabled[embed_request.model]["url"]
+        embed_url = embed_models_enabled[embed_request.model]["api_base"]
         st.write(f"Embedding Server: {embed_url}")
         is_embed_accessible, embed_err_msg = functions.is_url_accessible(embed_url)
         if not is_embed_accessible:
@@ -394,7 +391,7 @@ def display_split_embed() -> None:
             )
             st.success(f"Vector Store Populated: {response['message']}", icon="✅")
             # Refresh database_configs state to reflect new vector stores
-            get_databases(force="True")
+            get_databases(validate=True, force=True)
         except api_call.ApiError as ex:
             st.error(ex, icon="🚨")
diff --git a/src/client/mcp/rag/README.md b/src/client/mcp/rag/README.md
index be28b8e8..561cebed 100644
--- a/src/client/mcp/rag/README.md
+++ b/src/client/mcp/rag/README.md
@@ -16,7 +16,7 @@ You need:
 - Claude Desktop free
 
 ## Setup
-With **[`uv`](https://docs.astral.sh/uv/getting-started/installation/)** installed, run the following commands in your current project directory `/src/client/mcp/rag/`:
+With **[`uv`](https://docs.astral.sh/uv/getting-started/installation/)** installed, run the following commands in your current project directory `<project_dir>`:
 
 ```bash
 uv init --python=3.11 --no-workspace
@@ -26,61 +26,52 @@ uv add mcp langchain-core==0.3.52 oracledb~=3.1 langchain-community==0.3.21 lang
 ```
 
 ## Export config
-In the **AI Optimizer & Toolkit** web interface, after tested a configuration, in `Settings/Client Settings`:
+In the **AI Optimizer & Toolkit** web interface, after testing a configuration, go to `Configuration/Settings/Client Settings`:
 
 ![Client Settings](./images/export.png)
 
-* select the checkbox `Include Sensitive Settings`
-* press button `Download Settings` to download configuration in the project directory: `src/client/mcp/rag` as `optimizer_settings.json`.
-* in `/src/client/mcp/rag/rag_base_optimizer_config_mcp.py` change filepath with the absolute path of your `optimizer_settings.json` file.
+* select the checkbox `Include Sensitive Settings`.
+* press the `Download LangchainMCP` button to download a Vector Search MCP agent built on the current configuration.
+* unzip the file into a `<project_dir>` directory.
 
 ## Standalone client
-There is a client that you can run without MCP via commandline to test it:
+There is a client that you can run from the command line, without MCP, to test it:
 
 ```bash
 uv run rag_base_optimizer_config.py "[YOUR_QUESTION]"
 ```
 
-## Quick test via MCP "inspector"
+## Claude Desktop setup
 
-* Run the inspector:
+The free version of Claude Desktop does not allow connecting to a remote server. For testing purposes only, you can work around this with a proxy library called `mcp-remote`. The options are described below.
+If you already have Node.js v20.17.0+ installed, it should work.
 
-```bash
-npx @modelcontextprotocol/inspector uv run rag_base_optimizer_config_mcp.py
-```
+* In the **Claude Desktop** application, under `Settings/Developer/Edit Config`, open `claude_desktop_config.json` to:
 
-* connect to the port `http://localhost:6274/` with your browser on the link printed, like in the following example:
-```bash
-..
-Open inspector with token pre-filled:
-http://localhost:6274/?MCP_PROXY_AUTH_TOKEN=cb2ef7521aaf2050ad9620bfb5e5df42dc958889e6e99ce4e9b18003eb93fffd
-..
-```
-
-* setup the `Inspector Proxy Address` with `http://127.0.0.1:6277`
-* test the tool developed.
+    * Set **remote sse** execution:
+    add the reference to the local MCP server for RAG in `claude_desktop_config.json`:
+    ```json
+    {
+        "mcpServers": {
+            ...
+            ,
+            "rag": {
+                "command": "npx",
+                "args": [
+                    "mcp-remote",
+                    "http://127.0.0.1:9090/sse"
+                ]
+            }
+        }
+    }
+    ```
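+
+    For orientation, here is a minimal sketch of what the `rag` server behind
+    `http://127.0.0.1:9090/sse` boils down to. This is not the shipped
+    `rag_base_optimizer_config_mcp.py`: the body of `rag_tool` is a placeholder
+    assumption, while the real tool runs the vector search configured through
+    your exported settings.
+    ```python
+    # Minimal FastMCP server sketch exposing a rag_tool over SSE.
+    from mcp.server.fastmcp import FastMCP
+
+    mcp = FastMCP("rag", port=9090)  # Remote client; FastMCP("rag") for local stdio
+
+    @mcp.tool()
+    def rag_tool(question: str) -> str:
+        """Answer a question from the knowledge base (placeholder logic)."""
+        # The real tool performs a vector search against the Oracle vector
+        # store and returns a grounded answer; this sketch just echoes.
+        return f"Received: {question}"
+
+    if __name__ == "__main__":
+        mcp.run(transport="sse")  # transport="stdio" for local execution
+    ```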
 
-## Claude Desktop setup
-* In **Claude Desktop** application, in `Settings/Developer/Edit Config`, get the `claude_desktop_config.json` to add the references to the local MCP server for RAG in the `/src/client/mcp/rag/`:
-```json
-{
-  "mcpServers": {
-    ...
-    ,
-    "rag":{
-        "command":"bash",
-        "args":[
-            "-c",
-            "source /src/client/mcp/rag/.venv/bin/activate && uv run /src/client/mcp/rag/rag_base_optimizer_config_mcp.py"
-        ]
-    }
-  }
-}
-```
 * In **Claude Desktop** application, in `Settings/General/Claude Settings/Configure`, under `Profile` tab, update fields like:
+
 - `Full Name`
 - `What should we call you`
@@ -94,8 +85,10 @@ Show the rag_tool message as-is, without modification.
 ```
 This will impose the usage of `rag_tool` in any case.
-**NOTICE**: If you prefer, in this agent dashboard or any other, you could setup a message in the conversation with the same content of `Instruction` to enforce the LLM to use the rag tool as well.
-
+* Start the MCP server in another shell in `<project_dir>` with:
+```bash
+uv run rag_base_optimizer_config_mcp.py
+```
 * Restart **Claude Desktop**.
 * You will see two warnings on the rag_tool configuration: they will disappear and will not cause any issue in activating the tool.
@@ -106,38 +99,45 @@ This will impose the usage of `rag_tool` in any case.
 If the question is related to the knowledge base content stored in the vector store, you will have an answer based on that information. Otherwise, it will try to answer using the information the LLM was trained on, or other tools configured in the same Claude Desktop.
+* **Optional**: for a **local stdio** execution, without launching the MCP server:
+
+    * Add the reference to the local MCP server for RAG in `claude_desktop_config.json`:
+    ```json
+    {
+        "mcpServers": {
+            ...
+            ,
+            "rag": {
+                "command": "bash",
+                "args": [
+                    "-c",
+                    "source <project_dir>/.venv/bin/activate && uv run <project_dir>/rag_base_optimizer_config_mcp.py"
+                ]
+            }
+        }
+    }
+    ```
+    * In `<project_dir>/rag_base_optimizer_config_mcp.py`, swap the `Remote client` line for the `Local` line:
 
-## Make a remote MCP server the RAG Tool
-
-In `rag_base_optimizer_config_mcp.py`:
-
-* Update the absolute path of your `optimizer_settings.json`. Example:
-
-```python
-rag.set_optimizer_settings_path("/Users/cdebari/Documents/GitHub/ai-optimizer-mcp-export/src/client/mcp/rag/optimizer_settings.json")
-```
+    ```python
+    #mcp = FastMCP("rag", port=9090) #Remote client
+    mcp = FastMCP("rag") #Local
+    ```
-* Substitute `Local` with `Remote client` line:
-
-```python
-#mcp = FastMCP("rag", port=9001) #Remote client
-mcp = FastMCP("rag") #Local
-```
-* Substitute `stdio` with `sse` line of code:
-```python
-mcp.run(transport='stdio')
-#mcp.run(transport='sse')
-```
+    * Make sure the `stdio` transport line is the active one (the `sse` line stays commented out):
+    ```python
+    mcp.run(transport='stdio')
+    #mcp.run(transport='sse')
+    ```
-* Start MCP server in another shell with:
+
+## Alternative way for a quick test: MCP "inspector"
+
+* Start the MCP server in another shell in `<project_dir>` with:
+```bash
+uv run rag_base_optimizer_config_mcp.py
+```
-
-## Quick test
-
 * Run the inspector:
 
 ```bash
 npx @modelcontextprotocol/inspector
 ```
 
 * set the Transport Type to `SSE`
-* set the `URL` to `http://localhost:9001/sse`
+* set the `URL` to `http://localhost:9090/sse`
 * test the tool developed.
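+
+If the inspector cannot connect, a quick low-level check, a sketch that uses
+only the Python standard library and assumes the server is running in **sse**
+mode on port 9090, confirms whether the endpoint answers at all:
+```python
+# Hypothetical smoke test, not part of the project: verify the SSE endpoint
+# responds with an event stream before debugging the inspector or mcp-remote.
+import urllib.request
+
+req = urllib.request.Request(
+    "http://127.0.0.1:9090/sse",
+    headers={"Accept": "text/event-stream"},
+)
+with urllib.request.urlopen(req, timeout=5) as resp:
+    # Expect HTTP 200 with a "text/event-stream" content type.
+    print(resp.status, resp.headers.get("content-type"))
+```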
 
-## Claude Desktop setup for remote/local server
-Claude Desktop, in free version, not allows to connect remote server. You can overcome, for testing purpose only, with a proxy library called `mcp-remote`. These are the options.
-If you have already installed Node.js v20.17.0+, it should work:
+**Optional:** run with the local **stdio** protocol
+* Set the protocol, as shown before, to run locally in `<project_dir>/rag_base_optimizer_config_mcp.py`:
 
-* replace `rag` mcpServer, setting in `claude_desktop_config.json`:
-```json
-{
-  "mcpServers": {
-    "remote": {
-      "command": "npx",
-      "args": [
-        "mcp-remote",
-        "http://127.0.0.1:9001/sse"
-      ]
-    }
-  }
-}
-```
-* restart Claude Desktop.
+    * Swap the `Remote client` line for the `Local` line:
+
+    ```python
+    #mcp = FastMCP("rag", port=9090) #Remote client
+    mcp = FastMCP("rag") #Local
+    ```
+
+    * Keep the `stdio` transport line active (the `sse` line commented out):
+    ```python
+    mcp.run(transport='stdio')
+    #mcp.run(transport='sse')
+    ```
+
+* Run the inspector:
+
+```bash
+npx @modelcontextprotocol/inspector uv run rag_base_optimizer_config_mcp.py
+```
+
+* connect with your browser to `http://localhost:6274/` via the link printed, as in the following example:
+```bash
+..
+Open inspector with token pre-filled:
+http://localhost:6274/?MCP_PROXY_AUTH_TOKEN=cb2ef7521aaf2050ad9620bfb5e5df42dc958889e6e99ce4e9b18003eb93fffd
+..
+```
+
+* set the `Inspector Proxy Address` to `http://127.0.0.1:6277`
+* test the tool developed.
+
 
 **NOTICE**: If you have any problem running, check the logs to see whether the failure is related to an old npx/nodejs version used with the mcp-remote library. Check with:
 ```bash
 nvm list
 ```
 whether you have any versions available other than the default. It can happen that Claude Desktop uses an older one; try to remove any other nvm versions to force the use of the only one available, at minimum v20.17.0+.
-* restart and test as remote server
-
-
+* restart and test as remote server
\ No newline at end of file
diff --git a/src/client/mcp/rag/images/export.png b/src/client/mcp/rag/images/export.png
index 229efce2cf581b59195db647adbafa011b6ce4b7..0b460999dd366df46ecf689cea4e491ab0fa3923 100644
GIT binary patch
literal 78555
[binary image data omitted]
z=J}v`Ec;gbpY~oCRh!9udUE4mv1zKQH*pLBF4R%O*Xd2hr|ZCcH;F5oQQq`6qn928 zO*C<|DAqidZ5Cicz?U%T#q>Xc#G@r!QBnyh)(At||7Vj?j8$tfvla$XT;ulcCHeLwRTZ-RY#fvxw>wSFNzI*Dc~6#uBdqZy4|D@^+26{O_;kHV5sy<0H(*5tW;jE?W3D z4eF=rx=o4os;xC;lzEVn$$AIFo#evmLgE?WnGtRQZc9sHt1a86#uX0_6u@$*YAF*wUCL$>kF7@{8LfH)M1Q^nSx}X+-5k>v5!|!7ZCD2pj6P5! zKDBAsbog1)SCil3;(Qibq^l;cch%zjBCz+cL#)s1Zri)u>}9!H(z^7tG4VpGcPQDSXnn8ho1BXlwp^_%4c^%#+zm=*DD4YoxAejlcDX zFqF{kPWbA1KX@}bjr`Wr!29rt zWNTo|;%aU8PZb!Ts{k}wroszd)1qBL`Kv=d-ub#ZZFap7dKbueXR=jZ2V zW#eGw;9!Pc!3=V}(29=2|2-`KRr!Aw{{I#K)uj4= zH_6Vy&hn z`MoLPQ}M~SmzTP6f2BFuh0^Am=pb|uh%fG2arby#q@aVAvADHjjy61FtpA`w{=>X|LOKm-fA@PZIFrA74KfQDJ>_beJ)XNkvtiRJ9i3Z{EXE zQoMH9Vd8{Dgoula2Q0aXdcr-m#wdvQQF+@ac>bAi0A9@J78Dn^@3R})@2JjEvan$0 zJt+a^;a6XU2Rtd=UY4oEG#Wo zNHY4Gu&|=r*E{wMm+w&aGbc!2%FA@uT|GKHrk10Nof6 z(Mil6i#l0-UO2b+_PGBTdeoww&YhQgXMCCcTRBu#&eL9Z)-)&da`MJU+{lQk^KdT7 zSak32)2)w2y5{m@Sv;DI@tIz$Fe?e(!NA%yVwwSb}-CpCh$w{TB2er2L zjpDAm-MUlB=*$+9Bl%jZ-nN%}tb~LFc-Gq#4_iIY@lu^iEiKK*o8CsmvQc*gY5cae z@>@dzBvr@bc@&pbC#+4tF`7-*b&fyMMbp~L8vB*UmD^K%yfT}2r#Lz}J5EJK1xA+i z_;|@C&wG|a)nc)5Y}mK||M6q?b9OU?1P>37j*iJrnscLQRQv# z@IyHJ!Js563IY6zi;oA7ch!ASXOZhZ}wlbF*pka>?G8*)aM-MFQ+{L~0)I$q3! zLqm}uS%25AYgKg}eZ}|ACySwA!P>0}$(K;evL;v()5`>NIu;gDRnC#!{{FtItS=DUUpp>*^!#}Ho|hMiQ!i6L>p)lAG50Q@4}ef+#2p&cO-2%y?xrvog$yWZL1pQRU=fBOH@UNLj!MQB*l&14at$-odrd!08LqXqsK0D*M3C~XhKf5% zajVvkaurN`WBvjeV$B_~S4Z;Z)3j$wJ0IcoO?)d-(WGMLmRnNw3c9 zTdr7e)TmR>#eCAHvIodpv7LdD(fS+E(1YXqqry^fp9lp`)aNhu6_1!PKh;Ql9{CEb z$cwCfr14ohB^i@)O~*3p&F+vNfzfy>Ev=1VzQ`f}aa8NVu}n`)6RT&F%6OZ%FxDip zS6@W(nh3HRclYGR7_b!b+crmGC9yceDOzb;`Ck<@M2^pVeow9LYO$O&MmSnGo}r67 z#W#1qrx$V_Vz;MjAVtyhK8%5cakZOJpmDXj#(JTWsI2X!5X7fg4?*4$CjqXEkw&6a*T{ArFK^v=Iv->lALRLu&V)U}{w#B4RD;*z4F{G|CckvcuLo3q5o!7#ZS$vLM|4M^XQGt9~Ed{U0ZAMGpe77NBqHB0obu~f?i z2Z}45zt;<6bu^nuol!5}@USHjKAS13+vy*4;P0`Ji9iVt<+-4xXjYJd`1W?%^V3L_ zsRR1zX=dHV{KV8+_*;rBjYRt6Y^wc{I4@3{&g!v;ei14yYu0}k-XA=i{K<$(!ndBG zvC2o=*jTf;N=L|}E2gSS{mrWxhnz)gx0^mp9xUi05m6G!YI{cVVY!>r=cFR4_SdfI z<2r$als8%A-tB5Uvxr{MsM6hFU5rvt$Y)7OjjY_OT5j&F!&eQeCOIs5AhD0x)P{A5 zaRaU{p}V?%PMA!JTm|iWc31$7oP$lq{q87z|CozG1g%lV@go1|C|FSYPWSK2o0UX} zS_c1MO4(p2E6ut>cTS2Zl6nM+JXjX{o$#gs2vb%6ByC2HE61)!6gFs+Aduk3^69cS z=R7Vh?(m`BQyu@N@7dLhsI`y1xzqkW^SZ;Z(o6blNKI~2*j1VCHT%uk(h-T1<3|L7 zAhk)@zUSyBBfk+m^*4LAU(9X(YVVDb3ZCz#mo@3LG~3a+-=13fsYL+Z9ZYV%%&&P= z$V=;W9Bp}lo(7__7{w@+BSli|vRv?(*|=kdHcnewxI%?a^QogeyyN z4X^^4QMEIX0l-??eue78)!cl`zU{Lfi5?cK=+ffvur&3oXDz194&F2kdqP_ZZ$jta zWMLjP*GBdsljUD{DO>v=DJf0aZN%i9X8ZE$`e5kYAA;q?bL+eB2@M_&i&IBoCH143 znoWh*F6MB7@>?E#0-OMuQax=IBR@5A?e9k+Ji%HK)cMgor?`3bh+zr~3&ATx*x>SW z&LgXQ7k8VAEhoOP-x#efYh=9n09;wet!13nC`pB}SEfn!H4a;mS_p_fFra#v+Of7y zbmK=(uuM|sL`yK5WO!HvhYKd?vxEj_X{@Wusvyf-ug^h3yc4ynP4)b$_;}h z2(S^#%H8v46e+)q`xfU*b&X5!<`q_-w_ILwB>xRN%2#lNbml!eYI_;Db&8Jsm}`<3 zp>5~l-3#rtJODKoru112?T(xV3}PJ3HRYWyW6iagPtj;i;Zsr9bYV-MliTPg=jU#x zSDo%8>s6NzJhRX)?pGw&k0n9W(7Oa>+5Xf|NZhVAU6t=Y2QsInLFQI&J-aS{r4pB} zvO7bfo-!_FKZ2hRndXAmY7{`VVDlGR&E(AR|5(eRt9u3zcIZOYv{R0-kU68A8-Mp) zv(YFP1zR|RgnyC0oRWzu(uNH61CaULA1jGs=+csO82BAdHg4#rM90IvtKnTi2gr-s zZsKU0`@;Mqs@`R<#Tv5snU*bzSfnFvfmWlmQXRC|L$Wn(;CFv2nchAe`#Yn7Y5P=C%zUw3`H+(3F|CHvGmG4t6pXYWBE{)xCu@=W42 ze_f6o)+Zzs75Jl!8$V2PSV&@mj=4k#M|RqPj3$gDGoE%r9CJ5Nv&MoNpVgflJbco{ zgUcxQxm=3N6ado2!^P#h$opw%S3=#>V^iYD>=t^gk!9|;ocq_kKcLKT>rYsCcm{>C zA)HJ>DA$U|pKj_cT7e5{O3KL_BIH{^(dQ^j{Z&v&xT)mUqRba6i3-efP==OXR?XzvIz>AqCN2E&$HL-7N<$uS>Qo%H9t zi+t?G$+M4J71z42;hFU+>hFEW3#?DLMly6})`9xLw&`y|{kqTJevlmxbI9jM+`E97 zarVfmELXbaN`K8VrlsUy8XLm8Hop=7@qwFrrXhyz?s%z=K&Q#LnywF^BZC{JzPY&t 
z_U*c;cSbcxO&SZc4CCRARlCdZFbJU%W29eqI-Tq}i9OAy6O^3<+%*g5KMr!+MM0o+zQ$U?5<~JTxOz9me@RQm}ud2`L;#> zR4(i4>awCH$wrQGUxK1)-XIdf4cPbBG>GOzz=3JseMo9A(()_j;QM; z1;pSw+H1%WU;{NCqXXg{#_jE&M(M>rg&q}ciO@`+JNC0KUrH!g=?l&6wGcY=PZ1rM zuPn$Mz>eM4<9xpKGHc>Z{Z{nMmTn<%*X(AjnNYr-m_9uAt8wwp#`a5mnP z)!J%{D`?aT6ogl=AQ_Z)S%oUBT6vN7=hdl`eGSNdF!(EYwK-I|8r9d;Sb7%d4j|(6&2qCKJwW2M)OHQiYi=i;0TDxf)REog7mQI`1lg z)SBe0wbf9PKt=qw%F33)GXk`@sqMY|tSyeH229&A_Z6F)n>vt3WYj2{ckJ-f6}-Dy zwQQ-+(wvzt1INn4s=Ox4)HE#Yu`S?rLTg&S#d&Q>P~aFJFOdUjpGQR`t$+jY475dv z?!Vo73Efj|&Ogv&4BbZQ{_8TICDE!gR3ma@dl10X59f)bEfKRK~%m@JoO z@ZPPgQ5iqW^HNc!@|J~(RiJmIFW?|B3CRjd1AG^C4Wo9 zLqd-`V%2?W!|HCQt5wiQ!KXJ|j`Ma)(&z@qJ>}A{9GD3zk$gi4JW{=n)+5ISj4sZLV^eZyXXS&%!=NGtWxVufK7 zDX@IpD^pkxUiiuG_YNi+U!ki3&7QS(e+o-$W{bO^{>^hBTDi%{A>!?~t(%Us)clji zBU=YJqpb=eldmqRdj*CfsFqfe6-O6+FM-@eYB{E3x`g%@J*BM|d$_>l)BkO`*}F5q zWKed~9AT)!GW{UJIu`hhL2h3G^&tFg%1q?=^324N<=ecUCss3b8yfiWGgCtU{J~6; zXY39&;C9BLke3B2Zm8o55MuIvZ>Mlc+b)b&YkblV3`Cf(@=)u-mZ@H?(P|v=AmeDf zMXt|fA}16>a1(gngNtg5K4t{x5yo76rJE66mV{r&Lp&6SoQ5`Zv^Gtc;SX}~KD&=) z)aLNA73#S1@Sv5?be4J>a{{~w9{SneZ%^EG#%Ed`YBVh0o1xEBK`g*a+t-kP7eNUC zJFG7H?_svmwjF0!Z4qOy6EdW_eQ!HD3uHm8!)ncEELkRT)fPmkR`4l6`)$M`csjH- z#yHn;kup{k9_j_0h|TvO%@P?S@oqVJOgLTiIQH9{XnAdJeLSMUzU*rQD%~auw(bW+ z%vMfA{X0OJLx(aceSNo9>2KE|kDl{SVR*85VFZq07b<;lfNzu~)^R)bkaIgx=*vHelUMiie+(qeZOvxoQ%GdVQNVgt z9T6vmGUEbfM;oo`1VBUtvQzNp+p_HuVKNt*%dt`62q1ulo+*~Lk3wE}h`l0gjL>Az zt9PLaHJx=#cbyTVp z-m7q}B+YjX?h)?2d5P2$Bt~@N0}D^gn{ZKCL-t?vg;rZ}gX!xX=dxKk^cif?xLp_; z%@t{fYxOMUm}rSrFSvEC$S`09afyv^NXVEdJ$<=dl&=)1I=hRfIDa32{Tpt>nf610 ztD&M1=&_>WfXUK=sTAyoHP^d&l5QRC{;jRG*O1HmtU_N%Bt3-?&_wrw04=y<;Pq|0AIbtmW2{|yY@kGuh4&tP7Yx&#=#66Gq**i7 znl1WI@(7ya`GS0BDHDR6aX9bp-QWTjfP(2n<63_1CEblwYj2R);MMHiTzNVbwS!nr zP7>c4PMpg^k*OGP5mv{d+v0JS>SC;COHvl;a=lpnXV4qhv7X!({p=kO@u=C!W6Zy7 zL4|uWd{X*O_3{JbEJdtI+>yV2kHH9m*fY?x4qmw zc`nu20Rc&IsWsJN&Ht$kN7$#)yImXjbJjMCFA+b;_oPKZf;X+QCX`0<$3H* zalY4YiD8L&e}lQxM>@Ut%t3VlGS+MHd;x>Y7bXG7@?4t-BuC<)gNE0;gCrUTfmQyG zLH|}Nri)`v)O#9lFk_&og1I^Ur2|M&NeR6*Ix6&&l9Hgfq-2x$Us@dSehedBzhr4- z#|txa?AutWvvFAOwu8t|}d!v!DB#n%4Hvu@<#KdSpd5E{> zHgW=Y^=(hhz`C}mmEas>3@f?zZj*ZryE+bd}?wYSQluBK0fY(N;*~+ zzh!4*nMH=+w`Os4WicUNUmKktoT9!4b7I#YI0v3Juv!w}6-5f8FB41QDVrOBkG#alz5ZgG2${6YwUR)l^kZAsS5ruN!Hd~SyqUy>JGz75i z>Ma=YeNk`s&wmzq``Hq$@PCY7hJ{Rd4%*mWcj=7uf$j*l0)OE$ovRd)0uD-duw05DOr0f(9w-|QYaw^uV zLsZjsVuljnsE@ZNJ@N9uUHIl-yW6gj{K`zthiR54Sfdg)E87K>uT8gc0c3^!#>Y}Y zJDz3TgeBkY{@+d5nBu-Pu#X{A=_4_K*4T(wg;v*gtxkX>Blka2Jd*WzxK5>q~mVgbp z5`O#>KX2i&^$7ttbN?3vTh$m&OHuKC*ogUc$K+4-DtEkJa&068dcZ@%WMYYy%1R|( z7yVB;Ah7*O-;Vy()bGzoq=COSHz}7qwiQqLODoFDv02mi>qomwQc~1q(trp#_$FF9 zIy>EJs6NXeef=LOwmZn2x9U(8ZTBfmtc2b8qBln{!b4h4&K$d~UQS?ozqqLU`n=`V z;*P@c!)sl8o{E!w?o=DrR;ydhuLbZs~Blsrr4jY$;~|ykn-WZ zkN!)$mzx`c^Xa^e%`ZvW?BC*FEl_D>(+;~Cf{-0NQGvMeP%X^dUW8c6TYpmb>V98`(>+<&fQoVlM0Fveg#ZkKvAfcysdXrTqcv_ex1!t!HI! zy$7ccXp|(lLsXW7)xLg}&hYI%mq69)W7I}MBQ-SsY22H)7^Nq7s-le-QGyXQ0Jp}b<$8#Y<9$hI%8bAdeDx+1?p5$@ZNsd<#^iu9DVnVp?j!_~ zn@~0SrmX0dC9RqR&Sk}`P3tW}IVahny0VrbBxnugLrFxJxelg8;;}m~{)+M_kx?mC zP_H5`YBVx$&TpI@yq#-^YzY7gis1w4!#)jv=y@3q?!mFK`GCqMtQXRh|AD?&oF{8$ z!mZmIJvT4?Xu&ko5uy}LCd974NtL`8=2lP1L^2V|H=B7{do)XBvIJlx7u}2#1?hozo7UPT(3kz}v_$l(VXG(}=TXYHUwp)(SHwXR5#+ zx{aA=K^hf21JcGpQLykureVCS9qCazgD$ad5wle+9O2nLB>X5bl~w=xFj}XyVyLu! 
zGSpAoHt!^Lt!;6!Fi3wP`6?)lj))X+av3=Bey_L6^tew{Zeq7EJT%h2nW*yX=r#@+ z88$XC;!NZ#9U0PYD(6cpz)?pqv|_5y{j#_i!^7(CMiS55hfk{ND4(c_bL|Ldm!a(l zx;|2f(esUnKqqjQ+z1R5MGH*m?E$E{{`Bp2zUk^tPeZJT*hG^^<&&NyGr7$OIXN&% zyXv(fSnap_5~gd_1%KuUkw}4&oWns|Q5(MRd9W;NgN+t>U>%aJMIz=Y<&|(uA!fa5 zhh*LaWIG6-w{1$az8pPYOD{ zz~c76!$-wGX;1-D{rci7K_lODC1Xs|;wp-!Hd>c}5Nz3n3Yi{<@MY?>-y^8!My7*< zhiCL=FyI;12A8mfpikj-=~M_lWi5P1rm2N5xNR~3Prx#Tps<7hRc2*mqgwyjok{&X z6D?G$)ryG6dU0JG@;2#;VN&}$^IPfppr{Xfqa1d)9{YEf4@#~m+#TTtN_O=hK-TOP zf;w{X`7%xr*4a&tC6|_NVHysD1Rp@yJ=kpwf#@6V`Bps;cTi z+!e^yF|t8hpC-@VJ8GvS)G{wboZV_OZ(!bL14`;7rYBo6L5OcQN4yHvGROJ$uIFS@ zwDdQ>(Xnt&6p6V@4jJMf-N^2JV@vP7a*JrWzTyXe_crX?6w0^j^>WJ!ydCDA#Cp|& zeiMUQ&@LhfYM&4$qwbB9E)$qhf=^ZW-BvGx|@o@%f6x%NzT(=a5^+|gm z6S;i`D~sGVCo=G3H%%^Q4wfe~P0=8c2?;#IjUO`TXCp_EJ1u)PlF_T zvZ9I(tMxSNq3h?3<7<$g&Slt%*@r~Y>^>O$E-vP{D2AWm!35k?QPI%wvq!Qpl&%2+ zmbOW=&CqRociJxi>xn@g?2*Ifd`NjF;e8Nn7! zN_TEuX;8$wx+#y>1_$=-IbJd;KXV!L79BJsEB}M=%iV`;wy73Lc>>*(tSqcdu71Fq z)B@qo3?*pT0bfdL=;Y+&6eYZyHa)|H&R(bz3-07@F0wO3G&W=-R`+-H`DdEgTU5Z| z6l`ydXsuA@$L&x(T{8*cGMd4 zF12`U9ZvHG2T@hhkhy$bfKSi1KD}~7J`&0E2dD@?=f#oRYpJUv>q&h)7no3A-TRuK%!^o-p9{5{29e;^{^_SJ7NTgV+eOqS zz@+ii8mMyWNNeJdnN)h=Sp3ECRZy0Z4!@7V5el(RhCnEXtESN0AkfclO55)-&-rBe z`p~jeECJkFZ*tAh4Ao~y_{z!Cd95A%)B*zEE1~C}I|&554-?Yg&}V5={TYalpRyFO zSyaxCX;MiW(3 zRlPLDS)*Cw$6SW?PXlV|R@2^OUP|50U7$)pL@nzEGgNRBM~_6>b%+@FJ@1fR+6?*ORFsM@_uQqmZBwRRUE2Y_>oPDfq{V zMFOfIOvwcD!rKFWP)2K%N}!EUG$lUX8lr*$rQ$vo>{rY&y{T| zP=SXVe@;(PF$#}SxI2zIe8oE$gZQ+QMrW1z;jdL>z7izT5)04*g+>w9a5y+P6B8uH zb1C&H7L;10ftE^7gS9}5k&}CsT{XS`<=aNS!#Tn2N?Y6#jB4)9nzMYoIY8#^2t8$> z(KLl~mLE1SaH9##7o=alG;zCzE}50_m?wXFrYH~*KwH|{V|fG0I@gob;cQ(26f{%$ zANv$O2~-jq3}DNa-fjfH1j!387`#taoV5)blI~?2wv)yow|2rkiHlq0pEwQeBkO3a zvFylumf0MuxHE9@33In?7A9r|{w)R!KKwb^i+j4NmIyO*jEB+xnIa-nSD-4&HL!kf8Q`Nei~ z>h);U+nmw3==4pHCX$V;|Ag0cnVyelwE;gqi!0V}F!ywZoFfGuF8=qKK1Sx)XUv4e zM1B_>gw#mlS$=k_w1z4=K5(h1e&m*p(fcsmSbUb0=~1XtJo`dut(7_{C}N>WwpMx6-tw}RXPu;2CS>ik)=O5_ zg4$OC(kf&A_#Y@c+0Dl92|ZsB%;s2#(5a0w*@9%;Avh96WHc#W8%eAiTZ!cQCyO&I zUqpIXvZ&ilH-x)7pJ;ffmoh(VgcuRfW>2KzSw}&=;^xllspr~(8{5loRcUm$9FF@E zvpmDCVmYI#dv*NvEUo?F*6Mu1V7=#u%wT#Nn%1k?boEwGBB;&EK>o53G4L?Zd$c9M zbK!3Y^0F#=6~a=$jl4bYQZp5O0~WUCT#ltgMM&A+Hj2n%d+WvA9DV$o5|Ho3jRvQmt5|ADZ0I#)zrFh2+0y+-JVb&L+M5*S};Y&rH{S!L@jxczJo2)s@dMp zKGbtNjC4G=@Vgny)e_q_Ecx8GtZ4JN7byzz<M8b+@muCc0g0{{+_z*1C0w95`)FM_ zH(RDrVjBptM5C18z$BXFA8{ICayC1PFw}4b&2NXIge|74iJ;1JuYi*9K+M{gXC%mS zclXED>fEcqibz8%k1rgt6>DzKiu!&8ee2#&JhpWa=%BoxkY2)bP&V@1bOt>>B@wAm z_cazLnPb@RSB-nfM?<(50d*kwxr~`0j22gijx12~^17fv3J?JIJ_@%DsMxZzm9-aiP*7wK}c2I;> z89NVF7a!3mk;x0uw>!#kU zwB8z0T3RfbQeqo{247t3Ptppdy)NLXYq!G3n)h_UbF~eOONt2_cgIJO3)&vD9@ehL ze8%U^o~I)tj7ubC!pzswuSDD@xPsf>m-{mewDVojEN)>od z+YmgLc}v!`E->p6czrCNPvGWu;`(|ZD)x`>aNZUo==4|OaqGd>7nyJ%p)TcaqRQIs82K-@oNCDx$b7^@SZFg zto`@XJaxZyUiE;WgP&mu&r}lE;3yoZyr=yp@A=gT&VNXFVH9ZKW6`^~dLQAsY}Hq3 z=zr!?dj-7bj_S;LV1_nJfnl)ns{s37-dHszTq`P|6)s<(HVVP~b$#O>(Zfyv`HtwY zf9fwZYFfU}_1zXO%JjmgH=f+W4KYqYw<{w#Evsg#s2LS=PbCA;$$Xvt()va7HPRe+ ztF6uPR^*Z^`k9xqA$XW?)%AY~P{n|INgI4eTTmT&Lb4zMRfP+SEVU6NuvkR;?VnjZ+&!<{ZF4iHGMv(Hg@x6E_rX01wX5svzHlXsTA!lpQAPg_iu+hS-?!89vc zP_cR=C)9kFmWua7RXFP|1d)g;0{jk$>yXBta>s+b0tlNR2=-a-WdAy{IpuFF6?@h+B6RcMv z{G)}DQvl_ornEkY1c0|)@k%l7N8phD{S^uu#R!JARs@ac7nIg{b;rBK+YR|Ik|8LH zOf@^kbWh)fQ3=|D(>k@M)B5KWtcLqpgkn8sM)>|ruUTV=#d=;dLm$#i*+<#*xe)VM z?8b5|$KbZsFraz7-;7Gnhj=q6%IX`RCGgk(qJ;>-z4WuS1@bHTno-$!j@}A5FS}}U zc?K*=)a~dya8UFor+zzv;L!ONfsXRYD#7W z4bd`$kfXs+?rTgAVOl}qtEn6#wb_fEB7*RzU4u;$U9HBSe8NkWna}qTw@gjWrym_0 zP$?;+oIR}7+sK66|3G8uRl71sf}hn1h=>jzu7Y`?z-vWSL;m?q41T`%1Y=YUz+D)@5kTlKg6$5 
zaJ?UhRMk?7=%Ue7B^3@?9A?-a1qV(OpM4Bz=s&cIM!4Fe@VG%kmX?)V4v|K8yi

usb$yv?9gDZ4*A#*%0j^as7_sk8R@=YRyq|DLNIfbjC_uhiOAiLqa3rR$ zPgN+JYJ03;sJGcfhDyy43XX~sF&@p8^dONlbr7^U3T|#~DXOc7ER?nQNwb3eydG~C zT^SV&a{TS>?Nd0M1UNVv4R23%T)Q$fLce^W`8c!wE8W)qibT`E<26S9_Il( zM-JVhqR#eL$7;3ut#NG>6wL9SmFr73HPW?01H=cey*4i-W0KRu?*ibpo$nJv%I*4z zScQY3$_43MvgkaLbTVJ?ysDbs(cxE-h3}AAQ7YMtpoH``R6R$T7@6&~`k3DO4a4$V zUXf3ujANAEohq~>J=$y}D26n!G%wt^Xllh(H3ZFnlTb{k&wG{{W!s0+A1gx@Q>9uj zoD)*2iue5yLWqFV%UWFu$}z#s_qLS4k`I$-Pnexkf@IXc8Y*NJ@(P@ql`Qqs0;N~Q zc3yON#?-KRJ-Ix0HPQQneq_F{{idgovVYM##rf*jUn|{CKIkbcIm}^gkdNyFM8Ye> zy{4185S}EF7y~}TH@ckt&}zFg26!H_fp*b7>#gVwWZMS^1&rRwaeobyKFNqC(x}Faljye37RKpUoN#PDNwU(MYH-7t)Um9kzKC2i$5uG%T|D zHXrgv$J4NbsWLl1GK2=xEN`4!pS{P?k#EKgHTx_>Ks%W5RXtyi)|Zi>_Y613zSg** z%4OC(w)kCdKQvy8F9&uhHtKVK->Nt-QZ&~Otf`sd2`Z`#Lrt*JK7pqt*U|c>(`rkA zir*Y51q$xb1Cp`72{u@JBO)-FJ)~q0lUHsN!IIQyX_k7`oX3s%CK`Vv=khu6ZBHLU$}pz}GO{ zLSlN^BpYjathA%gyl;a?4|<#;$T;!Q2%3YZr>lda{jYnHj9R;Q>tBM>W2Q^Nfb-8$ zB$2nPUVVY;wbF8*O-yJ6)RAdd^pey2dz~?;sbUP#dN=EPx+?@g&HN3F`;3aMwnz@T z?r_|-i0RG30p*zHb*q8n^L)uoj7I})>1f5v4vtfVwO9U}~)9TkNTlHH00gGG2M;6Fb zyTv$osWU_=l z_{da)GQn}pwe?~>thxCKyV?1@u8T|Leh2&?;ca#Ak{cJ#uAOBF=V5_?VUq1y#HJIu zM(2a>N7t4QL-4=m-{Z^eWA)|q{S7;W0I6i_N$hiJC<@i|01ch7w>L$*or^);18NPA z7DE`lX9qT@vz z#QypE)HbS$+Xft(tZP16WoHXd>t|mk8E8W0Rw(%TbPn;&y3buy(tjznK=V)Ma7qJY z?86v{y5XbB4?jvs5=wj!C`&O6kIE7DlIgDSq5C-E* zljJ|Rc+17Bo-YU1US$oFb99|h#W&%fO2LYtPWW=goEQB*8XCR)5h~Hm7V8G;cLf>t z-B`=nZP7=`D9KM)Y&_kvSw9$;mfia6C#UoLAG)qGAg*Oe2MzAQC6M6m!7W&DcXxO9 z5Zr^iB)GeKaCc{LcZY#t=f2(dHg`Ao7xRO2x=&YEb;(y>+t{DgfXnQSS*I(FTQ5P^ zm-#Da_G8P1g+`B8CJaL_3+FGYy}s?td9C+CTR&-a93L&6Dn;*!^uJEW?c7?16Mr6Ri9uhody{8ROoM7h_Bxh&(I_Rl z_3f}|d_rWX#3ZLSBB;45oL@UM?|z@uclNWBe|sd9--{nOXN==hM@u`4aU*_rvcKwt zwdttm15)3NkXD)=6`HTqwOE}D?7$Dlh&U_L_nntH1t?uPCPVg}A190uh0t24V4UZ+8H(ftFpM*d6UB}vyDQ9xE z@lN10e@l75b^^(Nlx@aG*J;&KH_GLqXZ}bw9JRB34Z_Kp@e%0jrUZR$<6^Z?5?N}H z@efltwF9#0;vSUIJYxz^q2u`l`v1v&fvPjUf*`BP6b5$NJv6trBNhE=C7evhN4oBMt!{=<(vG6iLIV{ZA(DD>?;;}nX%oc(OCeqqLV z9b3i@y9)V}l6q~E`gi#Z#eVj!Xif!>W5@%OqTI_zG<4Q8!t5gD(S;N>UXI{ zRA2;c`-A&%aO`ZWb7Do*2p$y7C@mb<(Tv_GRT z^QUWi$r5+>)ia~V$X25{&ClTpEX;p`ZZuIZh&i@3gz z)a=a4{lBh(4ebvb_ZnCB2-Gy$B#_ShE@!JtyQDvOBAfep7nb+Ua5RhmRM7>;xf+n5 zBbq(Wn#qO@<1c2evQfU_&{We<|Xm>>Ux zD-8bJzr10iE9Xvf54AtmO#f;N4rut?g@a*8iPVr!WC5la_;aFyOdr}C$+4`W0t4w! 
zX8B)%zPS?gvr7FO+;vFqvM{kgsp0^yk!y{yg97OnDJgjk4TyPZeX4- zKzn<4E~KpuxrGS{xp>J@VD?WRvCjeFryz=VWI3jDi`P^{Wl6-H5>_(Ia&s^R1#X*` z@x`B-nv(ePMcBiG3k)*~EE6%2kdX=jRQ`OHfmF)>vjjumct48nUIabyV`6zUb5YS# ze-;yiHKa?p{2U}(#h}yj9=d*Th|62~(@xma(LaE`Lc5wpyVMjFOiU|l8NF3hRmp(% z*wZsJ=)}b9TWvZ91_lx`GNNI-P^C4@%D>6ltp$GS-&_EH-;_DjkRVH*`RY5?!AlkS zSUZn4;VvdIiZU}6FN*U0xz>HJy*Jz7Kdj+;Q&S3qU}SA{CO1hfW5ZIF1LobydPvQ* z;?MkiG8%3SaD$_mt1QZVxlT;FT?Yg|pb+!9!EPliv(=S-6z2$4l+!84<#b-uAQ@Tt z&v2UekmZ5?l@;@{61-pe%J(Ovn~N6W z=w)3WrJ7}R|A^r~`o7rtxR?I00s!r0(d)M1qVit62@em?s0}|&UF>KncuI-cnNtIF zo#}Yl0~$MQQ7UD@EgY~oF->q_(;=p;qRd@=lp;G&33{J18HwJ1fb{OqDh(UZA^SCP%cv;I-^jHn{r1}SCwAX}t@NprfUd(ex&%gQPKK3Hr62NgkKOnmBstR)3u(GkrtE+!PKt$9D)&HyGzq+FRPIJ|HMn?7k zU%;2ww)K>Q(jJ+B1IcaOqY$%`%ANTfCw`lg+}nX$?i)f@ny|W}qM?Y2W9vunSKz(T zM3&ED+au;ai`P@?KC|lyE>fpCRhH)y7iDl15znx;?x{mB@qn&PFv_gH2|p0+ zsIL!7$4wujI&n1{=Uojd|7R7KceZVp+|n7Jh*3eCjQXM5-@>+6ua7G`#qyFr9!dr;S@3 z!1KODHEmb~Qvyy~*D)MM&>KFMmS&(UMaAvO5+;f7LmX#9#h+*R9?~oftd`LzC4vtC zjkFn=`0r^)!gQ#FpY(ITc_42k?}PG6BN$s++)dPtMMYu0eEAZaogG8SlgUh7y~ZpR z&BSbVOt}(MY**2PnEkWpZm=_psZo`kLK?2AHtl$OZeXBK^r zXIqDUAqC#gyTP|t45=pOCEb2_iaAyMFh=@Un-Fl@%3S?A{$bSniwC zbMMx-E^+g#HUxA)VRLgKLm-Kr zd@w`*0A-364FUmYnG>sTWdB;Lo6$nW8W_C`-|l&D0~djmRVbA_ z4#t~8WqG;6f-@JJ%az*A2&75bR*RFQDZqJN>qh(jMO>*2yVthcyHIFYE|#$=I?`0(<)_;#(t-*SXPg!)a(0ufchHE7~KrsbqIFX(UOxdKh9zhTVAO z5F8?+NTn`cwe=E0@`BaN6LM&^M}HK7&0`OlO?G9@&EQ zr$3Zwy12yO8($^+YI0uZzE(YJK)jt@qL4Q%q+-Yrn1I*jZIAP41oxngDX1J@c{hK) z8+|nyulx1W)jWH>`+IL8J2eG<2U45wsbD{3!5}-fSL1Tcuy+VE)z4|{jX*h-X14eRhHEhyQD#67; zN^hdNXLX;a+eS=`6`sv7GW3Og&ME5IUwP(IrV}%GTAJYepXTyxJtB4k?^Fcp!Ol}1 z`2)0MCF1~UR&VKo%HI#%|9F=hH<%Y8sX3pS_iF9Er&JQteQ=#Ur%2gNN68OnnyW2lgbB;>;m3zKAMyUYM$)fxQxS+sx| zS#V@FRO{`!m6HrOs++LSfcWvlh{<^|bh~cy!#_L)f&K}M&yeTQnF1C5LyZ=!p*G?E zsqJ7uwuv}J$sdIN-#8XQ0q;| zBIZS%Amag!MgbcMuqR9S;bLCPF1My;Iuk7&9Om7~W`e69;YUZ7-f%h}{Nfpjx;VCiSG#bfp<^Gb~NDu6Er%4JTl(GQAjIrAzO|33o$?;klP(9irM z)_O|4dXN8d$u_R#^v_nC7warApM3>NN=oc^=fD&dVcNU9CZUnx{ghh#-J{8-`m+a9 z5|WaAWoo8mymX`Avbn97YhVOk59I`2(KM?9)6z7{G+MDqDm9|MHGOhxvCCwB&#J58 zO=0^Oh5X9X-Lmyv5esK8*Q=2M&Z?WxQc>=`8&T}5wBYAaJV@sZDz^V)Pr)Xh;KGC_B|;nL|j}P9BVhlDxnUR9gWOrao%=m2?qO|%J?s3C9Pv^0|fz&B#y{5mKFu>ux&cP1{eFB>gnZcfF*?IxDuV3Z5j;054UUhyM2NEzXEM_X%k z6_RLYn%^24?+y+kC#RyL7t=6#$FHFr`W0LN)=~<3fcH4ybYRT;(2Guh7xing$tiYx zN{fM0Otdebq5^Vi3OzI`oVO)l{cqL5E^4%kg(bkw!ZM@(zv6~TY`DqP9j>I+_DoGJ zEv;=3pW6PJRVJFMlFI2aOk<(ci8c-C-ML=}6~go*!8`N-<`3dzBYv> z<$ZEKpS*m4YUnNu5O5RJHGuyzJS+nj>Hi`rsvz0Sg*YhT2vtecDyVW(qwQMZ%xKPadzbhqevmuz9;14s6a!wAfe52{&DdcP9ewF%Rowvi@=c1C`S*P6 z@S7m{fuHlg4xvJv;ygZ~63s+sBiewyJKOfRD8_k41I}9axW?+Zw)WJV4Tu1}@N%t2 z9@26zF-|NT9HWQO+%vD;`dwe(Esxt3&;vOCo9!Nlf#gTLga63RkKLOtN(@YS1+1kGE6xV% zm1wyJySMRx+UTIKpOG`2WT^DwDP_7?Cq}!-FpI>Z2yGwdgfG(pprNbMHH2_XN*W&5 z6Uzp8B2!Tc<+ck$#3oLj;UzD5o8YE{6kPc$%{Z;#d@k~$(h?C*#Tu{fGl8lW)KiVn z+^DKHr@18C_f5Y(i*HX$CU1ZpN&?^K`0Unmvx{qwbA~Rc&R1G+TQ4~SF*DtRLhbz~ z1xvY3BZO9iwM_$Z-w)3HaS`8+uy(T|`N7rX#HgQ5>P{}?IX+*$?#3d*&3cR78FcE-ZsE#}KMTfX9R6jW0$Dh=M=>D1bhq_TM-l}%T()RC5!9db) zsq*R;w)tCmJ;51fSfqxAMyEF;aF`!Uw6hqW0tu2YE|{DqU4%S=Y{rXWG2T&a4`k0r z3#eNWdM${oy52~Kho=N?PUoCU#ozrvBfSqRdVUK#PO5ijb3c~M$`6GyFKL)_>sk0w zHsYK{*^8vfz&)68N#Rv)x*z$)e_D z0|pYrDan6pI-t^WgQGdfsuOl#pH=RnaWYOyR7*iK^J-xXa}nMca+&q|s%geu2&+AqH^zQ!E_r)5wK6rmXfliu zlQO#3WB;~$!tasC*o4S+JRNur#Me!|eNwRbDsqppygT7s<7K{kR6xtEk_@n^B)>iflN^Cg0Uh6rF_W^qk6rmABlieF(CB^78vxC^{I6e7LaS}h+nF5`o9tpKd-F@Q;v8;$F0?rlc z4Bwmc1)O+_sKyCKB!aRtl2`Z9FkU1)B7X1CbTA1Vl>5QvK%C2`mF9li&QnC&=M5M- z%^L^~+vP27LvUgM46Kwz^nETq!p{xUVA|C%T7XS9$miZwBdeurihErd%N9VO`YHO?P7KneQ{CLkzj|~(-*=oDs 
zzgQ9YmwT~+u21Hb&1D9AJM?CYTb=5|>&qS=-_w`Va8iAf)VYUAVxsK_!-4Eq!iV!$ zSl>%T{gj}@mjm$tp_I~yym>o6MBXQ=3m^dM^_h~X%aKc==&1(>vS;os$(wt?#Ex3P z{U}bqF#2hnxV}RGgu%((O2F-LKmb(0TM}s77I^Gdm{o69CPV00%f}k3$lFr_`^=YP zy#NQ<%|b*6HdyK*YS|n8@yZfQ%u~4FzB>|A;qVC%P~OUAGz#kzQa)ZzfG)Ctk&#gb zYnb7fF{YvK@42a#3g;ULh?cbkFt>OiRiI5W`}3co#|9~veJXn7TPKg?Hi z$PDTnt2!P{&a&TqG3*A`Z&GLztf@jPNh#`y#Xaj8B@iF zu?x_6R>!nTQzs7NI-8KgR*BXgc|O%q(1mf<^nwj0);!-1G@;dIU<~&51%kj(e9Zi| z8lM3MUOO+}ry;*jZvn}+z_cKE)iil_A^>pRZS=cW@BL&KhF_t@d}>=0k-$kAFg9I$RBPFBJz0z@ddjfK+ay^jJR;MWd+9ip+dm8RNi6}**@oR}*mM)^=e4`Hi;ygcypeJ2MAb;pm_;I=ti zVMH)!?h`F^(GrvKQ5#z@ps&Gv{5H{7Ij!~Q&~zc{#V z-T+&yz9W&wXoY2}9EFD(%oqG{Hv#7&cxMN}v=^2R8!f>HL-*YPsIDS~D%|d_2*{{M2>AG6=@D{CH8QEBF6D~#1idUdF=UIsDtQHxR2 zjezmgiT&o6B`O!FBtdLW_C%lDaaFJ>mZ1_CZO%SqU&0+v60P{`UZ^DGDgm)Zez>A( zolXX(m)j}z;5fP89>f{wb?c`Bo+Ca@^-6g1K8w5%tS)cyweo21Idm4un-Y?YQb}J06#FD6>Hg zRZX*>XPKO^Rk^!|&#%&gy3C$^TqkDl&L%3jIIK=47>Nv#g39Wwikol0N*U&5Mq0dm z!={3Zk8hKk5T^i)a!SJR|qWZrRW*kS}56AtqECw#;Drar#X z(j-5UK9i7SqJj+=hpoNp^>w$P#qo*l=NykAN_y3ONGFKM1KW20I%qKfLPZAQ;~2fc2sY+b z8B+6wH|K;L=o-iM{hl*xtxljuWru?_XT|Pko5fFBFBdBVb=>wFIj8%fXrH@}%Ny5g(fVu#SxKpy?AyRpl02ISh@5&M?!ZwE za9Tewy%~7!yRVy!b7J;-O^d_vl*nVtS0E!rn^Jl#sLGHxn*R;K6=6m;3iljMJ7}LdM+_36aeaX-))Ggw!7B-toG&BGAcWD z{v%{IIRwI|^@!t*uqc@h=}3J(soJd+t_N>1sw zaYoEo?G8o+{ZSHQTWqz5ESr;Lk9$M%^FTzv%vF!D_yT7K1W!0atXA1NdQb>)%`dTJ zXyK3dXE1lCh;;%>*&HM~dOM;6IGOCVbmxmoNrflHX4$>8wR&z}zfi4lHLx{URa{_= z=|{KHvAysBUcyFB`&Ok!lKmpdr3Bb8SL_MLhi!2wWtpuS$iM-uQLFt$H5`S23xpi~dVQ*Jcx>-Ka9hGZNclA!XLK6cz~_U_VN zUcbJe?h||6&T&N*J}IC3?6KgxopZ_)qes2Kh*Gh1Gj&v8459Pi953~K>%+aV$^Mpi zLTJrMRn0+8g@QcLWao>R%iy;hcX*&|2=`$GON&S3;o$CQhVuQEd{Zgg9^D5qBo|6c z76YCUS*dm3+qs!F_;jpi_hkyi>#vDg9StA~LT2TQ>Va$rNL(j_F>jLwE0L)oV8-k1 zpi<7_ZA|A2@dU5c*K_KGyHkubk;l8X<>l3~dqkFDJoBP%~GlTtr-ZLLl zSALXKdv~S~9PaLY&l;uIo)avNWaf@wkJi^;# z4pbAM0`5lKONHu#>26_1X*ebH0H?4wYZx?Rm?^Ag(eMNwDcmk4@W_{E$(nnGc~nYP zowKd9=f`?Toy%sB)6b9B^cc(#$$GwzaoyC$mkDBsqP=@#U0Z!!HjOZ{9w)Q#IUgd2 z_V&jDO-Mqh5g?MWF;`iU9G@Qq)0Ze{n8b17mq=LD5YFj8j)$3I0jNCNo@f%`^-t6{ ziVGp(&-Hw{nD~K6Bq*bqh*GjJwQ?}p=G|6&HAmQh@|7m^<9R#51b7Z4yPs%HuYv_$ zr}a$TFW9z{tc1M~sXXOnd4xWmcUAJ;-3&j+(?bnhqz(P_{R*V0KxJVcrLq0S*ps&_ zc1?*XIk=xaCXc1oV#hPIL!Wnc-lT9~Qbf9tez%go z@U#Pw-7opPWL0uA7Zo!{L`Yq1MN~G;Kq@K4J1l|IhtthJFuxEt#&(~sxNVi{y|CaK zKK>wije(a?8RTF-@*LspM;nZ1*wdtGsLsmj#C-%w{L|D zwYlM>geIG8M&dE}X*OieKRg{fIZUo<8$Z4-wW4-Wm6rKz$Wg@WN882=lGRpSxoO^%F zZXJUL?M1Y=SP05c!n@x!tC2T5aI)0aFj?*}Q8O+qJ%-l%E<|=qz(2nmuCLumMfjL5 z6~cl&I60tAZJ(_tZ;Y_Ezw<8Tmdv9-g4!8CUtEvWotWFmAXEE`t<-L4dUq0!f&KdU z1(ly&#O3w8+b=yg+Vi2f5QPH}K$+#Od*pk{A|y*>T8d|2u*Ge1w6fP4o0Sv*&b1{V z!@D%U!=V<0K#=!`#L9UC(6xZ^V`l@xwL-Q1 zn(u-xTXWlg3$I2--2mmCse#6T%5OH#$YABiV5!<2x@q>@oEt3l6u49}XEmv4x^hG- z;5}P#h{_12Pi7a*4MC1SLE}J}4G{dSE>ENlu4!BrVdVl^Txh`z#QYzJnYy391d~{n z>3x^{cz{XAT%?l1VuGM4#RR5JBYDLX4^&JG%V)22rVuUf-Cc5S9p5@wW@|k*cd&8l zfL{996?crD$S5_5GQq2)iKZ8^e=LQ#B=Br+Q6-fvsnvC9(q}H~RA6oy zHq-)}Rd(f82kvJ(!R3>y&AHJ=R^`?b9k7yDs;nFX0016E@F|CY5p?uT({%g;soFxe zE@d2<5INoZQK^*X>o>wXz^zGsiyL{RJ?Z!s)hk4zQsZS468=bZ8_lg3FFO(c*=64K zq{Kveh@X(Gx&FuX&SFyZ(3KPr3;+UT`*$M^(@Wes6I42?;$vttkPkC9nlw~UsBY7WgDyK*T{!9J122Bv45g%BVS2Yd<=PqGM1H}g5UTuz z5UBeF%zud;nE;SJUuqW*2l_I)D4g{PrJ5XHPjr;1Y|x6D?pji7;60%p0)E0>NJ0UJ z=B#iYrb*abPgcnVmCc833MOLkg1Zp zf^@*#Ou}$2Dq4ZdY@16gkys*DA%{ zadAzkFw$e`C6CEH5#y6-LI4WM;lW zhT!_kggxVhmWo=v#6<+7Y_|yN8l!+!Fxk^Vq`FF`(iH#8m}lLyd|B`3MesfdM*l?B zi+%m^Sk!NE&YIDT=;QucCIq8dLzwM?e-{Ma{DOG!1-oZFX<7k)3QBck6g_=~-+8+W zNSz;J=V+>&;5){Ol!HU5F{z|}KQtHtOOwbV$yH-64sVS?MKD>Jed$Qt=Y|TwY)xCs zz<+gb_0w0iW2ngSrQ&dk`-jP_VMtVX*X!1t_X|sSdJ}=^@A+E+%&ku^m1yzsN 
zUm_UfQm9fw3iXCjfoD*WFE$iHU_K1;w`2o@(cH~RajNq-I7L;Hwr5xZ%_ z$y)d-HperVpLG4{ktpGYN-*7(o|*XqL3`<}5)S242lof=C}40@dbOab^K=28{_=9Y zRp5f2-^!?^%hZEIBFZM-WM?+Dy zo6`sVOG>Kf(a+Y}NEJxU&}#Lq7^Ff20xPPXp24H?s_%o&Ct?lTyJ0!yj$WY$tMcBG zw4k03C7huOJbldaRy38Rwb~8wv!u^~rv9l#*WKcb$FfxKM0sbKDq762TabM7^RVt-e0o}Dts(&G?Z$rdbu7j4(h}E$?or5X?6Ltq znige!d)@BN-r&oZN&}7y)PE2=`o-zA+b@yiEVmYFJHUH4UQf){wP@s44kLqQ%M(P& z{c@kUn)roU%zGhrbR&k<7FIx>eP;vx1Bd>1RU1FUxrCF7+FMU zGJ8p4ij{c7AjM%Ip`Oi!{^J|{?}kB!_U_kx@r#zzeN+pKai&YKE*UUur@_;89o1$< z97-As<;y3pKp~DHSbj#HB4uWGe_kfZ!_#;5f`^2J9367S0~5m4wq~q2>p(d@)o4nY z#m$-^o5R2~FW}EyUz-dXOGq4ENb;$rk3X2#Kc&7&hh1Bwl2?O7H#h@yx3n-+iu}J> zU2s5@_9~Foa%&oM^iJn`BhPhXWMj~cyh3^1pY-PD$-v=be1rG>;h^*Ea*KU4-SH@S zhb@mkI_(mHbT_+)e>3gEK9yX1SYtL>dh;pUQHXKvjIoze3h>$%ywoM|pO61bum3YR z{^u^sS_@d^UfA|JGYnN>H7Tpb)@5|;Jwe9ThNk8*dh>VKEANT+Wqy0+x1ViVT?pCJ zqXog^4_A#3n+*_Z2A~F$(7F&q`PDbS#3L;^wWx*Myh+7ZRFTK&jWTZok-7zm%?rW?3cQf-&; zG~OEZ0@o(+`v+1@euLI*D9a9mOqa*8dZAKWS$lisXZ@F}{QGqNXGJb(AWE@Q8&`;Q z=crwSA+gN+AqAc%t+@d;OE(Eb`G0@L-Wn3Sv=1TV$EKMJZ^OW@#y`BYDAhk`o6rij z@5diT1kXd34kCiTqOx+f$?V79NBFm#{Bhvb!}B}TBqhUeT*7p6|NLKX^oJN}zDF{~ zKZyQWtsxNHqx{J`o77ERj2*lxA3ral|55!RKIl`^HFS%E2E_g-J3$CLj4HuluO*_< zhpPhAM4vxTC>CE&b8Tc;gXmG`A6Au z8woa~WGpNQAdv4nK3YP>Z+`n{Iu9oij8iNO*lK#7c+XH!w7f|Kzxipujg;2tU;p{5 zuCJ=V;+LO`YETUr&DniwwXJkj5|icXtX+2`pFK^nH3#}Wd}RCl)xpoykBbn6PQ4A9 zQ7$zcp!OQv!Pl}meYuVIVA(Yh*_ihy$x(jyA3Zt{3~M(C)J4ETXAmJE*OG$LL}`WM z$0muNQc+j`_A>M~E-saNNnbqTOzQdgVzW2p>FHR#+8h1rv$w;3=b5Ox`a_uUnA!i? zqW`mG=7JEVGQp^p8s;ENh&i|sasX)B8rWNI`7#`;F_YS$88o?oWQt#G_A`xYox=t%idSk1<} z+wXb(>u!*ch*%a+R+^n~z_+~Yx>i)QJzpJDm~uRou~vKC+m5C;R;@Cx{&~|)K0so( z!e-z%Rkhcykmd?eC5%>7T}bTUU=77e5iK7;2?&%TIni~4JsOplMENnyi8Z0g5jq-lP&Q?QJbUZm4o0u6)ZRNwE zpa@3{O8goakm^rmMN{)^f$eXw14MI_W1`KGVc_UELqG;Xu2zh_`=c%g6p-rA3C6dh zTY6h;K21(C5q{zGkKSBEU&;$Y@}gtwB`ojg7^J65psTd63k~NGAvU>^{3Yp{By!}- zwX=G(oF^*f@batl^6A*lX#H6f<0rn_AvHNUM=fL5MCY`Tv$*l+Cg@E=rpG=tW{YX$ zgY|M++qOekr|5YapvM`n^-7CQ)&kCc1}lC*0E7vi!P|JHpD}TUHUkZ(JQw$!dLi@h z@DCe-a>gkpw2^qMC999j%u^|h6CFka4CYm~z5OyxuOP2T@kG?&%s(4gmi)XJV~s=i z0WovZ++2xJnXdj@t102H!oC>p)8f=gnp(b!$E6}6A0I|y2f;j?tT)U&v@6ijC0@#z zq>hEgUKSh+MN=QhFY3{PFeI+6fp;WbcAe!t3DkC9GApBTxh$LZNo|icour|3(J|hn zKR=JSpSQEP_!(xYg6DujfdM_Sb~wp!66`(cw2-`*;N!sg5Zn47?#gCGEiV+zT^c|~LBOFBpS zCEzV63Qv*eWPrr%JvsTi`&B0%)e7yXx;iB3Z2pmqr61y)n04Xt))lJdhLil&I!o`~ zB6Gt>n5geI?`M4w1qGs`p~-JPriI~<;6!};=I(s9T7BZp@aI!qnNh!fxQdr3tx)6w z|HtDnQgrIl5=*2V-*Wx2e{2=p&hGxuj|ah!_elv4X>Y#6wu2Gqo3ud>FXTnLcDJQB z4#{-7(adHsNxqLMTaRGEr|vQhf$f_aIrfzdO4p;5Z@PS5pVUfyUVTnUz__#scx7<) ze}4A<>1f0PY2t&Gtl#*6F9AK5C`PksIBKzx0lJY`DCb?8eyhkSeZ7m8ok0bz6HOzIX$OV*wW$g6MP<5I_#F|XbL$i&)KhaK41=>K90m`#k>v1 zWQ~bbEhrGlZ}!c~Bq?QjZraT50~8P?B{zDCCE_PX`EfqXb3JS->xIhHBYmFcMhJiG;6&A1M1JTZU3*GscVN=^=O z19%bGwO*He^Qa&{(RZev*Vac+g2AGBM6O^ z_MXuHKLKsv!qD-vQ0Iz;#c zKsSenBbW>t<>HAI)$$)zi4c8RjFkzr0ev?q=q?HWs{2I8EnQ0?Q@M$EQ!@6*-uyP^ zkAB>khM?H$VMcTm)2PC%6S@0s{{X;n=qSqk9Y8ji9oxvxl7ZuFtZQ)UG)nB33d<5~*PUhC)|>+J%wSazy?3^7>pH+<5tx=~{dqOrLKC`5=|7IS0T zy&;~0&!Eo#A>?3#DFd<0PHEVc2$3ipc?ChkY|n?OZQca<#HM+|i3Y)7tRe`;oUKjO zYq?>@M#OS!ZlZ5Wn6JS;Dx%WS(jqP`X(;ZFye-#!(Qxk&%Fg8cv%7A71vx+2<=;Q_ z@I4i|KEpR*RQr?R!v#(5C|#(c9xWa3YFi0!%I}H6-yBY=_mJ3n5k%zVHE#o_r_bt`j#mmso0XBHfjzLCOD_ zXEtGLTA;n}+L4_C`OGUfIuRu&^Sj!~L0Bm$pnLn2XDm%A1t0%4Bd(4@*#?4$!q*)hAr&W0>tg6Z}U-k%f%xT?| zR)1V2RFOH$jNb0)Dh{^k%ojg>B_u*iPfe`6pIdGTz;1V1Ga)8sRD+F{n!BwG&?nC) z>z(WWi9iCtvC-Vo2RuD&c;D)AwNwNg@FuCm_+-EFp4k1up)9E@*=(ir^-agCLBW#u zQvS9BP{2-DP|$zhAhANt*;)x?(v#H=ba(-P6y~#E`ELDiUG}PKZkOr6*9;NG+cvL2 z1GOiBB45Lw92-_%x!~2b`Gr7Q+sCWQX6Nw~YeB1RyiR;C&u8uX+3$2)3tsrA_$Lr} 
zMxy?<6$DfL`?j`eYq?ntR49625ahc`p$p`)qptg3@_TZU&BO(O&ji{&r(=^(vg|fs zyN5B`{PV<~=*S)PXqH^gSs&y)BvBgC;6CK2eYbEt0@F^@a<_w1uu)<~(LDF1^q^_k zc7~1toY81;vRXdJJiRWf0IbUWwrv9T81(DkC3cB}_G4eG6~MZ3K@sqXh`lZF>QmOk z0KS8%&w3#@rh=Z974bX5aZB;?ZtGbajGC?oH4AIaWU*2Rh0ZEJJ>QRIkg}Vg<&nYa zU5pnkZ<$$)tptCHqJdypo+JcKn*=gvftm6Ag#_ML%?RIYiEOhd2>18DTj;c6|Jet~ z6O^%sD+_1H^f6V7c4i_nr4zcXO%1#D7t)UBBc7sTrwQJ#U0u^YyR!WK$n}R(HqmnfTnhXXMnAVLewUaz2c#+q z31S&b>5uX_#8opBlg;DGj;?|zf-fPu!%nFbaa<%3sN8Oc!4V8u{$Rc#)9cNuA6PkE z)rf8Ppu*i?x}(@%T6T}=E21yY&rGf-<~PR+1YBIrlZNN@HsCCrn67UJ*Vw?o+n1-i zlA5eUY-7D(H{&u(+t&bpE;)hRFB{2fO$K9uCfN1Hg$~E$cMdBcjS_- zqh{&9ZP$Ua5fjL+5FgQ2%u-62`_w9mQ?O4w`XUU?z0$#ze-)L$#nFZvaprDFUmNMosn>Ckw z@tBH=(%q!`{v9BRcQ}sTcYc*gV$TO^FdGKcC$G^|6E85agNP~YPz|y;&fb_Fk`LEP zGsfhzf@Pb^Y{`+EOZNVyFLBnyj|uO6oqI3a3TK^=!?bgM!rXv%HC-P}67L6%`~f}B zp`aA!nZK@FSMx-2`~9Tq(t;^_WEmZRPR+nmjhdVRH*{~e;>|d$MR9KRATst(X;;=z zP6ea@WP$jV9UL%)$RuGM?KI=zR6GHQ;M8F)_9BCZeAa3A+p}U8#F6WVZH>wz!#q>6 zHHQHo6>Osg4a!&G2By2Fspf2spNUNg^er}>y5f?p(%|dRYrB78mV4E-5zUH0quR~Q znYdMds+IpCxFKO{M~E_Yltzs5A@TJOfyY^6Y2e)e&BW_-gaw7sMXlU=${-<6wG}i7o))gU$Mf*E_e~-!dElt`q@ldHjPQ6r zS45dFP!}gk09B6f5v?NI-K6=fSQ6-JvpJf#LJn}6$Vnz<-Q04(<+E(k+S>rPIuq@h z!{BxQn+u>~0QqlL3k6bDD-0{4F`3O>FG7`0@n$|?23KcOIggrmc?T}@ zGt$0g)Ow@sS}?nnuQ;H?XHxX&XVqWJ72G8o)3x9a;e&tMP1(sbM?WJ4<#68iQd8Xh z6ScwZ+!vGW3s-_=mJ*ckg6_2%G&HpG<0_|I%Q_Oo?6Pj{{%D}7SaXBQcOj0ujHx|t zDeN(}R@>%N;&}8u-m@w3vjK_ZC+&**?u-HVj{CS#TI8|=KAm(^u=NgP^lLvM%kWy) z2!*S3ch8)s@VYKX)s`DDNsbN~FDk-JJwO{=0Ju-I-Q?8OIb-fT0s$4>v(ytKAdEKP zI;9El78m(p)nj0+#XL(xSZib=9wN$p%tQ zab&~=?%*durPD|WP$jslr9nv!FEUxzyTn>aSC^mvb&CLR3)kRiJn?h`@^8mpoPQrf z0HZi^aKmiPI5b{-CVy7=1|}`mAKTi^X026C$87j^u|;>@e=IW9X_cY08=ju`%-SX& z4hYV|d{P^0HMW1@Ranq6s+L6*gyXb&oZtuLC1?|m8Gs=zNAm1e4{SS{(2o#EV{XcnxBcBuY;PyEyw8y*B_T@gpm&hSz0>xEMclG z1F*eRD`GW3!Id`&#I|SZAZEB{JtPNALd=7n?IV@tuQCwQw)cUm7rCOB-f0ZUw>Lu9 zl;1l#deJht`V(3E`tkF{S==vnd=D0SO+MWn-}3FnYs_5iEd)IP1(CRUV)UF z8d0m-n#H|@h~GJZiTg~fOe(GDoPbOJXn15os574FM!FT951R3(vI6H`&F*UH< zGP#8|K{?i*350-OHJdLwZW&}O3fdk2ft2%1UI37~l8n(WvD~)DnhUn;p~BH$>t0C& z3Z~LBQH5*~7uN|(%WJJPD>mHk{parlA{yTV4U6Pp5l}qS0H497L~hjfPHOk zZF0lbp%4td^UK6YHXhfeKd)`N-w;i^ zHdlB#&P!?1+nKM|gSqY{2ky}XHZI`Pi*G`bxph)3%itGKV_&3^oxMpyU-mmztAhnt z(yM&Geq;^WEg!_Bk|ifv2AJCkwXilQ*tqM@;u8F&T?0%^G6Tkmkb;rfB`^IQ%FyY+ z*w?(DqDekxovh_-u5GqOXXvixO_DbQg;uc-K@tvcz&Z<2acKbqT2&uLvT~$fTiR6Z z9s%`U{twsomn$-L?D(u4qJD` zU-H$fcNMiVHe+?~H9@;Y+&UcWm~KLQB)eS6@?GXxG(kbGLq`q+o%lj~<80I@@H&pi zDm*58RJgo5P&^jN+mL+vEj6bLyDi%n*i7Wdox3r|hwcxbr&SLibr-x}5EtgPKb15$vb zC2F6u$=HVx*XMetmdO~{L_}2VpFKbI7c!=LI-j@434mLjfaK``HV_#PJ`HcAl1cl9 z;7x*vxOg>CeePX2IaOFBEKkp!})Iw>t-+#M+t=-T+5Usd@vd zUB5adGF2m8=$_{N#}!vomd)n-jay_7&NZCC%PbKl7&qB8p#d*D6w)~P@>0J+mr5vK)Cx#G)$5TgOHhTHVVcV@tolf7+)U}7%hmCjX#sL044nLd zu*z!Go1%n%#ZOmf$$s9>rxDFt9#dNO zNdP!N3<@8;_6H#EI)4g|7mP!_PIm0m{`fG{l_{RAtrdmkJ2e9*d~=0#xA{OjY`&4|Uj@fbR$VBnD!C|N^#J;KA%ve3*KwEye zvv%Ox+6$8G5#tE?0hnCNkH&f^8-Hi+^?4N!eJsss#HKoBqBVGO#hz&JYBGgoVyArS zQE2emwTVs-Ty9xcRcau$`k5vp$k?4%%gv4Y2hhH@cRNb^U%NKb6<$3gWSx8C20G+b zJbp=>v?=K08Sl~l0#3N!2Wfj8$|R)dah2q2qeUl;sy@`sdSl4*4pYKB+$4msx*R}N&%W}$D}~G+#E$%fTF8>@D{)sadwVQJ;@%0<4A)l(p3TZ$@Hdo=e+Vw!41HxZBey}BAk5Le(aXTx z5k%stQ>C)YwBR<^Ov&StFIO`H{lI3{mA-I$G*Cgpx<5y9JQm6_Qf72b7Hx#X6CMEc zxT?#&uZ_rru8!DKIo)8CsK#i8go=9^i)S1*k}?x?B>d?gJ&zFZUR{zsL)nS2UZ_jl zd=syW>v6I@Wt2NeknVDTyP1Vs2iN2nWGuWL+11MSSvoJA2&hbd$1?Kr{dV4R5XgvWpr;Av(c|daKDx$I(>{9;&>N;S6wkHc@ zcDvj@#Cjb-M_qqB$Zz9|`g!~@{RI}5-sGOEwJ1vw1~v(a)|l)m&#z@u04fqYUM8(4 z)Bfh5xd?nnJd1=>YIH!jR`?zP1XGpj)JeIhlDzJ82Kk!3@gcNtyG}mPIB?Aii}c;i+nLQJ5#8Ae+dGHo 
zmAw6rw^qzbUEUv2_Zv1B;Rc8t6N^;>nas<3FL?yW`wiPsNWAc$ogGp%dmOh1qNKT~sy6ic3^&0YQ4Y4Y75)hnIz)Qb{3MOm;UYu&21;D4jhjOfo* z+}T4g)osD~tj}UAmy<_QC$D$6V!*=oCccu#U>E-szsd?_aJ~UC?_kF7`88zd6UbD= z2r7B~WQNA(OEV0T`6x%`Y3DA>vY(wnALy+ zMBy%%Y1MUv8s|Sg^z=Je@ll@T0*T|oT2-OsD(9~3-0{WSew$N8mWI8I6#M9VmV94E zqGS*Y3axpGioa`&braGmbIp6U71Wf_c3GC%;d74iJ$x9)142GWbnCm@%0ZPT~2(DA~U+AcR2&sIvwR(WJN5jGtF}gSvTNUE zBC8Y@Z&p4f*+CxPg4)mozhe11eZwM z*SwOBC_2YEPUqD=FrZ+$TDxdZ=_kfyxDyHwB!>(cK5Ok|o8tK9kkrrhjdM(3*MS$D z*Z$`AVmIJ&=@E-Bv@9!Ew5oZ*mb2oj-Xz8Lm3nUb)oD zq?Yx!@#cA%-$5)am`B@oomS!N?CJ4K!BfS0>ra}}9-D&;(nH;`U%W)OXgR+*cF&Y@FikeOU+NG>_Y)O$ zeQm9*V!JhvPhV9(P@Cb^l=ePsy6=t@f_?UkREYT^FYvHuMMHJ~U0rF&8K17M*)?){bdXSTwM8T#Q& zU92gy=mBBU@RyoFRoZeA@5_6+r&dK;O8;$~n$Awu;P2p(Onabzq(qLV+cbPb``?&; zyjdh8yF7r;M1LHQFb$HqXypaH(pI?9JVc^NUmVnBx+n`updfR3$c|-l7hk^Lrf1FH-5d%EYOE!Tl*1Oo zt@MA&;iha*fH3EGew)Tn;E2>8$ucrF?kAda5rx*(>;k!8G7lT@Ihsb0`NJj82&eS7 zpNjrm!Bc!_>j$r16n(sIyY*5k*E5YY1CJ!;&+w!{LWCv(&tX#f)ZSS}7R`1PFYnpX zuh>3Y4=8LcnVw^9Uz49sM$rTc*=+)kibdj=Ns1t=h@3|_IK9b$_n9z0LA?#*hd?oq?g^M6aZ8C!cJUxl|3^NzC!$1rU<~?5fd`)D*rBLLneP6umlKwk^;m;lL?ZhcD73pDa&Al<@Yk14v zSYXj{f{gFu9|24E{A{+wjNOpUWtvH-yT7O7P|KN|T^LQTQ{5?cehCKVj32z@`EN)X zd^G@yJVRODi>M`tRWp^WGl62m7#{bh$Z~%O(vHMIgm3!}xvRZ3wP;z6b;{`tn zR!?EH{z#D>Pd&O8w`I!)%5J3F5vl#@i6Wy@)9c`nfx;Mf=Y0{?H(WpFoJ2AW^8%j1 z_KHvHT9wsdEUP4gL{ygUH_S)%?$ZVErkff8EG5BH=kpiqG<^;g&G^Thjm`6j(OBX( z{AH7Xj0vz#AfM{Q$eo|SU8%mH+Xb>|e3p*K^Pr=ro_d=2P zW*Z>kVlJ}{B|qbxOxMY(;qB8Vna7R49o3-6>DToMdoGOxj~v^Q1n$9}D|hE+ZaxVv zAO`h%2BDF(a_f3&Xhz6|6Y|)SzTrKOr60=QO_NmGT3P}-!Y`x%(|C1D>#Tn`Ie8N3 zMEth!zn&rW2uii+P~`)Q*-^Num&Q%2;4}`tUp#_Ul-m|*Fx6@Q)Kajs)ohm5B{BRF z(jO7t5SksG`qy_EQE_ylCHr&3bgUSyqOJC@QnhZEZed*{p2+-Fhu>o!-Jc&H9|LH) zwy{U|*#;7eWm$jAA%Sz@we|HMNj*a=aS1xD7RtKq!^(lLtKPR8rke?;yWPnMT&%4Q zFE6$Qgvg=q&F&Ukm6(_wu^`$=`_3XBaT8X)SUvuA`+=uuP;H@)p?Uq~WB$p6NGn0! zS6va(XUGDuATO=ZtRK(U42{~!j!SAVC z!H#>2E)Zlhu%AA|i>gb1JB{ABPXRZz209Vg?ri4)d4)AH8d{g|dXPTK``RkMHgf4G zcIEjcr+SB4MdK9YNPL4?KqIEqVe`$|{@fHyighvv!ZqL6SK0EoX+@JhpRha1WZ==bElC<4}e+WvDED0WQ-sa701b+U!zhM1@m6$uMYquODodiXf z8l9iEA87Nl8g41xod@?FKPiJA3^|Nc%HqTj<@y#!nS_52i?MF2KWwlVTxc)~K@i!G z_oSl>I@((^8Of5wT68;Q!fcRLP>_m(De|K%ak-dwI2xQgBrTJqJh&aY|9D*6GhOX1 zY<=HVw4uq;d{u^5H`DCx-Jf*IPOT%1L4$@DI;E2q9G=E+ux`^##wCPWy=2Fa&Gi10 zJONkoX}b4;>)GxS8W8yu^FB^Yt;_e8a-$xTOcBUt3$7Xxjth zU=2&ImMc!`efI#z7_T3M6WWZ9BVU6Hhem18&ud~ZeWbtP=F02($!Y?T z1_j@6a(}d0w5)KyJBp)1R_;4Qk3AA9LTee7Y^mG1IWmu#Pi>%s=lD1Ff#a1GjGEO! 
z{+Aylw#zqq`x9pAcgKdmi1!Pg3nl0{(+vtf3hd34!x2n^**A~>mE^Eo1f@#!!_?*v zeBJBknTV00;Ak;~c^=CnG&|>NCoQjot4<6;b931b;D#-`RjNgUf3-&Tr%b13tBZx^# zNu{iWC6jH;IDY$5el0`9W|hplNyw)8PTe$+j4xuuWs2-`@tjl|C!)b1u>se1qo1FD z6y14$Jd3t*)&5-l6iny)Av^8j0);0zLp{Yv1TVA3@~y2=J$>U>WHhxr z$3;p#yQ^;Wu0Wu1dUt~+gd&s1N5M@23wOCM^6nj08+n#KL&ZuSSh&5;NOqV_9ZH+3 zngLBx%2dZtXq?&Em$Y4XszOCF~vURMw9X91ruG?e--JGWvT24FBCvf8+lRkEk5 zN`~WAuE{BKyz9Z{Z;gYW56&|J>!IaBbTd4G`6{mOkAjf!^$oQ&$+(yprphYSEUg6M zl5SfmQ2(-NLS{ln|F~(?k0aNr=ds;|f%?(gIqhe&T{NUJ<`x~Y_r-uHBA%u?-|Cm$ z@>j23Wx8Z6PM4XnH>F$<0f@Tg%^6`paap_O;gDN8tpvhxcM&Cp+d8`KqWP( zWfA314*HrR_*S0e= zQ;*I*!*4=~-Le?#+v80i_@%`LBUVpnBoHq|k|q!}gEgR;>N3ctCT0jij53UqnKVm= z1>8Q*XS-ww8S`>;qk&WlzvJ`Lu#rOna2|=Bm3AI&C% z)Crj4I*tkYrl2|RH8YyCF{?T5HCV_?Z5vO+t_0wp=Pfr$&MDBJ6DqRcpd!+-@X-cwpsEkDUptGs4~0Pq|+e@ zqe#-|9$<;gjFLyKreNei#t?=46dWp=^1TW^BG@-O8kuow*TmSET-;ov(mF=Z&%F57 zQ3I(7?%A_MH=Xob^YSM9_1AKqMg>V`Z`t!XGIL?qmL9Lf^QAUy*oOQJr*n&HzR!cp zG<65vcO2lPmlZPP%O@+UG<=#fdho~2ySqp03#Cq(Ks34I3Hu{yu{f}&Jov9Yj*Ay2k;@N`}dv4qbA2~nLhAiPT&KbRKo{8>A4 zn2VUT)Cwad(g!{tdPA!fz01I}vyMk0=nXbD)-u2t?x-r78?fy?#-wgBXA;ILXFlk% zNX3OB-1+#y<(865#V=6J&=(v8q;)5^SukVaZbshBx z&PeQ&zw+{a74e`YRJvtgEaY$pVMLD=?D-iRr3qf7(nqWeZcw#knw;J=%xL}n9;V4q zs%bnWO~y{h$R*@?fxay32(cQ(kCsAP+#eG)tgd)z9z775-!7{^B?iCN1^&t{MOG2j z_d2+g`3r=IDWr`v%DWX`aUpWh{F$<3{A;rHq6BM>(DpK! zu=U2^##(FBKfeil*%tDS@*SpCS^gKxHj)zR=m;H<<_#k-5$OS(=yU!E?5QzhsmNtOC z^xNCBOl!NCPPr4r!^RH0s!^w;q9W(v*_sABg_`gY<`9|g^|t|41PZ{rTShYxVi1P| zY(G*Cjx${Mjrd@EcsgGvQwb$D( zLtM6c-VGNw^(C(%Ky|s0V9AgK(G2(D7RXPZ>CP)=EY)vs-GHJS791)iAlBbWPRUbK z*Jt-T4;Ro9z$lsK+~|1vCR$`+IAVGX-DJGoF7^Uj{T1ku+{|z|A#&#&myWp_BNo90 zxf*W~idIL8t#->Y6Hc$H%8ESet0o2uElSnVoj7ba1*(}BDJUrVjXW4t9)F0lH*#!o zUZlR|+1z_ey;bQxq_fp$UYV>n?KGtm&e-$m)^+f%?jwiJ-{NeMGgl1o zay{_CBpk!n3BhI(2audEw{t(oO=6~;o_a89oPe%x>vfwKYxL%MsXyhmqj_H)(*SjD zb)Ed0^LT>LphYS6#YK-@=&w0B6tuL8+%ioz6TU117o&14M2q=WzJ@ML>NVdPcS#;= z519cBWJ=~V9EoRm)4f}gkIoF_`@f9)r>zW2L!G#Lkvwa}N64rKuoXKxB1kh9V=G}3 zmC1z43!JH!po5y73*dnJv2ZEWT0YeH!B5)r=IlEllwat1I%B)FMi)R&HLwy|B4Y!EuVxjwstMd z_BNOAp9*&4p8?7Kx5aCP+bR>17WQ%~o4Z1r_XYgLb@vN(PEp8}Z)%SG9PAFJ%WcQP zru2bfjlk&L1p7Q}Z0sKZobpFj$X91|5eN<+r)?^n53|+6PxCK$PWxAQF6UVReSHyY zYikWs^VtfLw~U;%gTxNGwHw_SCb9KZo6pi3-y+TMTexix%MBcjy0FjNRU^C3H-h&n zCK2p-Vxu*y1_7}9Z;5ai9chk%hn~q7aOWNJM;pM#`(W?x?-Ui)-xVjg*18%8N=*@A zC+JF~3T#ngzV99Aa}fNF95RlAe`a6h<>{UxiSjB<;F~bO2W4t4oswhq0OHY^YOi)` z$L;wMSHwH$yMeL?sIO(Yo91b3L%EkDNz|Nuy8xsoe!uEQL!Y5EuE@dn_uxUq08zRi z0oA$iM_ziT2jAjTY$3t~3yfdx$J6X9s8fwGD5wuS^->|4YIMi3oUO>2XTfv4kQq2B zC^YkAP9qy}i2n}2MF!|J9T#W(#Qpj|IKD#?xa%HMYGzap6n(%<`}!ua+$?aeY@MMO z#RhEt%|PNKPla*f;36RDv??3KujAB_Q=B5dU-IeGXx6nge~s9$HC6!FfdRvanh#}_ z0Co9imAXL`=o3sj8q8ItO*rRKcfE7L0A4!Z!Pl6Zqy+#|$-plcL}m_olP#a5se}xMH8Y!L6kHS9fJ`P)Pr?RxN+>ZnT2bt*(lrN;0{5=N2`86;q ztkmij)zHK!1rOEWLl%Qa`anwchue!Ce}fn4d-JqCW&Y1ld@_)Y3Jc68RrK^As1?$K zJ7nYg zfgkSvE6@GWN?y{gsjX#@Wd^4^ak|?NQRO*CwLnF$o0%!~x-;%n``(_!+deUNffE+* zT6m2gC!>8f(!*BR0E+#Uo8~j1K@!kC`T4qXL ze-gX*4PFq{nDriSwo%n<&}PnW1%KcT18obB*^88|>5`AHYrn9dJWw`}0RlP{zMo1) z$*ef}YLbY*io>fFX4HxCt1p#LIhyV(=(AO`Qj4vgNc{rrJ5iEGGAKOWX!XYE183w?-O4 zyVlGWMQq=8@r4I=%OfpU2=-=x%v1^=RfV0 z|Hf1zR=^8BOWRU!-ZG-Id_Sk`Dgbo%pJMZA+?{i8g@IIhdU`hQQDjgrTkX!H%tyGt z`O8HLQaoMfts{Iq|5d@p4+>9LPSt~YF>%M&TG!I`Hz9-JH5f*|v#uX~H#+hMF)Pr2 zD|i2O3KAyxNU}GoPe0TkyZ(lp{ok@Nu_8lRNT&B0Etg<~351^i{w@FIn_BGPyK$xP zcDvgyx`waAFAYybO!{PU5XIBAqeRF7j@IRD60{`w1`gg{=o50D`rdj2J2`?KbDh!OeY zLdcef2b6sY@EZ~S)oy$c)d}S;esBZF@R69Wg^A~3K;wsm|8C1)#k)){C~TQaKQOqG zYx79`X>;UtmaF~Y6meR?d-DR*_^ums{i%dlSlFDd4nm~A-^n+Znv}E2LvgfQc$||H 
zO9%~?c9{sO3-j*g@)S&Z5n7HR^by8Ov!U>)2QLiWWBX8t{^Dme;2L&`ewA(Yz~hS0ewQDFIC8 zzTx2^=fLu;b7V^pijLACsb1#%+ZXmR#|PG#Xm4vZF3lkCY5V3!T}Z7D+?9$*$=k#R zjsYTlB_CJ_jhUX=GydDBPd5!H;NQ5Ue-aT3Q9rxR5G5aXEGMXWmhB9aNh< z(M898K5v*Ff%fZ26hx#5TXW2Zl@!<+4{r1+( zm+AFJoK-@OyrbB14AnzU?TgjKZQ&-=t?_9Gbu(7AJQOeZ({K1MnlEeSoTuWf+%J+c zr{iBt@v7{0;4UE4(6*zSH9E2~9?=dejAeYI5=l$6P5InUq~(5zr}hn=o~2ZT@_{ zjVa}gcNOdf&<}6V?%j8>hCdW0ycc?pCaVa~2q`)^V06d;FnrNYagxp>ms`sA|DJt` z_!ht#dSnx!@`F572EI1}(YC{LctrS?kjF!2LIDpLoOx$%)xtS-{mXl;s(1uDP&&rc zb6h2(;XYS=3q!d8K(kBuB6se1Al3D%>qUZo+ZKCT-mk#R|Mc8Jwg1h|NNgnesWc>McA4s*F&h9lE;JEg8o%80E@>9GGx-o`Q?ezU3A009}yxk z&r3|(5;}g&eI*V?`}YNj_^!Sv;n7QoWeIl@(2-96mG}F zzM#w&J49(acYZ*q>y1SEui%tlu5$)MxgZP3G(fGP2aQ~kZsbEIuhMrFUs4qlF&H2ALp%=7t zkY%I3{Q;;QRavF#7=VV{pYgKvBEdI!W5!j=9>6vK<* zLx(6B>kvfBNQRlv+qFvPfI$t|@6s1))iFLo;y#dhP=YtmZYf z$`Y!*2lq(+NHYkEG>{_>gnu^2fL!A|f3V z>R`nsVoOba57O3GWIkSTFXl+P^F1ZQZ>z=fA}7rGx_OVqE@t@f59{uF&3BKld%g7f zJCu+L{re!_|3xGHSnY$iv$ddYcnI8hgI=$CE-F6WHx-CDq{%GcOl(eP!F~}iEKoV! z4#1SWtgHuNcM~W*<@2uNr3Sp)rbG7&CJD1_+Nek-Ix(cru~|x#kJ0ZCjzr6JH<~%(R1jK|6ujk#vd7Ce_%2F&liuW1$ zv1RpkUq!K~tZ=#)cJhqEHY(ejRn-Sqa!!tfk>}8FeQ2%$=E{OwTgpK{7_yCvGNlP_{N( zz8I*t2!HBZ)U(UUA*FSLSnUhGd1~KrIS9nLPaNbLWgT}|^wdf!FB%m6tZvk_32`hM z>JQ%`<24xU`f0>s8SjzZCrVi z6)Ex!-G|eFv(lbX`|a20XH{K1*LJzx2ZE(AnY4ixw%A2;g@OiN9nmY;|GBp;g2W%L zSrb2ydRe3dAQGGt7s!Eah>+%9ubxv9=ma@cKwBPt{mR>1#aX! zdO@GPFnGg=fck~Tw- zA8sX#^3?K{?0(w`k^yZ7F=#1thL{DDTY=`!_f=Dc@NHI>14I$0Gj(C`V<8Fxfif22 zgWGy{l>FYLFoIyh`MfwpyqB#*$E}DRo-fMdcS9QqQulh1?Q;>esEvqESoPtrS@;R8 zgQy{Cy;!kdQorPS`D)LP^$)pkxXM^R&4LwjsH0nYVDl4Lh7K?Y%E%i8(n!Go7!e3n zZx5397tDl0nRX!?pYFb3>%vc3Al*>v`1y{kb@Lx}J z|Fjq+cnOLQ{s5R*hPVg#C8|DV8zrPiW*N z@DrRu7RdfIjHhoI#dlR|9N(z1yH&hY`!(3RgwJK&pKXLzi(qm{Kz70`DcL0*Q8S$ZT!F%-t?m#I!nZzS%G5HiJn(^Mxrh* z!C0;*T{DXo+0sx(T%s~n(=lOTX-+d!0!#DF8vv+H^P)H5?hZxwrxrlmeSw6lXG>o+ znfQ%f9@WkC_L#|6$-axxydeksxgzckUg4%yyEoE$tt;(Pj>RbyAzqr6=?Vde>y(~1 zjvKsGB?!$E#TV}{hJ-CD6=c_@k5{9q9HWX_cs zw0m_3;jl~UtJu$-J(&M7(_$+q;t_8?!Ic})Cu{RXr%-HA$1pn;anLBy`{L!FE*U4- znA5u8((C9px}0!Rwc7x4wg-9dx*9tBrznNHZ{AXT&66LjD$Nl8xL zM!T^1@%{y`-O0O#0iI3f&4-EPK3kHIggfdm{7LTD#q+fey?}6LKiZ9YwwucK^p_$+ zVJ|TNgq8R47B3HKo{04y8wo!Y4>o_K;a}J-vo7VQT-Jfw9%IFmEXi|Mgg&D-e)}1A ztmZf-=T+G5dLTi8YYMX>Y4hj2ll|(`^;Rw>N&uEtCoMppE#=Kzeok-|$M6_W9)Kjn z<>cB{eQ?+)EEdnAYc@|Q{9qXBk;5!mLfHOe4Ip*J>bDC=Wl17}+{Z|EHaPldYXZGr zzV^B{%Fl;pfbYH<%1d_7S1(PSdAwXgIhjdB@C#s=^(Qc%tzdOpq32Xvz&Z{SX5^+< z2|iay;y;AL<8*7dyV+&asP@X%J66pbWXhGl1?H^DjsX=|hcs3oI&SHI_?F%0d}DDu|$Z zQwfP0#w)M4!(NArjZRh@hL zWuLiwMjQ{i>|8~r@;T8jUg8_rtbMaP85@!{+$>&D`XZ)**=*AoZ(x~~ri=YZO(7K? 
zC+)<^{Pp#gf+NAHlxUiNVJ+kE$MEp*cnw`I27@m7QPyhlE-kXv$-NpYUeAqC0I1q{ zS8g6Q@8Q7IlgR2HORplvX3}d|1zqWXZL(95G&?nxi)PUIhT~L0rc&L%g>5HfTeqiB zW2w8!YA(mEvLJ8>m@}}RNZ)X>gM*YtywA+G`;K+NU(nEG!@6Oum1+tpTlf)r$I}xx#S#W2vTwwm<(>dqj$ipb-^c-LLIi9 zeQpGmo>ETKpWh>x;ES*!+pn=TX6GaebI4&vF2}o33S3^c>CTnP<)8&o(c@T2=8>r8 zzA6P}H{*OM7~Gu106N3_K(KDR{s+Ug9|<;Ij_!r!jNAgu{iv&!Dih|lmA>VUB>dEhB0Kdii>y!o_m$Q+_=N$M%wwUB#Sf6caBd8J zP5~k#g8h0g-9WOi<@=1{sg7A-9AnrAMw`X?Q85EY(Gg`C?c_`WzZWySPMqJ#v614F z>ZBzY!EWa}@Ytj3l-DuM4?0C5WhP(SDbSJ=wJav@81`pfFlcwtGxmClhk682c6$2o zWlfIe+Cx7e@9U8TXDsfZp2|fqmubXl5l0+~jnpmrTP4fNs8Gr$b7&6>(;f$7Gg%f= zqkGi5UoI*Yzc<@n+(*FUcP`W5`OrFFW`sEgPPjdb1r&#`_uAboJ9?YtQq*lT_+5ld zn=!NsJ%ec0J2g4iYSR_)BwH=RWS=2n)X;n823MP8xGK$7nbDQ_W&S}5Swkqbv&7)s z+P0wNHq+Z^9a_a^IUA=lJ#^Oma3x*#{Za?4QU69~`!-M@YZd3f?6Be1_w9QgmQOtD z7E{Fu=L8Sz*wJVeO}eWGo-B&9Vt!#Jv_o&fQ>P0?3w2?mja^8C8v_sW+>4|x6JQ4K z!YkS2g~yV&@nj-=8?4fq&f&!T#+^$gD-=2EBjm4oMnNvZybc?MpAoL)hJ;Rz#ES;b zrwXExp*kjlNW!>Szv}_5>$%n32A9LUoC|qQ&+xpr>sUYVJ~Z{G3%)Y*zc#uo7F5#R z?DD)tlg>zZq!~E0DJ&kll#Vdbc_+D|G?sp<2Hvd0vuJn#qeLGlH0ymx($2FIg{q91 zcff0shKNCpsEBhXSYQ$l^0Z9UZNSYBzJ+~teK{I3gg@2M=nhSz*U}J_sKaUV&H8{O zUdL9{=F--}RdUl^z3L0H-~Nw|;9h_Z+VAD(t-q&-JOgXMc#_rl$K21T9T5t@k5}$o zCtR3XdxX>3?79FM(%Pczz-K_TUw z?_xr97@%i|QMly&Q2QD^k*`@(B$F988%4cDv?nNlqi{{HW8|xhxJ~^$JQ>sqMxC?^ zt<4u?2g97eTk?6_V*?z}JiY%Y!-su*3vQW=7KMMHmas!ZiR)4i`3d;+fEj4$szrn*2-my~o4z z>h(%U{{at-LL%CNmkE`ob{X?_T~?!Y6Zbp|v~forbU$KI9`(MKLb;d8(m3n^jA`!G z>AfwMz5zZU;W(A_Ipx9H55r$D!t&rT383HH0Y#%a^kLmAEjJ9BEqz;OVqCLjDG5BK znJ7>hadk_r9inP}U@p>h!Gg&Rf-#5Fm7Cry?n~=Cl6(bdT7**B7HP5$)yX9(DP@WX zsbCjOq|tLkWkLiujONJR>;}?_U%Y}T3v#D`c`cQFG^)@)cE$Y!kJ!NVdNE3gbUTs7 z{MN5@HB|re?*Y`OPr>1A-m%Lmo{0bPeT@+D!;5aO8bKebJ?im-REi-Q`uMus;aGPA4ft-X^@ic zE)hW*=?(!YN$HaAlJ3p}N_Y36yFt3UyPN+;@4c_z`+fKKAA`Zz;~dy~t+iLoIiF`f zGe`w?!?Z@%ZhPh@)#k&c8ll3$;iRyV=rkx->4i8W73b%5+gr2BLY;tN&TAbV`pW;y zd_MB0Y5cmr`pe`4Vh)y$&`Sm^77qfpr?k2(B`xHz4BoJ0rf`tzpO_thlkApo#jM5F z>Z+tVcwBJN4QhR3hCL+H+g4M67xk-~ZzozgG=DB}4R(#(=k%+umxG;keR)wzP}71F z1)7GK2{P=5=hIy;S1gIq6Uj?TcAR>QS1m$Fz4;a_=^HGMz2N*qb_o?GDcgI+q+42V z$KAkEk}Nf%mCjesYL(`jh;GMxG5BkEAHMh}%bV1BLM=)SN|YCl2@u&RBk6%=FU`Y@ zv@cdgTzf~QKpP>mFzb`0JzDC_nQ=oQTQBKa-K_LPeFeyvf!B&Xas909^Q!y8FK z9%LV54Of-IrF?zbzG*GuP|KSA<=P=$3q=8@}`fhrjyr6IoEl5iC94@kx$i5eJ zq$tT((}uU*a<=5SCxegu9EX;wUPNi!&_SX#5N}K!;-z!PH;0Cl ziK2iRcMsg@9IWET%KjNjhnKkIoz6#oCiCG?c4Q32kCo%b%Z@i^I8kI!wxKx)Kh}Xd z2Hb5SILg^2#$F^a@J4!S19oE9A#dnf)S2{qB~2!w^MQ%%M%?V=vM(gu;ll5L%oeO4gHoJobNhcj+@@g>!63zTwW zNqi-=7%U1vrtI_(0(1^PA&`|!!B|0;RU^>!xvltQRrpMO;-j4(5M%RDCGh6$Y}Y&X?)b@d_>v=525&6$ zxp?03YPNCsc1Rsc0bQ{P%58%&!Ly!x1S&lQypsjLx~&jJiCno(GuU4GKHnED38hE; zOq;=Z+^sr@$2Pbep>i(!^Yt=3!`fpi5C+}hnlbWn6cS`utgzv{HWQN{O6^pJ`1u^E zYNV6j8hJ_R@(25#=DRxM(FU11(YN0S?BPRLVU%_Y zeJgKpJc$?16GkcGx7mCkl89wsEwv0cGEK4eNRmrnNyL;Y7im4wIa$M@cy+;FnY?v( zMfwe9Wr~PvdT-ihZDQ7(1&5*VJ}{;?;dZ9ND5x}C(4b3(H)*MQah!dZya)9_$mnmhc^O4Jm7rq8tZ8fGvhm6tPulhVEnm zUV?}-n#*OQM6J~**?b%6Fy5cHU2g>6-gCCvJaC395S!W8WFyB$l4+M*?@rtiK~U zAdn!J>QWY!aOiUTYF`T1OESd22X=i*!dL@l@>F}DY*XFtY)cM@)h^O;;|K5koZeP@ zJ>=x^dg8qO@Y1CeT@~QEi)Oz>PGCfJxkWAa<3#QK;4Ge4z9IOyY3V~r~Dd8X4kXe$3>O)oW;j1<1xGkL&2Fawn zGlLwQ?Hl1xpA8gVovWTissg)Qi(mKX(K+);=bU5HeRjKAHXLwKmY@H2z%rq=g>Zo0 zl7mi4ea}O-p{a7o0*gNa{gU)WK(O4$kF;b)POjLmorN>n%6Fb8Xk;i*%1Px838O3` zU}^J{TWHD^N)FS+*`TaqTV2q7T<0_UoV+X{I@b0kn$Ql8EtUCznfD5>e&TxKtzYJ} zr}qn~J8Fqqy>T&7E@h2fbQ_cn?Dl)sQs_dcP?@o|N~i%r<#WDM@>e>ko{@{7rG*F; z=%RG(XLgpt_Tp)F$u@B|i__9+u2kTqhfqdoCQz#vR-g-D!733#Uux@@AdbE4Z9yzL zE%7(YDP2iJsYIXyqbQun473MnUjrfBO2U_oNE}_IhXKKE8dAQ1`io>C0SMbsfYp1$ 
z|GJgZfs|YPFE}LEk5A|;Tlu@J)UFOL@@Hr@Gtebz0uAweIv<_VLv4769b0-!Jx;6r z+XRN?-|-W#DZ;A;y++e9YVq{&YNCaHXxBbTP!Y|F(E{uDTla3tp^Iw6@*Tx8@%u0+gDl7S*$Q3ytS%M*bzaj!T-k2UdzOmQWamBe!M6?RNjgl`+P51j^L5M|ylyV|eT+?D zuT1P{{Ru}m+R|`nv=0!<&E0%KM33Qsi;|f3=&H57t{ae4GNEHkyr6?_t5sMCC%;rw z2gt$zm4`g{mc>6+?b-6v@8463{$G1$CB6MPPzUsvF{R>r6|3F0NJ`Jqj$E)A~ z0*wppX@X0%w0zwoLy5_%C6}W0AF*6Ne!}cpA0!;Mtq<3C4!(C%(Yuqiwa2Hj?v33q zveSqtx)(B@K)t6p#H}}4kj_E%m?fV>M>n3f6~5jA_nd&nSgbv|M}8%X(aWEAbN-s3ibEf}5s(PJ~q130G?V61Oiy5rA5Ujr)NUTtt3*f18^k%gmOKP z=p1P--FHfFD}vZ=t&eRCCe?+ATrifrySUNTE{dT)prRlYT-YpjWWQTjafm469Fqy{9 z)H~onsz!V3rl?og__#QPmcXO7MtRR&u-eRUeHzHNn-N&-Cv=hz8T|MAXP`tu4Y3gY z1u-w7L2}v{!~qdsTze53ZuJw{s9_`(X97-ZN%0t(RG)H3@jN2g8Xty*aUB>IXVZdp z>xv9Im z8)b{3UKEmS$qrJK90VxMEmWELt@T7nSZ@xlUoe!rzl1C~duO>6UUUd=e1cVZkzNzW zcRWf$Pl(y`J+NSURdJ+JE#_I(fzI;dDR+cEf)G3gHnpZS8Az>0uP--{u@W~(7D0`s z(X_cH!5EhB*!I|qrg3cMc*4 zCD6A(sXc!kF^iYi5ochbM|4}0tn&K>EjdBnFn$*)e<#1aenUC%GLjD#%_Hg4!5rs+ z46GeUgTix|!~@1PMva+4ebPj|I7WhPV#I=Em#sXd(*^w@7hUa8Fy{hmVcznXF&ln~ zh9f|+NjO7*JtEmrAO`kU>aX(|Eu8{oXYW;(YBRes#NV9<)~`J4GrA)S@TZJOwK`KH zqB5%xx1!1B#hslRd%x#*6GTg0$j0~?Hn^l>j~D3zbnLwQ!(e-=8aC~KoXmW^fx?5J zYj#0hRHeIE3~D*$jIYA?WAS#=?5PBQKSLnd1J0ZfGoXrdXLv15^p}$2zczl9Xg;9|w{e_wRRTtU;wvl54b1ajR1LClcsRai;ezx! zM}mKYlTVWkmJ>4d6-QV2E(e$7-+%i1AO88hWDWT`{z}Uym-p}f{Kk_1{H-q3?2Yow z2!+(+?U)|8Dc|PZ&tkTK2~`f`}W{ zFI1)p{%q`@*OgZ+%ll%#Lf?Om%|F-b!hGJd`At74=af=(4FCD~zc2WH%(RV1G={oh z{`WV(c`F{!bHdG}-CMBkB7YxAf7wXCEu~+-YksY19c2;h_p<-mccO0|omYWa$OVmm zyWM}huD=Fji3Bq5Sir@(^U8XJRsZ9^H7OdH;fKe=D?t`ezs$8in(|lUJnCV;1Q&k^ zImTQs(k$ipx3MOcY4*O-thj=|zVv|jzn|7@3oWVK)!i*RX&X%Nm*e!G>GRhcfi;6n z-C{yITZcn7gWr4qZ;jf8b!cTv*NvxX`hN@o1?1~-XCq7JyI+)~|J^m<6Ax!t2kiq) zf`Nk214kgQ;$NNz0qZa}7ew}d_v?w1WDCtLhei+R#}w_p;u89o6^Swf&FybOh=YSO zgE8g&=M?<&dJ`ZR-$US$ z1S=$zpRcRP#=<3ky`J@=ukrVLSLgBE+WMk;K70hlYCd&#{9LHHM?%TcfE-JwQUknJ@bLwn#xU*N%7?! zj5Sp)zlpZmoFO{8%wr#2O~3|oD3lx6UhB1^9P)7dInn}Qd_I)@Ki+8lB63+WdfM)C z^d_=D_VPX!#9-2X?CC(j&Jd3Y1W-wwdusa1uU3)>srjdO$EFiSs+1;^W=uny!|84E znbt75YL#UG5k-@i)_1M^`g&7qRV9&p0}hieTq}{IiIF+Grp){+dxzq=~jA=d}y`Yx4)1HpfQ*aqtaRvEmoWUll@V?kJZHb!#cW3xm`+y`A=&8hQ~Xb zn2Imcn)K%njduv;FleF_kS~Bt%&gcc#6KRvCywq98)X>~94NWHTjKikgqrWr+7X{s zk)|1g`6gA%^<#j#meWB4b`u4vE77Gyj&OU6?y@tLhbz@`DuZhPJrXovz-fC^QT?)N zJ8-8XA}u7Dvy3w5H7$8av!}OTlpV>?pPNF>3LpsD%2uo!{oxv3SGkK;8iY*-^a~Zz za40oomBQ}heccHF;ADImY28TN90J8Xk+}H17vfxv!6T)DpNxPw7qvC7DO+r_^PxLN zy^YcC#jHha>d@D79=w{IqLCV{dren`DzimmiiK~)GJiDzLEZ8!iT#+O9nH_)QEgfyZipMcsvkbfx40$ z(u|sp*N#1a0a(p#Hd7(LdGTrO2?Fo(ZR!R9c*ii9nW>V`QFo1d$o#jPgDG*4FeeFV z6CCpng0ewmHQT}>U4x}cG#{fYj9fMP!w7ka@l2J!;IL%G1!DGI&-Lz%KlTC>N)l%! 
zV!by;frOp(@RIkA%>5Cp2827Qg#$5I#2eU*TxNM4j>7NCt~jP^ZQ_VLT+#(#GM~`C zTzQzD7*@X_+dZy)cIxIpO!uB54d?dNW6_5@N2$5OJSDVZf3-%J%>JauA7DpxEwiJv zhhEacYWzw5xk@o~z?D37_|UQjyu6dB=iZB@x^ejLo%-9MXyqE>#|oYE_4?z9r*ao` z-y$vjd+X;FC z(dGUu>-%%1uX`~uXWKt-D*}#v?e~W>(=H}jo)|ua?>^A=q)*so-nAo0 z_Oh1DaNF)gJ9gP0%oUyvz3xe1%~rS9fTMQR5Tn+pi&hE&{kbQ*!3z1~I(0x-W!L{8 zxRxu`GnpvlCCY=A#CCvvP$(6v$}*H&Mk(F6k0tD~RT08NUuYcfkb(|_>{ues>L zct_f@T^5qJBPqiyN-m_5s=CorfW){q9lM=18St&jRn_59hJ6#juN-i-KR78MGKeKl zU@?g9dTnO7503%=#Fpq!xCK}D*B%QTz8)?vaMeXv91LKp5hx$5ByHX_YU{6uTAX_FMPO18$-18gOsf1 zuBa2CTP_z0bOHXw;!N-*D|vi}^9Zz}N2|?JvwevEkMf(PYQG*qvrpHR3`tB@iNQEr zJ>j?Tc!P6%jX_pQ9Q?EGO%BCN`jxn8TE~PjhTz|X^jjOXYk9b7dB28e?KQU@^ zEIiyCihSaCO=}IE=lT(-G_(km12jWCeU)VRV~k{8cmBq^6TAd&{j5?dL`8zt)`vSv zsZ<6~Fg{-P&1WT_=`Mx^les31$Q#7r=L?7S3)jtE_mc~DQUl_EBHB;47_$qfzk()D z_N}KZ`4U{vO9c}~8nD1QjCPY)a|raE>Zb-~OaD@CF;n*ljbhbPzk7z2!m1^jY2YYx zjfS2mUAw8k2~ME61SqAhS_E4^&RRWjIn)o8P6|FX$*H{`X3iN!*lb{T$Wc3Le;(#{ zE?#e!0!0J1eCajVhKXt(0EbG2H<4Qzp|P|OKV}-@k9lEP&%h6a-zJWbKZwgHwKfO# zfzl2=qcwAQZ4-+j+qCegB;X7@i(=xf_1HS<1MJztiJZyQsZv&4Hd7Dh8AC_=+@y=K zHQDiJ?6+_&8;A}TL`ZqcK?YSRlX-xkuUm}fM1X8JKXa^#=eSaka#%N!>w)DDBnk(P zw30gY+GN9{d3%I4fW7e4OpB5X)65>u z0HqarY2(GEVEr9XaA$Py!4HQ2aJeWN#nU>|a47Cx`P^_Dnq0z~F8W?`Sng^%KA=oi zgDpiKtfr&@Jqb1h?UR;#u!5(P-@8OMVN?m>Yks~t?cMhSzbK~Uqzs{m*Z9oAS+3cv zM&(_mS6vjja9V@I_WCn7Ozf>B1vh05QTN_hLian)G@ZV|uIW@8-pqK;;;1Rt_7b2# z9IBPHU_%8!wz`UL3xq%S4rsa@&|EBY31oa2U!!JSi&xX8P=tJspt93)|CZT{3=SaL zU~OV-#+T6W8Np62W%RCdLs7-}z13Oq>;Q)xV71$M*O!sMPd&gN)1S`k`a8`7m`AX} zke%alZL=neICDH0S`Zfr&~xHTJq#((y$tny2~fgFZnz5yT56T1dB-j^v-wx1ZD`%O z5CTM2(7iI*%wZ3zU}cTEn;WV)O}wMn{`&(2^xN+2QjD1vF9LI|^WAagqCjQ!@5ns3 z1YNx^5H>`(-qUpiVYAyq`3_Gm_Q!03b~nKFm*WH@3x;$RT`m`%o#-@1J4733b9HnG zBsFff4r~1D5rebennKKW7b(aJ5MWQ|Av(Sw)Sp zS2R`=%BCWHt=UUInDu_~Do&{#|5j#RW|`zwcgMIfS7yBGKIJ+(mbvRK(itZv^D58X zl38h49LdJV>TGA{-Qb5a8-BzhZLiDa$_K{Y0gg#|jv3t(fNAB%njygaGrdg^H!44^ z|K{Wq&eaAA$N&&*z#C?t%JaWos~g-yI82kPR29E5P3vQ;NH18Qh8XNMn_Ub6B;f42^Alx$t|-colLJyL zoHc9EvpLXyph*dc!~PqGIRS?5qKSH)JKKd;y&vI`Cg`u2?C-6;5t`eQ@yMaoRiYNi zV&H?!O@{FL(ZsP9;sNIC&j;8_anfOSQSOiDwk>U9fHF_mD#_GxL{;Y~WN9{v2SH2Y zi^he?OMWu20ru0H$q1+B$6Odz+0(o;Y7&>}Xhk%K0Kf z3N`RGZ3*|mJPv{>K$mFfB}sOF0+6LI4{v2roAHMcF7L-F;FVskj7BA2)kOaL^HjpHw z1c*%JvQGR=S*Y2_;)?Zv2+a5%NjUFK%3!R2OP-(gZkIfy)&Xmc!w3hWupLq&h{v_S z3+K|g+TgqJg^a1fpN_&E`fyV;8&42x&9}!oIsnN5a5-TJRXoGNEAAY9xB|!5#+N?9 z`0Rt(7EN3Q9hNAuB+~pJxKzBSU}6a?Xc28zms*GEy6u8~z&_du; znNuuKk!JW(25~DBvNL(hW}*LZO8s?@XsK3X59iJP`mtHt{TL2yfym{c6%g0CbGoxy z+=SVE1%SpH24;yeH6fK>7TU8BxsItZfCP3RPK#S zcFKbwX9wS3j-eaA5$;19R+f5g`g5+)q$H=j0(^^_XdDaT9#R26Z|9A7q= zU1pY@QFkx{qL1$Ai!F>c!u^R9t8B-=(UsA?&I6^V;cGpMRuj%sG!YwLzNe~FxOxYe z(NE{vub(3sA`&`^LedzmhwGAPAjcyH-DA9G`0_=MvF!_iv=fV_q{C`Jor&cGKmet? zWXmxOJ)GafL&QhW?~81khQP{^a0{tCnW=ORD;E#kTXZfiJ`b`*g>I}yC~ysTWZ{`! 
zeTzIzVo^ZAy-GgAW)*vYwBS^v-dI|VhL_Z1?Ain(+9wysD1UkD5=)~rXGnOEU}@!p ziH7mT&Wgrhugl9Uo2*=WGUPD+l;G~H9L@`LrT24#US4G$9i3*n%s@(Zd~e!imwy`w z;>j|(C;vP`1Q>cK+1!ng!BGk2jS+1grmfnM9G(KcYrfh?u z3r@5%;f2lW{XR*oI^#KL5Q1ZB{`6*-1e?(@2{IRBZxVmrjxD`>a*1j@_%g-U*WICR zx?MsdPJPrMr|GE+4#3Bq^_sh=*}~$~w$2D-me0!j-26uS-RL;0Nxy#HlSvj&$=gwm zN3pu#B;HJWxx&~%Y1vkR5G~sk1}ecO|8ko~I{Y?O_oeN#Q%Yg@t`K5_+Q)r+vbR*O z)ZY@C?7U(!e-Fge!MyWo{>tT5ZdCSt>>-X|G#mG5THvcJq$vRVah*lZ5= z?BdoJ%B{YDwi@uF1}9I-^t)MeL)9?9v2fE$?i0Y_*ebltYw`N-kl-@v z1ExGrjJzdftJOVFP?%!Nc#wy3i>y$uLlj<_SLR^O>ZQeW9lf-w3yjgI(3N5fE5h22 z-NLcD<@fQfv&7_P1~{dqsiBst@ziqe#(N*8pyCkit_T-m>PT{v*o)CjCtQ)qx>!A0kJjUydT#}vC*Gcdz2b#1 z7|(G~n7DGwc`n8Nj>m}-VZaHAevRMw8vL@Ov3v;F!-*w{lIk?U$2YX{m(p4$flq{D zhOq0?>$>MfTG(&>FJkE%;gbd2=1TW+7}aaOEJ|INWq(@DYnqIY=Tok#<-_hz;wZ8< zPJGT9CW9hF%abcl{>Hyf$$sQ}2L-=JB`93R9iFX*cLT_*8Qgwg5nHJhgy$+~xvQ1! zG#ZxS7iT%OsfET~CaCztLR`zw_~~Mh4~e{UfLRugTD8&aHuUX4TMS3KS3A00wO+Cx zVzG>^Yd>C->9$3m)I^t06;6((^?Rt`PtdnrLr)o0bhbl61`730X6D>tLFr=6m;W+t z$xu)?0^U5+Bqe3<2GR?G?NnU=VtU~sr+$*aeh#ZdM0LClCdi=g9to3vU6xoRnY3nO zxhq%Et8yeT#{E|Phf3*po?FWW-liTt|uE!%HB2dEa`*FuI#v9Vy}9?K*q@z%pTVk=Z4aY?2t)% zL(k?YhzxSnKi{bY_@v`k=Af=W=Excl|70B4%|YP!6}GMMh0syA-Cg*Ux;y?cb$hyt zs^IP%X&QT9oN=s_PSbcn_PsQI$+Jg1e8a|M7X7TZ<&ErLEVGB!;jvDT8o;L4@SxQv zs=8Zo;j#gtn#~<1n)&023d+3{Ysj*KnST-lkAe+ zd}*FhZjOV9CpysvC`a<%+^j$Ty0UWnk~K5o8vF$kes{8XRRLiddC}S3M<&6QvUF(F z&0O5{aJrmhP-V8xw!VRDO%T*KKY5j!rYw!kc%%05V5+ol^A5UZxq_l*;F~fCK zDbI3cu`a;w1K7lw7EPX`DwtHTW%qq6nLJ%`s8(J_y4X}d_j*e|w~2^%BH}vxa7 zvE897KIw7Ol&}7oB}H4P#vWC=O5vE~p_45!xtU>`pC$j}13Qwa`=}+qAL7@RR(0Da*q2ZctF-Mist&LVgJCxp-PqokOB>{mV`PK zMBQ)^DX1zKiZ(FNpezLe;UYG=y+eZ3-Q(3R82qD9fpoaZg`TMFz7gFn{no3nfkmhQ zu(J4=AMGNR(pMmf&Dpid8jh~AGc*ZpQ(h{G?Tp%nCiCc_^`<NQyQ!#Vx!|6dl-U zO!gcmQQeb_4DMqG=j&@=hr!^I^#Z(f(d0g95c8*<$$Z+%G5LbssFbzI%s!Vv?-K|h zZ)|Mq_=C~LVSH2Bpv+e`GtL+Ughnkg;Kmn-j{YCd0qQpomIfdeB1438H(|mP`|hfB z6Y$zHJ|4R9Wb;u6fZ9dHSta_xDz?Y3k#VPF)GD0XX;?tARGh1Vs+i49E%S{NffJ?q z#{rvgL2Z1Hld9P!ApKG!-t2wBus%xCZ?Fq^+1A}uQr_xq3!#-r9VZ(0^Ta@EW<}sy z_2MHFFO^5-`gdO~yz1!;9w;gwkTZ`RL+OGM6C zs%6)#X5E5xSohai=MNf{Vd+x(uJZxuxOLezrPfJF1nW{i#Va z<*naAj&pvgWsps?97Lu7lbDe~OA*1IN&s3%a4Ti{s&=DP1bV@|$+F7+4EuWRW@?}f zt);K8S0A-9B)kd%iKq*cXQi@Bzjo~_(X?o_rUzb|x4A<^-l1)P(?__8_GLErOkl31 zTB%gxPpI)orWrj8OY5eC>GxSDoF@09UT-7#xHE)tisVh!!sh7J!qkvFZl72ejHZ;_;;3tDBZE-ZI#xvxf?tv=r2l>UmDTn zuo@2HVrR_NT8kgA2$Ktbs|lLnBk;x}pP8=z=qvvESN-#9MtJyS9v6k+GM?PVfLg7h#K5~BbJ2*W3kc;I zu88w*sV{u*T{ z#W6W>B+*@0om$fc{RVEGA6j905=iSEFCjznfo;GibE;U^{%2MpT9$4`yum)5ERojt zPEq-rPMiOD#DKIz>q(q37WiN&UbDTY zKUVRQHJrouA1Lep=tcY)$^LzPK%(t6>*v=M@^PieyzE~=Vc-GYFG(*D{td(jf`vlO zdbWtj95;*{=RvN2uGD`#RNw;E3KDe$_vdSzoIlWBOLkZw(NosEq4eA3C=9~drMZpw ze;_(w6d3#f!q*_Jc_|I7Wx`<5i=+d3Tj{ zU$^3e{21f$A_B~y*ZU3K$#8XFs6O75lf$DO>Vo~Wh zvlT_lZMh^I2u@(4EeV0(32`kF{`&#)-#m`3g0nVg^|#7?0igfa*yVo!m>ec)j51eB z_0Kea!_xorRsHU;yqAZXOYVjQE7vdGi~s9SM4P3c+46tvG_sZdzeY}oPlMn`{P>8J zO!U9bZYCg(Xnoea^Z!_H3{UHgHUCFYD%F3eoB#FBwV!wzR0BJrL5z*a|9i>)y>J2P zm%xcvT5<wDd>|2=7b zeB?|(BVNJJtz z0RH|XVm!h?>%g(#*JOny(%oNf6IlnSg4P#%oGH6GYsljqP>&OtG9YOd!$CTL^k^uL z9oGblf6L(B%&_%(lMI_%$nN(XCK^ZaM^SUmO!Vu(10sr|Z#~f|rF$2^x8wk(8_8ku z+D4}{7zb?u!lW*=xy9SBLT024NNDT#Whw27G!did|%XxYvivk zXV?K-n(-Fq>(?#)p}F^BClA2x(Ewd?H2;sT(uoe-bqH{5IN~hbQ>oVw#b{}g984`u z*>~9>#%Nkf!^cn#xc$DHx*5Rq;0YEl&cDZS0ML0)JyHm4K$v5_Cn`w1SntP9(YKN* zA9;*JxCQJ$E?CRG`c=+vI*diNoJNO(vMz{wtiL=nk07|56K^A3v!jT?t)dNkPgkg$3(#GXDV9!f-uaX#VV1h!Xn3SBlU8qX;#$2Z98 zTTQ+YMu?F@E|P`)MLy!AAw%(W{#LJB@gVZcI~bFO01CfgGbYZ|x7;=fKp!>A&hdNb zE)onek2zZjZ<0|p0BQ)T59Qs(Ho{fraJF}QyuZ5!$Vl-mV`7H9qQqTK{l!qrP3HT` 
z7IT$*hpJgR>t}+x7)Se!tAnxKFk!{t(G!EhvSWjcra}HntjzJ(?&N6_@{2(%Q2tIc zIJjZrAmsAD7Em}obk33Z%RtlL2NAGc`TJYhNc!3je)`S9{%2EymG*!yUUG2&BCf#h z(tHj*HZHfe(t&`6)!83fSK5 zSI34%)87LsjHlUDi@>JS;@8F(_L0;IAGS~Bhu)fL)>!gy^xyMu4Bl%Nx-*K8=f$iI zr<0BrTNJ;wMVDu?8pURDZvjG(8$Wy$Wh_&kJvx7K;+a(fR^yt@V1-fn8UW6WkkOAs z@@Tv>8qG)_%h4RqerYe-Oz5fQu0gTr#<6(Yp*6#1l<{?2{@p`y6w6HlkoN7KohdpE ztj#qb0eek^;IYjQOg^dc@uaNS2FSrPv&p-wIl^`{k#(mAkdW$u)h?NC|1jRDHj&W8 zG65Kb?kJ6S;<);j>}zX1K$vk)X#bIs<#Q`#B(-iQ^kV1#R3xf}O09N0RZd`YtVj=N~j)~%qTwxpEvRm+me%UbF060%84y!9-KqE->8{&3I$E0ppRn*RS zUbn8ySvaXkSkEVmMq^&`j;hb=9=D>>o+PwE4A$>oFZveShFCPFP{JT!9;wOO0)cj* zHl8kq+gpxONR#}vs+vR|-KtUuKyD+l^qbAX$K_V$SjUp#v~WHh*tI^;q@tfKb~OK$ zBLIq0%V&h2?Q_TuroN}8Pvj7jj1tR3nA}LNwWipf)27rPN*%y|dD?E9A_?LFCs+WW z4n{!$tsR{Oib_biq4TJ;gx`rn}<(b}o zK&k}*d|@#%V%dxx;?%lc$Fkmp0uKK3(uE&x#b>dHjh~2CJEdaZRWIsBj&R7mK-|i~RNHa}->> zBmByMv`1RQ_So^~Ym1)-*QY~DqXpYw7Nz+!5)zVN1VVL#yPl>;^KgJeB4^^IV8Fa^ z(GsgkK7QDz+|9eA2^AD-oR{N$o=6TvO^Fc3JE(uq`~*Z7Nq`7j#mj4#RsoZ}8nK;) zQ%~;s!tx9y>{M(mhfb9gpgf?A2BSMm5l@MWO2ZWgwNjoBP}g>nQPNGLPCa(}{T!fc zk;S-0djtM5g7$;k<8}fdZRyQ;3HNb>vrpFhXnxpfUZ#)dKeA6wH9e+9Q`@9O%Nnfr z#U0ntwIE?OXu99)J63J>odhii&cCCs@INDNOOd4*UXFO7A4|h+(0|vNe|5rv^;zj0 z!0Oja29fOZI+RoAYWr0@Xz@FB6GkU{hCdTZnA2N0$bcQXyrVcfk=0o~J38H;$7JT# zod}3MrzPu0xV<0c0P)>l3$2e2AAP(!z&!K_${0YeX&kr+1({?>zHogH&G#py` z;1LQbT%3G%^%s{xr8oI?FEkroQ3I?=OsX8quz202(OQR|!*`Fq67=->TE(H47l0TU zU3`;vZeT5BtTu}KDZzooK%y;+MN8Odojz+%46PsCjgidJ4}38W)rIkR4%r}7sJixecUl}Ny=t;QS1WOQ8WlkO zt!gNhi;>m9g&s)nL2t*X+wR1ZR8QwsM1`HsxI70a7oGQ83m@$9V4j73PB(?q2_rV2 zEUcc>!vguM3K4roQ+QZ?0tmE9#-`zb0W1z=DZg@*z0+yHR+u!R>IcH<`MTywilf(1 z;;*SZ(~bfxxZmEgNv|T4e49rAgJR8P-ct~|v8ibsEU=F^CYAH?Tv=Ag0qQJ|t|twh zH3Hf$?dMK48iuRi{Ax6n`)O&VV@`38_g~Zv%BA2fZE%=giBbdP*IMb~h5NPsgjc)S z#f>-81}3I0*k4})G{|~H7aMkGz%tzy9Zt?$Qk1W)>fPYK+%S!wt- zJ4xOPtm~c|T)+dAAM`hhe;N*M%{lP!+t6(&!fuGePD$Oa(B7A=@|0WLzXNEK0nb#c z)Qy_pM6@$^hgvc5(_DOj`YfHs4A(rDIY}D@%wLTpdA^(e)~0M;hvpvnEXM?Boc=QL z-@>YRj(v7y(!drqQpE9j#TZ_NT*U+`iS=r}LBLWj>8Hsvmsdd)?P)M8FGwml(vz2)QQ$=rcvw|Bf+&)vW*G>6$Q}SFe%|a!cpBaXu z`$Nb<Px4O_gonU13L>6#NX69#Mc#ZWILdDEynW_a+`p{ z9ttnSn-FoKaW7T!mHQp_d!t$S)8(E8$*Ih-HDosiyr0ODLeTAbrVs54EuZM2@9VfX zW%Z*3i47}#On@X9G&Twg8AQ_XLw15s3@nJ*TH+9B!98{BeX33UJcxuUhFSX4?iGJQ zeF~R7UY{7Wre6p82Bpe})*VE1M{G7R7>t#0uEU!71=H`Qo2_y#3}8Jqm4E%wF*%aCvubK)_DbW z(?<=s&-XBJY>O=?RSPMl6FP7yy6HZC19;9wF ze7u``01jqBh95rG6(Mu(7qqE`6c#=>%y~q4Pc7fzv-+~;x5)2==%Hn!XC#-kO zq*NS+W>(ogcA2{dA_|{-Vcn75@U9AEHA#0Y{j4OBAyxDZ_$I6F=FX%yBE~9CP3JSJ zdV$Jkg>PCaZ}TnE?H6a7zvE0$ZAna060!%e(cy>Kb^=N{J0veN$SaNWzDr~3)jW^3 zQfYXnYp!aW0xEeY7RQsKTCnS^5gpACClD0+}HYE=RL@6%g+_vBiuJs!bcG6<p%{sltGA%R1_h+9^yfQ=d9*Q4oG6nM|JTQlKxI5pUfA zz`1jNiy1s5hXkD^7Xarjy1gG)5e2^?qChFNwJ=dG{uyi=)3E7uSKtysj~c|I0^%bHj36Cn ztl$+dmHTq$BwN*px?|q1t|kXVJ`b-LQ7tS{K4IspvU7ehG*h~jM29d`1bBw7?hJnw z1yZ0eB)RRut2bX|zgzT`aSI^w(pD*UN;3Bn_cQt<`+Mwy!fsXQKUCwz393WewBai? z@TZ#%NKuIpqLEq(^0@9)6x`c`7kAqyvC4!P#$S0bUN}na%-NMAI$qk^AjeY@OT;n! 
zjQ3?$)+v#nck6*?ldbpMsOLL}idnVLln56_hpz!tNfji@d7?wVUx$Arg#55ziL8${YL(l)JIfjvc|E5NCFEx9eYr=&Z4sAVYr7Fe^9kVm zBr+l>7parqvq2@3OO2DGSUH+IW)#XT0bZ{n4-TjDO%pC2*%bU&Q~KC&c81=nS6ucN z$(=QIUlL7SEc$hvKr~4ue6u+SjIio{C}%Thbfd&y5Mt3Sy8t;GEe^ApVHKMewP(}) z!yGC`s*zf(GsK$M7_u?!si@EsII_m$WwmszG4NiuUYH^;D)mY(mS>FOPI3DXb?6%y zlbg~CbRI(ZK_g8$5(TJJuLB@?@Oh@F&`Y~3pb-wixHYpyXRXXk8fnV5QQ7?%bVbCJ z((N^*Epdz@EvF?>(8p0AeVX?mWclj|NFN=VB9YYLZTqnel z=#V)xFEu}wztl|o%F1TxGGABSO}bCWrfi}d4w(I?N%7i%6BVa}dy9hgL*Xl3 z+X{xbtv|b8^E^7_Yg*-Yw`ba*L#9n)HlYIb+Z`Ruk zeM%pRsxm5}QTy@kIPOe=LU7?4v^E49!f$ouzxH=TGS3l-!Gk^q7d#|-(H_&1V`wA3 zbSF>6FaAvnv-E}aG3;0W{>Um*&H8Amm2Psv0&Ej;{6IdpN=Ju2+v z@jIyP#qqZl-Ya_BmZ^C43E!FX<`EvRbVC*dD@{u@%KL~lffcoqTRy;;-0!l`#*&KB z%QlLhNh!_P2pfhAeI8?(HDV2dDZla>3YjARSND^+Ce9FI7Tx@YnAhE9SdecMZi zNUvL8;%N zCD$v0XwhS^(^9D5w1bVyI`{W2>5j;7zY-&o&m8W(b)5F1xjW-hR

WZGM*f?P;xn zW{P{xCE59kmI0P#BbYsIG1pj(t12=ZabtiF+1yLSlHe|KHYv!+i@D+5x$tcx)8{Ud zkEq^2k(j^|6wHt;DHdL?icpnsYCV;%Vt?;m24wO@0JlKg`XI}}=9%5<7QBdIy?u8C z-`DoRq9;_bieXFZi_3&PKHRzc3|dI?9J)x5+GPwiCsJ&Z(8pv>)|1m;`?dv0xH!lr zp@1${ybUJ;`j8eAJQ9%qPQ&go20pK1WE7qgS7YV?E_2PFe9g@o7UYUW*RH z*rgPqxPW>aONkq}Pr4Cg}nJ@=CgS#X_*`_KfmkH{hpk&E@vE=LW-u7T;& zQ4fY(oZz4y2Dzbk;M96JZZGqEAbGj)tdIHX~@p+5Coe>H1<>^r_x4q=5K!cW= z0aS+Uf~d73>&NN4%6;1kS zRl(hww|H5IvnI6nGD{S%ARMgp0E!BR?H`_+_?WH$J_UfQ1gXjOWy|kR-IedX-v_4G zHl)a5S%n$)dIu>5VY*IO(d@-QEE``Q}QyXg7?rz3=99} zKfZsDpTg^~rvk|!Wq@&R85LB+4S}1-v@tD&*9LgC`TgQCsfsr^fra$g&eOO%Zg#F# z`ZpGB?0<2~oO+8RxIX0y=D}-j;(jK$hJ^VKb&7^w-7^MQIk))DHjfVN{Gr36sIYHW z4+nXu(?xT3WcT3HA?#N6AHZq{yauS zdIDw|O`~PjXtt{VQ)_&e`Gj!*4w1A56K}Dd`2k8yaj`-_UdKs(Fy85Y_Y)H<{Pqq6 zaR@r6{t5NShj8Bozs{LAa8O+KFjtFY0mzs@(qGu+qUFFHF<|NFs(dTO$j=y?;dr`% zV!x<+-j;M<#7_6l!We6d?;7We&=p$XJOtBV!E_dxLDEITLr?*!qw-`T(qwL+wB0Av z-{_lRw+(1%x;~s&^X~)?P_}NtB%Gb6jeOGRT5dwOi8UU9{nne39)hJXjyesI5TPCF zin&c)F?Az1cQ|fKI3VRhGPBSsr&u?5WrCeM=U}?B`X|68YR1sJHe$B3ts2xF7xcRI zP$SDYh=(JI5^}T!JI^(B8%udVr~3(nh7{@1=Y18xMHD>s^gSl-7&b~K~GpYW`H@#;ml z(ijK_Kt!eyBXe^j?y)vQNF*{6Hzuc$1-#ll+JG(o(Xwi3)<+nCPL}cz!*g!CES3$-G_3#CU;abE(_WQKbvu9#>u}8_5W)$M=uN720CXUw zZ+SQb2@{gWM1p-n3o(TTIq5S>(-V4sANr>a){U&GU0+G9)QI0U{r~Z8ONL@v zSt|9s^LMfTZriq&)eJjKthT*ykG%=zfNU*E(qeiE?+)TKlV1*8n_MZOkn4J{SZjaL zCFr&tkuW&hmvjDjy9wm0CtoRhzH+)uG{yHbpSy;s;4fE8Rd$Rzu{{?G`O-Jl6F74| zH=|5V5XBZNglY`PnMk@WB8jvW2c<7hHlS{%HGsuH9!jcx2_R@$ z05w0}1ffK?6I%JkOnmo^{*Squ<&gQZRzCmBBl`3jhkYWRtO5-x04KB)`TtVb= z6KP_T?pPyjF@Jmd*U)tVO+6mpfN zxVVz6xHyH9lf9XxjVTnAOvL9D1l7bv?10mkqOf2*L{hO+8M=TZOzr)SMMT*EB{4); zTxBdR-JXWr*MJcKi&{?uRINIbrsgZ+8#pXnS3Rci$dO+lafjQi+cnQbj1VdnT9ODJ$3Ayz2~4a!P*YhEv#Idj(3iBU zNyYAxA)!48p*uTIU7y!+LO;-2zn(Ccoin48zI!(T5;PE2>`t9b!=);T8NIwpkBdm^ zN2wE}_D1?4Mt$D?mOMh3kxsXki?}88(psF_Jhk@*CCvmAsuzS-4ToxoaTGu!!Z5O1 zF`R7|oJOhpW{Zx-{0G6X_4c$TAU1?>eChiVdcZ*wzc=UFYMN&vCa&-)jtaEHmBgCd zYCBLAHvs*oPzmO)uNNBBa6i;k_9Rx6lbP@S0T7h}K$r`q8bCV*C)G`&Np;N&JRRa-!z{gMM5{A6PFoz;o^w2sz3QEwWqNEnE2;G!Z(6K=r8-Qj&B}ivOhya#q zgN6Wxyqg+UlqVcjnF3b^<$KtBO6zd+7BPJ#s8{dUx~=(71P@$@kYiP=Lae~7E2@{Ho&VzGlp7P2Nh5Z1Ii{_rGNy3%rCj(QNFLz55Cub@Ln%#W*uE0bp`zdf$@Vf0 zy)qhMy-qDiy-d|f#Tr9k!jl^yGA6C=T+p;ZcEfeUal>dt^Cnu3BOQ$0cHqIyd@})u zA8oQ->nPd0+Wge~q4}^GYel;Kl|G7n%~-AKRbw1yxw~6U{zOCSngP zixG_xiV=BUl*4CMu~h02EVKxgenewrN79w=(k7C9Nc!~XA#=pRSxBRa6ou56;M#PP7D8BAn#y9z&bw8zl#(jTfiDH@j zR>P~prO>7A%JpEL9hT6SFmy~MO*f4_t&d&9RR0_HN3@S=wKj{KwV!K;J|cV^+M#z+ zcj|FEaLV|ZzQgiOFlFc)@v!$d=N*)t!=IBQ7$1~g=^-cf=9L#66=W7g(;MQV;%Xwl zL8d`Qc=hmV@YN2kEpEXp4SIEkY$cK}`Ni<6c$_hv0PEZ8SNqKJMD~OPBOYWCga%IvKZ*7kO)In|lsOb5dRWR%Dov_EiaVxe&}Cb7 zTX1mxy&(>N;>W~yiDh4_OFG`HyTn;`5e|+`zc*C3<}y@ZH&5M5ZA!(^Qep-$Av5tY z=>w;9Fq;gQ1GC}QnU0x$Fx_gcH6=7%)G<_pYrU*(jmK*$>pLyQ8_yfHtbbTu4$jRa zo43v1#F7rMOxXqLHLYrRBY4OAmOT{*WsA3qf9`|Q1CfcXOLXmrJ6<{H9(t^tCnk1$ zX#7E0)Rj4&^&u-GD+l;icUY&{ezQ6%7G=3`&~)naF45^j-C=G6aU3rhyKfe-y5GcgVuWS% z(nv2~X}-{^bUy)>O_lMKwO-a=&h4O8n3Y#*!Bog<{^(G5x7}Qd_sMx2bnSO7IrIZo zGto19bP0L+YUD{7`E8zI2npA}lEsgRgy-V1FMh^)qIwzXjNu}7w|`V0WA4;7)>2Ww zFvT(dv6EBo$M9ll<-E1q$$X=8V>e)jYtzv&ZmVNbqHAe)aCXyue9UjbAL-t{-eR*? 
z$i7?)*6Y{X(hJ&w-pOjacC%l7Dtz6trCum)s`J@vcCa);a7A$bw)NDuVy>8_bx$K@tP%9 z*jxS2pRExDFkBkbo9613m*1fW25kia{bF9OZzx~HSHlj%bmI2OOZeXLYOkO-RN0Up z%A61!MIRpt@4A<_jkX-_+v9a{lFSR~dy1X+CnxqNhR_u$>|_%OGYdR{-+xcrJTYCp zogB>U@MO8tUH5#s#0Pp9@GST3^SVy&Y@N#~q{QldH8^QD_Al(1ue{wqb?YKOt~<5v z0y{KZrf)Y?K8HNfRki5}JNI1sf5Y+;yqI-{g1W38m!+Qa@=r?m!Pn~DFkX$h6!fcn2jkaJ z_VK(R5)>%7w9$wtGxI}2kl72y80MX#0KflaxFD2k5p?)40tHk}8^Ww=x` z%)+2lzO(b$*q9JA^e)s#FAfvI$7lMfVT6}Yi=NMItuK}!>cjP;E-0f(ggy}PYbL~k zH8$0hHItWzVt~+yPylFbC>RI@4f%r#vxI{EgNA~lh1{W_U=l*15FmGK$iGr9;6J6X zjJYuXp#k9E6-7UY%gRFTA55G~P3@d5>|Lg-4i+I+vyi2#ri-Thdp;9;TNa~__Qs|x z9<~m@S)c?w_#mXMsf!VXhpmmBGoOdx>pyDnLFnI)fv+k4sN!NR_*zq5i9+1o$&`YN zg_VW%wGc7|1%-gqM>9SZ38{axL%s>Vws3KA-~$5P-Q8K-IautS%z!myw|_F6ZqX2pOU4Asg0I|r7c9N zkf8~2zvUA6qnm$}{BNND;#6}sbrQF?g)q7Z{cl?S&HU%Wzcc>PrRM*-WMk#x{&Syy zD*6-a?|0x+bh3nWXY^Z$LTm!Se?0qleF5Na3I9pr|C-J}oX8sDsr>NLH#NC=)9}y*ZAemcnXjb$w;Dd$j@`0!++9x`Ipz} zo?oxR$K51k9HdWBNT0glL@7!fSKy{G+yY@`0{(YR(1Fe_7kQRxp|L6cd4UW8uNVU( z&eH$K@IU5_cI;eDNB#3U$N$Ir|HSmi3i-b-iTECFBCxBaWywa^D?B{)O$+>@`|Xj3 zx)!g;Td>zj75Sc|WC%7Pp;{iFzxVSca#|pZZpR7170=JYF1r_A_JZuC8Qfav4nke~ zVxPPV*d|g2#p2e|Hhf*Wi_p^foxW*7cj=^a;i2m^Y7Jm44Gm3#GdL+JDcOQpyX>h% z@Aw=E2^$N`=*i8sAav2tzG_rMO^%vt1_ zdhv7?PM*YLPNKIxt!z8}tdPZrusaY_Rb8$0Ro+*Q%jQl+yWZlB`UW&`-yE ztL3KXXjIRK5@A_UQBX9odx-8=VF&4p`4-1*__T|MvUfGAEL}eB(H%z@B0@j)y8N=* zpLSn)#~!xQl1BJkMx4r6S=EU=Nym~T3qL+LC8T-QR?d!WuRk_k9SV2DqNoPekP7+8 z==&ezejAplYFukx;E}cZE}@d~_1$Tw@3COdFOYu@36*z8kd zbTCiB^Z+)bOB}4t4!@14`@XuqF!XQJRMKC>!(!R}dezc_rGe1;8J(iJ9w&=k-_``Z z%NJ;IsimSxY%h_;Nsw$!Viirz5gwX7f6%tL^wW=qJ%@#bmF$TtY_#AC1PK?-M7?HX zd&AoJ9oxBxCx~dEpCB5)ePw~QDD3(v!0^M5w~Fg=KYxDoei@7*29Bq5V}vufKHcVi zwdElH-)jEXvgQGx>6hPG%!NVc0fLsAjXG@BJLUP@Px1|JI)E2N9JPk!rG4#eZ`7eF zxw#c3qkNv=D+kMo{AG&D`q)1|iTdg^*~NDW+)H!3ewuVxD^{kmT;Wm9*~y^9sDXW> zgSOjX-TBGYdVaY7kam=FULT)TVA{`toI<^SU?8o=R-B$VNKO3)!51_i$SrkB+aFXY zhc{aKnepdgoB&IQTC<2UB4@US^!)|2hDHR@jArW=I3gm#F^++>h8o%=@SKE{w9xb& z!1LnrLjKkFITwLVT8UhRviS^v4j$WO(195%KL%zve9tz9D$y3~b%m-TUONg)o+2+p ze*d7CJ_jqf<6KquK}A*f%c)3ge}6w1V~q}LZnAWt`Js$Y?5)EaAt8O_D~QRZIJDiN z;pk;Qv(Lu$vQMQMl*V0D`Ej2L9FRJjZz?Cs&vW}?eEk$YKHku~rpY)L#aAzp$z6L zz7mh`9o`t11l>-E)IXWk9Dfc^Q`L&cSTSLN+@VkUG>0w%bnH6ByL#!=F`0F>d?a61 zUmj)A&OVq07jz^Tb2yuCY zUc$^|_P}>2D9DA)V>)ohSyNS)nko)p)kb9In4P>Za^(U7eS{GWK@1Wq5=U~=X_jc+ z=ph<{8B8XMR~zO#4be4cJ3&}ab4lDkvwxj%D`L(Z`c+nfV=?`zuVB|0S@I?XI9JZ~ zB_t(vGKz7=)rX6oLJdP4bK^Y19xw>#yPn5i2o42&)%S7ibf0>p#|D-f42p-wq;C*S zNO`TI8}u|0De1Hift=^ybZa`Kc4emqf3CQt(+yA5O7i)40|faw1VIJ_R#o zL^C?Yr(ymIdcGbQwd@?nPYYj+*C(?0)ixNao&2AFknbF>d%V|f0&$}T7cD*g+F06% z|GODvC7#<8iF<$Vd9jJW0Wj&fD7F5XsJNT1++jXx$LnC&OO*?bM<2Qvh@HoEtdZA@eHn!GGJG+)YBX9rFzTK_sw7{%M!;ouWOF-j;vb=A55Qm zR8W~>0z}(w4zMpPMK*!zRoF>g59tx=_^2w>-00@*8Ojx|DH6#tvdu($J4*ps`6L3A zD(PIf{ZUXEyyW9tMzC^fW|Qe}+}FLkBt$%>gwnb()fhGh^lcDWdjwMgP<-<9-vUv+ zY?onCh)FuH`f&2zhdYFf#A32`-_F23_h&&fIwIV559#; z69h? 
z+l8jc1o)~^w@b?^Ik(9NxuOr_%>3zXm+BifB3Lom&`DM?=Gh)X$mXX9rkC7bEJnN# zO>`Bp@ByxF$4ld#U;4aZ4n~3V+BQMS4>V-(&Y5!ugrnLDD1k8jl=Yj~63EeT`c%`c zs=YYatnci#01u|wZ>;DuuJzQX2Cd{sl$z!>PB*TnXXB_~gLXz!WaF|s1kJR~T`%`X zvU%e3^a-n$Jyr!tD8&pPd0HEvCRceS5UVeeO*{hO2N5VK6*!EZ+uf;Qt;@d5Y-NiS zpCx+-%MQh6F`>hQyZMnEp-n_PHjo{quc_O@guI^0O8w?PkaW2Bcr1JGcn0Pw%*5RH zJK<%vd5u>8Vo)X{sGC5MhzbsdZF!7D!Xv;n#dJ2k+OGjU=QF>wZIp!<{bhO}v!w)t zCHkTXs=Bo9y6c_W?{ff`wOMp~fLB_|d?SWbf<7}QTeA)*<2gh1kYp)q0%ycj0ozU$ zgXtmxGSsk2?dQ8XG|F%&^)aNp#xt0~Vd6@ywKW5D9*!5Ml|V2#!{ zeZJpZ?P7FWnMI^y(sDAvHX#9+(OcCuwmE(8yBv%@gQy6PsKKJO9H2bJ5;{MlGbfhFSb4+I3GXjAyzxauyqoN$JfMRM;s%TEF z)5G^Hx+;g~kH?xonQN${!bDtMHA|$5V^EG|I2}k<>Hs=cKJQDhC#d$Alezo?Ca~+e zwBsi0f?)&)nk!T)H6FTcOcehpC+>%A;FN1|WER@O}Y&`yE_80pD z&=kr8te+AJqherD*$A#STUVeA!i-Se4wlCKh6D64xyoNu29Q_bHkrIN3`bS#LBSQW zSYz014AoaKui;QNP7j;RS2Ro|_@)7-e69z^TLAET{!rjD9PRhu3zp*2&W&GweJ)k- zuU(Ohk=-H@E0;gJ->*!!RcV;Ib&c)`x3;_T=I4lqp`jo$AnLZdS0QtaZWU zqUAxg*;8;@uj#hB9om6}VU9DDu$oJMMELvk>|vIzd0o0jeE^MkAF4!>33_XhQ)r78 zcQ=+`7P1N@-O@$(2(!5BtxkWw-E+Ks9NOqqcaQ1ZTqk@lE!(ayds*a?fbA9rp$R;G$J+5+j88U}{Qlvd)ov^6THeOXs#X66HQu`}GC> z)P-NwV-3Ugr8dT`T7LIrwGE(W`SqNE6&>g$x^d&lcMgc4r7M!j>3p)0M(eM-`?#iS zx!!4#w*SHL8_Zq%<9A_s8|cHbs%(pFXu|{SFVOx>=^Rd3Cj2Yk4V^LE`~HCEyv3R`e;6B7r6`vT!|GSWLz` zTJKU!Q9McrLb%Ong30dl5X2F$EY*NzBiFTB;h8qJ(CnI-a<-lOqL+eTin$GdMvJc= zgEeT3;xr4`d)gSlcDUDD=H%Uqy1zO&T}(j!8OFAnCqBf^g4=a7|E04o9zlvbnG%)I zE5c@}xyQ|KvC}t$$)HhUFq%}OF$vyR5_8g6>+D;503Fnpo*}UUBF=Lms#hOx;EZ)j z&0Eu@8Sb5CqC1&0aPz@eo$(C#(|`fw>08__wPJVZwiLs8eEg$&mMrTZUn^Kh-ywON z=Ek9LJorKW|%FXUSKMe z39)ZaCAljbkurQ&H@8j0sIq-=A(6%W^|p&NBCh?R0`7wP4;l|B!~EO zm@RLdkl_)cwMKTKNPulePPjt?apXXSkdtTS5GByXPNME@tdTy-Rhat<7_}7n*6JdN z;%<-bg+Nmw$erMY#fas#ivla~WEIsaFy$Zeu!kjzHkXW#L?<85HigGWEoNy=4Dn=< zFd2f?GlMqD9s3k$#OX9+!C{EC4Yh1=qJj~XE$FSCc6P?$oQNUWC7;0sS|%hsz$=1! 
za`Z(OC?7ydwt59dN0mWbI^gDV#F*DH9Q*&Jcz{UQ4P{-9U>St>AF9 z))BUcgnb5RVSQW9x)a80SJc*l+VL?Ju5xT-Bxy%3Ww>Epo(qO(JN5b5yYW5zo+ej9 zYN}@AqZmv&d=&1msv^?X^mm#l+a_#Pr0kSp4e7QUQl{8<_gsZ*bG2$6C<_fg*PquX znOQCL^|O(zcLhHL`H6W$(?t{Zl1OmevQV7#SL(IdplEgw)^h!n%vfApY&|{^T{azh z@w+;j)mg{DWpv?1X=gk;B^&%E@zlJ)Y|UiYY_cp*pn%C> zIK_tnPoUBq2RurNC7?kOe(E1#(tB)iIa*Nesvp|F9O*%ueC2dFw_`l8DIR9iGMF}( zmhAQ(O&%Bv-ALprRa}NYc);QBy%eqL4!lDoZYVCEYG;3(?6zGv+icv12M9}*?85+I zO-cq&Jr%7m`ueC|e_zfs4xZS#f%qRgzLHS0rl!yu8CFU^M2T`>^Pwb= zne0Ihu3Xr=#xKvl7i(=l0J9i#tJ2jraK{CGC*KC)Sx@vWjQe18$CxozH|90!(J=C0E@2CU zrmo+;hgOYGiLX|uk>^=A^m07(4tTjOzhlrRAP)n&r=1{{B!N~wU6maG6I9&uEqOdc z{$l{vl6KQYzj|>v@kX>tKVCP!W6FxJ`*G}q$6)ihIP3W6kU@zk4sijWi6wt>F$gUW z;aXKKrR-9dl+BD_Yhzj3eg5w8QW7uxBFobL8>#ym6}cZz9|7eg8uzMkKFS0o-o|S8 z_mH}H)*+z4OPq*q*bOP)@Y!ZcIsyQeZMjt4y{JZr{==B4E7A%j1?8q719Od1@+@wR zxk7ok(j>xsat42U6lUAmDTa!gZ@2H|&j6=9q|+f=_mx694deHS5wQ_0IQjzTL$mg2 z_hEk*-2?!HqF>&PzaB$mcpf}RBht%)qA896gVKXzw9uo}=h}i*2wzF$A2Z9@Y2O7+ ztNGC^yFN>FBmeYs1Z&^p`?P%dM?(?}lDC@6+Gcjw3nQ=Om?-GGG0R7k%1fUFTPya{ zIm?dOI&F$dp(B9%XUILVbTT~#eM5@PBPYD*Cx{?G58wH<5F|LXb-hB^Vi?YYaxMx@ zQ31Mq99jUU+E&h9n)Iq^jLzBW!1CKS+lpb7;G?MIbo$b?$VTqQOtm<)RqvxS6=5NP z+50~f)$V*wF?c(PoHVo&N64YjG!awh&Zyw|x)>4>i##>mnPZi@wWFwn+eJP_0ca&z zB78=Cdkl0Q ztviWJF2yuLxg%RxdZ;OAIL4e?)S8Vf>s|+5ytumk4iu-=yk(h&`@75 zvCOzG_2Zgz25Svl@%i*cGv{1DGKJev8lE5>(`*FI!93Ed;YxiHCi;yS+KmG)=a_9T zndki&M_;l=E}0&K@-7GyN2jxcL354{zslV8=@3bD?J-m?0e-n1q7`?@C!fb9(jXBC zodHce*f~sROjR8CMsrQ?<$<||Yo#3uD5ggjP`Q4?ce0?_WA`=N(O6dDGo^^Ap$iO; z64)qD}zf6sUoGlIarKMu961wRM%gk+Je z;IH@hQ(Hn}43IF*0DSX!*n>*+uF+i&#pz-}3SmMsLdxCma6P~^4# z|5nbn@+s==?pdY7@iwW%x^;rC5Drtlu3l_$-_z9BmurVDxL{<~z#@tENQPa@Aa{xN zphNriZvw1JZgE~WHf!y&CZQHe@=q-_&3>;j{*{;fPrd~bq0VFm;MZ${MEzM!yt*Ci zEBNiYN1>!%qlyt!%@QhWVndmbj8f@pw-~2=+`owq30WANS$?QZm_22m-$JB92Xy~D zG&chE;nhVw`;DG@#z{{GV2zelL_TTClr@4u#*h~k;6h6aZXu&o>E;RP0e7Zq46!w5T{@z5 zVp>tOMw1E7twCSGfKKZWI z7DXtnN(YXd{^gdsxU2nd&JqQvPI4nqAKvL|AM5_HE`-aCz(hO@or7w2xuy4#-9gHY zUW6V722D;{DE%ofSBR-*A@`hGCH-sZ0(6rbni)Z<1M>;5IF@bU*RLajw}SLlT@STz z2K`*0$R{!>A#OeGynL&E2e5US9r~wapoJ5}{ZHd|k7Z%jG?IFKvVGqT3L5_avv-y( z!UT`VIe{r9V|uSUTb=2hQlIY z4?md!o0+L0=^K0SWzZp&>za90e+I<@xxjxcb<}2AGhENm-nn$o^WJl$(Ta2`IkH$O zIJic%xHq|+222R!k64jER}$|vD$-uYZpn{Hxe*iWPNkmm+>L@D7nvCVa9a>HPaF*Y zZH{}s?%>^V^?cM>uen`%YIDo$0;;iZ!2ER+gW$zC0&y8vQuL&;bv~zufm)Zn;or0~ zv}ZBulD$k5saKnWprjlt~!ILx5v3Yo;U7~7pphtiv-GcVjy}!Wr@?zo5 zQ|GVjUxhliWVGo4(#65u9o3%cP{apx*X(hb>@ccd$`G}KXvUO6m>8=l{+5X?Rtd?Z z*Qb{sx*)_M1LKrx4S%xuHrq%?2~o*aHp~?vQNS02HD@0f z*tOL$Ha1o+=c*2Gih_c2v>sDBSY>!dz8xg+Og@N57{Nndn2?#tq@nK`)6ToYxxBbr zCzADhz&fDZ{~p7hk^+qSvw`eo;eQYI#T7^@b4A8NsQ zk*P6o5KSu(23l_SL}~fB;DACBUbS^F6o(=};eZ}glS5?E!rPLNn)I>lV|UCtq4Q3& zf`4eEp-2X>t8ul>ot)#s?R4o)+ZBQ$GC>h5lX91BSLyw7gLTz?t3`Q))b2j0YE9XT zT$Ft@h%4FqU3YOX{s1I$U~`LgBof$o?n?g6(Q)XQwR^PVmUKw zuP|RMSmlt**0w3OH(x9&-q6%{M~UomQELdpE?hdWnfMI0TbPA1hucmMuOJY27g5gS z6GA@D?(|`b2E3;-M)Hr`V?1VU_-uq*US67kWE4=r&X3j#-wNbwL%-_GVwsfVSC$Uy z?cDo};2srZ{`>+jJ-yx!{raL)YMBLHXUGS_hPfTY9;eFOOXZeg%SaDvB8Zn=O_(j! 
z#OTS@)RqoQGDiw3H~I+k#~vFUeZQhaY6{+RAsVaa(q(Yj*87uZNy9(vjlj+iSpRj8 zLu2!N02tA+J6qY|W?5QuGGT}#N)bnU*%YJ}w0U%g2V?EPg+@13efR6DZ!I-?+?3;S zgW>(``|hGazj?hcHR^-O+^^QGs&m9EEk6)T0phKj>=Z@PowmUbw<(0H)oXIw-heTU zzPDWz>w^J~n?iy1z15P-yRjHQ)T*ZO0J6P?(z?(dsD>-U+oxgHFjByWm>Nn^nSc$D zpH4>+TXw~i<#R6c*^nmYB}|W4e6=b4Zb50I03OZYTROU-UF=Yyc1c5B@o>;EQ~|zo zQdJF^4HE;=XNwF+upsDxa_(+uYgR|Qrx=0w@lz&N4BxbUeT)~13wpI4V_>CNzX-%| zxGlzT-r5;$bSBubAikgC+C`hJtE)Q-f7Di5zG{qUXsO|yxsea8u2g-1VMEFXH4T(_ zjqMXm8?eB|9VC`OF{sGL4#Ws=KR7?{DV&E+Oug)zJV)i8mUP>|cCLw483ZLoH|+ z^guwg)}$ej4ga>)4h1O)$5d{6mP+(cLM&IogKh(bPa;SY7k=!HiR1YZ-bub_iEyNn z300O!D}p7Qi)vvxq~BK%ScPjMsec7C3S;PF+#)A3HYL>Bs`o~BoA=`Nrkt;fVB{-x z%SG}k`!zf4jgUdZvsOp=+luyb{81Q_zpLMX7CN(} zc?}k+Tw8zSlOT;E{5shD0NY=sQcXGv+ZQH0YXX+!%sKe&vzHs^?X)W^8+yS6&@gK> zmu3$5v!o4_^>BUUZEbgppb+lcHC^p$d0cUYjA)(I4#r~qCzo%t5klKS4@jMMhdSJT zDdf@`WYO-<_EL||w({F%_%!_4!l)BJrh@erpi&d!r?RTlj(zBu63-RI=evaZ3A0N%)UOsFZ0 zpCO=Fs>)Shv#o~k0|pHLyEtuS!}lki4lS*^ZLRJ1Q;joIbAw}d?vG-#QBE=hQMa5z zx&z3sbOX5K2+44ACVEwvMxTED0% z3sh7~M$ z3Ug}Bhg!NsOORSTPnbTDdkOCGwMRlY@t$-jUn(w>{bhmit>G{R<>~CCg-_Q&clo(s z6vyWZeDQEJ2n*9dYZ`P|`mG@+#~S9Ca0` ztMyl<{=9qDv6B@2;Zh%)s*r|ocFSI6w72>XswHW5$I|zRJ+2PI)*rgIaRhEF6-cJ- z{e|ie9s(~nUg_dZ#;>nSIS}g4V?eXOl#s`khy2Ma&NaemhM)%wUEwqM`L;($4+GZ- zjp&I>$^3QlvgZrslN_&GP1LUMmalR&KCjP)6$p2|w3pU4bQshyC*7c)rq_qmR<=W? z>Dh1QZBEzN!GvBLPb=5zv_WI5&$Ib6yQ|=BQ{3f4@%rpR?7oge$4MH0cI6BGDfleq zJSV3TU8sr_Iuf^Gdtl|y@-~qFZNHz-N=zunL(F=SSy*~?)$vfp{{aRAU|Glj=8c1! zTbupy34b#E{|CFGf&l3zuq?R5|8gk&yBj4ANU7CeM9ZI>{4YI$1sT*H;@b?$znUli zf;AQsLP|Ne`*r_3Wzr0!A?#5zDtX-`I`&=?S?WRb*u?87A&3EAmova_By+`+aD%UDcKcT0QTt z{UZAW2`z^mVMs(A9WgC-Sf?=Q)oM1LF3_gYDW(TtVqv`*Pv=aWt5}wS1Q7}q*Zsaj z07Mbd(d8d~=IgmzT3VbQJ<@$@ygvUg|r(5|*>A4fWRZ`Vh<%O-ac( zH4BSEW{m-<^v|^GO@vyXdld*sDBkamQ2t|V$J`Q zRIB#Rx#c-D$|ca?sc{w2|>Nc0h6f;&R!R4Y?|_i(=bd5NBdC7~}2V-cel0Glf0`)gz>ukQ=Qa_3?}WalK*QBa62*R9d@ z9~}!1+rY`Jt!1aNmiub6TC0V}tV^NS=9akT`!G_-0L5vQlzZ%98)AjAMu_a%hm91I=WuJz>R=5{~$wE8XQ-u)wp$Kn9UmUIKT zU+B7HNFDu#ksxra)tO?m&LE%@F|D>MA>1nSybya%h0kjw>%yM3*U&tBo;Gy zXbt+E$TA(rFN?|O5{>3yWBV5UGB)=9kS@D#c&*tn2ol8j)*thW++(<%DRW`V9>vz9 z>mr?!>t~4rROs;G$mhM{hq2i`ClSqWwkcxI|qYO*2vuxty+j5PEXyWYTGs zsGT_rrI@O>{1BlQ4>ef80Tx%)+r*2W4I}fDK>Ml7@eySPkwv`+>X@o$!T0(z{`fZZ ze4PIwKQ@*=Ez!P8kQ0@5j0BHii# zTu++nK3->YV2l&3G&wu3TSVeZ#*(TBd>b)~i%W=`>aKw>!uDI~{rM6RrxB@n-9Kh( zmi>0#S3ryef$0PN_d;ye?v5QYqwnGaQ4p+Tv8ewD>S?<`^PTq~XY_PBEuEd_YCIOU zI<}b<4=tG_De-W`rcRGhnlo5H#GV>v-lWoAtE=^Rv70FK=B&eU z|KS(4GA?y8&nscCN?R~ev~x604K6wv9|Z)_T0?`%>bK`3u-win71uJhC6QY-$G|Y< zqLPyH-t(y;KndhcB^|@1Of4$Ny+Y7EP$Qa&OmG(-SECG<0GBf+&Bp6=SKVIbNO97# z=O?QSjrb%+c!h=Z1j%XR;^C&tDCv?Av)-)MrIS1j3Too+v&W`x5mbz#z=Nw`^eKRr zsF7-5t)O-!qWNvGHElY5ko6eCUEA}+uL!aNfx+!rv=e>5c5azD{IRy@AafR@H1vLw zxSUIe^X$+9bYJ_G_BM8bzncsq;*um!KPnjSq{0D?VhR*zY3!iErVqP&W#HF>SJbF$R6BUSzfux4Pxe4UYWfs< zE8hKSo-IG8ruChw?e;m37W64BQw=6Xv3o z3raSVVF14@@8E|A|WU}?lcAjroEZ6+BS4=<-kFmnE{UeSHK~*iM zB6-Vm=O5MFbje{i8izE$vYjdBL7@G`RfANk{{DTszc zWDAm?X&@h@jh@xxci(3Z5|E9RQ?GeFfvO@9(AkRZcD3zKbHq+}Qn}6h6i__Mdq;>#$STEDv7aYrz0q=h5O_amv{{rhyg?~mSQtq_ zkkXZ-@43FtPPOh{bW)V9uaqb7z+>{@eR01RbtL5W4*JJLR$J;kVydwag_)7x)Vx*V zM2Dw@LG6# zqqG=4CA;UqmoSVCxnS)js0SR3sEQ!|$~T}WkPcRLERGM^1tAi{)mkwH9_*H0(#rfM zIkuY$Mh@T5rf)xFf72dgkPDU&cD28GBc9D_MpNT6{DiJ)TLdJou20Tdx;OEpnF7kJ z7N8e)n8Gv(dCns{Isu7f(^e-%Q0r${jB<41yv2aci7yz`Y%4~sp~pM8$qtuR{<0Op zu@95QD@|u{JN~9>bclSM9Sh--&FqNhV5!1eq&Pe(?#H>OJ~t-nO1t{JWV_-pO3iBz z7RO_J0i~Q2{52f?0EHn;>7UYquNX?HCd~h9hX0E|i-0Jz+(oC*-7kmsZwtucu->1K zx>|qy)o6Qh+a<_s{T7ot`g(UT|EUbd34>5-YBn7?+Cdf2lvLmzl zu8}fNN=iHiebVxA(seN$KLyrV)pJdG&S3EK&OcDbM6V(CIi(qzl)-3n#Ssc$w1`a% 
zQUNZ%2}L%bopGtb8aQul6H3ilUDN(ryMuJ%=V2q^m!--Vf_!VYQS%}7kuH;=8YK%k_{;su5|!LQ}MmbTLfytRbHX_Ju-M#yhf>_@pM)F}weD(!|Mf@^N4 zvVH6Ss!y( zUEW+k%%|CyJlJ0)hX%4Z;(Hax)K@2e=IfXl|}MeK$DK&1@?qtYq#lbl^`B;W>-8IxTA-{Vh7%3*KTB$Ii;Eo1cpONxY^13$?0cw4x=k z-JyaS^3n6+LxFby#YpiWl@1VO(nrCd>SW@j{681ZiEu|0LTe|+im1udX2=)r(6E-x;q z85u`Mcz^+Q$4HnnzBs5Wf&^3H_5r305J2Uml~*ujEFWN7Nr*tF#*PrIIEB^wwlWY2 z-4wq4<^Et|TH`6stF(Bt>ZLi@Z93UgJvY8C6QX|eIFC- zm4fyZ5JjrLY3O^2wz?eC-mbrJ;_w(AQDsE|PxlcTn_Q53-t_hHrMklfa(D%bv|w&r zsJXh_*TBMWO9z^ZCV43Y;10%d#X~%=3cqN4{?a&1iNALn8W-qW1NOM*Zluc_Z3WaENj<6f(3_-Tae)H z?(Xhx!QI{6og}!sd$8aH3r=tg?yleDoZS0f`vYIqu3a<547+E#`{_s4S`Bi+)sGQm zAo+I}@(rxc4CJ2X#wTA$C-3J*b!%uitkN`nM!^p{9*zCv;p)w=<<%J!E3Dz9@Uzj; z&K{>9nif82kR5t5o}W-e7FDOw2&CNl5gT2kq$K=o#SbFe=;wJO}V!=8ko{ zQL_P?XH@=J0W&2I#vp2*uZMT%TTLRQ@wKt!{bc=h;%VzdhG8IkesxM_=IKob1K-E1 zWAb@DuP?Bw02Qv5&VX*s%dN~~*^gGQ@E9q~Kl8sDXDywXIv5ot7fEs?unRQa%d&A?hsk;j6aIUI$3Ggm5 z#NU7`_IA7I!J5i`nH2_5jS$y%bEY(JajbfH(4=_s+nNCHp=&+qK7fnc`3U`ts#`6; z7N;Hp)`5&|52MpMRJrMjcGqbGF=bdo!iKuEr>zjrATCC6ZfqWZoZNWT) zu>>g|m%EU4y+0vGWomdJM)|uxE}WqZKxNc84b~M#*-(aDQj|(+Q@le8xxeINvjgb} zlj7uR4~FjJ<U=mzJ-Yh7N`!SL(?(*>stQ&RnbY`2?%7h@;Th zWQ_2eQSqPj=YV)1fthS5xGq7%E+E7c8TbSqp+bVKhhcjQZ9N>J%U_dXUPdS1a#`h7 z;DSq^6#$yyXV{}HHN-cBx@ldULk;EeMb-#K=wcx%=)T;_Ut~6YeR)1GR75_zDv=ZE zuHPD*HaYldH_`*~Xv499V!B86&00ng!(>;=}sA~7B# zuWoAN)n!gK=mGova7C()K1+cK7=*ouCLX)kdP)t_kKGF4idblUrNO~sSrUbpF^@pr zCiRoEq=ZZ~W}h>Fh5xX-{p(i-W>l2n#@9Db(*JGp)q&m}8R{`666`7<=^4u6_lw28 zIU_jhpy7G{<`)AHT^YnElgwdPna&Ok#2oGf(}no=ARs~FsS0x(>VZ`~Gw9}lRr^N( zC`M!4+uE^%QTmtZCX@bg0xZJd5Cwj$a0v?sL?5n1Zl1E^z9p#>0O*9^iwba%lk}@BHZibCZWiRfatL}Up1cR3gU=vZE5uGWC}F%ezo^*Ozo4A!gQRw_LLdKYRE= z*G`Pnn-1`%1VYHW0dC07wYokV5~P2?R4f*X#bE6IT(R9q5R#8@5R+#ui3%n|&!#nm za~5b%y18m)d+G4QaITfp#6PqEoMXTOD%GZ^<9;rr(&-Y<%VyHxb2~GA!!v?Ig+C?{ zN3=Q{%BVAwy+0u=v1*~a-kWWYbMT)BgZEs9W`+Ta!C_CtwQcJUlIMs z$L){^kIf~s_0ixLvhGOkYx2ho9+2!LqLIq>M|&mGD?$QO9-sG6v~IE$K+?n6P8&Kpyp0MmC_%1f4TeH z`M>Cc2T`6Oy(p{p{`gGzyj?u^OUk1GZ%I&p`R+;R?TO$}_eH$6UAyqAuT`Y=B9K-#(eW=kIrk#YytoCdr;Y#ik6qXkK7LDbSLSp zsoPA3eyzg9)p2x?=re9LkP|;-$CJTu)q=K^fbA6Jp2XKrxzZVR%OT-TBE;QR!{B5n@p3hB8?wgoWJ2$x?Z)v{YqD z4IihZzRDwx0NHR&cqDg4EaA((@CFX_u}{qmjD@13scPA15SvD-C6hZu3yY9?xGixG z9#0|wYlBi(H8|iFs>#H~xS`gn^8mFE3PwQ!Sz|J_*-JX1fX&LxCPZBk`oPU6*bVj} zZh?r zs7~K>&G{GL7xI5G|L4;95exFSw#eC5^*9LL5C)u~#8Bf~&e()9M0<$>94j5zCLd8; zRPOqBS*(WzEi`U5q9V~!ILwTU3^Sd~fa;%L1_q78VCjnj&F!;ZcI=-8^=tB>H@R~B zO3=VzV`n|^a)6bSWYTRWNMBH}bHK$vLErk8k!_;ph6ihs`D%)JL^XYi3_&9@fL14e z_Ab=jV4!LDoV+1qhjcyc=HlX_g10Ze=5eG};HP*8T#eAAj5<>6hsY9f(P%hyWbiX5 zp|=l#+vz#J48wDkN+=97!FXfFM*gs1pht|o;#Dz%XxLtv&#|!D`MXy7wO*0|Hy}Nd z^(ce#h5P<%Muo7f9Ntr1%1MvmMz^;K z-r~8~8ga@zS-9(c3&#gF%2)3~7*f{QN2!>wpnoe<2DBmPn2TbwVihvH*?7D0#n?kQ z85SgJsnN?pOG`QLB2zGV2d&ZcCV%d2K(GdwFi4E*zyZpQBER_qTBtkb!U}}Vx5==n z$YUNweV1Y0FvG)Z;kO`sZ9&0?x4FUL0r;}(puk4BGaLt>V3Zigp|~3cJ{|6@rCI!C zzATael8byA9_el%F;I)MWE!K%XNBa^h_@v48KIiDVdt8AfkGU+A;|kI(tj{uEM`uF z7KIktWaxANTpmPoEATVCAEi|2?ZJS{858oM{UYH!`L^vc7w2BH99bK*GO1p|E%IbgW&jeJ%14)n0QfH_lW)4F3$o4bo+>`cq>GC^27IP<4XzN~y5XXn~k_-`}Ja?}F?XRNpvGuZVEOAzOv3fl(t zlN97S`$gHAfZ!X$r%h;xpTQ8gNwLxhhDip?Ei=lH?Up(4A}9@~*Im~4LLjIn+;*#X z#P3?rG0QoWAcM4t;S z4nCdT?r_%85R3o=8A|56g4hdVSQ#u$LSFt?#vHy5l8q6h%dL@bQ6jU{Td_|Do2&FX z(8ZC)S3KO^qg$Hw9!OP5|3%82oP@E*mI%iVatPmsMPI!@UcSWbXGk{G=>6ydh@4z_ zK(%_H?A`ieW!{~f!YgiU_CY&F)%N(B+zT)%j9|@8*0P6xV2jbp54je4XuSzKYjvVs_`GJ>N?Y z;K;cQDwqakhm4>thf)XS17L+750B^B_X4$><;-b#u>Z0M2&}*YZ;AED0BdfJ!})%t zHuJ#hYDhoswL>gK*u>GL#WO}fKkxYG z!T*o95?utI;B&jF!2(Eyk>DQ}@b=aXeJ6Ya;FlN4Srw^6b*__Xx&^mva0Z{?r# 
zKrZ@6FEI+#fa=paFLgqB>+FBJpbX4d)j3#T1oKI+#%MIg+z2KG1QoZ^4<;UWMVvR4T zHqgfwqA3Jk?7bCwxW*t(_fP*Bu;Rc4M5$G1-0g#Fezg=e$m*R5mjzU4Ja3L@gP@Sq zzVWb~9hG}36%4#2Q&tbFs_SbPYMf6UA(=iuxH#??k{n_N;jH!-*KKbHE+#fw)}j+| z8$tP$hyjfxYcs`_Z1c`z!SOB)9vsN1^|Ksf;=I_Lrot*&P;7)UG!nEfw*7Gx>P1@Q zQyfnW8T_eZ>~QDoQr@zwuJ9gWEwN8}(DLifrbLe2p4Xh@a5}TDT%|6h=>DM0It&le z=Mw)MADhQ~{o#mKW-_O99!We_z08&K_+Zge1BbQ(AF2@!I&!`Yvc-{-JCo1|?%YUG zHVZ1+&KKT@!yhCw#{UV`B?^ENUC#qiJ37iqTGsCd3(GNsF_LWKf*62V4Qag8E)zaS8zNiVwc`H=vLQ>LFq~_CFoDT=g ziOo*syv)@|`dM#2;43GcCy|y}IH8#l4WXN7RV;B5D^Hj9bybV@KNJvPX?Fx>^P9dhrVjYmc2ca!* zRc|{0pn$>*I5t?=7@^qTrRc=e)DLJ?ZCu-zTr0uN0-l)#U#|n5V zq7oP_h8#c^ws{kKjlm7y?dc9{y<~Vw^+}bBzp_qxizuO4_?{Z0t`rHcXT28enY1MeC~dzx6XtzzMM`4zl6MnMQRwL(N#z<)?#G}+G}8_)MdKGDx0uG=baQ5Q*C#;TGU zoum>si+lq!fL(!>k%{}?4VVA~nf+8G=uYe8)hPBpY7*=17cQ5C?PT=f`RW^q{s&dx z#~YiA#a?;SpEs3>Nl7F&TFnW#wjG+DS4U);85tJ<8IwZZ`iA06)?MNGzJ{|ClILJ)A?uEFPs2DgctD*KETx4uwOw)m_B64c@kL zRab}e`U85dt_=Eut41&5PFWPX=w?TM2`O#)EP<@Op^I z@arkai=?7j{O8IMrz`qWDC)!tZJV%pEI|L0O_kDa4@t`(I!4|nvYcdI6k<|Abx#vF zIdL-kA%u%6dJ_vH@?JvQxP79-rBqYQqY1lVR_De1QPf%D1Cx||Xxmsw9F~2bZ$kSb zf}_tQ^_5esTmspa00+B%Eh#X_i%jbA$8I}++5#Q2X4wgMhv<5Y;wP0dJ-&>nU5S?- zpE?f)w9o~aV8%QRK@Y8;rtlh2ln@pvc!I4bJ!{j>3g zX4+-Uss;>E=%{SeyPOBIXd%)$jePYpkj+O}1^Sf6J%Y80S+rMfo{ zZP}4M$@Xw_NG?5_{%SDbj7DeMcCQvtQ)9mYe*f$8HD3SeS2dYd*PbxVLWf5Z9*?`g zQVAA=_Z&?-k7~Jg?b-m7*X_Hmm`X9vj(0=^TGT<(O z$E(LnlUnN5Pdu!3-`bZFfd+=A8xj`3LGFSrDd}6ac+n%s997Ru=5xc{RzRw5)P9}1 zORg)tWZ-lZDPSK;sWPv#nn z4=ZjFj7sO_<(b@`=zcI-eq9%K+&(h|!RsY*pSI2OQ0lDUbGsz2z1&ut6nkm`1mT}c z{%O4c3T=D5{nTaAk6ywxiWGnUPE+yNpttx>< z-{;3s#_JmYmMMTc-~GIvbls*5^MfSLM|r7nnheIY)U+KlAy4+s zeqw4(lbOGU?1TG>`~q*j#Fh-vxNrv?sArY-rK9JUMl8zO*JcwyHW|W^K&`ensu3(8 zuYgE@%b-F*5xhM}g(jk7pRm)Wd8_TZu8?2|Ebd)j&yZe|1SkQvumhDB?kdP z^$i@{{6afTnMq^4|H7pNk=Rc~fNV9zBXEN0KznIN&bk*hVll5kzGwtO-)n&fMy*<_ zRGL*x9hpi2s^)F7I-uI1%O;!#CktV=vhN~ zcfCNEOy=}lG-foOxpbI$_IR zuR)2KOEPjLdo@<`avpcMacY^^;kbjb_(>SF+9mcVWHGqxsWv~GGQ0f?u~DeNqj7lE z(xj~8ul1{#YmM6Z`0eexp0?L~i$;Z+WY7is-(mVH<@zp+oWsVjREcooKdGc&1V zo!jRWV2NQ|h4-g~jhP?|pC4~CCZ_;tFy8%&bC$VEZL42rFd2-`yczB*CQ&gcDl;M> z=}q{m3LHd(F1Imvok1!c;Yj0Z_gX#ZSrTdA3*XI9ASEpj_{`?f>jwrHq{fKry+^@` zz8HGvH>b<%`soaLrL3~gb&t4qx@i0+8;gz;DkJUym6K!am7b;|nDht@obbb` z&CWp^P6^eN-YwFO58AnEs$jp3+2>RN*Bi>`wV+7wBZ%7-N;Gmk&D;uFLhoCMjQfqj z57zQ+Czh?}p%Jriu^_5-(IE0}x11jN_t1Y!df5jcw*b;SG`bQWlMS?!+PV{Ca9B~@ zaqp%nSC(dszzn2i^?P4oGhSZWf3P%`_J0(}YPoX<(Z|AV3JWB=U!B`!KR=kXJid@DQP9xTIpoH@G)RBtCF+w&&(VVtoY3iJa9<@#@{eH&4 zLT0OP0LLxA%T!z@GR|zGjsvHjRZZMWSBq(5WYXmuviy%0I2u}u>%F+`p>n_JU%21B zNDPN4B=PvfdR;u?5En$Ek7lLGympLM1WClwyk+JDGV^x|=x!-EYSa&v`eFPW{K-Wv zYHeyW3xy=>xd~Ds{O;+8^}dS4?#F(RrJFA#!I{V_>&z#xnwD#j0}UWI=eI$^!3NQ% z(Abcu1JI#K-`qG9DxL0n5|!QyoG%Q=bjE7Io8P=h*U1$W6iQxsy;Tqi`G^NqdE``j zS~cs7ezZD?Kc4dX>AA(G3;k?5woh~YK8c?cI8FYJFKO-NWVrj7zEh)kbZ=>B@;wB^ zE!{4fJNwB}qj(xIL0S?y^RXXwPiIu!pmyh;+N7*C4*cny$94!t1%nnBLif^&c`=H#|?u>9#hl#I9;g>WLKfbWn%xnR~ z$x`uttQEzA`WW!6cg4<{(5^ouB;{aX&?MU+AsiTMgvBQazFPl63`f^yIU{pDN-yAc zAwu3&1mV?eL-f!-mjWIklhZD|W;u6I_aQj!tGMU+uP9Q<=JP%!G#Ca(%kX@@aNLDF zbdR}8U6SdduVlqDR&Q$Ag~;6X1Sp1Q$S}{+{qOvfi?aw|kR#VKZ&z>qJ(Ab?Qj-lh zzn52cTGujlULX6FDnSP9p;qf5%2uUQ>@ycj4Gf-2kMGix71=$AZc`uDu9DQzUwRe_~c~NtD30!%4~ItCG2DpXMQCa1Wd6mb`89 zNnA9~(?o<#GBp*+Pu&rPuvY~xS|%y$m+emz?)cB29|IchH>zox`Xf9DB_5#dv_8YT zM+T$A=5Up?#Cu=I1aQ5d#Pc|(Y`41(yFHx<`Ngsuv=qoJR01&wHQP66!R9|(xUs>m#j5mw& z`5PtO5)T{Pgh;@Haradhu56l%3Q0IMahJ|rYU#iH^VLs>j9VBkA`Q4!X zOuZV~5XLBQ?DiMLSOdRDJmiG|qokB|z?L}sh*et4f}FIObbqN5{;^2A(31cRD7ZK8 z1T8qm!ND!uT%~p0xjkFX_5<31Xs~_o)`CkN9(%9C;jXoZ;s9X4MRM>FC{@baJSsuL 
zh%(5+fDHCfJ7|cy{n4^GQ3iRD_(BKCB@QZ)(u)NiqS?v(ng_1CQ}cSTN2@mA!KwpF zFl-9$zsBu5#!B-#yg^!b2+Uri7Ou``orB<^K@{hs)Evc-T57MBQWVQ%Y5#?SCh6F0 zrzRf-Y2kZIf|_x3-mirGL`4dsj%13J9v@(BbKc2;L-0#sTYHEQVUcSvrJ1|cUAk5H zGXK)wa7aPhFoANQ2$2gms;?lgIviSdsyu?=CY>*sR>qO^F;C(}&otPzj3T@{e|RJX zP)W;S0`ng8yO{7xo7eMezYt!-$C`R|KQX<(+mhe*^L>@xUz$bB^3jy@;xZ7>&p;#9 z01^>3B7J`MC-NrW1}?fg?K*Cdp4YJ5yJl(e`Ln9HRi0%!pB#--K%akbNS?~SPWrx{ zUlI;qCskJ}T~*a3YVnjDV_7A=mY*TcfRok#ao@T%N$e9MK*d+1W2!%ztDC)Zxyq0U zk=?+~2@nacqDMR1eh%gc3KsxDBq*}UVp?rLmrpO|Q7^?vK*ym!Q!4IdB zk#=*o-3SmI*i8w@Nm*%gL87S?(CZsd_YU$+K}xXwdhB{#_7S)Y5h#jjHxnK68wiBx zawFS;>me>QLXOI{9kTYt!-Hr%Z|BmHR0||@9|F7c0NjrrP|>o^N!uEf#RhC@d;C_l6L#~nrx znUW|exr_kJM<82(n(M;Z)1wat8KM^VtPo^;)BJ^}mzBYCb-c0(kMz2zos~|6`?`1U zm|p2#=co8`INRE5;f+(ub)iZ~(#)sRdV?>;a~5mU)6O1YQ7#+hLTm47=wauvNGZ|m z6az?xU1?z}t&>xxZ#GpKBIYV3-uIH~yM2P1z%#Y^M$(5BqCkV1EbDCf@qBomBrCC7 zRD8X_GyHjPuqz%!tv2s|M!MUnw7tyKRN?U&5t+1p7Ym=GoTJ9>1JkPl>r(=VWcj1B5>jl3bJ>kT?NY=yU zsmxV1ottZQ`R@T;mj^ub8V&Ywg!AdeFKW#~Js;lqqM8bTMHd9*rcZ9_&ZyzM-Q zLn%!qsq;$=8}btxBfr9J{lrstfkpS!>FfHSYxUe#_bZB+!D!j4$RJc?1b?ezyww@a zork6#46NahRsZNgjTNt(%i|$}OY9ReX+wzKSigVQh{6e#*C#`%? zx1&o_V)pPUEPEx5pabkvRtj**80vuu3xzLT-CR+xz7Hq2FR~auYMon&T~YW;%}H}M zS_h~d$DnAZH1nmO^!s(4h^|1rjcltb{fJ=kT)*p;w)NM^PpV}nOonlitF=ixKWo_cp{2KB)L3MBmqNL55Zsa{Af-uq?`Z5dSF6OBF98Z{KRQ08Nca zBhbQS>Db^()u5B4^j#PnlQqEN^g9ftiVi1OElMW_GVvQ&F!t=1L#L)F=&J08(Nij( z!LLgNZCJOBrLWdIV?!K@cB<97?VX7V0WCybFS8RSIdo})I%K3Ug5j5E#pJIaG!TCj zyUE{AA$2L+J;PL$G#OXe_|EGI`#|fwdi){v2VDNn_1|X&KrQgO691-jkphad@OT^x zsbL1<$RB>!4q6mo42mOO0g+<0gYh7wW!g30P23a_BS@F0>er`I9VX{^bc^UHMUpUe zD(xg(Ak9d%zN<2{)Z&m&<%aEmZHQd0-z9l-%%8)}3DSy}JR%1F_O%Rh;%)SFAq+@m zDplI+_N1BXJcOT;QnAefKGt@HOC6gBodELjM*YT7U@gyw+eKthvsgllMYA+$|dG$HSG!bZ7kDHKGWTzV~9d zt$w%>ja)aFEBXyk3%QSK5ZUydM>>O>lR+Y<_9BHC}_zY+;tgSJ~Ix z_1yfWlJuV+kluflT3o8Z*`u1b#*7@1mB}liVxCwoE417+T+PqfcP29!_T!>%4zfOh|8DR0u>Jr^QUR; zCmpR~y@_2k|Ilg5DT1ULJ!GXF_Wl#l+6SPfGa9QEW<8(jXO|dssEydi+8MY-x2wRE zNRa3Z1OkS4jjWHnU@}d|s+Do0o z^}ghTO;$P2HKW_>Ly#X5B||W3Od0O{#OoG2t7n)3bfC?*q>z7U7aasAu8Jo4U%BsY z4-$V<?!A4gHVAsZFRY^^1RP+ zQu^nLuf}maCGBkusP5!XJNxFQ#kOe&KlEQNqIJB+AaTuY1{dl~qdd(wA6(juZFUh| zDs`Ln`1Cs}gc@N;A5Zxob7SxYW&B>E)`ogRqNum;PxW8hb(<|#Mq=;W25s68nT+OB zp->s7ZK4U28x>;Wdpxd-^Kh@*i8E+>Tx*`F@jXvOH7vV=|bPkw^&(4LVFnXf(yEzCE4G9 zFjN$SAM+4WQ(nq~M>Lng{6uJ+X*LQF`?qsOc(qd690-EMUo^pe0R; z?E5u|bVuO>4?=V_&SuZhfHl1r)6$2x+rB=m`{CeO^@X=MX%^Fpn)-R#B<>r>Zg6KZ zz5WMDt);j@xg2@EFMSq9@<|0cA%C~lU%(8-A6LRGVSwlFH=ZHXJxX1Q^tHg~I~*z= zUZM%MzWjXvDM1ip0L_-brd%;b36 zM1GYSa+E^$ql=DQosu7xo#1F7TL0{fA_bK)GMd~>O2DHTKi64iP|jDGjafE0FH)^w z$T<`NVxusQ&d!rSSsgs39u2?v%slk0%>e;NpVOs5)N-+=X>r@wcqwOpn zr8F8!FYI4$T|f-k-&9mgp+Ei4u!;7sB|mZFyfacmj^H$u*@pVLFj`Z|SdQ|IZD72z zvTk7hYoFZSZ27xN?e_E{xeNivODh>t|K)u^;owMdXp{@A0EehdlgdxMxxzzT%+4>QOBU>DjL7_JcFd zL7D#D93lfsaLujxs6y8u2Z1+`z{im{pkq~%mU=vkH=C?QBjaOM_Og+aRHlSS;98(A zXDZ!G5NUoeWH3vza9A7;M&bVQAh})UgK4oGAK$9=`I)}Inr3^Sn8$Ua9*gq=g=Ywa zmF+?e*;Hz%BL;{TH`+Hu=TqEZbV2opIC-@U(vf&WyRK{-rEdqPNP~a3X))~0vOwKL zW3QCpUoDa$S|4S%P25;NS@CL{BbUok{BRZu2IYdlwBP-cdq2jhxvH8Bwjg8KF%p9^ zSx2>rCQSn$wPY%+6&~-(Hjj6?!$aHcByECSQA(+n6pPE*zlyIALLO-gTTJO73I8y( zi#dEt>ND|ug%z=;V#9X|t&EPY%Nrsbj+|y+#YAJtLh5<5*MM&g&iiW(UK0VNHl&^5 zEx}mIo}?Dh;J;@Q@L@=LlMFHg7k2vVJfK2WpaN6xcp=v8<;g2AWc=k>{=x8JK4K*L zOL%z*kM1~3LNt6Ru=7#w4?&z%^OVcI^af~h1Xz!e9;R;;e!gAk&Bihj=oeQRRdJ9o zAWTSETZg6ZT#Z%=p0uhB=igWITaOz(m@b}4sVT2Opn+W~^(y$UoHcP$JC+rzH8RPP z2)Hw<)zs?M~p4lvijy+U^Qo(H=0&mWUdi)?c(4d$u!sM zgqf6_p@a+qiIS|n2J0fXmHWpe!jf^edhci@mi30f?Tz4^A=Gz<W5x>R;EZWAqW}uk-jFSad_T2!a)Ea?M}zua7YUK5Q8T zmt$3(%U!SjMY69Md^pb;a6%m3cj6Jt64us%AJYmB@~5)NGv&Ji 
zG|;h_d*nXf5P7ioLl3$jUQ+BChr~qV$Wtm#sK* zQtW!EoyayO2XTgEShOzHE$r0MRufqivUotf4;LPOC z9gV-Il)T&?PC8)9%g+Zi)^6`u1L<_7$^@b!A|#o_vUpr2M-=|K77_hkixm2>{+=9i%qKz%sN?qm4L}+FAX0lSN&DU)wO7?hSgdlZF{)c7e`XlI85EElqf=pobk=ca;rErhhBGTCHJiN ztF>0{u5=ypI}CIx6^7xDiBeYt#fUaPcgZe(q1W_BVXD@`C$h{?YYbShAV6?MZVCnR zMuu|-YRC<>m`~_V9%k56fU#>gzf5q6fk)k(&L{0Yw^Ar-V^{0`OeK}MJRfT{8d4N zi1=E_I-7cbN$=!5pD$j`zMqn&tz4-!E0Ih%Z?~K$`?+^U!E&a=Oe6U3ISPhc0|Tsw zG=V5Vf0jp9a7X5wm6?dE@iQbeUYEJK44pH<%9kDf9zdRE9P3^_4N;j>df>5>dB$J>>ONFm@ z-e{{2NsAoBXmaf}+YP58i)(V$nvNnq8?DsZ&zP5%yZbVjj$lgK7OfGm#mC}tR<&o$ zJy>t!e)PajA~bsvLO7V0Ll5d(=FR-My1>QMR>9-PhOqOAk{=6$j0=-&T$(nMs+m+> z9kYM-?6TSS8$W6FcEQJlUX@QN*vNkj)esE0q2F954XOryo|>0q|f z^x`N{(quSqt@N4N2K9P>?s`__*HVPh~@d`5&T?g+9zc730LkvvHgxi;0>k~`NC$TI!vrWgAIjSBUb+v#RAXeW3$I60=& zRxg%jpF~*ZmhR!<1i%6>RA}}!?p?yJQCOv!PY9LD>?g8>rWg+V7{bBTSdx8w86}k> zGqzEL`qOBO_is!^wMuf4{$6F9-mh!>Lb4$DuB(VP|p-e+W(?bp&)mz)pmz8@@ZbW`oqQ~(0kLX$rA!%I^6tS+x)_k zZ}vdpAyJT7DXcDsb3j&3yF$lS#3Jo8Ks*bXtI`zdj|53p!H)dB7zz-rKvo0_^=?k{ z!a@DL^Z>)yZT@P_TFc};r+fT*JaU`ceD>2bp#nNIdh#df2wue|8Opjw*%j`Kfw7;? zUxM_c7=*;t>{Y5%vP9k|5+oeGGs=BmMV9-NteuP~8iipNBKRUg2fv^3ZMn0q=<)V> zf9YBqZ+a_YDId~?Mz>5PbKd!I4!><;S#F-C7G zRa%lgY~5x%KQ{&yf9AV^Gq;2_C?{|zM~%vPdSaS=5WRjIg8puPrkVeMFs%^ToqmeBu75eSzN5@H8_b}j!Ezx+aY>J`S`}Nn^T}~A5 zt>682cLa#qQ2{U0^{b->`yV&iU!aPd2!k^-PskvdR+~ywG-O6wTRW*z@Fp|yC=v3{ z>KRZW_GToq!d0|};LptZ?FA$V&}d8TMHL+KdL9+{CY)$gP+`#N$N}}Qxs+kS=>Y`P zFI$j*{q7scI&iR-0F#h%f7aj6(Cq|n578qI1}V^u2fnd^GE?L5lT;UntwcJ8gSRv) zin)Jq(CH}wy20%DLz~yFUS3mTf*C9{@mSIzog$~)-_D!Bx+t1|-KMBM*!REILQlw? zipih=VW5RcFf^JE>^{sp&`C=N?3Qy4?|pd8Nbm354>7l}{?Q^xz#!K3nRXNZ^=1M9 zm*$QXXk0-^_pq|Lh9P4RDagW-7>t$w-W^?1OXr_~g(}b)CbQH*E&*>~wxxiR?UB(~ z*&u&W7zvq32yDGJbbF<%8VgT!|qBXD8Sf&h+MP#I=trA2H|6!se zNPuSifI(dOs}&?L0-6aZDvTfg(LKLA+?B-NnrQJj((vn_@$iq8@%L%`H(217Z}bdJ z(0{DOzh1N$13cFMU$^?(xN0-Zz5DYp!>H+eU-k}WP0AI){^j}Nd11Qih`n^kD^UM* z=bth0w_A?@R-O}IbEi|UZH@l>?{yqtZt8cwc|KAk>(d2+p9$+4T|JV2Z&to%Wz)OSI_z3qejs9o1#h(L1b;x}n9hd-r zHTd7YyWxQ`l@9h!_Ky+#_bZsP{A~v06P!cu?%X4zq6*FBvz0V7D1LAFnQUQXWhE{* zKEA!XE6B#iW@2husJemwzkY)#3~k8SR8&U?FC-XpYC7J)(b1VmKf7aleLW``wU>*H zEtQyrq*WGXrQIt^M|Wj-Y~{-#vrj4K(+^{>e?H{@9mKQ3YP<~zem4IR-(!U?+6VFa zh>i~uB$GlX$HYuSH;)bv&jDoJ6m>d4_jkJ_+S(K}N|2#a$ZrCy<-1pA8GoLF;r`#d z1zh%kE7YNxbWngifwIO&7@#1N{CRPVgcT(!syF0a5~zP_YY;3#M^x!B%JYBr&XFY1 zbTUX$QSoYTN_ZrVo^WS4X>gkZ-PP4~duvNLS6+C5n1pI9nUD`YCxu#F*}y;{b5KD1 zfBvs%@^}khP*0DbiLs5SoSYmLUkKz(a(Bi>75->zUqigxvBrUj;l}p%&`9hXDuo(_ zIu()k4x4?HTwXVyaq49saNc4yy8p%EQtERD0vXHgxe+&wuU#LSYqj0b# zlvRX|%+=W&!pW!)d6aZlqZO5vs$E0j_RpIlgTKaga~!rl{CK$jH1zFT5F9#%008~U zC-i&bRBXTc{F%oib0GSjW(pY<{%SHe|ML`SWJd?j{$jm(gW)%`hpTB==j($;5gTPL zSVY9!T4R24Kfl-H%v{9(F}@sm0%Ynd!VN@&>Xn7&kQDjCX*6mCOkeh<@}ZBHTa#>; zTLiy_BINUW-2fZIKwC`a2!8IIi7_%VQn4RPU%<-8Bqb(Jd;yBE<9M4w**nG!b(_@B`4 zUph75v)u1-kq9f2jJSD{69GAi-XEf&9^N0)5E#VMSEVNmtKvE2dMWOeIiAP)D@M<}pn!n@{@o^ieri z=xvGHDZ%H~g7kxW^9f(p8FhVG)zi~1ej&e)E45)^?OI{Ej>f;nR~r0Y1=U8D^MIt2 zGElSC%uxK?XJkB5n%h?68$b;DN4n1HCh}V_ZlUQ@I)bWyiK!&&vlyvF`o_` zQ)sohW=C%htDQ;=dcbG$#W7K%q7iV}_8$NWWMGv}>xfh^28;8aK;t->bZYqnlc|*A z%aiXxi)4}*DgCob8R{r81w}fYPK$_iT9wKOT0~rKL`x`*RzozEYMH=k2XpW(wzzVU zjB;d@_x)3e^S)A<1rVsrE?C5r=K|UKf9afmbk|T7B-2=3@3(JBbf5W!S65f542m?A zhQ>Pks*yv24UgbB(;OA8!=ps(?HM>ZxUhG;59e%5$=+jNmkw*&eU+RoS63Y)mFpXb zc`e~24L6U%ke!wGqpdf4&P7Ku9Z4>7`bAf!{w?%6QRZ@9U%Ev`50*F;4X{<}V3~}M-gDI5R3iu@T({uI;8 z0@l{nr6#3JR2Y5Nhx7Zubfj_$)sPht5yA42)HN_5lJs(OlXo;N3(cO3A>pFR46%J zMFdT!46tOBjZ`G06cj%~Lh|JGibg7(qZTH%M1I7qazD>Iu-_S({VceRpvzr59`B#& z>)WX{jlPyfuP+MN_yWDJZRM^;rn&S@O>74$bLJ&`lXm2TaRj_-qDH)%Tgn3X*}Pt{ 
zWonfbv8K_$ws?xIj;1r7jpu!tkl=qU;|AE^J)cnymSzvH;=81UvO}~?6wEQrFLu_) z$T$S*+CP&}bJ^m_=-+R#svnF=#7v1e^}CYU%$uTtBm>7~c6aww43Sd3Gh1WtCuB1ko2)li2X!mi zK9JQEcAPHO$Hfxzi>~>-HnRCdo}%tu>`xb0ka`SZ;0rdmI|$j!Rw_^BiOy^UK!m9l zA?oWBR!UvB`aXvqxLqHxW_j_0D3!uNw;R1*{-FiHbU)ikI^N&)dZHS@0>m^TM*Z)> z_;A@Ost4kmh=H8YW_RBB_R&!vy}4>A|KIEXE*gi^E{|@TbN^Fp=@oL}po@oYmn5op zt5V@b>D7?W>_yG`dJjp1(>z}atW36lU%BO60hsptxgWeblhuQa|L5w$1jwkXNz_OV zJENd%2)tNxDi-Oyy|}(s?qLyCe9v!hy=dAOT8V|nnNvh5$ZSdisEDSr7ed~xz51#S z=Jnm}fMPT3qAi?6Sk!Q0SOD4n*w{#o_o zYRB8MkIOAn9b)1x9hS3Pl^IG(L!Npbg_S2^8rgij962Qjl9oy+K#~E6qox|Jg7Q~l zQ=`q#)CK^7v()Mor_}k;E79}s$iROe5|BHTa?2jXr&6n6D2|}(ISGcX3cg6zPaa{n zU1sy?HqnY?B2CuCIGnque^h3YWiD{%r=B27nmu!aG9S8QDmk*dIl7-Ia&a<6NM91k zJ6daTpw``z(N*PicMgFwrOeh3bXhehEn$#hAyws@D1*?RMcKuIhtToPG zRY5}I01TU^kBp5i)OMvsPCkoADHSHATt&I+edfF1kfIIrQu!3JPz~~Q>O6)7LM=T# z0L&0`orYc^qPCq1b-DKc$a~AEs=6F#b2q+7b-&@J7aA_5|P=x&hi25Ata zyE~-2yYEJPve??D|T5B`t3l{WI05KZX zfORDa9N>P~K>;B+)C-f4slHb;-bNsz#pi&8F?M>Hjn|>L-A`j~ zPo-IcS9&4+gaeg_csx9|Pt{$i)zagJ&sC=TkFAxDF}f&X!b^5gZ$xl15dU&N2vv}~ zDxW0^s4UBI!+sWUa>31Y|Jk7kNU9?}!s6{6g#yCRg?;*nz$hS6%!(gF6NJ;EBk8$e z_j+34N$CRH?XARBS6O&Z0V^TZ^8ipZdgisTNrB$V!XmStFTNmr{v8Afy~J1 z{`^g4c+t+PFJC>l#@%1orzu@;a^H>lp(Kmh^w^oSllbwp5#?+Ce|GL4Vf@+DG)YNG zc}7KbidklktT$3>L^?HcNq;OMzZn#1T(*CWJE<`!jnB6-{CfhaMFCO*b8$(C^@4=h2AySQ(P?JWznkR?z6pYy=m0qYqCb1XwaF&{Pc0J&T0RHv1rk>*M zBlQ8LyUt3lLCQzzVXPf-b>XfGPG*kVp zbRJ5UcXlo!O<-cR)sFM5~{YkizTfw_Il~K**E=4f+(Fps^X<_i`jU#ecfBreZpn9bSj1s7DCRyH`LXS zkGRC8WDxx0ljGkf`n9_(rME;VyDeQ6S30KP7C%c`R-Ufv)cH25#$cJ%1Po{Tq1H}# zd~dTXa;!luLrBnH$y-(1VSkJlbZ7<=6T8BbSyQjEc^?s^fWvB)=5~WHJWN8NXePhd z3!5j*LIcj86QyX-RuEdD+Y*C^)rgii#}_r7 z8cG5okfQ$PLv5?4Y(_@LS>RY%Wi;;y#?$8hJ37C9#E^14^`Bmyh4eR}?q#z(`;%uf0Y6e+n!e1{mpuJ5yVEYJQogDT4bwIKJ~r`V_iOnBAd>!S zyTAG@;kw*aR)F;ltWK>V(5W)_WT&JqT+Ih z<)b^HM2wL|IMMTWCEE4jx#}JDHcNKtOr|83S+Wvv2|~#>e~&oK{1GrRr)OdP4K#41 zeyLBSG41&Rptd==rHDY(Qs&*2#8gcspCO_I6sO1pVx*CfWLH&j+XzyaY*md)K-e2W z zjZp&M`0qdGC%2W=xZTj-F`2*FTTK1-EF7FIjgudTu_*fx&uTH19tE4x`MLNy?l^~D z!!!ygYIUz&MIYXpZXT<`K_YHGj|iCH|C^coy@5685)C{>yV|dT9jA@_mca;Z+GE1Y zFJwe3WSCICP*pd&M=>yvuo!XqT9+yi{M83eZl05+|0_8d3IyRl!Y>qsLE?*$29!wJ z>bN?qc9DpeYj(Fx7D&_es$;(DwO!rbPM-=Vs&&Mhg!1L7 z@MMW2!ZLvWeiqKy+S=MZ$ws8@_nMTEVt@W(W9(a#h!(9-S{Owr9tRi28}B?ifhg>Y zJAyE0ZHDsd>PfJ)d+Zg0o!nq@{dqXLq~H$i4dNt1S#Bv4c<`xNncK>*cr&dY^YA>%oQ>68zqy;)XP7t~DyiABBuO zh4S}&e+B{2pN2lQZA18v`#w|HBYuZy#fo!%e+PXE6w3#^cRN-Mj+npjL*Ob~k3iiu zmLAmOqAtaJq``cVAp!9A@4&-AA3{tkA(#O_<}Z@<&)|OB1dedvUOA5P^<@8j^snIu zKfrUi zA}uW`)_i<#S7uKL7fQxQV+{v>)l;hi!3FtOApLJdO%AF&x12ZuZhcUcak$K3<;W7w z4P6A(vV@9`7t;F|)AT=g2tdmQ9ELusic@N8k#t~nl;h`APd9i zf6?oIM>dZix`v_3qso`nHs6&KE6cIZ47b-$QccYJeoapSSqO2fO{<1i1oysb#fm*9hQDVM7{}l|3+zPAN*cZS_>)Q_O_^)9^!jLxJcs#Fn zML&)zhMvpBL@>YB`{{6PzxbKc()DX0v3_3G2L9?r0!-)r>0buj@ft%G;R&Y)}gs3@6u-G0{PxFbO0(9jf!S+_=}A)s))m=FHXGNceeUq2xxLJDU{O{FIn27szO0O+EY@j3ULu5+ZQt*Mb4OquT*ANPp?Kn1`*ksp8m zzJ=Lw`!x#y{_*5$H*!{5OfNcY4k0Nq*uHJLv3f}(W2NwL-xwXSGFSU*X6PgI|9y3O zYD)iLGJ0i19HdLuk~JGkHAdp=G&d#LY@H|wLL<&qMIyl797?=K-4d5MHw|S;Yfb{f zvw`w5kr&gS_o~kGC1|OeZjUwUoc1_*T#$2n04pd3aGS$al$oV}Fe~5?H5#=de^um> z5P)sP&s19%gzSvviIa$iWS?v>$Xy*9ZvATcN&R+Xs*E5aF_C7xSlR)=Bn+oP)mE0A zHoxBTxLpVF-X1=54GyXV18}$4Bcc{^1pQY8_HQfo?lF{avu6zsbS@rR^XGCPKcwzo ztoLu_`I@K0YrVZP7k!PsB{&c=fL>1yOfHZ<;AX@E^x^gAxsPY}m$rm7q}0^HZL8f1 zdGEdI#wtzqfv8$6|AdR{AxElgot6x6z>H!;E5>S13VM>;T3S+o_Aa?NqEW?tJFoASQ6nalY+-k6(XPw^>3 z?t`79L)GS)KNu{(0#sCq0Oe4)Lb_JAm8J)L4zt(9XcoT&8kOlzs2Il6H00*++kOpf@`lxal9v-I7T?eBTh?Tqgiy4Ii zUVpy6EfUa5myCww`^Z4DEBlG4irHL^XMQ9mS~|dL`764;e6w5dYC!E9zdJVLvSmnk 
znYYovt#nnaE!l}5B@KYX3To9gWHb3|He5!^=ZdM1p6##|e7}VJ4`#47{hCcc*qo7n6#oy$^kp0>KC3Vc=cb^#c%)h7O&GKW))>9A7x)^jo#km3 z50N}G^ud3P`xK==O`%nFi1_Tnt}5>`+s7&Xhf9@6+ky)rtNg4lz@4_55bSzEmoJwu zWH(OvZ>#vx6N-uM$NH-|lI%a?T|+2s>sg6Cb`Bq(JTTZ5fneCWS7mr%g<{Lr<}IEk zv!8{fg&mf(1;#(7h$)p{z-pi`731BXQStjCBl-x(I>Ac3x(kp}QWQ%aUVIQrwf|uU^# zwe-~fIW6quF(x}HrgDP%{ffVd@++uDF({4fVxl1asxKYmW$&L0*h0yCs1uN6vsQX#GFrM2r~za2k4f^>l=wkS zNlMEbyN@^fc-WIx^pT+LEuyNXr=%tRihY42&^e%$IYjah5$PppCG6?;H|p?(KO5zX zp>Q@yr}xv4r#-(=Qovnb=VyrsiyHfp=?DLFD`EPNk4c}NjP=Pr>zG13_w+OqqZ`W3 z>g@a@Jb(K24#3COO?oBqXI_4hz@RFmq9%oOlUPuT$3MC$kI%0}ex zgHxb1)K2uu8IUh%`MG6k5|_Zd-KJe$AxvDf``3datOj(I#MRm^yEuSOM)L8? zj#fk^mq3iT z|AZG`_$8naD~QQBbN|yb2c+qMpoG&-3g$m70$~_3Fg@hcy*U5NN%WKgsA)zTz4VN& z{X1w>95ivOl>OE1Wr=f_V?;Hzthz0Kf0**6%^m+ADb&==PpkDUkV=&yy1KV#nfX%q zpZ5X8o#Fi1R5bg1FyG8oN)$9LP^agsNkm;KsMQPKx_tZy0%VyY({1hT!@-GcCf((0 ztC54D*N!_fF1WZs--U_(b;EzhR{R5mVWvG`RhcUzOA=+&2?I+*X`nuf!$vXRi`Oj-0z}8aOVVNoLHH+ ze-5v%Z^!J4$o*SV@)TqM|A)PUF-S1?d0^?LU0K; zwsmGEK~qbsYw_Xv-b`(vnM#Skb7yP02ey}TKszzJm$v^oUVcL1N1Ir>qLVN{z2ME| zdK}3^yz_@fjuM|eL%5HKzMtq2{M{iDM3p}o(%%_)vFBQ zz?ksRP&UWiONsR0gPAHnfECQ|X`65B?F|H@QV9V-{CI7uW}Sl%kXj)LkdK7W&^JPN zYoxTaNr3b~K&4nyX@^R!oW1c>{+*q@SGy>(D&_yUB2V6_Co{5`7Gzk5XQWfbat&$- zYBxG+u@%0BIFfML?#tFK{;q+67)a}mk?u$eN@DILlJ6TC-z{h4*Qld8V2l}+ElBbD zRlB!Fjkf?PL8beh5ge>bLT)#(^KAjn|LWFOS|KN(`IG=W$oG7A%#yX)Vk;D~DY^II z;o$+-Jpq&oP4}Ou0jWUZ?d@%@&5}T+^TBg$HY;NB@5oPd3;#apVF#bN{YFQb`9Dje ziytE1NR4RX^#DEn>DyoU(GE1S_$ev7$jEt`{JgG79bo)InDofyl?Ctn9s9f}jS+q*bOz1C2f0v#=ru?Oez2#bn_2Uo#s z(0bb@y%H@St_>s&C1qv~YAe?p`gT9Z5CN3ANDxxfovK$ZN{~>qUFj%Nj7DQ>@_$96 z$!u$92Z#&uO>(xiXc^A;=Vhy{=F`Avy`WHht?tQ5(eb;pNj<=`rqporF6HW0u4c4 z5xjgEfxw3v?njjE-yveYv-?ZTDk+PWM$$l$nFkg&`?Jf*_M960o$Qje}!K;PY^=@&j0w($?iNd2N5C&T>PKinA^x7RR?a?mQn4 zVeam4xbr_W<2!DTfjq?7|2RIR5DBfVb6fNaHR6q;p*hJGc$7V^94U=@Rkc4eXLx#auy(UA)$|*^CNg)SMz`;qJ%RpVzIko!oTy zWV9PHC#X~7S64~y@4oHj^$%S{m>W6OGIqqDajd%UGc< z3A@ilc*VvRBh3`qWhL$h<5yf_>qv-_k%2ogc`Adkvjz)oSlpuoksWzF(l~b>SllQN z=q}Gw^#j}{?)N~XJ>{IyG;)c)YBctmIWhgYpRSlH!}RkEpRZ`)QD8(pcs(xEu*%}Kx~Ruamx>b^D>hIkNBn7}y32R1y=FzzVvKuC%YHwl6> zLiu+_{CWY@m&SwI-x62~s4fuVMax})vi9ByEatR15}EV2SAmLJ@l09c@J*_E{#-s( z%Ro{FiD*+VJ$^M1<|JF_mMKv2@!nqwDoa4+5!(LMD6PHhP1mg_BySI|}z=EXw#a^OJ_trV$jMv_}wC=wf5&0m*0r3Rgd@O@n z$WsqoC{8$huvSn>uf^%AuX({)QJ+fDsoF!yctpdHk=&ME-SZnznW? zI?6V^*3|xY4B8F312}A!A|V4-sBItQ!wchMzeFD$*(Pz2g7qvoeq>j{uPw!)58Mh4 z#OW=K&=2fOdv)T zHJ$?GlcQR}G^SE#(MUkoQ+XVE$wj2+6i-0Y^g(&Q&!PjCipCm;572?fi>w-I8= zY4{^tkB#J7yWoAE*WU&g@TK0N(<+xyWxFv`55xZ&w#T>;wWV;~)FWD@&8YXm z)k4gOz*!dXITFxmDQrw~phpHqlJsj-c=|fH1utw*s8y%*yS^FH44Ea}iLUiMzA%Gq z{0L&2sJsd^U5mlXm-SnJ;K>|BIV2yx1rT?^2Zd8c&Su z6^5c)1^8kg5D?tE`v+FZFPb0?M|B089bXNf=@0TLh7H7W;( z5Nfa{%=`k{XUI}n0PvHs=j+?7DS=c;xBDyA`5i9=D!Eo0oAuX`^sthWeO0By)E7IfqnigF>HC)IBOM>I75t2X zH;Z6DjAkWPD@JzS^3{-3IK$wr52Kx4i#ltg z@-y9A)9`BqMyF?8OQ(A`2P0dODkkU0O1{$a+;#8_2=_}lwm&|sEH@9pl_m1yI`b@$nCQS^1L`*-cKf;R>7=ya<*5P&^YUL7H zLz;0p&o-F~)`#K;{kqR*O!GCL0*YWP*p`1_ zXBhQw2jn+2p2!p3w>WimVYsIIlBbCM>H|#4FreL?Vp)O1Jty!N{@HLb1xcc_wlA+o3w~)H4y#Yor*uH0P z4_8Mm(rm3~s5hy+p1d9%%p_Qdo^Pv3{__$8==mI&FkbxrY?|&!zbOqptHo`AldSxr zO}wsu@k9D1C8*|EG>vx9b@NQh#KDZD9wTA7&kC>0QREHmEz}2E?iuDn$s!<|({cSC zYpNEqs!&D45od0VB{@s%MFtO`Z6dkI;!1%0F9&h@XYNhAO^U~G~ z;dyn%Y!f8xh5Ca0QDjW=bUrMm=g8oxJh$u+!vll z$a4X*vgR1M(R=GWg{9c&wKHY9%{P39X1(uaHf|f~X&BJV9{~bSTH(k2UqEf?aJF^4 z^5D|txc`HwTtG5uJxhjm0{cJ{91EabuyQbQ~IV-6FoLs>4YS(Z7R;^>-q;!Qd~ z;tnLBqc##IG@RH46A{y6DM#59M(2SyJWiYaBv2)6q}g4su-7im*C|d^lnN#U7se0L zLj3j!M@FBScW+AaC?y4qjR*gZ}y@? 
z5o+rZ%;eTkA*aYbmMm`OSldW3#zHYi6ohq&K@8KNadxu;k!+-oAzt9PA(@AL;JAt9 z^4N&gpB&1~#r)L8So&I+KrJubXWV!Vmai+^+z^Zl)PkcbG;{VK0iA%ezy&IM*za<$`&l$0q06K{DJB=SxL(7%d5 zJ|DtFq%K86VeWR2*e^l2Pd0wIjkr`X?@UVJU$p!1WFrf|0=2`T<%*RdqvGOr?Gt)h zr1We_)yhgScNXjHz!lU$Rmiz@z~f997kFP^3)ij0a->%`2wekxsc2;P`B7gUGm-1v z4JF@+uaUhMSkq+k-V9{9aRU0k2G2Tx@ZozW3iZdJH@Rw(%u*kB(RW(*P=$z zswDwVUB=w|HtVsUBt*QvV_OHHfj?k7N8p#a!gWXIoRpHvY-d-;ip_Nyn`_B%~n;r~po1 zkDd=lIYEqiv{Fpm-Jg({o>i|lLK%rB2)4N|`^BQv(7b=3c}Foxrvk2g6c_ThaXFB# zel+6fd!~>rngWATa8iQSHSM)i`f!=Fu0>@=yiX{*)uiBesk|S}E>hu+@0wM>@x*jo z`It0&>|!o<$2NNwbv!rk>MGAF|zy zbpE^?s@ZemV4l#Tv2SK*jw_r2EBu;GE{XcW z?Rg(jZ%*Ys*^0EZYFWAzJj&F=a5h=EoD(acB4;Ijbysm+^lrBCRq{+(4ir zHAz!_)~C@J0vW1JUb~dcY7L4w>R(dv(&92~zJS0F@1t^MRsydMc3QnQ+?=gF*?J4T z7z6!U55CMu^y@4*Z)NUW)<2vK3}M@h)Dg&0lh3GDZc5{07lc&Cs#b3M%JCCbo}^H; zU6x{E@~#gA})XF zF$mdEK9su;>6-%fO1?UrLK9f*iZds$hspt4~+(vJDYXdC-DZz z0Iu+uAO2>R~ef`8sF1uuv{;gr*y}e?{}KosvP*U`BwQw zK$HON2vvG8QD@#!Bx{0LLm2j2ul&jLpNhkm)h|H@=dj8N>WNVc9;t^^ZcHo2+wa!j zX;skUR`8=TL?kF{#GV(kl&>uEBrxZxMPpCqiU6idqn5bwS{{Y-^L4|7P%sHPTY5u0 z^A3U%eVGJlX-=AdpzMRn2he-9tYY|1%y`Cg$Z@Ij8xjJ^mvMRa7`aRVe#h-xf?{*t zQig?`*(;$2C`ir}Y)yG=O0Der3H~JYuX0FJ9B1W!tT%T}{7l?^g(~|!FIk1RXGI|_ z9b!L1bT6j+dnDW>bujzeh9Y^3j}w-$dK$3BRE#u)Pm6CIsT2+lHju64mHHD8VSxIW z>lcL0sS)T@Q{9k+#`wHec(E&bRD=&22(=){ay}4g0^+3>#irDFM z!@p7}lc-co>8^UUk$97nw$yFMJXf*A4umMX`rEatY2L@zb4e&46Gvn*F)Z%E zYu}a>eJbjcHRHd%W|~0%Qv^l)QLhvQEAhyh{PKBBCDtF>Km{@xhsthqNyufD)EI+; zX|V`a#XX!NuW;ElDsCq;Sxw_e$W@%nYfy+7a;tRi1_AbUt~Bw$3R=*_*!KYjZmKhsT?UX-DXPa+e>a zt;J-&bW%mLJo?R^-5!53tRmCpd7KzwMKGj7^F7Lv9ePi`$iG>l#}P3-j_7#2ec}K3 z!Q)p#oJaZxN1K@z>py6R|39C~l=K7Vp|Jmu;|ZyH;=NW`AH~MsX+rLv0B+T&AG!~& zP(D7w;d*cX!R84mX%310a1EXg$X##X0)ZUXx`$09VG-dR`)cueXg)WQ_e8Rh$_-y4 zrlwS@1~J{)l$cptfvVwy0@8a4hBNIQ?ZbJJv0M9I&m!(HC4>}FyB)tijRMG%ek_yV zR4cDa@tca`k0B3J_Gt&0+PsU+qso&*MsP>~sIx+)@cX6q+Ir(O&d<#@A4zlq*O}Bn zlaUe+)z5R4uP@)emD@X+aPx?)7ZvWz%Hw=im*w{t&nfi-9rf>tNGr&XivCU(KdZW6g4yMVe-|aE-8r|T)pSx} zQQ??v8N`+JeyHyjg-&W`2z{|e_RH%e>kz>dLxxx01Q@hgym&JsLsrS%9Wx27BqWji zE?MG&)LNA=98me)%w>{B?|->ycdv0!Oo(KS6B&OJZR zWI?00>CW);JTCEN)850EN5m&R2#_NCXCV0qm8i^QVo84D%Xmci5HW>>U#-3~hu0e> zI=*R>?VfLMFKoh^ud()XBK-&5iyeNa_xblQ+@Q*R07(j`y6LE;WktK4{bCLb*ArC! 
zt&)-&Dls%CCq1WeUdpRz+wE{!*tl!BML`!T2I*2pgCo%LFVg=&XInNt-0tO zhxHzox{J48cwJrJ_N$i0wdv{1h-M(qlj8F@hiBi_#)Tv|^G8d@rVlRS3q?!Bj`d&o zTT|!UGNz_tr;-NP4;6#lu}YLCd71eNv(c4~}MWafvI|39ms; zo(gIbFWRoOxik8dTqL2HgloEx0MDGcV}6RR0b^3`GWa~9{!^oRNgqq4Ifa>A5m;IJ zA26*@@T8-T^Uf>ka;v(c?I;1S-lTm34=j81^+m4a4m$I%s&fe}vXx%xONXYHvj}); zKwzS<5vxww3uC!I8e1~dCro2v6RTp%c_3X}KuO~3=#dO}`KiFjE_osO7Vm|xckW3w zg|x1r4?G9w)M!NBhy3#92`l0&3zOxYHSr3H&*{nA3)Q=PLVC$WI{?!YhAm3nd~?Y2 zy@S%?*jSP6Tg&@U1U@DGz_a(`2{kyGTKc(|GhMNf?ef|I>JR%EL>i_H%q?-N5#i4#i>)ix4R zv*k--XGeT?U}b+eLdM$=vUOk-&+D}J5MJBwy?kSF6}PZ8?Sn=YNC_s8_qSMZ4HNl% zhc}+5>1q9gLSc*8$Mi=I%GOYPMI9FTKtcv8exyi)usua+j&Yi9z6@W7f!VEtm)fsi z)oTTtKM&k9S>dpn}l+?Ts{*nCz5Ta~LJ z87+P#d1e+_HU($L(vb6||x*mio1c*Nj$yH=|BG7`Ls142o;K znOr1+jH*Z9IHw1YqeP4uWoxzWU8^LP4OvQ<^H(eV5E`J3|=>bla8>#vTuv z$Wk_;O?NbNlY0W0o|gjQ^Zg5EqTXI8Mk~^h z{JIAnDu)#!NGM;ViS?=0Rxpblap!fJdaiOYvsSac?6|oHdE-q?xo?=giZR6}w^hvO z=OoS5Y2BUcVASVHVe$&8!TCpb z-lOEGwuFBp9a?8xbJN*)Q%A7}AkiJlQJICD5Ghcg_k0BEO@=ru1xkWy;awlgQw4Jd zhqo>3b7iu?5_x6*9v+{2%_DK_!8n0fubW2_=m%2vxl9hD)9i>io9EjVQ+^x+;rqk3-U$Tq9crMC!- z9^4uz0TQ|$o(W%tMMIEoq5DGq+3Lk0pWA;mC1-1xi!)!|+e*XBcNS-(0)&CQVX;jn z%24t~{l%EX%MrR~-0y0Yv#C=ah9BO<;FyM2#FpgYbdSas=br$ekJ7=lhk$@!cW;02 zKxs68mOPmwJ^Zz09}oJ8J1jM|p5@(Q8SB7s1;*yX_4$y@cs-@AQ|XPjzv>JBC70aK7pBY*281rT@Ggc6nkj5Qc9nTn9GT)U z!sn|f_kaOJTh5)p8mK69+}=6^Y~|&6&jYDw;J8CI@zK$X32ST!GeR0yd*4Q34euHdWwYY%d0@nHE(o>BLI&P%YOAQIL3bfyfiQ`WXpfri7Xih zkO?5#hx1_wJ+Pw}T;VkYEey9BhkP6X^&Yof(EYkjxZW1y2><)c=KD%qK0W7Oexa#y zJ`wHG+q_0{9@xVVjnNaZLB>>$Q zAD9Y~x};wA67&dc&15%s%MHh6GQvH%NqxjRWpQX!{o8}G-c1kpqIGs_)KwqNq;q5`E|ciB8-gY($QJ`4QKJnbJ9u!V%sYZo#iKBN%YmxDGku^~^D?zfbKF8| zpfbm?%`?U)X$hd*P=wH$Z4lz4)6AZGMq2T;LD}w^s6G|7fiRYJ6*Qw753H2mc!1l z)U{3<_*IjgEkr#^0wWFIO;tbilX$|D@vk+E?5TB!YeFsKV_{Z{)MjWvLk-P zaC&w5bu@LK!##pHBYg|-QbVp76Y&rG>%=N%SX?xR)p^;hoWAk!1jRQ>iy*6)O)!uh3GseFr2=F-0u{UEfO8ZUT{>5 zZBlb-yPiOx)^Mgc$@T3?cV#Njen6jK*5hGb(%k}r%WMpWB`6Lbyw=9^Mqq6(nVq<< zCf;A{^Z+2LB{HWoVE0tZwR;EWh)|@$nx& zaps6QQ0PzkP>&SzBLx2DO^jJIyum>1ii0I-*GBn4vwREpZ1X#bs&IGsh_O0g~Xtad0@uZunFJVqRL zj%VCGP^+eUVR0X5!PC&)#NQKxll{Hwq({`{Gn~BQ53S?e`4O8Qp^ftITnPB&)IXn6 zR=*g+JXY>$*D%WU2DK%TGr;{-3jp3=wQG2H85HjeFL}v$+RIR}1tck+uEnNOt$DQ- zecUO!=!%w>T68_IOlaHs4&+#`jDch`Lqb9I@aXPZ54n>x%^+a@bArK}!?;FpLew>u%n zA`1#owZp78Wt~u_L6Phg^=w&PG5kz7LeFn&3&N7AboV`2Ka7m}76GSNZsjk=9k8k= zB9^aKl+2pL8Vf}tZPm<_0$pDw@oNNw>_dNwsHeqy;`i^{zY-)|$`l4wgtbJ@z=OaG8_xqo{-_ROsI(!EHRDTU(@I*AL^P9G(ub zAJNkbYX-B!^r((5YpDzv-LY3zA`umLCyKc0;`iso*(VVC@{}ZS1BnD*jtCBrH0!lf z&$JE?0UNgYJTWsSK0-d|Xn7jj40(t5pVwv?$u6G+BC zk{K)}wUWH+bWh6;;w+oYUGC)dU{td1A&8tUZY$@u-H=>a_%s@o zH~jK9DDy)KB0jw^S3av8EDm%cL=I6Kjw&J!DKM0ngN2rCgVWPL+4$L_8vH`tD)s{3H@(yva5dL+_KBU)JUwT3aN zaashHIb)LkWP@4{F?q@RI@O;}>`xMRAnQuX7BYM-8K}2}PNy#7X>m*m2R`4MwG|0N zH(LXwtIW>(pE;d&GdW`|h`e4lzE9a*p(Lg!TE-rUWZ4B-IMun|k)uHhb^r+-CC&Hw zW`4ixaD>gn{(vXdFV>N6QL#{sTZ=tCuW7M7WmIDJKrIvl~l<+DF^mwYCusBELmR1}Kjzx-`rZ<0de#0D{g1@<##EQv3m23Zu;Yny4 zb_b(s+)Sq5{y#_m65(>hKjvIsZR=V8$B)C{A2CdL2JD#E|Dl-v|NN;~GQWa7CX6m| z;{Yh~uYYzpk9g%QTBrLHoJlM3tMq~AYinz|7$fFyxa+s(8WAXBi1^pW$qCD^8!q!@ zy7Fdx)_Du?PEc`h%pIT+2%zOZe{LA&MEzTjnkfa8cZ|CFhW_V*QpEoc$Qi5YK;Z)A zsv0E#8yK7!CNws^nzWYRc)0P`#y|Hs-CxZcyYrHb46;2OQ!|_MfVnF-FpS_-fNM(N z30+8G7G}{ZGxWRGQk2Dg2w!j& zc-45ZfP^Mjr;~O9oe2X2CZv=WMtt$&xiDfLx)2l+lqtkWR>0|ktGL*~9^VPQt7fZ~ zohWtp>(t)8Cbyd&=Zg^63I!ddZ(nBU)iO(`H#XM6l9R=vVyf)Cm~(t=2&2noMRN^k z3;6i#*R!%VmDQq(QhoZCQPk>CmIy{b}%yS|_sAEjeG?s1k%-mmF^|kCYaJN>Lmhi@+q95z(*I=0nbGrh| zZ7=vR$E$PX6%TJ!b;H$f8+aJa`~1h+jSgJv>PFmtP!3@(9{dSW^bwYBItDY&RrXqAv}^{xz8(39$N6;^X0u1KJ~zW}3hiTFZEbG&L8 
ziP6wt=U*PPV<#eE2EGNa(>9)L7k{p+a6xYTV3F0WjO^pD19$v}opDX2 zFEW5*H(hWB-&}1bcHCYkZMtZA#}IKKhWc=Cxh6i*&Tf2m5`SXEb zFwSvHO~NPIJ|hMSGT^=OfylHG;mF4E3Vb0Rb?$}sG~|<{hVxsGMqIK}iH0_fJgAY+ zV{rLY$jkG7Zg!L2kC}g-8*In;2U89^f=k4!=g~15%7v_;JX&Vk?HHX^xj5Nf*UB!;*i^dL@V>Sxm;j&D8 zD!WIgbX@6@o{iHIAoUZfvpn801sP*jqhT5v{^pEv-C;D6?Tj>5!vC3tF@>;{;kIm; z*Ob?5NIKL-^fb5}r=zUz3lJ$!)uvg3enmbR45kLg>2*a!(jSfINE9`{cZV$KzDd1upDy}-WYm^VNaY=`NX*ct*dKr;@?M0S@#}?daq>`Lk!oGHWpMyTJT>)U z>Hf5PfbLd_;mDC2X&7zu%z-kYFNs#AZ$t;^wZ4HHk`;ogEWJ<4QOw4hhPA+fMUeB( z6`qo$x<~r&Z_N{FlY-ol0);mOpNpByVZg$7(tIB93VX*2woR{+leZboL=nGzduKf$ zU`;^*yh>2uRi-oh!qlr4Owqc8ag}lVO0`98m_~K@D!K-yAi#hMolQd#_lSq+bIsvDQy0Y~Wis z4m%jMdTU3zx8s>_Ghu(LzcbH-G2xuxO|X40-TBRW3KA7uf0(M(PGjII+!3A6%|l_@ z>8+mxCzXGrkYQX-*D+O@U1iC)+IJ`&yJ8II{f^UMc#$T8;IMJgj67sM&p+LHB^8S_ z)>UU7skyd!)X54&3Yk?}ST5DT__@LO(^{b(4h(Y~5x;3PLEiXx0t5M~Z>Dz`7Y{I9 zIN0^CAGP@l-Usi090u(>XGl4#L*_NaV$470LJ^W+r|B+y8dN!N+&2;a+IKZhRhjP7 zgo24kR*1ywRkk%$VmlG9jxQ`Ln0)nLe2M+(-?77lqAXpku#$-S?=cHWD5owC%ue7(c_1LBz!7sFypyXD~7s^Xyd=JoP(Nkf&;a_Ioa zN2)H`PuTNmh`@?)OWaCT?i()v-?sK~K$nLmh>E%m&L*a3Lx&6+r>EO3kq-u{0FTp>jx^5>tCc4rOK(?_ILK z_r0~c>#V!(+54QcerNB!_WhlE|JY$JJ=$>@m_gG~uzVH1dPd91(OVO7UWdX-e{~_8 zF*H#*=o&L%qvWYrmW;b&{3e4?yk;AyWsJsTOKVw zZ0@-5_Idl7rn@(@zZDV^4%Y||gQ-c{MIm-RF@Ubsv~KUq44jl}CbxAC)sU!b*4RiN z)W0iJ8B2-%i*ZM!4bSZ0f?3)W!T>_B>hadTlmaXmJd7 z#A?S!)!C>`hv;kL=VToAH3;VCo_UN1-9?}0hYCjueA=-VFTuNz;mb)wDr}AF+|dVS zdb}ac<2DOSfZ1ZFC1JQnp2MM8+UAN*3%hHYGh#9FE(I!@nLKADO15bY@+xs`X%0l) ze`Zhh!Re&s)e)2E8(%Tp+*XEie|_-FBKcFvG-XI)Bh0*G4+rCNxbSF&EfE>d{WN@= zd!o~JzC*Q^7t(VUx9>{E-tfvl!17#&b4SOtZ?uH93BKvhMkfdaJE6zkPd;R)*cM7< z24AD_cUMi}YYs(&?1FOP=hG%a#=O2%pXdxXqm`B7E83xVdr_I6g7CJmqTn*wU zUN#k}EhOXiub{y7tX{)p#a9T#(daH{YeZL)d78@OQERN}aGvJ(FFc6Q*f7AyyC>q~ z{WT8jp4h1!gM#FLebkX#HnP$6ZZtrl)}Atrpyy?;L|#QqVKI0+U#pZ#(rsbcn_C5| zD{9Pqs9-@&C#0bnk{_Sh9kqG1b>CS>S=qL!Kaq#G%sgD{&Dn7fi`M1SS`wjY;8fCyBN*?7g?{}x z%E82|XVJ8gsV9HRK6tG)Cf`SW#t<;Q&mcG`lThU&Q5-87ty*n=sqP{kDK#&Bt#F92 z9UU_gB7C>KEl?)j+HExf@M8=M{8wo%^?Ubw8Dt%N5^c@>kzEQv)-_#fkuu1S?8Vb2 z7gw@hj5D@B;%|R+0Nr%IyNPcy?htbSzS!C%vF8?9uh!*xSTze*RqB$;y5Z0#v-3PX z!m8;emC&h?@}IlT2yUsA^Cr?~R$PKtNh+;n_Jp~oO!3)+ zuqcfD6+^+!=XvPUjZcOxqoS_4O-GPURi}A@5Nd2$t(HY0Io$~;D?A-f@G6yFJejSi zul%&ey?)j#p9u0UcQiEuDYlovzj_e8>kA*7XYrLr);A6q6c);2`eJyI7V}0>v)2b( zyF1+p`g_hK-kLVnl^2skii=ZZC|mdV)p{QOTq*^(((9RcanCJI-k5YjHQnZPy=|a%b>2l9@#o= zXU4YG<}9jBl;Mi24`xf791|tC3xwL7r*!8vbn@*Ubqm4?*Zz!impbuQD=(w6u{`Pz`)hmWTD~a(1LS#E zHSl!kRKi7d(UjuY4%)Zx;JqciYGx|e3u0qDsILOjMW-`5O%lX=J=Qjc786Ez$i0UX z?t1avCM#T)=}rydN1|V@S%a$N#Po9CW;u9kgUA5u z{bl(_>JS|S-Q6|b6=~$I=tP6B+0(SB57sr zc&64O&0>xoJcNi{=4MfUK~+!zKw7cg^)@65264|T`adPuMiCSY7Bs4a+^`o(n`1l- zeu4qVl_Fqur3-G=nm;6Qa2T$FvwjCl-IjLfhP~v9ZLBH8qbb-BeI1cDm1F;d4~Qs1 zfOHO!?)i&W+;aDgtyyDBE+6RZ)b`Eh@=eH>d7nQ_%=ekx&3dDRETc=bN)?p9kwY!@ zs?#Y0veFQnua*O6HVmm?BC@LxQYj(Y;gUSiGpj>VoPgg+%kfeY1E<(&5NvK0PeGei zrKa%$TZvm>qlCnFpG4WiT+%T>{3c`Iq09V67Yn=zp}~QyLfKg3W53{ str: """ with open(_optimizer_settings_path, "r") as file: data = json.load(file) - logging.info("Json loaded!") + logger.info("Json loaded!") try: - embeddings = config.get_embeddings(data) - - logging.info("Embedding successful!") - knowledge_base = config.get_vectorstore(data,embeddings) - logging.info("DB Connection successful!") - - logging.info("knowledge_base successful!") + logger.info("got embeddings!") + knowledge_base = config.get_vectorstore(data,embeddings) + logger.info("knowledge_base connection successful!") user_question = question - logging.info("start looking for prompts") - for d in data["prompts_config"]: - if 
-            if d["name"]==data["user_settings"]["prompts"]["sys"]:
-
-                rag_prompt=d["prompt"]
-
-        logging.info("rag_prompt:")
-        logging.info(rag_prompt)
-        template = """DOCUMENTS: {context} \n"""+rag_prompt+"""\nQuestion: {question} """
-        logging.info(template)
+        logger.info("start looking for prompts")
+        ctx_prompt=data["client_settings"]["prompts"]["ctx"]
+        sys_prompt=data["client_settings"]["prompts"]["sys"]
+
+        prompt_by_name= {m["name"]: m for m in data["prompt_configs"]}
+        rag_prompt= prompt_by_name.get(sys_prompt)["prompt"]
+
+        logger.info("rag_prompt:")
+        logger.info(rag_prompt)
+        template = rag_prompt+"""\n# DOCUMENTS :\n {context} \n"""+"""\n # Question: {question} """
+        logger.info(template)
+        logger.info(f"user_question: {user_question}")
         prompt = PromptTemplate.from_template(template)
-        logging.info("before retriever")
-        logging.info(data["user_settings"]["vector_search"]["top_k"])
-        retriever = knowledge_base.as_retriever(search_kwargs={"k": data["user_settings"]["vector_search"]["top_k"]})
-        logging.info("after retriever")
-
+        logger.info(data["client_settings"]["vector_search"]["top_k"])
+        retriever = knowledge_base.as_retriever(search_kwargs={"k": data["client_settings"]["vector_search"]["top_k"]})
+
+        docs = knowledge_base.similarity_search(user_question, k=data["client_settings"]["vector_search"]["top_k"])
+
+        for i, d in enumerate(docs, 1):
+            logger.info("----------------------------------------------------------")
+            logger.info(f"DOC index:{i}")
+            logger.info(f"METADATA={d.metadata}")
+            logger.info("CONTENT:\n"+d.page_content)
+        logger.info("END CHUNKS FOUND")
+
-        # Initialize the LLM
         llm = config.get_llm(data)
         chain = (
@@ -70,13 +79,14 @@ def rag_tool_base(question: str) -> str:
             | llm
             | StrOutputParser()
         )
-        logging.info("pre-chain successful!")
+
         answer = chain.invoke(user_question)
-
     except Exception as e:
-        logging.info(e)
-        logging.info("Connection failed!")
+        logger.info(e)
+        logger.info("Connection failed!")
         answer=""
     return f"{answer}"
+
+
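Note on the hunk above: the prompt-list scan becomes a name-keyed dict lookup, retrieved chunks are logged one by one, and the answer is produced by a LangChain Expression Language (LCEL) pipeline. The sketch below shows the same retrieve-then-generate shape in a self-contained form; `fake_retrieve` and `fake_llm` are illustrative stand-ins for the patch's Oracle vector store and `config.get_llm(data)`, not code from this repository.

from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# Same overall template shape as the patch: prompt text, documents, then question.
template = "Answer from the documents only.\n# DOCUMENTS:\n{context}\n# Question: {question}"
prompt = PromptTemplate.from_template(template)

def fake_retrieve(question: str) -> str:
    # Stand-in for knowledge_base.as_retriever(...) / similarity_search(...).
    return "first chunk\nsecond chunk"

# Stand-in for config.get_llm(data); any Runnable mapping a prompt to text works here.
fake_llm = RunnableLambda(lambda p: "answer based on: " + p.to_string()[:40])

chain = (
    {"context": RunnableLambda(fake_retrieve), "question": RunnablePassthrough()}
    | prompt
    | fake_llm
    | StrOutputParser()
)

print(chain.invoke("What do the documents say?"))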
diff --git a/src/client/mcp/rag/rag_base_optimizer_config.py b/src/client/mcp/rag/rag_base_optimizer_config.py
index 54e208d8..ae9a6cae 100644
--- a/src/client/mcp/rag/rag_base_optimizer_config.py
+++ b/src/client/mcp/rag/rag_base_optimizer_config.py
@@ -7,11 +7,15 @@
 import os
 from dotenv import load_dotenv
 import logging
-logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(name)s - %(levelname)s - %(message)s"
+)
 
 
-logging.info("Successfully imported libraries and modules")
+logger.info("Successfully imported libraries and modules")
 
 from optimizer_utils import config
 
@@ -32,7 +36,7 @@ def similarity_search(question: str, max_results: int = 5) -> List[str]:
         List of information related to the question
     """
-    logging.info(f"Results provided for question: {question} with top {max_results}")
+    logger.info(f"Results provided for question: {question} with top {max_results}")
     chunks=["first chunk", "second chunk"]
     return chunks
 
@@ -41,13 +45,13 @@ def similarity_search(question: str, max_results: int = 5) -> List[str]:
     # Initialize and run the server
     # Load JSON file
     file_path = os.path.join(os.getcwd(), "optimizer_settings.json")
-    logging.info(file_path)
+    logger.info(file_path)
     rag.set_optimizer_settings_path(file_path)
 
     if len(sys.argv) > 1:
         question = sys.argv[1]
         print(question)
-        logging.info(f"Question: {sys.argv[1]}")
-        logging.info(f"\n\nAnswer: {rag.rag_tool_base(question)}")
+        logger.info(f"Question: {sys.argv[1]}")
+        logger.info(f"\n\nAnswer: {rag.rag_tool_base(question)}")
     else:
-        logging.info("No question provided.")
\ No newline at end of file
+        logger.info("No question provided.")
\ No newline at end of file
diff --git a/src/client/mcp/rag/rag_base_optimizer_config_mcp.py b/src/client/mcp/rag/rag_base_optimizer_config_mcp.py
index 48239b04..c16d11f7 100644
--- a/src/client/mcp/rag/rag_base_optimizer_config_mcp.py
+++ b/src/client/mcp/rag/rag_base_optimizer_config_mcp.py
@@ -13,7 +13,12 @@
 from langchain_core.output_parsers import StrOutputParser
 import json
 import logging
-logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(name)s - %(levelname)s - %(message)s"
+)
 
 from optimizer_utils import rag
 
@@ -24,8 +29,8 @@
 data = {}
 
 # Initialize FastMCP server
-#mcp = FastMCP("rag", port=9001) #Remote client
-mcp = FastMCP("rag") #Local
+mcp = FastMCP("rag", port=9090) #Remote client
+#mcp = FastMCP("rag") #Local
 
 
 @mcp.tool()
@@ -65,5 +70,5 @@ def rag_tool(question: str) -> str:
 
     # Set optimizer_settings.json file ABSOLUTE path
     rag.set_optimizer_settings_path("optimizer_settings.json")
-    mcp.run(transport='stdio')
-    #mcp.run(transport='sse')
\ No newline at end of file
+    #mcp.run(transport='stdio')
+    mcp.run(transport='sse')
\ No newline at end of file
diff --git a/src/client/spring_ai/templates/obaas.yaml b/src/client/spring_ai/templates/obaas.yaml
index 5ad36615..ae25cae4 100644
--- a/src/client/spring_ai/templates/obaas.yaml
+++ b/src/client/spring_ai/templates/obaas.yaml
@@ -14,7 +14,7 @@ spring:
         initialize-schema: True
         index-type: {vector_search[index_type]}
     openai:
-      base-url: \"{ll_model[url]}\"
+      base-url: \"{ll_model[api_base]}\"
       api-key: \"{ll_model[api_key]}\"
       chat:
         options:
diff --git a/src/client/spring_ai/templates/start.sh b/src/client/spring_ai/templates/start.sh
index 30341432..33ab7339 100644
--- a/src/client/spring_ai/templates/start.sh
+++ b/src/client/spring_ai/templates/start.sh
@@ -6,14 +6,14 @@ if [[ "{provider}" == "ollama" ]]; then
   export OPENAI_CHAT_MODEL=""
   unset OPENAI_EMBEDDING_MODEL
   unset OPENAI_URL
-  export OLLAMA_BASE_URL="{ll_model[url]}"
+  export OLLAMA_BASE_URL="{ll_model[api_base]}"
   export OLLAMA_CHAT_MODEL="{ll_model[model]}"
   export OLLAMA_EMBEDDING_MODEL="{vector_search[model]}"
 else
   PREFIX="OP"; UNSET_PREFIX="OL"
   export OPENAI_CHAT_MODEL="{ll_model[model]}"
   export OPENAI_EMBEDDING_MODEL="{vector_search[model]}"
-  export OPENAI_URL="{ll_model[url]}"
+  export OPENAI_URL="{ll_model[api_base]}"
   export OLLAMA_CHAT_MODEL=""
   unset OLLAMA_EMBEDDING_MODEL
 fi
diff --git a/src/client/utils/api_call.py b/src/client/utils/api_call.py
index c6a995b9..9255b549 100644
--- a/src/client/utils/api_call.py
+++ b/src/client/utils/api_call.py
@@ -11,7 +11,7 @@
 import streamlit as st
 from streamlit import session_state as state
 
-import common.logging_config as logging_config
+from common import logging_config
 
 logger = logging_config.logging.getLogger("client.utils.api_call")
 
@@ -37,7 +37,7 @@ def sanitize_sensitive_data(data):
             else sanitize_sensitive_data(v)
             for k, v in data.items()
         }
-    elif isinstance(data, list):
+    if isinstance(data, list):
         return [sanitize_sensitive_data(i) for i in data]
     return data
 
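The rag_base_optimizer_config_mcp.py change above flips the FastMCP default from a local stdio transport to SSE on port 9090, so remote clients can attach over HTTP. A minimal runnable sketch of that server shape, using the same FastMCP calls the patch itself uses; the `echo` tool is a placeholder invented for illustration:

from mcp.server.fastmcp import FastMCP

# Mirrors the patched default: remote client over SSE on port 9090.
mcp = FastMCP("rag", port=9090)

@mcp.tool()
def echo(text: str) -> str:
    """Placeholder tool so the server exposes something callable."""
    return text

if __name__ == "__main__":
    mcp.run(transport="sse")  # clients connect via SSE instead of stdio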
logging_config logger = logging_config.logging.getLogger("client.utils.client") diff --git a/src/client/utils/st_common.py b/src/client/utils/st_common.py index e17fbb68..218e313d 100644 --- a/src/client/utils/st_common.py +++ b/src/client/utils/st_common.py @@ -11,11 +11,9 @@ import streamlit as st from streamlit import session_state as state -import client.utils.api_call as api_call - -import common.help_text as help_text -import common.logging_config as logging_config -from common.schema import PromptPromptType, PromptNameType, SelectAISettings, ClientIdType +from client.utils import api_call +from common import logging_config, help_text +from common.schema import PromptPromptType, PromptNameType, SelectAISettings # Import the MCP initialization function try: @@ -50,7 +48,7 @@ def enabled_models_lookup(model_type: str) -> dict[str, dict[str, Any]]: """Create a lookup of enabled `type` models""" all_models = state_configs_lookup("model_configs", "id") enabled_models = { - id: config + f"{config.get('provider')}/{id}": config for id, config in all_models.items() if config.get("type") == model_type and config.get("enabled") is True } @@ -84,7 +82,7 @@ def local_file_payload(uploaded_files: Union[BytesIO, list[BytesIO]]) -> list: def switch_prompt(prompt_type: PromptPromptType, prompt_name: PromptNameType) -> None: """Auto Switch Prompts when not set to Custom""" current_prompt = state.client_settings["prompts"][prompt_type] - if current_prompt != "Custom" and current_prompt != prompt_name: + if current_prompt not in ("Custom", prompt_name): state.client_settings["prompts"][prompt_type] = prompt_name st.info(f"Prompt Engineering - {prompt_name} Prompt has been set.", icon="ℹ️") @@ -144,7 +142,7 @@ def history_sidebar() -> None: key="selected_ll_model_chat_history", on_change=update_client_settings("ll_model"), ) - if button_col.button("Clear", disabled=not chat_history_enable, use_container_width=True): + if button_col.button("Clear", disabled=not chat_history_enable, width="stretch"): # Clean out history try: api_call.patch(endpoint="v1/chat/history") @@ -191,9 +189,11 @@ def ll_sidebar() -> None: # Instead of creating a new event loop, we'll set a flag to indicate # that the MCP engine needs to be reinitialized state.mcp_needs_reinit = selected_model - logger.info(f"MCP engine marked for reinitialization with model: {selected_model}") - except Exception as e: - logger.error(f"Failed to mark MCP engine for reinitialization with model {selected_model}: {e}") + logger.info("MCP engine marked for reinitialization with model: %s", selected_model) + except Exception as ex: + logger.error( + "Failed to mark MCP engine for reinitialization with model %s: %s", selected_model, str(ex) + ) # Temperature temperature = ll_models_enabled[selected_model]["temperature"] @@ -229,8 +229,8 @@ def ll_sidebar() -> None: on_change=update_client_settings("ll_model"), ) - # Top P if not state.client_settings["selectai"]["enabled"]: + # Top P st.sidebar.slider( "Top P (Default: 1.0):", help=help_text.help_dict["top_p"], @@ -242,28 +242,29 @@ def ll_sidebar() -> None: ) # Frequency Penalty - frequency_penalty = ll_models_enabled[selected_model]["frequency_penalty"] - user_frequency_penalty = state.client_settings["ll_model"]["frequency_penalty"] - st.sidebar.slider( - f"Frequency penalty (Default: {frequency_penalty}):", - help=help_text.help_dict["frequency_penalty"], - value=user_frequency_penalty if user_frequency_penalty is not None else frequency_penalty, - min_value=-2.0, - max_value=2.0, - 
key="selected_ll_model_frequency_penalty", - on_change=update_client_settings("ll_model"), - ) + if "xai" not in state.client_settings["ll_model"]["model"]: + frequency_penalty = ll_models_enabled[selected_model]["frequency_penalty"] + user_frequency_penalty = state.client_settings["ll_model"]["frequency_penalty"] + st.sidebar.slider( + f"Frequency penalty (Default: {frequency_penalty}):", + help=help_text.help_dict["frequency_penalty"], + value=user_frequency_penalty if user_frequency_penalty is not None else frequency_penalty, + min_value=-2.0, + max_value=2.0, + key="selected_ll_model_frequency_penalty", + on_change=update_client_settings("ll_model"), + ) - # Presence Penalty - st.sidebar.slider( - "Presence penalty (Default: 0.0):", - help=help_text.help_dict["presence_penalty"], - value=state.client_settings["ll_model"]["presence_penalty"], - min_value=-2.0, - max_value=2.0, - key="selected_ll_model_presence_penalty", - on_change=update_client_settings("ll_model"), - ) + # Presence Penalty + st.sidebar.slider( + "Presence penalty (Default: 0.0):", + help=help_text.help_dict["presence_penalty"], + value=state.client_settings["ll_model"]["presence_penalty"], + min_value=-2.0, + max_value=2.0, + key="selected_ll_model_presence_penalty", + on_change=update_client_settings("ll_model"), + ) ##################################################### diff --git a/src/common/functions.py b/src/common/functions.py index 9798f576..b24515fa 100644 --- a/src/common/functions.py +++ b/src/common/functions.py @@ -10,7 +10,7 @@ import requests -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("common.functions") diff --git a/src/common/schema.py b/src/common/schema.py index 52719181..164dc258 100644 --- a/src/common/schema.py +++ b/src/common/schema.py @@ -2,16 +2,15 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" -# spell-checker:ignore ollama hnsw mult ocid testset selectai explainsql showsql vector_search aioptimizer genai -# spell-checker:ignore deepseek groq huggingface mistralai ocigenai vertexai +# spell-checker:ignore hnsw ocid aioptimizer explainsql genai mult ollama selectai showsql import time -from typing import Optional, Literal, Union, get_args, Any +from typing import Optional, Literal, get_args, Any from pydantic import BaseModel, Field, PrivateAttr, model_validator from langchain_core.messages import ChatMessage import oracledb -import common.help_text as help_text +from common import help_text ##################################################### # Literals @@ -20,27 +19,86 @@ IndexTypes = Literal["HNSW", "IVF"] # Model Providers +# spell-checker:disable ModelProviders = Literal[ - "oci", + "ai21", + "aiml", + "aiohttp_openai", "anthropic", + "azure", "azure_ai", - "azure_openai", + "base_llm", + "base.py", + "baseten", "bedrock", - "bedrock_converse", + "bytez", + "cerebras", + "clarifai", + "cloudflare", + "codestral", "cohere", + "cometapi", + "dashscope", + "databricks", + "datarobot", + "deepgram", + "deepinfra", "deepseek", - "google_anthropic_vertex", - "google_genai", - "google_vertexai", + "elevenlabs", + "empower", + "featherless_ai", + "fireworks_ai", + "friendliai", + "galadriel", + "gemini", + "github", + "github_copilot", + "gradient_ai", "groq", + "hosted_vllm", "huggingface", - "mistralai", + "hyperbolic", + "infinity", + "jina_ai", + "lambda_ai", + "litellm_proxy", + "llamafile", + "lm_studio", + "meta_llama", + "mistral", + "moonshot", + "morph", + "nebius", + "nlp_cloud", + "novita", + "nscale", + "nvidia_nim", + "oci", "ollama", + "oobabooga", "openai", - "openai_compatible", + "openrouter", "perplexity", + "petals", + "pg_vector", + "predibase", + "recraft", + "replicate", + "sagemaker", + "sambanova", + "snowflake", + "together_ai", + "topaz", + "triton", + "v0", + "vercel_ai_gateway,vertex_ai", + "vllm", + "voyage", + "watsonx", "xai", + "xinference", ] +# spell-checker:enable ##################################################### @@ -151,7 +209,9 @@ class LanguageModelParameters(BaseModel): context_length: Optional[int] = Field(default=None, description="The context window for Language Model.") frequency_penalty: Optional[float] = Field(description=help_text.help_dict["frequency_penalty"], default=0.00) - max_completion_tokens: Optional[int] = Field(description=help_text.help_dict["max_completion_tokens"], default=256) + max_completion_tokens: Optional[int] = Field( + description=help_text.help_dict["max_completion_tokens"], default=4096 + ) presence_penalty: Optional[float] = Field(description=help_text.help_dict["presence_penalty"], default=0.00) temperature: Optional[float] = Field(description=help_text.help_dict["temperature"], default=1.00) top_p: Optional[float] = Field(description=help_text.help_dict["top_p"], default=1.00) @@ -168,7 +228,7 @@ class ModelAccess(BaseModel): """Patch'able Model Parameters""" enabled: Optional[bool] = Field(default=False, description="Model is available for use.") - url: Optional[str] = Field(default=None, description="URL to Model API.") + api_base: Optional[str] = Field(default=None, description="Model API Base URL.") api_key: Optional[str] = Field(default=None, description="Model API Key.", json_schema_extra={"sensitive": True}) @@ -190,7 +250,6 @@ class Model(ModelAccess, LanguageModelParameters, EmbeddingModelParameters): ) type: Literal["ll", "embed", "re-rank"] = Field(..., description="Type of Model.") 
provider: str = Field(..., min_length=1, description="Model Provider.", examples=["openai", "anthropic", "ollama"]) - openai_compat: bool = Field(default=True, description="Is the API OpenAI compatible?") @model_validator(mode="after") def check_provider(self): @@ -400,66 +459,18 @@ def recursive_dump_excluding_marked(cls, obj: Any, incl_sensitive: bool, incl_re return output - elif isinstance(obj, list): + if isinstance(obj, list): return [cls.recursive_dump_excluding_marked(item, incl_sensitive, incl_readonly) for item in obj] - elif isinstance(obj, dict): + if isinstance(obj, dict): return {k: cls.recursive_dump_excluding_marked(v, incl_sensitive, incl_readonly) for k, v in obj.items()} - else: - return obj + return obj ##################################################### # Completions ##################################################### -class ChatLogprobs(BaseModel): - """Log probability information for the choice.""" - - content: Optional[dict[str, Union[str, int, dict]]] = Field( - default=None, description="A list of message content tokens with log probability information." - ) - refusal: Optional[dict[str, Union[str, int, dict]]] = Field( - default=None, description="A list of message refusal tokens with log probability information." - ) - - -class ChatChoices(BaseModel): - """A list of chat completion choices.""" - - index: int = Field(description="The index of the choice in the list of choices.") - message: ChatMessage = Field(description="A chat completion message generated by the model.") - finish_reason: Literal["stop", "length", "content_filter", "tool_calls"] = Field( - description=( - "The reason the model stopped generating tokens. " - "This will be stop if the model hit a natural stop point or a provided stop sequence, " - "length if the maximum number of tokens specified in the request was reached, " - "content_filter if content was omitted due to a flag from our content filters, " - "tool_calls if the model called a tool." 
- ) - ) - logprobs: Optional[ChatLogprobs] = Field(default=None, description="Log probability information for the choice.") - - -class ChatUsage(BaseModel): - """Usage statistics for the completion request.""" - - prompt_tokens: int = Field(description="Number of tokens in the prompt.") - completion_tokens: int = Field(description="Number of tokens in the generated completion.") - total_tokens: int = Field(description="Total number of tokens used in the request (prompt + completion).") - - -class ChatResponse(BaseModel): - """Represents a chat completion response returned by model, based on the provided input.""" - - id: str = Field(description="A unique identifier for the chat completion.") - choices: list[ChatChoices] = Field(description="A list of chat completion choices.") - created: int = Field(description="The Unix timestamp (in seconds) of when the chat completion was created.") - model: str = Field(description="The model used for the chat completion.") - object: str = Field(default="chat.completion", description="The model used for the chat completion.") - usage: Optional[ChatUsage] = Field(default=None, description="Usage statistics for the completion request.") - - class ChatRequest(LanguageModelParameters): """ Request Body (inherits LanguageModelParameters) @@ -474,7 +485,7 @@ class ChatRequest(LanguageModelParameters): "json_schema_extra": { "examples": [ { - "model": "gpt-4o-mini", + "model": "openai/gpt-4o-mini", "messages": [{"role": "user", "content": "Hello, how are you?"}], "response_format": {"type": "text"}, "temperature": 1, @@ -530,6 +541,7 @@ class EvaluationReport(Evaluation): DatabaseNameType = Database.__annotations__["name"] VectorStoreTableType = DatabaseVectorStorage.__annotations__["vector_store"] ModelIdType = Model.__annotations__["id"] +ModelProviderType = Model.__annotations__["provider"] ModelTypeType = Model.__annotations__["type"] ModelEnabledType = ModelAccess.__annotations__["enabled"] OCIProfileType = OracleCloudSettings.__annotations__["auth_profile"] diff --git a/src/launch_client.py b/src/launch_client.py index 76630e7f..60af0451 100644 --- a/src/launch_client.py +++ b/src/launch_client.py @@ -17,12 +17,12 @@ from common.schema import ClientIdType from common._version import __version__ -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("launch_client") # Import launch_server if it exists -REMOTE_SERVER = False +LAUNCH_SERVER_EXISTS = True try: from launch_server import start_server, get_api_key @@ -30,7 +30,8 @@ logger.debug("Imported API Server.") except ImportError as ex: logger.debug("API Server not present: %s", ex) - REMOTE_SERVER = True + os.environ.pop("API_SERVER_CONTROL", None) + LAUNCH_SERVER_EXISTS = False ############################################################################# @@ -40,9 +41,11 @@ def init_server_state() -> None: """initialize Streamlit State server""" if "server" not in state: logger.info("Initializing state.server") + api_server_control: bool = os.getenv("API_SERVER_CONTROL") is not None state.server = {"url": os.getenv("API_SERVER_URL", "http://localhost")} state.server["port"] = int(os.getenv("API_SERVER_PORT", "8000")) state.server["key"] = os.getenv("API_SERVER_KEY") + state.server["control"] = api_server_control logger.debug("Server State: %s", state.server) @@ -70,8 +73,8 @@ def main() -> None: layout="wide", initial_sidebar_state="expanded", menu_items={ - "Get Help": "https://oracle-samples.github.io/ai-optimizer/", - "Report a 
bug": "https://github.com/oracle-samples/ai-optimizer/issues/new", + "Get Help": "https://oracle.github.io/ai-optimizer/", + "Report a bug": "https://github.com/oracle/ai-optimizer/issues/new", "About": f"v{__version__}", }, ) @@ -154,7 +157,7 @@ def main() -> None: if __name__ == "__main__": # Start Server if not running init_server_state() - if not REMOTE_SERVER: + if LAUNCH_SERVER_EXISTS: try: logger.debug("Server PID: %i", state.server["pid"]) except KeyError: diff --git a/src/launch_server.py b/src/launch_server.py index a1c55da0..3bd28ba3 100644 --- a/src/launch_server.py +++ b/src/launch_server.py @@ -14,10 +14,6 @@ import os os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True" -os.environ["LITELLM_DISABLE_SPEND_LOGS"] = "True" -os.environ["LITELLM_DISABLE_SPEND_UPDATES"] = "True" -os.environ["LITELLM_DISABLE_END_USER_COST_TRACKING"] = "True" -os.environ["LITELLM_DROP_PARAMS"] = "True" os.environ["GSK_DISABLE_SENTRY"] = "true" os.environ["GSK_DISABLE_ANALYTICS"] = "true" os.environ["USER_AGENT"] = "ai-optimizer" @@ -45,10 +41,10 @@ from fastmcp.server.auth import StaticTokenVerifier # Configuration -import server.bootstrap.configfile as configfile # pylint: disable=ungrouped-imports +from server.bootstrap import configfile # pylint: disable=ungrouped-imports # Logging -import common.logging_config as logging_config +from common import logging_config from common._version import __version__ logger = logging_config.logging.getLogger("launch_server") diff --git a/src/server/agents/tools/oraclevs_retriever.py b/src/server/agents/tools/oraclevs_retriever.py index 0dcd2740..aa130add 100644 --- a/src/server/agents/tools/oraclevs_retriever.py +++ b/src/server/agents/tools/oraclevs_retriever.py @@ -16,7 +16,7 @@ from langchain_community.vectorstores.oraclevs import OracleVS from langgraph.prebuilt import InjectedState -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("server.tools.oraclevs_retriever") diff --git a/src/server/agents/tools/selectai.py b/src/server/agents/tools/selectai.py index e28ef797..1b24a943 100644 --- a/src/server/agents/tools/selectai.py +++ b/src/server/agents/tools/selectai.py @@ -10,8 +10,9 @@ from langchain_core.tools import BaseTool, tool from langchain_core.runnables import RunnableConfig -import common.logging_config as logging_config -from server.api.core.databases import execute_sql +from server.api.utils.databases import execute_sql + +from common import logging_config logger = logging_config.logging.getLogger("server.tools.selectai_executor") diff --git a/src/server/api/core/bootstrap.py b/src/server/api/core/bootstrap.py index e034d6f7..5db865e0 100644 --- a/src/server/api/core/bootstrap.py +++ b/src/server/api/core/bootstrap.py @@ -3,8 +3,9 @@ Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" # spell-checker:ignore genai + from server.bootstrap import databases, models, oci, prompts, settings, mcp -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("api.core.bootstrap") diff --git a/src/server/api/core/databases.py b/src/server/api/core/databases.py index d8fde138..4c7feb98 100644 --- a/src/server/api/core/databases.py +++ b/src/server/api/core/databases.py @@ -5,203 +5,49 @@ # spell-checker:ignore clob genai nclob privs selectai from typing import Optional, Union -import json - -import oracledb - from server.api.core import bootstrap -from common.schema import Database, DatabaseAuth, DatabaseNameType, DatabaseVectorStorage, SelectAIProfileType -import common.logging_config as logging_config +from common.schema import Database, DatabaseNameType +from common import logging_config logger = logging_config.logging.getLogger("api.core.database") -##################################################### -# Exceptions -##################################################### -class DbException(Exception): - """Custom Database Exceptions to be passed to HTTPException""" - - def __init__(self, status_code: int, detail: str): - self.status_code = status_code - self.detail = detail - super().__init__(detail) - - ##################################################### # Functions ##################################################### -def connect(config: Database) -> oracledb.Connection: - """Establish a connection to an Oracle Database""" - logger.info("Connecting to Database: %s", config.dsn) - include_fields = set(DatabaseAuth.model_fields.keys()) - db_config = config.model_dump(include=include_fields) - logger.debug("Database Config: %s", db_config) - # If a wallet password is provided but no wallet location is set - # default the wallet location to the config directory - if db_config.get("wallet_password") and not db_config.get("wallet_location"): - db_config["wallet_location"] = db_config["config_dir"] - # Check if connection settings are configured - if any(not db_config[key] for key in ("user", "password", "dsn")): - raise DbException(status_code=400, detail="missing connection details") - - # Attempt to Connect - try: - logger.debug("Attempting Database Connection...") - conn = oracledb.connect(**db_config) - except oracledb.DatabaseError as ex: - if "ORA-01017" in str(ex): - raise DbException(status_code=401, detail="invalid credentials") from ex - if "DPY-6005" in str(ex): - raise DbException(status_code=503, detail="unable to connect") from ex - else: - raise DbException(status_code=500, detail=str(ex)) from ex - logger.debug("Connected to Databases: %s", config.dsn) - return conn - - -def disconnect(conn: oracledb.Connection) -> None: - """Disconnect from an Oracle Database""" - logger.debug("Disconnecting Databases Connection: %s", conn) - return conn.close() - - -def execute_sql(conn: oracledb.Connection, run_sql: str, binds: dict = None) -> list: - """Execute SQL against Oracle Database""" - logger.debug("SQL: %s with binds %s", run_sql, binds) - try: - # Use context manager to ensure the cursor is closed properly - with conn.cursor() as cursor: - rows = None - cursor.callproc("dbms_output.enable") - status_var = cursor.var(int) - text_var = cursor.var(str) - cursor.execute(run_sql, binds) - if cursor.description: # Check if the query returns rows - rows = cursor.fetchall() - lob_columns = [ - idx - for idx, fetch_info in enumerate(cursor.description) - if fetch_info.type_code in (oracledb.DB_TYPE_CLOB, 
oracledb.DB_TYPE_BLOB, oracledb.DB_TYPE_NCLOB)
-            ]
-            if lob_columns:
-                # Convert rows to list of dictionaries with LOB handling
-                rows = [
-                    {
-                        cursor.description[idx].name: (value.read() if idx in lob_columns else value)
-                        for idx, value in enumerate(row)
-                    }
-                    for row in rows
-                ]
-        else:
-            cursor.callproc("dbms_output.get_line", (text_var, status_var))
-            if status_var.getvalue() == 0:
-                logger.info("Returning DBMS_OUTPUT.")
-                rows = text_var.getvalue()
-        return rows
-    except oracledb.DatabaseError as ex:
-        if ex.args:
-            error_obj = ex.args[0]
-            if hasattr(error_obj, "code") and error_obj.code == 955:
-                logger.info("Table exists")
-            if hasattr(error_obj, "code") and error_obj.code == 942:
-                logger.info("Table does not exist")
-            else:
-                logger.exception("Database error: %s", ex)
-                logger.info("Failed SQL: %s", run_sql)
-                raise
-        else:
-            logger.exception("Database error: %s", ex)
-            raise
-
-    except oracledb.InterfaceError as ex:
-        logger.exception("Interface error: %s", ex)
-        raise
-
+def get_database(name: Optional[DatabaseNameType] = None) -> Union[list[Database], None]:
+    """
+    Return all Database objects if `name` is not provided,
+    or the single Database if `name` is provided.
+    If a `name` is provided and not found, raise exception
+    """
+    database_objects = bootstrap.DATABASE_OBJECTS
 
-def get_vs(conn: oracledb.Connection) -> DatabaseVectorStorage:
-    """Retrieve Vector Storage Tables"""
-    logger.info("Looking for Vector Storage Tables")
-    vector_stores = []
-    sql = """SELECT ut.table_name,
-                REPLACE(utc.comments, 'GENAI: ', '') AS comments
-                FROM all_tab_comments utc, all_tables ut
-               WHERE utc.table_name = ut.table_name
-                 AND utc.comments LIKE 'GENAI:%'"""
-    results = execute_sql(conn, sql)
-    for table_name, comments in results:
-        comments_dict = json.loads(comments)
-        vector_stores.append(DatabaseVectorStorage(vector_store=table_name, **comments_dict))
-    logger.debug("Found Vector Stores: %s", vector_stores)
+    logger.debug("%i databases are defined", len(database_objects))
+    database_filtered = [db for db in database_objects if (name is None or db.name == name)]
+    logger.debug("%i databases after filtering", len(database_filtered))
 
-    return vector_stores
+    if name and not database_filtered:
+        raise ValueError(f"{name} not found")
 
+    return database_filtered
 
-def selectai_enabled(conn: oracledb.Connection) -> bool:
-    """Determine if SelectAI can be used"""
-    logger.debug("Checking %s for SelectAI", conn)
-    is_enabled = False
-    sql = """
-    SELECT COUNT(*)
-    FROM ALL_TAB_PRIVS
-    WHERE TYPE = 'PACKAGE'
-    AND PRIVILEGE = 'EXECUTE'
-    AND GRANTEE = USER
-    AND TABLE_NAME IN ('DBMS_CLOUD','DBMS_CLOUD_AI','DBMS_CLOUD_PIPELINE')
-    """
-    result = execute_sql(conn, sql)
-    if result[0][0] == 3:
-        is_enabled = True
-    logger.debug("SelectAI enabled (results: %s): %s", result[0][0], is_enabled)
+def create_database(database: Database) -> Database:
+    """Create a new Database definition"""
+    database_objects = bootstrap.DATABASE_OBJECTS
 
-    return is_enabled
+    if any(db.name == database.name for db in database_objects):
+        raise ValueError(f"{database.name} already exists")
 
-def get_selectai_profiles(conn: oracledb.Connection) -> SelectAIProfileType:
-    """Retrieve SelectAI Profiles"""
-    logger.info("Looking for SelectAI Profiles")
-    selectai_profiles = []
-    sql = """
-        SELECT profile_name
-        FROM USER_CLOUD_AI_PROFILES
-    """
-    results = execute_sql(conn, sql)
-    if results:
-        selectai_profiles = [row[0] for row in results]
-    logger.debug("Found SelectAI Profiles: %s", selectai_profiles)
+    if any(not getattr(database, key) for key in ("user", "password", "dsn")):
+        raise 
ValueError("'user', 'password', and 'dsn' are required") - return selectai_profiles + database_objects.append(database) + return get_database(name=database.name) -def get_databases( - name: Optional[DatabaseNameType] = None, validate: bool = True -) -> Union[list[Database], Database, None]: - """ - Return all Database objects if `name` is not provided, - or the single Database if `name` is provided and successfully connected. - If a `name` is provided and not found, raise exception - """ +def delete_database(name: DatabaseNameType) -> None: + """Remove database from database objects""" database_objects = bootstrap.DATABASE_OBJECTS - - for db in database_objects: - if name and db.name != name: - continue - if validate: - try: - db_conn = connect(db) - db.vector_stores = get_vs(db_conn) - db.selectai = selectai_enabled(db_conn) - if db.selectai: - db.selectai_profiles = get_selectai_profiles(db_conn) - except DbException as ex: - logger.debug("Skipping Database %s - exception: %s", db.name, str(ex)) - db.connected = False - if name: - return db # Return the matched, connected DB immediately - - if name: - # If we got here with a `name` then we didn't find it - raise ValueError(f"{name} not found") - - return database_objects + bootstrap.DATABASE_OBJECTS = [d for d in database_objects if d.name != name] diff --git a/src/server/api/core/models.py b/src/server/api/core/models.py index 5f289c56..a8114224 100644 --- a/src/server/api/core/models.py +++ b/src/server/api/core/models.py @@ -7,9 +7,9 @@ from server.api.core import bootstrap -from common.schema import Model, ModelIdType, ModelTypeType +from common.schema import Model, ModelIdType, ModelProviderType, ModelTypeType from common.functions import is_url_accessible -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("api.core.models") @@ -37,6 +37,7 @@ class UnknownModelError(ValueError): # Functions ##################################################### def get_model( + model_provider: Optional[ModelProviderType] = None, model_id: Optional[ModelIdType] = None, model_type: Optional[ModelTypeType] = None, include_disabled: bool = True, @@ -51,6 +52,7 @@ def get_model( for model in model_objects if (model_id is None or model.id == model_id) and (model_type is None or model.type == model_type) + and (model_provider is None or model.provider == model_provider) and (include_disabled or model.enabled) ] logger.debug("%i models after filtering", len(model_filtered)) @@ -70,24 +72,20 @@ def create_model(model: Model, check_url: bool = True) -> Model: """Create a new Model definition""" model_objects = bootstrap.MODEL_OBJECTS - if any(d.id == model.id for d in model_objects): - raise ExistsModelError(f"Model: {model.id} already exists.") + try: + _ = get_model(model_id=model.id, model_provider=model.provider, model_type=model.type) + raise ExistsModelError(f"Model: {model.provider}/{model.id} already exists.") + except UnknownModelError: + pass - if not model.openai_compat: - openai_compat = next( - (model_config.openai_compat for model_config in model_objects if model_config.provider == model.provider), - False, - ) - model.openai_compat = openai_compat - if check_url and model.url and not is_url_accessible(model.url)[0]: + if check_url and model.api_base and not is_url_accessible(model.api_base)[0]: model.enabled = False model_objects.append(model) + return get_model(model_id=model.id, model_provider=model.provider, model_type=model.type) - return get_model(model_id=model.id, 
model_type=model.type) - -def delete_model(model_id: ModelIdType) -> None: +def delete_model(model_provider: ModelProviderType, model_id: ModelIdType) -> None: """Remove model from model objects""" model_objects = bootstrap.MODEL_OBJECTS - bootstrap.MODEL_OBJECTS = [model for model in model_objects if model.id != model_id] + bootstrap.MODEL_OBJECTS = [m for m in model_objects if (m.id, m.provider) != (model_id, model_provider)] diff --git a/src/server/api/core/oci.py b/src/server/api/core/oci.py index c88def61..86b55542 100644 --- a/src/server/api/core/oci.py +++ b/src/server/api/core/oci.py @@ -8,23 +8,11 @@ from server.api.core import bootstrap, settings from common.schema import OracleCloudSettings, ClientIdType, OCIProfileType -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("api.core.oci") -##################################################### -# Exceptions -##################################################### -class OciException(Exception): - """Custom OCI Exceptions to be passed to HTTPException""" - - def __init__(self, status_code: int, detail: str): - self.status_code = status_code - self.detail = detail - super().__init__(detail) - - ##################################################### # Functions ##################################################### @@ -43,7 +31,6 @@ def get_oci( raise ValueError("provide either 'client' or 'auth_profile', not both") oci_objects = bootstrap.OCI_OBJECTS - if client is not None: client_settings = settings.get_client_settings(client) derived_auth_profile = ( diff --git a/src/server/api/core/prompts.py b/src/server/api/core/prompts.py index a5df13ce..78409376 100644 --- a/src/server/api/core/prompts.py +++ b/src/server/api/core/prompts.py @@ -8,7 +8,7 @@ from server.api.core import bootstrap from common.schema import PromptCategoryType, PromptNameType, Prompt -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("api.core.prompts") diff --git a/src/server/api/core/settings.py b/src/server/api/core/settings.py index 613ea624..9e60c45c 100644 --- a/src/server/api/core/settings.py +++ b/src/server/api/core/settings.py @@ -9,7 +9,7 @@ from server.api.core import bootstrap from common.schema import Settings, Configuration, ClientIdType -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("api.core.settings") diff --git a/src/server/api/utils/README.md b/src/server/api/utils/README.md index a6de45d4..e22a5c58 100644 --- a/src/server/api/utils/README.md +++ b/src/server/api/utils/README.md @@ -1,3 +1,3 @@ # Utils -Utils relies on core, which establishes the bootstrap objects/settings. Scripts here will reference core and other utils. \ No newline at end of file +Utils relies on core, which establishes the bootstrap objects/settings. Scripts here will reference other utils. \ No newline at end of file diff --git a/src/server/api/utils/chat.py b/src/server/api/utils/chat.py index 4a747aec..39f1b54d 100644 --- a/src/server/api/utils/chat.py +++ b/src/server/api/utils/chat.py @@ -2,26 +2,26 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" -# spell-checker:ignore astream selectai -import time +# spell-checker:ignore astream selectai litellm from typing import Literal, AsyncGenerator +from litellm import completion from langchain_core.messages import HumanMessage from langchain_core.runnables import RunnableConfig -from langgraph.graph.state import CompiledStateGraph - import server.api.core.settings as core_settings import server.api.core.oci as core_oci import server.api.core.prompts as core_prompts -import server.api.utils.models as util_models -import server.api.utils.databases as util_databases -import server.agents.chatbot as chatbot -import server.api.utils.selectai as util_selectai +import server.api.utils.models as utils_models +import server.api.utils.databases as utils_databases +import server.api.utils.selectai as utils_selectai + +from server.agents.chatbot import chatbot_graph -import common.schema as schema -import common.logging_config as logging_config +from server.api.core.models import UnknownModelError + +from common import schema, logging_config logger = logging_config.logging.getLogger("api.utils.chat") @@ -30,6 +30,7 @@ async def completion_generator( client: schema.ClientIdType, request: schema.ChatRequest, call: Literal["completions", "streams"] ) -> AsyncGenerator[str, None]: """Generate a completion from agent, stream the results""" + client_settings = core_settings.get_client_settings(client) model = request.model_dump() logger.debug("Settings: %s", client_settings) @@ -42,96 +43,65 @@ async def completion_generator( oci_config = core_oci.get_oci(client=client) # Setup Client Model - ll_client = util_models.get_client(model, oci_config) - if not ll_client: - error_response = { - "id": "error", - "choices": [ - { - "message": { - "role": "assistant", - "content": "I'm unable to initialise the Language Model. Please refresh the application.", - }, - "index": 0, - "finish_reason": "stop", - } - ], - "created": int(time.time()), - "model": model.get("model", "unknown"), - "object": "chat.completion", - } - yield error_response - return - - # Get Prompts try: - user_sys_prompt = getattr(client_settings.prompts, "sys", "Basic Example") - sys_prompt = core_prompts.get_prompts(category="sys", name=user_sys_prompt) - except AttributeError as ex: - # schema.Settings not on server-side - logger.error("A settings exception occurred: %s", ex) - raise - - db_conn = None - # Setup selectai - if client_settings.selectai.enabled: - db_conn = util_databases.get_client_db(client).connection - util_selectai.set_profile(db_conn, client_settings.selectai.profile, "temperature", model["temperature"]) - util_selectai.set_profile( - db_conn, client_settings.selectai.profile, "max_tokens", model["max_completion_tokens"] + ll_config = utils_models.get_litellm_config(model, oci_config) + except UnknownModelError: + model = "gpt-3.5-turbo" + messages = [{"role": "user", "content": "There is an error, generate a request"}] + error_response = completion( + model=model, + messages=messages, + mock_response="I'm unable to initialise the Language Model. 
Please refresh the application.", ) + yield error_response + return - # Setup vector_search - embed_client, ctx_prompt = None, None - if client_settings.vector_search.enabled: - db_conn = util_databases.get_client_db(client).connection - embed_client = util_models.get_client(client_settings.vector_search.model_dump(), oci_config) - - user_ctx_prompt = getattr(client_settings.prompts, "ctx", "Basic Example") - ctx_prompt = core_prompts.get_prompts(category="ctx", name=user_ctx_prompt) - + # Start to establish our LangGraph Args kwargs = { + "stream_mode": "custom", "input": {"messages": [HumanMessage(content=request.messages[0].content)]}, "config": RunnableConfig( - configurable={ - "thread_id": client, - "ll_client": ll_client, - "embed_client": embed_client, - "db_conn": db_conn, - }, + configurable={"thread_id": client, "ll_config": ll_config}, metadata={ - "model_id": model["model"], "use_history": client_settings.ll_model.chat_history, "vector_search": client_settings.vector_search, "selectai": client_settings.selectai, - "sys_prompt": sys_prompt, - "ctx_prompt": ctx_prompt, }, ), } + + # Get System Prompt + user_sys_prompt = getattr(client_settings.prompts, "sys", "Basic Example") + kwargs["config"]["metadata"]["sys_prompt"] = core_prompts.get_prompts(category="sys", name=user_sys_prompt) + + # Add DB Conn to KWargs when needed + if client_settings.vector_search.enabled or client_settings.selectai.enabled: + db_conn = utils_databases.get_client_database(client, False).connection + kwargs["config"]["configurable"]["db_conn"] = db_conn + + # Setup Vector Search + if client_settings.vector_search.enabled: + kwargs["config"]["configurable"]["embed_client"] = utils_models.get_client_embed( + client_settings.vector_search.model_dump(), oci_config + ) + # Get Context Prompt + user_ctx_prompt = getattr(client_settings.prompts, "ctx", "Basic Example") + kwargs["config"]["metadata"]["ctx_prompt"] = core_prompts.get_prompts(category="ctx", name=user_ctx_prompt) + + if client_settings.selectai.enabled: + utils_selectai.set_profile(db_conn, client_settings.selectai.profile, "temperature", model["temperature"]) + utils_selectai.set_profile( + db_conn, client_settings.selectai.profile, "max_tokens", model["max_completion_tokens"] + ) + logger.debug("Completion Kwargs: %s", kwargs) - agent: CompiledStateGraph = chatbot.chatbot_graph - try: - async for chunk in agent.astream_events(**kwargs, version="v2"): - # The below will produce A LOT of output; uncomment when desperate - # logger.debug("Streamed Chunk: %s", chunk) - if chunk["event"] == "on_chat_model_stream": - if "tools_condition" in str(chunk["metadata"]["langgraph_triggers"]): - continue # Skip Tool Call messages - if "vs_retrieve" in str(chunk["metadata"]["langgraph_node"]): - continue # Skip Fake-Tool Call messages - content = chunk["data"]["chunk"].content - if content != "" and call == "streams": - yield content.encode("utf-8") - last_response = chunk["data"] - if call == "streams": - yield "[stream_finished]" # This will break the Chatbot loop - elif call == "completions": - final_response = last_response["output"]["final_response"] - yield final_response # This will be captured for ChatResponse - except Exception as ex: - logger.error("An invoke exception occurred: %s", ex) - # yield f"I'm sorry; {ex}" - # TODO(gotsysdba) - If a message is returned; - # format and return (this should be done in the agent) - raise + final_response = None + async for output in chatbot_graph.astream(**kwargs): + if "stream" in output: + yield 
output["stream"].encode("utf-8") + if "completion" in output: + final_response = output["completion"] + if call == "streams": + yield "[stream_finished]" # This will break the Chatbot loop + if call == "completions" and final_response is not None: + yield final_response # This will be captured for ChatResponse diff --git a/src/server/api/utils/databases.py b/src/server/api/utils/databases.py index 8b43598e..55f7c649 100644 --- a/src/server/api/utils/databases.py +++ b/src/server/api/utils/databases.py @@ -4,38 +4,245 @@ """ # spell-checker:ignore selectai clob nclob vectorstores oraclevs +from typing import Optional, Union +import json import oracledb from langchain_community.vectorstores import oraclevs as LangchainVS import server.api.core.databases as core_databases import server.api.core.settings as core_settings -import common.schema as schema -import common.logging_config as logging_config +from common.schema import ( + Database, + DatabaseNameType, + VectorStoreTableType, + ClientIdType, + DatabaseAuth, + DatabaseVectorStorage, + SelectAIProfileType, +) +from common import logging_config logger = logging_config.logging.getLogger("api.utils.database") -def test(config: schema.Database) -> None: +##################################################### +# Exceptions +##################################################### +class DbException(Exception): + """Custom Database Exceptions to be passed to HTTPException""" + + def __init__(self, status_code: int, detail: str): + self.status_code = status_code + self.detail = detail + super().__init__(detail) + + +##################################################### +# Protected Functions +##################################################### +def _test(config: Database) -> None: """Test connection and re-establish if no longer open""" + config.connected = False try: config.connection.ping() logger.info("%s database connection is active.", config.name) + config.connected = True except oracledb.DatabaseError: - db_conn = core_databases.connect(config) logger.info("Refreshing %s database connection.", config.name) - config.set_connection(db_conn) - except AttributeError as ex: - raise core_databases.DbException(status_code=400, detail="missing connection details") from ex + _ = connect(config) + except ValueError as ex: + raise DbException(status_code=400, detail=f"Database: {str(ex)}") from ex + except PermissionError as ex: + raise DbException(status_code=401, detail=f"Database: {str(ex)}") from ex + except ConnectionError as ex: + raise DbException(status_code=503, detail=f"Database: {str(ex)}") from ex + except Exception as ex: + raise DbException(status_code=500, detail=str(ex)) from ex + + +def _get_vs(conn: oracledb.Connection) -> DatabaseVectorStorage: + """Retrieve Vector Storage Tables""" + logger.info("Looking for Vector Storage Tables") + vector_stores = [] + sql = """SELECT ut.table_name, + REPLACE(utc.comments, 'GENAI: ', '') AS comments + FROM all_tab_comments utc, all_tables ut + WHERE utc.table_name = ut.table_name + AND utc.comments LIKE 'GENAI:%'""" + results = execute_sql(conn, sql) + for table_name, comments in results: + comments_dict = json.loads(comments) + vector_stores.append(DatabaseVectorStorage(vector_store=table_name, **comments_dict)) + logger.debug("Found Vector Stores: %s", vector_stores) + + return vector_stores + + +def _selectai_enabled(conn: oracledb.Connection) -> bool: + """Determine if SelectAI can be used""" + logger.debug("Checking %s for SelectAI", conn) + is_enabled = False + sql = """ + SELECT COUNT(*) + 
FROM ALL_TAB_PRIVS + WHERE TYPE = 'PACKAGE' + AND PRIVILEGE = 'EXECUTE' + AND GRANTEE = USER + AND TABLE_NAME IN ('DBMS_CLOUD','DBMS_CLOUD_AI','DBMS_CLOUD_PIPELINE') + """ + result = execute_sql(conn, sql) + if result[0][0] == 3: + is_enabled = True + logger.debug("SelectAI enabled (results: %s): %s", result[0][0], is_enabled) + + return is_enabled + + +def _get_selectai_profiles(conn: oracledb.Connection) -> SelectAIProfileType: + """Retrieve SelectAI Profiles""" + logger.info("Looking for SelectAI Profiles") + selectai_profiles = [] + sql = """ + SELECT profile_name + FROM USER_CLOUD_AI_PROFILES + """ + results = execute_sql(conn, sql) + if results: + selectai_profiles = [row[0] for row in results] + logger.debug("Found SelectAI Profiles: %s", selectai_profiles) + + return selectai_profiles + + +##################################################### +# Functions +##################################################### +def connect(config: Database) -> oracledb.Connection: + """Establish a connection to an Oracle Database""" + include_fields = set(DatabaseAuth.model_fields.keys()) + db_authn = config.model_dump(include=include_fields) + if any(not db_authn[key] for key in ("user", "password", "dsn")): + raise ValueError("missing connection details") + logger.info("Connecting to Database: %s", config.dsn) + # If a wallet password is provided but no wallet location is set + # default the wallet location to the config directory + if db_authn.get("wallet_password") and not db_authn.get("wallet_location"): + db_authn["wallet_location"] = db_authn["config_dir"] -def drop_vs(conn: oracledb.Connection, vs: schema.VectorStoreTableType) -> None: + # Attempt to Connect + logger.debug("Database AuthN: %s", db_authn) + try: + logger.debug("Attempting Database Connection...") + conn = oracledb.connect(**db_authn) + except oracledb.DatabaseError as ex: + error = ex.args[0] if ex.args else None + code = getattr(error, "full_code", None) + mapping = { + "ORA-01017": PermissionError, + "DPY-6005": ConnectionError, + "DPY-4000": LookupError, + "DPY-4026": LookupError, + } + if code in mapping: + raise mapping[code](f"- {error.message}") from ex + # If not recognized, re-raise untouched + raise + logger.debug("Connected to Databases: %s", config.dsn) + + return conn + + +def disconnect(conn: oracledb.Connection) -> None: + """Disconnect from an Oracle Database""" + logger.debug("Disconnecting Databases Connection: %s", conn) + return conn.close() + + +def execute_sql(conn: oracledb.Connection, run_sql: str, binds: dict = None) -> list: + """Execute SQL against Oracle Database""" + logger.debug("SQL: %s with binds %s", run_sql, binds) + try: + # Use context manager to ensure the cursor is closed properly + with conn.cursor() as cursor: + rows = None + cursor.callproc("dbms_output.enable") + status_var = cursor.var(int) + text_var = cursor.var(str) + cursor.execute(run_sql, binds) + if cursor.description: # Check if the query produces rows + rows = cursor.fetchall() + lob_columns = [ + idx + for idx, fetch_info in enumerate(cursor.description) + if fetch_info.type_code in (oracledb.DB_TYPE_CLOB, oracledb.DB_TYPE_BLOB, oracledb.DB_TYPE_NCLOB) + ] + if lob_columns: + # Convert rows to list of dictionaries with LOB handling + rows = [ + { + cursor.description[idx].name: (value.read() if idx in lob_columns else value) + for idx, value in enumerate(row) + } + for row in rows + ] + else: + cursor.callproc("dbms_output.get_line", (text_var, status_var)) + if status_var.getvalue() == 0: + logger.info("Returning 
DBMS_OUTPUT.")
+                    rows = text_var.getvalue()
+    except oracledb.DatabaseError as ex:
+        if ex.args:
+            error_obj = ex.args[0]
+            if hasattr(error_obj, "code") and error_obj.code == 955:
+                logger.info("Table exists")
+            elif hasattr(error_obj, "code") and error_obj.code == 942:
+                logger.info("Table does not exist")
+            else:
+                logger.exception("Database error: %s", ex)
+                logger.info("Failed SQL: %s", run_sql)
+                raise
+        else:
+            logger.exception("Database error: %s", ex)
+            raise
+    except oracledb.InterfaceError as ex:
+        logger.exception("Interface error: %s", ex)
+        raise
+
+    return rows
+
+
-def drop_vs(conn: oracledb.Connection, vs: schema.VectorStoreTableType) -> None:
+def drop_vs(conn: oracledb.Connection, vs: VectorStoreTableType) -> None:
     """Drop Vector Storage"""
     logger.info("Dropping Vector Store: %s", vs)
     LangchainVS.drop_table_purge(conn, vs)
 
 
-def get_client_db(client: schema.ClientIdType) -> schema.Database:
+def get_databases(
+    db_name: Optional[DatabaseNameType] = None, validate: bool = False
+) -> Union[list[Database], Database, None]:
+    """Return list of Database Objects"""
+    databases = core_databases.get_database(db_name)
+    if validate:
+        for db in databases:
+            try:
+                db_conn = connect(config=db)
+            except (ValueError, PermissionError, ConnectionError):
+                continue
+            db.vector_stores = _get_vs(db_conn)
+            db.selectai = _selectai_enabled(db_conn)
+            if db.selectai:
+                db.selectai_profiles = _get_selectai_profiles(db_conn)
+            db.connected = True
+            db.set_connection(db_conn)
+    if db_name:
+        return databases[0]
+
+    return databases
+
+
+def get_client_database(client: ClientIdType, validate: bool = False) -> Database:
     """Return a Database Object based on client settings"""
 
     client_settings = core_settings.get_client_settings(client)
@@ -46,9 +253,5 @@
     ):
         db_name = getattr(client_settings.vector_search, "database", "DEFAULT")
 
-    # Return the Database Object
-    db = core_databases.get_databases(db_name)
-    # Ping the Database
-    test(db)
-
-    return db
+    # Return the single Database Object
+    return get_databases(db_name=db_name, validate=validate)
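A short sketch of the resulting call pattern for the utils layer; the client id below is illustrative:

    import server.api.utils.databases as utils_databases

    # Validate connectivity and enrich with vector stores / SelectAI metadata
    default_db = utils_databases.get_databases(db_name="DEFAULT", validate=True)

    # Or resolve the database implied by a client's settings
    client_db = utils_databases.get_client_database("my-client", validate=True)

diff --git a/src/server/api/utils/embed.py b/src/server/api/utils/embed.py
index 09f8315d..ef8abf75 100644
--- a/src/server/api/utils/embed.py
+++ b/src/server/api/utils/embed.py
@@ -15,7 +15,7 @@
 import bs4
 
 # Langchain
-import langchain_community.document_loaders as document_loaders
+from langchain_community import document_loaders
 from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.document_loaders.image import UnstructuredImageLoader
 from langchain_community.vectorstores import oraclevs as LangchainVS
@@ -25,13 +25,9 @@
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_text_splitters import HTMLHeaderTextSplitter, CharacterTextSplitter
 
-import server.api.utils.databases as util_databases
-import server.api.core.databases as core_databases
+import server.api.utils.databases as utils_databases
 
-import common.functions
-import common.schema as schema
-
-import common.logging_config as logging_config
+from common import schema, functions, logging_config
 
 logger = logging_config.logging.getLogger("api.utils.embed")
 
@@ -224,7 +220,7 @@ def load_and_split_url(
     logger.info("Loading %s", url)
     loader = WebBaseLoader(
         web_paths=(f"{url}",),
-        bs_kwargs=dict(parse_only=bs4.SoupStrainer()),
+        bs_kwargs={"parse_only": bs4.SoupStrainer()},
     )
     loaded_doc = loader.load()
 
@@ -301,9 +297,9 @@ def json_to_doc(file: str):
 
     # Creates a TEMP Vector Store Table; which may already exist
     # Establish a dedicated connection to the 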
database - db_conn = core_databases.connect(db_details) + db_conn = utils_databases.connect(db_details) # This is to allow re-using an existing VS; will merge this over later - util_databases.drop_vs(db_conn, vector_store_tmp.vector_store) + utils_databases.drop_vs(db_conn, vector_store_tmp.vector_store) logger.info("Establishing initial vector store") logger.debug("Embed Client: %s", embed_client) vs_tmp = OracleVS( @@ -352,8 +348,8 @@ def json_to_doc(file: str): WHERE NOT EXISTS (SELECT 1 FROM {vector_store.vector_store} tgt WHERE tgt.ID = src.ID) """ logger.info("Merging %s into %s", vector_store_tmp.vector_store, vector_store.vector_store) - core_databases.execute_sql(db_conn, merge_sql) - util_databases.drop_vs(db_conn, vector_store_tmp.vector_store) + utils_databases.execute_sql(db_conn, merge_sql) + utils_databases.drop_vs(db_conn, vector_store_tmp.vector_store) # Build the Index logger.info("Creating index on: %s", vector_store.vector_store) @@ -365,7 +361,7 @@ def json_to_doc(file: str): logger.error("Unable to create vector index: %s", ex) # Comment the VS table - _, store_comment = common.functions.get_vs_table(**vector_store.model_dump(exclude={"database", "vector_store"})) + _, store_comment = functions.get_vs_table(**vector_store.model_dump(exclude={"database", "vector_store"})) comment = f"COMMENT ON TABLE {vector_store.vector_store} IS 'GENAI: {store_comment}'" - core_databases.execute_sql(db_conn, comment) - core_databases.disconnect(db_conn) + utils_databases.execute_sql(db_conn, comment) + utils_databases.disconnect(db_conn) diff --git a/src/server/api/utils/models.py b/src/server/api/utils/models.py index 96bf8143..36bfc586 100644 --- a/src/server/api/utils/models.py +++ b/src/server/api/utils/models.py @@ -4,32 +4,28 @@ """ # spell-checker:ignore ollama pplx huggingface genai giskard litellm ocigenai -from openai import OpenAI +from urllib.parse import urlparse -from langchain_core.language_models.chat_models import BaseChatModel -from langchain.chat_models import init_chat_model -from langchain.embeddings import init_embeddings +from litellm import get_supported_openai_params -from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI +from langchain.embeddings import init_embeddings from langchain_community.embeddings.oci_generative_ai import OCIGenAIEmbeddings +from langchain_core.embeddings.embeddings import Embeddings -from giskard.llm.client.openai import OpenAIClient - -import server.api.utils.oci as util_oci +import server.api.utils.oci as utils_oci import server.api.core.models as core_models from common.functions import is_url_accessible -import common.schema as schema -import common.logging_config as logging_config +from common import logging_config, schema logger = logging_config.logging.getLogger("api.utils.models") -def update_model(model_id: schema.ModelIdType, payload: schema.Model) -> schema.Model: +def update(payload: schema.Model) -> schema.Model: """Update an existing Model definition""" - model_upd = core_models.get_model(model_id=model_id) - if payload.enabled and not is_url_accessible(model_upd.url)[0]: + model_upd = core_models.get_model(model_provider=payload.provider, model_id=payload.id) + if payload.enabled and not is_url_accessible(model_upd.api_base)[0]: model_upd.enabled = False raise core_models.URLUnreachableError("Model: Unable to update. API URL is inaccessible.") @@ -42,15 +38,15 @@ def update_model(model_id: schema.ModelIdType, payload: schema.Model) -> schema. 
return model_upd -def create_genai_models(config: schema.OracleCloudSettings) -> list[schema.Model]: +def create_genai(config: schema.OracleCloudSettings) -> list[schema.Model]: """Create and enable all GenAI models in the configured region""" - region_models = util_oci.get_genai_models(config, regional=True) + region_models = utils_oci.get_genai_models(config, regional=True) if region_models: # Delete previously configured GenAI Models all_models = core_models.get_model() for model in all_models: if model.provider == "oci": - core_models.delete_model(model.id) + core_models.delete_model(model_provider=model.provider, model_id=model.id) genai_models = [] for model in region_models: @@ -67,9 +63,7 @@ def create_genai_models(config: schema.OracleCloudSettings) -> list[schema.Model model_dict["id"] = model["model_name"] model_dict["enabled"] = True - model_dict["url"] = f"https://inference.generativeai.{config.genai_region}.oci.oraclecloud.com" - # if model["vendor"] == "cohere": - model_dict["openai_compat"] = False + model_dict["api_base"] = f"https://inference.generativeai.{config.genai_region}.oci.oraclecloud.com" # Create the Model try: new_model = schema.Model(**model_dict) @@ -80,74 +74,104 @@ def create_genai_models(config: schema.OracleCloudSettings) -> list[schema.Model return genai_models -def get_client(model_config: dict, oci_config: schema.OracleCloudSettings, giskard: bool = False) -> BaseChatModel: - """Retrieve model configuration""" - logger.debug("Model Client: %s; OCI Config: %s; Giskard: %s", model_config, oci_config, giskard) +def _get_full_config(model_config: dict, oci_config: schema.OracleCloudSettings = None) -> dict: + logger.debug("Model Client: %s; OCI Config: %s", model_config, oci_config) + model_provider, model_id = model_config["model"].split("/", 1) + try: defined_model = core_models.get_model( - model_id=model_config["model"], + model_provider=model_provider, + model_id=model_id, include_disabled=False, ).model_dump() - except core_models.UnknownModelError: - return None + except core_models.UnknownModelError as ex: + raise ex + # Merge configurations, skipping None values full_model_config = {**defined_model, **{k: v for k, v in model_config.items() if v is not None}} + provider = full_model_config.pop("provider") + + return full_model_config, provider + + +def get_litellm_config( + model_config: dict, oci_config: schema.OracleCloudSettings = None, giskard: bool = False +) -> dict: + """Establish LiteLLM client""" + full_model_config, provider = _get_full_config(model_config, oci_config) + + # Get supported parameters and initialize config + supported_params = get_supported_openai_params(model=model_config["model"]) + litellm_config = { + k: full_model_config[k] + for k in supported_params + if k in full_model_config and full_model_config[k] is not None + } + if "cohere" in model_config["model"]: + # Ensure we use the OpenAI compatible endpoint + parsed = urlparse(full_model_config.get("api_base")) + scheme = parsed.scheme or "https" + netloc = "api.cohere.ai" + # Always force the path + path = "/compatibility/v1" + full_model_config["api_base"] = f"{scheme}://{netloc}{path}" + if "xai" in model_config["model"]: + litellm_config.pop("presence_penalty", None) + litellm_config.pop("frequency_penalty", None) + + litellm_config.update( + {"model": model_config["model"], "api_base": full_model_config.get("api_base"), "drop_params": True} + ) + + if provider == "oci": + litellm_config.update( + { + "oci_user": oci_config.user, + "oci_fingerprint": 
oci_config.fingerprint, + "oci_tenancy": oci_config.tenancy, + "oci_region": oci_config.genai_region, + "oci_key_file": oci_config.key_file, + "oci_compartment_id": oci_config.genai_compartment_id, + } + ) + + if giskard: + litellm_config.pop("model", None) + litellm_config.pop("temperature", None) + + logger.debug("LiteLLM Config: %s", litellm_config) + + return litellm_config + + +def get_client_embed(model_config: dict, oci_config: schema.OracleCloudSettings) -> Embeddings: + """Retrieve embedding model client""" + full_model_config, provider = _get_full_config(model_config, oci_config) client = None - provider = full_model_config["provider"] - if full_model_config["type"] == "ll" and not giskard: - common_params = { - k: full_model_config.get(k) for k in ["frequency_penalty", "presence_penalty", "top_p", "streaming"] - } - if provider != "oci": + + if provider == "oci": + client = OCIGenAIEmbeddings( + model_id=full_model_config["id"], + client=utils_oci.init_genai_client(oci_config), + compartment_id=oci_config.genai_compartment_id, + ) + else: + if provider == "hosted_vllm": kwargs = { - "model_provider": "openai" if provider == "openai_compatible" else provider, + "provider": "openai", "model": full_model_config["id"], - "base_url": full_model_config["url"], - "temperature": full_model_config["temperature"], - "max_tokens": full_model_config["max_completion_tokens"], - **common_params, + "base_url": full_model_config.get("api_base"), + "check_embedding_ctx_length": False, # To avoid Tiktoken pre-transform on not OpenAI provided server } - # Only add the api_key if it is set - if full_model_config.get("api_key"): - kwargs["api_key"] = full_model_config["api_key"] - - client = init_chat_model(**kwargs) else: - client = ChatOCIGenAI( - model_id=full_model_config["id"], - client=util_oci.init_genai_client(oci_config), - compartment_id=oci_config.genai_compartment_id, - model_kwargs={ - (k if k != "max_completion_tokens" else "max_tokens"): v - for k, v in common_params.items() - if k not in {"streaming"} - }, - ) - - if full_model_config["type"] == "embed" and not giskard: - if provider != "oci": kwargs = { - "provider": "openai" if provider == "openai_compatible" else provider, + "provider": provider, "model": full_model_config["id"], - "base_url": full_model_config["url"], + "base_url": full_model_config.get("api_base"), } - # Only add the api_key if it is set - if full_model_config.get("api_key"): - kwargs["api_key"] = full_model_config["api_key"] - - client = init_embeddings(**kwargs) - else: - client = OCIGenAIEmbeddings( - model_id=full_model_config["id"], - client=util_oci.init_genai_client(oci_config), - compartment_id=oci_config.genai_compartment_id, - ) - if giskard: - logger.debug("Creating Giskard Client") - giskard_key = full_model_config["api_key"] or "giskard" - _client = OpenAI(api_key=giskard_key, base_url=full_model_config["url"]) - client = OpenAIClient(model=full_model_config["id"], client=_client) + if full_model_config.get("api_key"): # only add if set + kwargs["api_key"] = full_model_config["api_key"] + client = init_embeddings(**kwargs) - logger.debug("Configured Client: %s", vars(client)) return client diff --git a/src/server/api/utils/oci.py b/src/server/api/utils/oci.py index 5d042b48..662b550e 100644 --- a/src/server/api/utils/oci.py +++ b/src/server/api/utils/oci.py @@ -10,25 +10,40 @@ import oci -from server.api.core.oci import OciException - from common.schema import OracleCloudSettings -import common.logging_config as logging_config +from common import 
logging_config logger = logging_config.logging.getLogger("api.utils.oci") +##################################################### +# Exceptions +##################################################### +class OciException(Exception): + """Custom OCI Exceptions to be passed to HTTPException""" + + def __init__(self, status_code: int, detail: str): + self.status_code = status_code + self.detail = detail + super().__init__(detail) + + +##################################################### +# Functions +##################################################### def init_client( client_type: Union[ oci.object_storage.ObjectStorageClient, oci.identity.IdentityClient, oci.generative_ai_inference.GenerativeAiInferenceClient, + oci.generative_ai.GenerativeAiClient, ], config: OracleCloudSettings = None, ) -> Union[ oci.object_storage.ObjectStorageClient, oci.identity.IdentityClient, oci.generative_ai_inference.GenerativeAiInferenceClient, + oci.generative_ai.GenerativeAiClient, ]: """Initialize OCI Client with either user or Token""" # connection timeout to 1 seconds and the read timeout to 60 seconds @@ -38,7 +53,7 @@ def init_client( "timeout": (1, 180), } - # OCI GenAI + # OCI GenAI (for model calling) if ( client_type == oci.generative_ai_inference.GenerativeAiInferenceClient and config.genai_compartment_id @@ -66,8 +81,8 @@ def init_client( with open(config_json["security_token_file"], "r", encoding="utf-8") as f: token = f.read() private_key = oci.signer.load_private_key_from_file(config_json["key_file"]) - signer = oci.auth.signers.SecurityTokenSigner(token, private_key) - client = client_type(config={"region": config_json["region"]}, signer=signer, **client_kwargs) + sec_token_signer = oci.auth.signers.SecurityTokenSigner(token, private_key) + client = client_type(config={"region": config_json["region"]}, signer=sec_token_signer, **client_kwargs) else: logger.info("OCI Authentication as Standard") client = client_type(config_json, **client_kwargs) @@ -78,21 +93,21 @@ def init_client( def init_genai_client(config: OracleCloudSettings) -> oci.generative_ai_inference.GenerativeAiInferenceClient: - """Initialise OCI GenAI Client""" + """Initialise OCI GenAI Client; used by models""" client_type = oci.generative_ai_inference.GenerativeAiInferenceClient return init_client(client_type, config) -def get_namespace(config: OracleCloudSettings = None) -> str: +def get_namespace(config: OracleCloudSettings) -> str: """Get the Object Storage Namespace. 
Also used for testing AuthN""" logger.info("Getting Object Storage Namespace") client_type = oci.object_storage.ObjectStorageClient try: client = init_client(client_type, config) - namespace = client.get_namespace().data - logger.info("OCI: Namespace = %s", namespace) + config.namespace = client.get_namespace().data + logger.info("OCI: Namespace = %s", config.namespace) except oci.exceptions.InvalidConfig as ex: - raise OciException(status_code=400, detail=f"Invalid Config") from ex + raise OciException(status_code=400, detail="Invalid Config") from ex except oci.exceptions.ServiceError as ex: raise OciException(status_code=401, detail="AuthN Error") from ex except FileNotFoundError as ex: @@ -104,7 +119,7 @@ def get_namespace(config: OracleCloudSettings = None) -> str: except Exception as ex: raise OciException(status_code=500, detail=str(ex)) from ex - return namespace + return config.namespace def get_regions(config: OracleCloudSettings = None) -> list[dict]: @@ -141,9 +156,10 @@ def get_genai_models(config: OracleCloudSettings, regional: bool = False) -> lis regions = get_regions(config) for region in regions: - region_config = dict(config) - region_config["region"] = region["region_name"] - client = oci.generative_ai.GenerativeAiClient(region_config) + region_config = config + region_config.region = region["region_name"] + client_type = oci.generative_ai.GenerativeAiClient + client = init_client(client_type, region_config) logger.info( "Checking Region: %s; Compartment: %s for GenAI services", region["region_name"], @@ -166,20 +182,20 @@ def get_genai_models(config: OracleCloudSettings, regional: bool = False) -> lis # Build our list of models for model in response.data.items: - # note that langchain_community.llms.oci_generative_ai only supports meta/cohere models - if model.display_name not in excluded_display_names and model.vendor in ["meta", "cohere"]: - genai_models.append( - { - "region": region["region_name"], - "compartment_id": config.genai_compartment_id, - "model_name": model.display_name, - "capabilities": model.capabilities, - "vendor": model.vendor, - "id": model.id, - } - ) - except oci.exceptions.ServiceError: - logger.info("Region: %s has no GenAI services", region["region_name"]) + if model.vendor == "cohere" and "TEXT_EMBEDDINGS" not in model.capabilities: + continue + genai_models.append( + { + "region": region["region_name"], + "compartment_id": config.genai_compartment_id, + "model_name": model.display_name, + "capabilities": model.capabilities, + "vendor": model.vendor, + "id": model.id, + } + ) + except oci.exceptions.ServiceError as ex: + logger.info("Unable to get GenAI Models in Region: %s (%s)", region["region_name"], ex.message) except (oci.exceptions.RequestException, urllib3.exceptions.MaxRetryError): logger.error("Timeout: Error querying GenAI services in %s", region["region_name"]) diff --git a/src/server/api/utils/selectai.py b/src/server/api/utils/selectai.py index 2f45837b..2c029fbe 100644 --- a/src/server/api/utils/selectai.py +++ b/src/server/api/utils/selectai.py @@ -7,10 +7,10 @@ from typing import Union import oracledb -import server.api.core.databases as core_databases +import server.api.utils.databases as utils_databases from common.schema import SelectAIProfileType, DatabaseSelectAIObjects -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("api.utils.selectai") @@ -39,7 +39,7 @@ def set_profile( ); END; """ - _ = core_databases.execute_sql(conn, sql, binds) + _ = 
utils_databases.execute_sql(conn, sql, binds) def get_objects(conn: oracledb.Connection, profile_name: SelectAIProfileType) -> DatabaseSelectAIObjects: @@ -67,7 +67,7 @@ def get_objects(conn: oracledb.Connection, profile_name: SelectAIProfileType) -> 'RMAN$CATALOG','ADMIN','ODI_REPO_USER','C##CLOUD$SERVICE') ORDER BY owner, table_name """ - results = core_databases.execute_sql(conn, sql, binds) + results = utils_databases.execute_sql(conn, sql, binds) for owner, table_name, object_enabled in results: selectai_objects.append(DatabaseSelectAIObjects(owner=owner, name=table_name, enabled=object_enabled)) logger.debug("Found SelectAI Objects: %s", selectai_objects) diff --git a/src/server/api/utils/testbed.py b/src/server/api/utils/testbed.py index 539da87c..f12782ec 100644 --- a/src/server/api/utils/testbed.py +++ b/src/server/api/utils/testbed.py @@ -18,9 +18,10 @@ from giskard.rag import generate_testset, KnowledgeBase, QATestset from giskard.rag.question_generators import simple_questions, complex_questions -import server.api.core.databases as core_databases -import common.schema as schema -import common.logging_config as logging_config +import server.api.utils.databases as utils_databases +import server.api.utils.models as utils_models + +from common import schema, logging_config logger = logging_config.logging.getLogger("api.utils.testbed") @@ -81,11 +82,11 @@ def create_testset_objects(db_conn: Connection) -> None: ) """ logger.info("Creating testsets Table") - _ = core_databases.execute_sql(db_conn, testsets_tbl) + _ = utils_databases.execute_sql(db_conn, testsets_tbl) logger.info("Creating testset_qa Table") - _ = core_databases.execute_sql(db_conn, testset_qa_tbl) + _ = utils_databases.execute_sql(db_conn, testset_qa_tbl) logger.info("Creating evaluations Table") - _ = core_databases.execute_sql(db_conn, evaluation_tbl) + _ = utils_databases.execute_sql(db_conn, evaluation_tbl) def get_testsets(db_conn: Connection) -> list: @@ -93,7 +94,7 @@ def get_testsets(db_conn: Connection) -> list: logger.info("Getting All TestSets") testsets = [] sql = "SELECT tid, name, to_char(created) FROM oai_testsets ORDER BY created" - results = core_databases.execute_sql(db_conn, sql) + results = utils_databases.execute_sql(db_conn, sql) try: testsets = [schema.TestSets(tid=tid.hex(), name=name, created=created) for tid, name, created in results] except TypeError: @@ -107,7 +108,7 @@ def get_testset_qa(db_conn: Connection, tid: schema.TestSetsIdType) -> schema.Te logger.info("Getting TestSet Q&A for TID: %s", tid) binds = {"tid": tid} sql = "SELECT qa_data FROM oai_testset_qa where tid=:tid" - results = core_databases.execute_sql(db_conn, sql, binds) + results = utils_databases.execute_sql(db_conn, sql, binds) qa_data = [qa_data[0] for qa_data in results] return schema.TestSetQA(qa_data=qa_data) @@ -119,7 +120,7 @@ def get_evaluations(db_conn: Connection, tid: schema.TestSetsIdType) -> list[sch evaluations = [] binds = {"tid": tid} sql = "SELECT eid, to_char(evaluated), correctness FROM oai_evaluations WHERE tid=:tid ORDER BY evaluated DESC" - results = core_databases.execute_sql(db_conn, sql, binds) + results = utils_databases.execute_sql(db_conn, sql, binds) try: evaluations = [ schema.Evaluation(eid=eid.hex(), evaluated=evaluated, correctness=correctness) @@ -138,7 +139,7 @@ def delete_qa( """Delete Q&A""" binds = {"tid": tid} sql = "DELETE FROM oai_testsets WHERE TID = :tid" - core_databases.execute_sql(db_conn, sql, binds) + utils_databases.execute_sql(db_conn, sql, binds) db_conn.commit() @@ 
-190,7 +191,7 @@ def upsert_qa( END; """ logger.debug("Upsert PLSQL: %s", plsql) - return core_databases.execute_sql(db_conn, plsql, binds) + return utils_databases.execute_sql(db_conn, plsql, binds) def insert_evaluation(db_conn, tid, evaluated, correctness, settings, rag_report): @@ -217,7 +218,7 @@ def insert_evaluation(db_conn, tid, evaluated, correctness, settings, rag_report END; """ logger.debug("Insert PLSQL: %s", plsql) - return core_databases.execute_sql(db_conn, plsql, binds) + return utils_databases.execute_sql(db_conn, plsql, binds) def load_and_split(eval_file, chunk_size=2048): @@ -235,40 +236,19 @@ def load_and_split(eval_file, chunk_size=2048): def build_knowledge_base( - text_nodes: str, questions: int, ll_model: schema.Model, embed_model: schema.Model + text_nodes: str, questions: int, ll_model: str, embed_model: str, oci_config: schema.OciSettings ) -> QATestset: """Establish a temporary Knowledge Base""" - - def configure_and_set_model(client_model): - """Configure and set Model for TestSet Generation (uses litellm)""" - model_id, disable_structured_output, params = None, False, None - if client_model.provider == "openai_compatible": - model_id, params = ( - f"openai/{client_model.id}", - {"api_base": client_model.url, "api_key": client_model.api_key or "api_compat"}, - ) - elif client_model.provider == "ollama": - model_id, disable_structured_output, params = ( - f"ollama/{client_model.id}", - True, - {"api_base": client_model.url}, - ) - elif client_model.provider == "perplexity": - model_id, params = f"perplexity/{client_model.id}", {"api_key": client_model.api_key} - else: - model_id, params = f"openai/{client_model.id}", {"api_key": client_model.api_key} - - if client_model.type == "ll": - logger.debug("KnowledgeBase LL: %s (%s)", model_id, params) - set_llm_model(model_id, disable_structured_output, **params) - else: - logger.debug("KnowledgeBase Embed: %s (%s)", model_id, params) - set_embedding_model(model_id, **params) - logger.info("KnowledgeBase creation starting...") logger.info("LL Model: %s; Embedding: %s", ll_model, embed_model) - configure_and_set_model(ll_model) - configure_and_set_model(embed_model) + + # Setup models, uses LiteLLM + ll_model_config = utils_models.get_litellm_config( + model_config={"model": ll_model}, oci_config=oci_config, giskard=True + ) + set_llm_model(llm_model=ll_model, **ll_model_config) + embed_model_config = utils_models.get_litellm_config(model_config={"model": embed_model}, giskard=True) + set_embedding_model(model=embed_model, **embed_model_config) knowledge_base_df = pd.DataFrame([node.text for node in text_nodes], columns=["text"]) knowledge_base = KnowledgeBase(data=knowledge_base_df) @@ -319,7 +299,7 @@ def clean(orig_html): FROM oai_evaluations WHERE eid=:eid ORDER BY evaluated """ - results = core_databases.execute_sql(db_conn, sql, binds) + results = utils_databases.execute_sql(db_conn, sql, binds) report = pickle.loads(results[0]["RAG_REPORT"]) full_report = report.to_pandas() html_report = report.to_html() @@ -334,8 +314,8 @@ def clean(orig_html): "report": full_report.to_dict(), "correct_by_topic": by_topic.to_dict(), "failures": failures.to_dict(), - #"html_report": clean(html_report), #CDB - "html_report": '' + # "html_report": clean(html_report), #CDB + "html_report": "", } logger.debug("Evaluation Results: %s", evaluation_results) evaluation = schema.EvaluationReport(**evaluation_results) diff --git a/src/server/api/v1/chat.py b/src/server/api/v1/chat.py index 33b476a9..28a4706c 100644 --- 
a/src/server/api/v1/chat.py +++ b/src/server/api/v1/chat.py @@ -2,10 +2,11 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. """ -# spell-checker:ignore selectai +# spell-checker:ignore selectai litellm from fastapi import APIRouter, Header from fastapi.responses import StreamingResponse +from litellm import ModelResponse from langchain_core.messages import ( AnyMessage, @@ -18,10 +19,9 @@ from langgraph.graph.message import REMOVE_ALL_MESSAGES from server.api.utils import chat -import server.agents.chatbot as chatbot +from server.agents import chatbot -import common.schema as schema -import common.logging_config as logging_config +from common import schema, logging_config logger = logging_config.logging.getLogger("endpoints.v1.chat") @@ -31,11 +31,11 @@ @auth.post( "/completions", description="Submit a message for full completion.", - response_model=schema.ChatResponse, + response_model=ModelResponse, ) async def chat_post( request: schema.ChatRequest, client: schema.ClientIdType = Header(default="server") -) -> schema.ChatResponse: +) -> ModelResponse: """Full Completion Requests""" last_message = None async for chunk in chat.completion_generator(client, request, "completions"): diff --git a/src/server/api/v1/databases.py b/src/server/api/v1/databases.py index 551703c6..fd88cae4 100644 --- a/src/server/api/v1/databases.py +++ b/src/server/api/v1/databases.py @@ -6,13 +6,18 @@ from fastapi import APIRouter, HTTPException -import server.api.core.databases as core_databases +import server.api.utils.databases as utils_databases -import common.schema as schema -import common.logging_config as logging_config +from common import schema, logging_config logger = logging_config.logging.getLogger("endpoints.v1.databases") +# Validate the DEFAULT Databases +try: + _ = utils_databases.get_databases(db_name="DEFAULT", validate=True) +except Exception: + pass + auth = APIRouter() @@ -25,7 +30,7 @@ async def databases_list() -> list[schema.Database]: """List all databases""" logger.debug("Received databases_list") try: - database_objects = core_databases.get_databases() + database_objects = utils_databases.get_databases(validate=False) except ValueError as ex: # This is a problem, there should always be a "DEFAULT" database even if not configured raise HTTPException(status_code=404, detail=f"Database: {str(ex)}.") from ex @@ -42,7 +47,8 @@ async def databases_get(name: schema.DatabaseNameType) -> schema.Database: """Get single database""" logger.debug("Received databases_get - name: %s", name) try: - db = core_databases.get_databases(name) + # Validate when looking at a single database + db = utils_databases.get_databases(db_name=name, validate=True) except ValueError as ex: raise HTTPException(status_code=404, detail=f"Database: {str(ex)}.") from ex @@ -62,29 +68,39 @@ async def databases_update( logger.debug("Received databases_update - name: %s; payload: %s", name, payload) try: - db = core_databases.get_databases(name) + db = utils_databases.get_databases(db_name=name, validate=False) except ValueError as ex: raise HTTPException(status_code=404, detail=f"Database: {str(ex)}.") from ex + db.connected = False try: payload.config_dir = db.config_dir payload.wallet_location = db.wallet_location logger.debug("Testing Payload: %s", payload) - db_conn = core_databases.connect(payload) - except core_databases.DbException as ex: - db.connected = False - raise HTTPException(status_code=ex.status_code, 
detail=f"Database: {name} {ex.detail}.") from ex - + db_conn = utils_databases.connect(payload) + except (ValueError, PermissionError, ConnectionError, LookupError) as ex: + status_code = 500 + if isinstance(ex, ValueError): + status_code = 400 + elif isinstance(ex, PermissionError): + status_code = 401 + elif isinstance(ex, LookupError): + status_code = 404 + elif isinstance(ex, ConnectionError): + status_code = 503 + else: + raise + raise HTTPException(status_code=status_code, detail=f"Database: {db.name} {ex}.") from ex for key, value in payload.model_dump().items(): setattr(db, key, value) + + # Manage Connections; Unset and disconnect other databases db.connected = True db.set_connection(db_conn) - - # Unset and disconnect other databases - database_objects = core_databases.get_databases(validate=False) + database_objects = utils_databases.get_databases() for other_db in database_objects: if other_db.name != name and other_db.connection: - other_db.set_connection(core_databases.disconnect(db.connection)) + other_db.set_connection(utils_databases.disconnect(db.connection)) other_db.connected = False return db diff --git a/src/server/api/v1/embed.py b/src/server/api/v1/embed.py index e3a240ed..ee44ddb3 100644 --- a/src/server/api/v1/embed.py +++ b/src/server/api/v1/embed.py @@ -14,16 +14,13 @@ from pydantic import HttpUrl import requests -import server.api.core.databases as core_databases import server.api.core.oci as core_oci -import server.api.utils.databases as util_databases -import server.api.utils.embed as util_embed -import server.api.utils.models as util_models +import server.api.utils.databases as utils_databases +import server.api.utils.embed as utils_embed +import server.api.utils.models as utils_models -import common.functions as functions -import common.schema as schema -import common.logging_config as logging_config +from common import functions, schema, logging_config logger = logging_config.logging.getLogger("api.v1.embed") @@ -41,10 +38,10 @@ async def embed_drop_vs( """Drop Vector Storage""" logger.debug("Received %s embed_drop_vs: %s", client, vs) try: - client_db = util_databases.get_client_db(client) - db_conn = core_databases.connect(client_db) - util_databases.drop_vs(db_conn, vs) - except core_databases.DbException as ex: + client_db = utils_databases.get_client_database(client) + db_conn = utils_databases.connect(client_db) + utils_databases.drop_vs(db_conn, vs) + except utils_databases.DbException as ex: raise HTTPException(status_code=400, detail=f"Embed: {str(ex)}.") from ex return JSONResponse(status_code=200, content={"message": f"Vector Store: {vs} dropped."}) @@ -59,7 +56,7 @@ async def store_web_file( ) -> Response: """Store contents from a web URL""" logger.debug("Received store_web_file - request: %s", request) - temp_directory = util_embed.get_temp_directory(client, "embedding") + temp_directory = utils_embed.get_temp_directory(client, "embedding") # Save the file temporarily for url in request: @@ -96,7 +93,7 @@ async def store_local_file( ) -> Response: """Store contents from a local file uploaded to streamlit""" logger.debug("Received store_local_file - files: %s", files) - temp_directory = util_embed.get_temp_directory(client, "embedding") + temp_directory = utils_embed.get_temp_directory(client, "embedding") for file in files: filename = temp_directory / file.filename file_content = await file.read() @@ -119,7 +116,7 @@ async def split_embed( """Perform Split and Embed""" logger.debug("Received split_embed - rate_limit: %i; request: %s", 
rate_limit, request) oci_config = core_oci.get_oci(client=client) - temp_directory = util_embed.get_temp_directory(client, "embedding") + temp_directory = utils_embed.get_temp_directory(client, "embedding") try: files = [f for f in temp_directory.iterdir() if f.is_file()] @@ -135,7 +132,7 @@ async def split_embed( detail=f"Embed: Client {client} no files found in folder.", ) try: - split_docos, _ = util_embed.load_and_split_documents( + split_docos, _ = utils_embed.load_and_split_documents( files, request.model, request.chunk_size, @@ -144,14 +141,14 @@ async def split_embed( output_dir=None, ) - embed_client = util_models.get_client({"model": request.model, "enabled": True}, oci_config) + embed_client = utils_models.get_client_embed({"model": request.model, "enabled": True}, oci_config) # Calculate and set the vector_store name using get_vs_table request.vector_store, _ = functions.get_vs_table(**request.model_dump(exclude={"database", "vector_store"})) - util_embed.populate_vs( + utils_embed.populate_vs( vector_store=request, - db_details=util_databases.get_client_db(client), + db_details=utils_databases.get_client_database(client), embed_client=embed_client, input_data=split_docos, rate_limit=rate_limit, diff --git a/src/server/api/v1/models.py b/src/server/api/v1/models.py index fa6fd77f..cd660ef1 100644 --- a/src/server/api/v1/models.py +++ b/src/server/api/v1/models.py @@ -9,10 +9,9 @@ from fastapi.responses import JSONResponse import server.api.core.models as core_models -import server.api.utils.models as util_models +import server.api.utils.models as utils_models -import common.schema as schema -import common.logging_config as logging_config +from common import schema, logging_config logger = logging_config.logging.getLogger("endpoints.v1.models") @@ -47,18 +46,19 @@ async def models_list( @auth.get( - "/{model_id:path}", - description="Get a single model", + "/{model_provider}/{model_id:path}", + description="Get a single model (provider/name)", response_model=schema.Model, ) async def models_get( + model_provider: schema.ModelProviderType, model_id: schema.ModelIdType, ) -> schema.Model: """List a specific model""" - logger.debug("Received models_get - model_id: %s", model_id) + logger.debug("Received models_get - model: %s/%s", model_provider, model_id) try: - models_ret = core_models.get_model(model_id=model_id) + models_ret = core_models.get_model(model_provider=model_provider, model_id=model_id) except core_models.UnknownModelError as ex: raise HTTPException(status_code=404, detail=str(ex)) from ex @@ -66,18 +66,15 @@ async def models_get( @auth.patch( - "/{model_id:path}", + "/{model_provider}/{model_id:path}", description="Update a model", response_model=schema.Model, ) -async def models_update( - model_id: schema.ModelIdType, - payload: schema.Model, -) -> schema.Model: +async def models_update(payload: schema.Model) -> schema.Model: """Update a model""" - logger.debug("Received models_update - model_id: %s; payload: %s", model_id, payload) + logger.debug("Received models_update - payload: %s", payload) try: - return util_models.update_model(model_id=model_id, payload=payload) + return utils_models.update(payload=payload) except core_models.UnknownModelError as ex: raise HTTPException(status_code=404, detail=str(ex)) from ex except core_models.URLUnreachableError as ex: @@ -98,13 +95,14 @@ async def models_create( @auth.delete( - "/{model_id:path}", + "/{model_provider}/{model_id:path}", description="Delete a model", ) async def models_delete( + model_provider: 
schema.ModelProviderType, model_id: schema.ModelIdType, ) -> JSONResponse: """Delete a model""" - logger.debug("Received models_delete - model_id: %s", model_id) - core_models.delete_model(model_id) - return JSONResponse(status_code=200, content={"message": f"Model: {model_id} deleted."}) + logger.debug("Received models_delete - model: %s/%s", model_provider, model_id) + core_models.delete_model(model_provider=model_provider, model_id=model_id) + return JSONResponse(status_code=200, content={"message": f"Model: {model_provider}/{model_id} deleted."}) diff --git a/src/server/api/v1/oci.py b/src/server/api/v1/oci.py index 41b7e706..3151877e 100644 --- a/src/server/api/v1/oci.py +++ b/src/server/api/v1/oci.py @@ -8,15 +8,22 @@ from fastapi.responses import JSONResponse import server.api.core.oci as core_oci -import server.api.utils.embed as util_embed -import server.api.utils.oci as util_oci -import server.api.utils.models as util_models +import server.api.utils.embed as utils_embed +import server.api.utils.oci as utils_oci +import server.api.utils.models as utils_models -import common.schema as schema -import common.logging_config as logging_config +from common import schema, logging_config logger = logging_config.logging.getLogger("endpoints.v1.oci") +# Validate the DEFAULT OCI Profile and get models +try: + default_config = core_oci.get_oci(auth_profile="DEFAULT") + _ = utils_oci.get_namespace(config=default_config) + _ = utils_models.create_genai(config=default_config) +except utils_oci.OciException: + pass + auth = APIRouter() @@ -62,9 +69,9 @@ async def oci_list_regions( logger.debug("Received oci_list_regions - auth_profile: %s", auth_profile) try: oci_config = await oci_get(auth_profile=auth_profile) - regions = util_oci.get_regions(oci_config) + regions = utils_oci.get_regions(oci_config) return regions - except core_oci.OciException as ex: + except utils_oci.OciException as ex: raise HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex @@ -77,12 +84,12 @@ async def oci_list_genai( auth_profile: schema.OCIProfileType, ) -> list: """Return a list of compartments""" - logger.debug("Received oci_list_regions - auth_profile: %s", auth_profile) + logger.debug("Received oci_list_genai - auth_profile: %s", auth_profile) try: oci_config = await oci_get(auth_profile=auth_profile) - all_models = util_oci.get_genai_models(oci_config, regional=False) + all_models = utils_oci.get_genai_models(oci_config, regional=False) return all_models - except core_oci.OciException as ex: + except utils_oci.OciException as ex: raise HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex @@ -98,9 +105,9 @@ async def oci_list_compartments( logger.debug("Received oci_list_compartments - auth_profile: %s", auth_profile) try: oci_config = await oci_get(auth_profile=auth_profile) - compartments = util_oci.get_compartments(oci_config) + compartments = utils_oci.get_compartments(oci_config) return compartments - except core_oci.OciException as ex: + except utils_oci.OciException as ex: raise HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex @@ -118,9 +125,9 @@ async def oci_list_buckets( try: compartment_obj = schema.OracleResource(ocid=compartment_ocid) oci_config = await oci_get(auth_profile=auth_profile) - buckets = util_oci.get_buckets(compartment_obj.ocid, oci_config) + buckets = utils_oci.get_buckets(compartment_obj.ocid, oci_config) return buckets - except core_oci.OciException as ex: + except utils_oci.OciException as ex: raise 
HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex @@ -137,9 +144,9 @@ async def oci_list_bucket_objects( logger.debug("Received oci_list_bucket_objects - auth_profile: %s; bucket_name: %s", auth_profile, bucket_name) try: oci_config = await oci_get(auth_profile=auth_profile) - objects = util_oci.get_bucket_objects(bucket_name, oci_config) + objects = utils_oci.get_bucket_objects(bucket_name, oci_config) return objects - except core_oci.OciException as ex: + except utils_oci.OciException as ex: raise HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex @@ -158,12 +165,12 @@ async def oci_profile_update( oci_config = await oci_get(auth_profile=auth_profile) try: - namespace = util_oci.get_namespace(payload) + namespace = utils_oci.get_namespace(payload) oci_config.namespace = namespace for key, value in payload.model_dump().items(): if value not in ("", None): setattr(oci_config, key, value) - except core_oci.OciException as ex: + except utils_oci.OciException as ex: oci_config.namespace = None raise HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex except AttributeError as ex: @@ -192,9 +199,9 @@ async def oci_download_objects( ) oci_config = await oci_get(auth_profile=auth_profile) # Files should be placed in the embedding folder - temp_directory = util_embed.get_temp_directory(client, "embedding") + temp_directory = utils_embed.get_temp_directory(client, "embedding") for object_name in request: - util_oci.get_object(temp_directory, object_name, bucket_name, oci_config) + utils_oci.get_object(temp_directory, object_name, bucket_name, oci_config) downloaded_files = [f.name for f in temp_directory.iterdir() if f.is_file()] return JSONResponse(status_code=200, content=downloaded_files) @@ -212,7 +219,7 @@ async def oci_create_genai_models( logger.debug("Received oci_create_genai_models - auth_profile: %s", auth_profile) try: oci_config = await oci_get(auth_profile=auth_profile) - enabled_models = util_models.create_genai_models(oci_config) + enabled_models = utils_models.create_genai(oci_config) return enabled_models - except core_oci.OciException as ex: + except utils_oci.OciException as ex: raise HTTPException(status_code=ex.status_code, detail=f"OCI: {ex.detail}.") from ex diff --git a/src/server/api/v1/probes.py b/src/server/api/v1/probes.py index 4048328d..34a986c6 100644 --- a/src/server/api/v1/probes.py +++ b/src/server/api/v1/probes.py @@ -33,12 +33,12 @@ def mcp_healthz(mcp_engine: FastMCP = Depends(get_mcp)): """Check if MCP server is ready.""" if mcp_engine is None: return {"status": "not ready"} - else: - server = mcp_engine.__dict__["_mcp_server"].__dict__ - return { - "status": "ready", - "name": server["name"], - "version": server["version"], - "available_tools": len(getattr(mcp_engine, "available_tools", [])) if mcp_engine else 0, - "timestamp": datetime.now().isoformat(), - } + + server = mcp_engine.__dict__["_mcp_server"].__dict__ + return { + "status": "ready", + "name": server["name"], + "version": server["version"], + "available_tools": len(getattr(mcp_engine, "available_tools", [])) if mcp_engine else 0, + "timestamp": datetime.now().isoformat(), + } diff --git a/src/server/api/v1/prompts.py b/src/server/api/v1/prompts.py index 4ffeecd1..c6362fb7 100644 --- a/src/server/api/v1/prompts.py +++ b/src/server/api/v1/prompts.py @@ -9,8 +9,7 @@ import server.api.core.prompts as core_prompts -import common.schema as schema -import common.logging_config as logging_config +from common import 
schema, logging_config logger = logging_config.logging.getLogger("endpoints.v1.prompts") diff --git a/src/server/api/v1/selectai.py b/src/server/api/v1/selectai.py index fe3c06d6..f712ffe9 100644 --- a/src/server/api/v1/selectai.py +++ b/src/server/api/v1/selectai.py @@ -9,11 +9,10 @@ from fastapi import APIRouter, Header import server.api.core.settings as core_settings -import server.api.utils.databases as util_databases -import server.api.utils.selectai as util_selectai +import server.api.utils.databases as utils_databases +import server.api.utils.selectai as utils_selectai -import common.schema as schema -import common.logging_config as logging_config +from common import schema, logging_config logger = logging_config.logging.getLogger("endpoints.v1.selectai") @@ -30,8 +29,8 @@ async def selectai_get_objects( ) -> list[schema.DatabaseSelectAIObjects]: """Get DatabaseSelectAIObjects""" client_settings = core_settings.get_client_settings(client) - db_conn = util_databases.get_client_db(client).connection - select_ai_objects = util_selectai.get_objects(db_conn, client_settings.selectai.profile) + database = utils_databases.get_client_database(client=client, validate=False) + select_ai_objects = utils_selectai.get_objects(database.connection, client_settings.selectai.profile) return select_ai_objects @@ -48,6 +47,6 @@ async def selectai_update_objects( logger.debug("Received selectai_update - payload: %s", payload) client_settings = core_settings.get_client_settings(client) object_list = json.dumps([obj.model_dump(include={"owner", "name"}) for obj in payload]) - db_conn = util_databases.get_client_db(client).connection - util_selectai.set_profile(db_conn, client_settings.selectai.profile, "object_list", object_list) - return util_selectai.get_objects(db_conn, client_settings.selectai.profile) + db_conn = utils_databases.get_client_database(client).connection + utils_selectai.set_profile(db_conn, client_settings.selectai.profile, "object_list", object_list) + return utils_selectai.get_objects(db_conn, client_settings.selectai.profile) diff --git a/src/server/api/v1/settings.py b/src/server/api/v1/settings.py index 60303099..512303f4 100644 --- a/src/server/api/v1/settings.py +++ b/src/server/api/v1/settings.py @@ -11,8 +11,7 @@ import server.api.core.settings as core_settings -import common.schema as schema -import common.logging_config as logging_config +from common import logging_config, schema logger = logging_config.logging.getLogger("endpoints.v1.settings") diff --git a/src/server/api/v1/testbed.py b/src/server/api/v1/testbed.py index 0c8dcba6..9d28bfbf 100644 --- a/src/server/api/v1/testbed.py +++ b/src/server/api/v1/testbed.py @@ -12,23 +12,22 @@ import json from typing import Optional from giskard.rag import evaluate, QATestset +from giskard.llm import set_llm_model from fastapi import APIRouter, HTTPException, Header, UploadFile from fastapi.responses import JSONResponse import litellm from langchain_core.messages import ChatMessage -import server.api.core.models as core_models import server.api.core.settings as core_settings import server.api.core.oci as core_oci -import server.api.utils.embed as util_embed -import server.api.utils.testbed as util_testbed -import server.api.utils.databases as util_databases -import server.api.utils.models as util_models +import server.api.utils.embed as utils_embed +import server.api.utils.testbed as utils_testbed +import server.api.utils.databases as utils_databases +import server.api.utils.models as utils_models from server.api.v1 import chat 
-import common.schema as schema -import common.logging_config as logging_config +from common import logging_config, schema logger = logging_config.logging.getLogger("endpoints.v1.testbed") @@ -44,7 +43,7 @@ async def testbed_testsets( client: schema.ClientIdType = Header(default="server"), ) -> list[schema.TestSets]: """Get a list of stored TestSets, create TestSet objects if they don't exist""" - testsets = util_testbed.get_testsets(db_conn=util_databases.get_client_db(client).connection) + testsets = utils_testbed.get_testsets(db_conn=utils_databases.get_client_database(client).connection) return testsets @@ -58,8 +57,8 @@ async def testbed_evaluations( client: schema.ClientIdType = Header(default="server"), ) -> list[schema.Evaluation]: """Get Evaluations""" - evaluations = util_testbed.get_evaluations( - db_conn=util_databases.get_client_db(client).connection, tid=tid.upper() + evaluations = utils_testbed.get_evaluations( + db_conn=utils_databases.get_client_database(client).connection, tid=tid.upper() ) return evaluations @@ -74,7 +73,9 @@ async def testbed_evaluation( client: schema.ClientIdType = Header(default="server"), ) -> schema.EvaluationReport: """Get Evaluations""" - evaluation = util_testbed.process_report(db_conn=util_databases.get_client_db(client).connection, eid=eid.upper()) + evaluation = utils_testbed.process_report( + db_conn=utils_databases.get_client_database(client).connection, eid=eid.upper() + ) return evaluation @@ -88,7 +89,9 @@ async def testbed_testset_qa( client: schema.ClientIdType = Header(default="server"), ) -> schema.TestSetQA: """Get TestSet Q&A""" - return util_testbed.get_testset_qa(db_conn=util_databases.get_client_db(client).connection, tid=tid.upper()) + return utils_testbed.get_testset_qa( + db_conn=utils_databases.get_client_database(client).connection, tid=tid.upper() + ) @auth.delete( @@ -100,7 +103,7 @@ async def testbed_delete_testset( client: schema.ClientIdType = Header(default="server"), ) -> JSONResponse: """Delete TestSet""" - util_testbed.delete_qa(util_databases.get_client_db(client).connection, tid.upper()) + utils_testbed.delete_qa(utils_databases.get_client_database(client).connection, tid.upper()) return JSONResponse(status_code=200, content={"message": f"TestSet: {tid} deleted."}) @@ -117,12 +120,12 @@ async def testbed_upsert_testsets( ) -> schema.TestSetQA: """Update stored TestSet data""" created = datetime.now().isoformat() - db_conn = util_databases.get_client_db(client).connection + db_conn = utils_databases.get_client_database(client).connection try: for file in files: file_content = await file.read() - content = util_testbed.jsonl_to_json_content(file_content) - db_id = util_testbed.upsert_qa(db_conn, name, created, content, tid) + content = utils_testbed.jsonl_to_json_content(file_content) + db_id = utils_testbed.upsert_qa(db_conn, name, created, content, tid) db_conn.commit() except Exception as ex: logger.error("An exception occurred: %s", ex) @@ -140,16 +143,19 @@ async def testbed_upsert_testsets( async def testbed_generate_qa( files: list[UploadFile], name: schema.TestSetsNameType, - ll_model: schema.ModelIdType = None, - embed_model: schema.ModelIdType = None, + ll_model: str, + embed_model: str, questions: int = 2, client: schema.ClientIdType = Header(default="server"), ) -> schema.TestSetQA: """Retrieve contents from a local file uploaded and generate Q&A""" - # Setup Models - giskard_ll_model = core_models.get_model(model_id=ll_model, model_type="ll") - giskard_embed_model = 
core_models.get_model(model_id=embed_model, model_type="embed") - temp_directory = util_embed.get_temp_directory(client, "testbed") + # Get the Model Configuration + try: + oci_config = core_oci.get_oci(client) + except ValueError as ex: + raise HTTPException(status_code=400, detail=str(ex)) from ex + + temp_directory = utils_embed.get_temp_directory(client, "testbed") full_testsets = temp_directory / "all_testsets.jsonl" for file in files: @@ -162,8 +168,8 @@ async def testbed_generate_qa( file.write(file_content) # Process file for knowledge base - text_nodes = util_testbed.load_and_split(filename) - test_set = util_testbed.build_knowledge_base(text_nodes, questions, giskard_ll_model, giskard_embed_model) + text_nodes = utils_testbed.load_and_split(filename) + test_set = utils_testbed.build_knowledge_base(text_nodes, questions, ll_model, embed_model, oci_config) # Save test set test_set_filename = temp_directory / f"{name}.jsonl" test_set.save(test_set_filename) @@ -195,9 +201,9 @@ async def testbed_generate_qa( description="Evaluate Q&A Test Set.", response_model=schema.EvaluationReport, ) -def testbed_evaluate_qa( +def testbed_evaluate( tid: schema.TestSetsIdType, - judge: schema.ModelIdType, + judge: str, client: schema.ClientIdType = Header(default="server"), ) -> schema.EvaluationReport: """Run evaluate against a testset""" @@ -208,7 +214,7 @@ def get_answer(question: str): messages=[ChatMessage(role="human", content=question)], ) ai_response = asyncio.run(chat.chat_post(client=client, request=request)) - return ai_response.choices[0].message.content + return ai_response["choices"][0]["message"]["content"] evaluated = datetime.now().isoformat() client_settings = core_settings.get_client_settings(client) @@ -217,10 +223,10 @@ def get_answer(question: str): # Change Grade vector_search client_settings.vector_search.grading = False - db_conn = util_databases.get_client_db(client).connection - testset = util_testbed.get_testset_qa(db_conn=db_conn, tid=tid.upper()) + db_conn = utils_databases.get_client_database(client).connection + testset = utils_testbed.get_testset_qa(db_conn=db_conn, tid=tid.upper()) qa_test = "\n".join(json.dumps(item) for item in testset.qa_data) - temp_directory = util_embed.get_temp_directory(client, "testbed") + temp_directory = utils_embed.get_temp_directory(client, "testbed") with open(temp_directory / f"{tid}_output.txt", "w", encoding="utf-8") as file: file.write(qa_test) @@ -229,18 +235,18 @@ def get_answer(question: str): # Setup Judge Model logger.debug("Starting evaluation with Judge: %s", judge) oci_config = core_oci.get_oci(client) - judge_client = util_models.get_client({"model": judge}, oci_config, True) - try: - # report = evaluate(get_answer, testset=loaded_testset, llm_client=judge_client, metrics=[correctness_metric]) #CDB - report = evaluate(get_answer, testset=loaded_testset, llm_client=judge_client, metrics=None) # CDB + judge_config = utils_models.get_litellm_config(model_config={"model": judge}, oci_config=oci_config, giskard=True) + set_llm_model(llm_model=judge, **judge_config) + try: + report = evaluate(get_answer, testset=loaded_testset, metrics=None) except KeyError as ex: if str(ex) == "'correctness'": raise HTTPException(status_code=500, detail="Unable to determine the correctness; please retry.") from ex logger.debug("Ending evaluation with Judge: %s", judge) - eid = util_testbed.insert_evaluation( + eid = utils_testbed.insert_evaluation( db_conn=db_conn, tid=tid, evaluated=evaluated, @@ -251,4 +257,4 @@ def get_answer(question: 
str): db_conn.commit() shutil.rmtree(temp_directory) - return util_testbed.process_report(db_conn=db_conn, eid=eid) + return utils_testbed.process_report(db_conn=db_conn, eid=eid) diff --git a/src/server/bootstrap/configfile.py b/src/server/bootstrap/configfile.py index 5509d222..2dc3dbcb 100644 --- a/src/server/bootstrap/configfile.py +++ b/src/server/bootstrap/configfile.py @@ -9,7 +9,7 @@ from threading import Lock from common.schema import Configuration -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("bootstrap.configfile") diff --git a/src/server/bootstrap/databases.py b/src/server/bootstrap/databases.py index a3146abc..cb2b7b1d 100644 --- a/src/server/bootstrap/databases.py +++ b/src/server/bootstrap/databases.py @@ -7,9 +7,8 @@ import os from server.bootstrap.configfile import ConfigStore -import server.api.core.databases as core_databases from common.schema import Database -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("bootstrap.databases") @@ -28,7 +27,7 @@ def main() -> list[Database]: raise ValueError(f"Duplicate database name found in config: '{db.name}'") seen.add(db_name_lower) - db_objects = [] + database_objects = [] default_found = False for db in db_configs: @@ -46,9 +45,9 @@ def main() -> list[Database]: if updated.wallet_password: updated.wallet_location = updated.config_dir logger.info("Setting WALLET_LOCATION: %s", updated.config_dir) - db_objects.append(updated) + database_objects.append(updated) else: - db_objects.append(db) + database_objects.append(db) # If DEFAULT wasn't in config, create it from env vars if not default_found: @@ -63,26 +62,7 @@ def main() -> list[Database]: if data["wallet_password"]: data["wallet_location"] = data["config_dir"] logger.info("Setting WALLET_LOCATION: %s", data["config_dir"]) - db_objects.append(Database(**data)) - - # Validate Configuration and set vector_stores/status - database_objects = [] - for db in db_objects: - database_objects.append(db) - try: - conn = core_databases.connect(db) - db.connected = True - except core_databases.DbException: - db.connected = False - continue - db.vector_stores = core_databases.get_vs(conn) - db.selectai = core_databases.selectai_enabled(conn) - if db.selectai: - db.selectai_profiles = core_databases.get_selectai_profiles(conn) - if not db.connection and len(database_objects) > 1: - db.set_connection = core_databases.disconnect(conn) - else: - db.set_connection(conn) + database_objects.append(Database(**data)) logger.debug("Bootstrapped Databases: %s", database_objects) logger.debug("*** Bootstrapping Database - End") diff --git a/src/server/bootstrap/models.py b/src/server/bootstrap/models.py index e135b66e..d6e845f2 100644 --- a/src/server/bootstrap/models.py +++ b/src/server/bootstrap/models.py @@ -6,14 +6,14 @@ added via the APIs """ # spell-checker:ignore configfile genai ollama pplx docos mxbai nomic thenlper -# spell-checker:ignore huggingface +# spell-checker:ignore huggingface vllm import os from server.bootstrap.configfile import ConfigStore from common.schema import Model from common.functions import is_url_accessible -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("bootstrap.models") @@ -22,6 +22,20 @@ def main() -> list[Model]: """Define example Model Support""" logger.debug("*** Bootstrapping Models - Start") + def update_env_var(model: Model, 
provider: str, model_key: str, env_var: str):
+        if model.get("provider") != provider:
+            return
+
+        new_value = os.environ.get(env_var)
+        if not new_value:
+            return
+
+        old_value = model.get(model_key)
+        if old_value != new_value:
+            logger.debug("Overriding '%s' for model '%s' with %s environment variable", model_key, model["id"], env_var)
+            model[model_key] = new_value
+            logger.debug("Model '%s' updated via environment variable overrides.", model["id"])
+
     models_list = [
         {
             "id": "command-r",
@@ -29,8 +43,7 @@ def main() -> list[Model]:
             "type": "ll",
             "provider": "cohere",
             "api_key": os.environ.get("COHERE_API_KEY", default=""),
-            "openai_compat": False,
-            "url": "https://api.cohere.ai",
+            "api_base": "https://api.cohere.ai/compatibility/v1",
             "context_length": 127072,
             "temperature": 0.3,
             "max_completion_tokens": 4096,
@@ -42,8 +55,7 @@ def main() -> list[Model]:
             "type": "ll",
             "provider": "openai",
             "api_key": os.environ.get("OPENAI_API_KEY", default=""),
-            "openai_compat": True,
-            "url": "https://api.openai.com/v1",
+            "api_base": "https://api.openai.com/v1",
             "context_length": 127072,
             "temperature": 1.0,
             "max_completion_tokens": 4096,
@@ -55,8 +67,7 @@ def main() -> list[Model]:
             "type": "ll",
             "provider": "perplexity",
             "api_key": os.environ.get("PPLX_API_KEY", default=""),
-            "openai_compat": True,
-            "url": "https://api.perplexity.ai",
+            "api_base": "https://api.perplexity.ai",
             "context_length": 127072,
             "temperature": 0.2,
             "max_completion_tokens": 28000,
@@ -66,10 +77,9 @@ def main() -> list[Model]:
             "id": "phi-4",
             "enabled": False,
             "type": "ll",
-            "provider": "openai_compatible",
+            "provider": "huggingface",
             "api_key": "",
-            "openai_compat": True,
-            "url": "http://localhost:1234/v1",
+            "api_base": "http://localhost:1234/v1",
             "context_length": 131072,
             "temperature": 1.0,
             "max_completion_tokens": 4096,
@@ -81,8 +91,19 @@ def main() -> list[Model]:
             "type": "ll",
             "provider": "ollama",
             "api_key": "",
-            "openai_compat": True,
-            "url": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"),
+            "api_base": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"),
+            "context_length": 131072,
+            "temperature": 1.0,
+            "max_completion_tokens": 2048,
+            "frequency_penalty": 0.0,
+        },
+        {
+            "id": "meta-llama/Llama-3.2-1B-Instruct",
+            "enabled": os.getenv("ON_PREM_VLLM_URL") is not None,
+            "type": "ll",
+            "provider": "hosted_vllm",
+            "api_key": "",
+            "api_base": os.environ.get("ON_PREM_VLLM_URL", default="http://localhost:8000/v1"),
             "context_length": 131072,
             "temperature": 1.0,
             "max_completion_tokens": 2048,
@@ -95,8 +116,7 @@ def main() -> list[Model]:
             "type": "ll",
             "provider": "ollama",
             "api_key": "",
-            "openai_compat": True,
-            "url": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"),
+            "api_base": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"),
             "context_length": 131072,
             "temperature": 1.0,
             "max_completion_tokens": 2048,
@@ -107,19 +127,17 @@ def main() -> list[Model]:
             "enabled": os.getenv("ON_PREM_HF_URL") is not None,
             "type": "embed",
             "provider": "huggingface",
-            "url": os.environ.get("ON_PREM_HF_URL", default="http://127.0.0.1:8080"),
+            "api_base": os.environ.get("ON_PREM_HF_URL", default="http://127.0.0.1:8080"),
             "api_key": "",
-            "openai_compat": True,
             "max_chunk_size": 512,
         },
         {
             "id": "text-embedding-3-small",
             "enabled": os.getenv("OPENAI_API_KEY") is not None,
             "type": "embed",
-            "provider": "openai_compatible",
-            "url": "https://api.openai.com/v1",
+            "provider": "openai",
+            "api_base": "https://api.openai.com/v1",
             "api_key": 
os.environ.get("OPENAI_API_KEY", default=""), - "openai_compat": True, "max_chunk_size": 8191, }, { @@ -127,19 +145,17 @@ def main() -> list[Model]: "enabled": os.getenv("COHERE_API_KEY") is not None, "type": "embed", "provider": "cohere", - "url": "https://api.cohere.ai", + "api_base": "https://api.cohere.ai/compatibility/v1", "api_key": os.environ.get("COHERE_API_KEY", default=""), - "openai_compat": False, "max_chunk_size": 512, }, { - "id": "text-embedding-nomic-embed-text-v1.5", + "id": "nomic-ai/nomic-embed-text-v1", "enabled": False, "type": "embed", - "provider": "openai_compatible", - "url": "http://localhost:1234/v1", + "provider": "hosted_vllm", + "api_base": "http://localhost:8001/v1", "api_key": "", - "openai_compat": True, "max_chunk_size": 8192, }, { @@ -148,9 +164,8 @@ def main() -> list[Model]: "enabled": os.getenv("ON_PREM_OLLAMA_URL") is not None, "type": "embed", "provider": "ollama", - "url": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"), + "api_base": os.environ.get("ON_PREM_OLLAMA_URL", default="http://127.0.0.1:11434"), "api_key": "", - "openai_compat": True, "max_chunk_size": 8192, }, ] @@ -158,16 +173,19 @@ def main() -> list[Model]: # Check for duplicates unique_entries = set() for model in models_list: - if model["id"] in unique_entries: - raise ValueError(f"Model '{model['id']}' already exists.") - unique_entries.add(model["id"]) + key = (model["provider"], model["id"]) + if key in unique_entries: + raise ValueError(f"Model '{model['provider']}/{model['id']}' already exists.") + unique_entries.add(key) # Merge with configuration if available configuration = ConfigStore.get() if configuration and configuration.model_configs: logger.debug("Merging model configs from ConfigStore") - config_model_map = {m.id: m.model_dump() for m in configuration.model_configs} - existing = {m["id"]: m for m in models_list} + + # Use (provider, id) tuple as key + config_model_map = {(m.provider, m.id): m.model_dump() for m in configuration.model_configs} + existing = {(m["provider"], m["id"]): m for m in models_list} def values_differ(a, b): if isinstance(a, bool) or isinstance(b, bool): @@ -178,80 +196,41 @@ def values_differ(a, b): return a.strip() != b.strip() return a != b - for model_id, override in config_model_map.items(): - if model_id in existing: + for key, override in config_model_map.items(): + if key in existing: for k, v in override.items(): - if k not in existing[model_id]: + if k not in existing[key]: continue - if values_differ(existing[model_id][k], v): + if values_differ(existing[key][k], v): log_func = logger.debug if k == "api_key" else logger.info log_func( - "Overriding field '%s' for model '%s' (was: %r → now: %r)", + "Overriding field '%s' for model '%s/%s' (was: %r → now: %r)", k, - model_id, - existing[model_id][k], + key[0], # provider + key[1], # id + existing[key][k], v, ) - existing[model_id][k] = v + existing[key][k] = v else: - logger.info("Adding new model from ConfigStore: %s", model_id) - existing[model_id] = override + logger.info("Adding new model from ConfigStore: %s/%s", key[0], key[1]) + existing[key] = override models_list = list(existing.values()) # Override with OS env vars (by API type) for model in models_list: - provider = model.get("provider", "") - model_id = model.get("id", "") - overridden = False - - if provider == "cohere" and os.getenv("COHERE_API_KEY"): - old_api_key = model.get("api_key", "") - new_api_key = os.environ["COHERE_API_KEY"] - if old_api_key != new_api_key: - # Exposes key if in DEBUG - 
logger.debug("Overriding 'api_key' for model '%s' with COHERE_API_KEY environment variable", model_id) - model["api_key"] = new_api_key - overridden = True - model["enabled"] = True - - elif provider == "oci" and os.getenv("OCI_GENAI_SERVICE_ENDPOINT"): - old_url = model.get("url", "") - new_url = os.environ["OCI_GENAI_SERVICE_ENDPOINT"] - if old_url != new_url: - logger.info( - "Overriding 'url' for model '%s' with OCI_GENAI_SERVICE_ENDPOINT environment variable", model_id - ) - model["url"] = new_url - overridden = True - model["enabled"] = True - - elif provider == "ollama" and os.getenv("ON_PREM_OLLAMA_URL"): - old_url = model.get("url", "") - new_url = os.environ["ON_PREM_OLLAMA_URL"] - if old_url != new_url: - logger.info("Overriding 'url' for model '%s' with ON_PREM_OLLAMA_URL environment variable", model_id) - model["url"] = new_url - overridden = True - model["enabled"] = True - - elif provider == "huggingface" and os.getenv("ON_PREM_HF_URL"): - old_url = model.get("url", "") - new_url = os.environ["ON_PREM_HF_URL"] - if old_url != new_url: - logger.info("Overriding 'url' for model '%s' with ON_PREM_HF_URL environment variable", model_id) - model["url"] = new_url - overridden = True - model["enabled"] = True - - if overridden: - logger.debug("Model '%s' updated via environment variable overrides.", model_id) + update_env_var(model, "cohere", "api_key", "COHERE_API_KEY") + update_env_var(model, "oci", "api_base", "OCI_GENAI_SERVICE_ENDPOINT") + update_env_var(model, "ollama", "api_base", "ON_PREM_OLLAMA_URL") + update_env_var(model, "huggingface", "api_base", "ON_PREM_HF_URL") + update_env_var(model, "meta-llama", "api_base", "ON_PREM_VLLM_URL") # Check URL accessible for enabled models and disable if not: url_access_cache = {} for model in models_list: - url = model["url"] + url = model["api_base"] if model["enabled"]: if url not in url_access_cache: logger.debug("Testing %s URL: %s", model["id"], url) diff --git a/src/server/bootstrap/oci.py b/src/server/bootstrap/oci.py index 2e5dfc3d..c40c297f 100644 --- a/src/server/bootstrap/oci.py +++ b/src/server/bootstrap/oci.py @@ -9,10 +9,8 @@ import oci from server.bootstrap.configfile import ConfigStore -import server.api.utils.oci as util_oci -import server.api.utils.models as util_models -import common.logging_config as logging_config +from common import logging_config from common.schema import OracleCloudSettings logger = logging_config.logging.getLogger("bootstrap.oci") @@ -110,21 +108,6 @@ def override(profile: dict, key: str, env_key: str, env: dict, overrides: dict, oci_config = OracleCloudSettings(**profile_data) oci_objects.append(oci_config) - if oci_config.auth_profile == oci.config.DEFAULT_PROFILE: - try: - oci_config.namespace = util_oci.get_namespace(oci_config) - except Exception: - logger.warning("Failed to get namespace for DEFAULT OCI profile") - continue - - # Attempt to load OCI GenAI Models after OCI and MODELs are Bootstrapped - try: - oci_config = [o for o in oci_objects if o.auth_profile == "DEFAULT"] - if oci_config: - util_models.create_genai_models(oci_config[0]) - except Exception as ex: - logger.info("Unable to bootstrap OCI GenAI Models: %s", str(ex)) - logger.debug("*** Bootstrapping OCI - End") return oci_objects diff --git a/src/server/bootstrap/prompts.py b/src/server/bootstrap/prompts.py index 03fec05d..2c27d799 100644 --- a/src/server/bootstrap/prompts.py +++ b/src/server/bootstrap/prompts.py @@ -8,7 +8,7 @@ from server.bootstrap.configfile import ConfigStore from common.schema import Prompt -import 
common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("bootstrap.prompts") diff --git a/src/server/bootstrap/settings.py b/src/server/bootstrap/settings.py index 50e219be..dc5d8bb8 100644 --- a/src/server/bootstrap/settings.py +++ b/src/server/bootstrap/settings.py @@ -7,7 +7,7 @@ from server.bootstrap.configfile import ConfigStore from common.schema import Settings -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("bootstrap.settings") diff --git a/src/server/patches/litellm_patch.py b/src/server/patches/litellm_patch.py index 89251046..de15f0fa 100644 --- a/src/server/patches/litellm_patch.py +++ b/src/server/patches/litellm_patch.py @@ -13,7 +13,7 @@ from litellm.types.utils import ModelResponse from httpx._models import Response -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("patches.litellm_patch") @@ -40,7 +40,8 @@ def custom_transform_response( api_key: Optional[str] = None, json_mode: Optional[bool] = None, ): - """Custom transform response from + """ + Custom transform response from .venv/lib/python3.11/site-packages/litellm/llms/ollama/completion/transformation.py """ logger.info("Custom transform_response is running") diff --git a/src/server/wip/settings.py b/src/server/wip/settings.py index 315916a0..67691b51 100644 --- a/src/server/wip/settings.py +++ b/src/server/wip/settings.py @@ -9,7 +9,7 @@ from oracledb import Connection import server.api.utils.databases as databases from common.schema import ClientIdType -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("server.api.utils.settings") From e71d91318bddf520bde8e51335049a9ec4bf71dc Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 3 Sep 2025 09:54:23 +0100 Subject: [PATCH 27/28] Small fix to database configs --- src/client/content/config/tabs/databases.py | 13 +++++++------ src/client/content/tools/tabs/split_embed.py | 2 +- src/launch_server.py | 4 ++-- src/server/api/utils/databases.py | 2 +- src/server/mcp/__init__.py | 4 ++-- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/client/content/config/tabs/databases.py b/src/client/content/config/tabs/databases.py index 6d393b0a..764746a3 100644 --- a/src/client/content/config/tabs/databases.py +++ b/src/client/content/config/tabs/databases.py @@ -22,16 +22,17 @@ ##################################################### # Functions ##################################################### -def get_databases(validate: bool = False, force: bool = False) -> None: +def get_databases(force: bool = False) -> None: """Get Databases from API Server""" if force or "database_configs" not in state or not state.database_configs: try: - logger.info("Refreshing state.database_configs") # Validation will be done on currently configured client database # validation includes new vector_stores, etc. 
- if validate: - client_database = state.client_settings.get("database", {}).get("alias", {}) - _ = api_call.get(endpoint=f"v1/databases/{client_database}") + client_database = state.client_settings.get("database", {}).get("alias", {}) + logger.info("Validating Database: %s", client_database) + _ = api_call.get(endpoint=f"v1/databases/{client_database}") + # Update the state + logger.info("Refreshing state.database_configs") state.database_configs = api_call.get(endpoint="v1/databases") except api_call.ApiError as ex: logger.error("Unable to populate state.database_configs: %s", ex) @@ -66,7 +67,7 @@ def patch_database(name: str, supplied: dict, connected: bool) -> bool: def drop_vs(vs: dict) -> None: """Drop a Vector Storage Table""" api_call.delete(endpoint=f"v1/embed/{vs['vector_store']}") - get_databases(validate=True, force=True) + get_databases(force=True) def select_ai_profile() -> None: diff --git a/src/client/content/tools/tabs/split_embed.py b/src/client/content/tools/tabs/split_embed.py index d984841b..7db4640c 100644 --- a/src/client/content/tools/tabs/split_embed.py +++ b/src/client/content/tools/tabs/split_embed.py @@ -391,7 +391,7 @@ def display_split_embed() -> None: ) st.success(f"Vector Store Populated: {response['message']}", icon="✅") # Refresh database_configs state to reflect new vector stores - get_databases(validate=True, force=True) + get_databases(force=True) except api_call.ApiError as ex: st.error(ex, icon="🚨") diff --git a/src/launch_server.py b/src/launch_server.py index 3bd28ba3..fa6da21e 100644 --- a/src/launch_server.py +++ b/src/launch_server.py @@ -83,12 +83,12 @@ def get_pid_using_port(port: int) -> int: client_args = [sys.executable, __file__, "--port", str(port)] if logfile: - log_file = open(f"apiserver_{port}.log", "a", encoding="utf-8") + log_file = open(f"apiserver_{port}.log", "a", encoding="utf-8") # pylint: disable=consider-using-with stdout = stderr = log_file else: stdout = stderr = subprocess.PIPE - process = subprocess.Popen(client_args, stdout=stdout, stderr=stderr) + process = subprocess.Popen(client_args, stdout=stdout, stderr=stderr) # pylint: disable=consider-using-with logger.info("Server started on port %i with PID %i", port, process.pid) return process.pid diff --git a/src/server/api/utils/databases.py b/src/server/api/utils/databases.py index 55f7c649..9d97a225 100644 --- a/src/server/api/utils/databases.py +++ b/src/server/api/utils/databases.py @@ -228,7 +228,7 @@ def get_databases( for db in databases: try: db_conn = connect(config=db) - except (ValueError, PermissionError, ConnectionError): + except (ValueError, PermissionError, ConnectionError, LookupError): continue db.vector_stores = _get_vs(db_conn) db.selectai = _selectai_enabled(db_conn) diff --git a/src/server/mcp/__init__.py b/src/server/mcp/__init__.py index 33f21577..8cb3ea53 100644 --- a/src/server/mcp/__init__.py +++ b/src/server/mcp/__init__.py @@ -10,7 +10,7 @@ from fastapi import APIRouter from fastmcp import FastMCP -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("mcp.__init__.py") @@ -70,4 +70,4 @@ async def register_all_mcp(mcp: FastMCP, auth: APIRouter): await _discover_and_register("server.mcp.tools", mcp=mcp, auth=auth) await _discover_and_register("server.mcp.proxies", mcp=mcp) # await _discover_and_register("server.mcp.prompts", mcp=mcp) - logger.info("Finished Registering MCP Components") \ No newline at end of file + logger.info("Finished Registering MCP Components") From 
6bac754c9681dc4bed300374cd1b195cd882621d Mon Sep 17 00:00:00 2001 From: gotsysdba Date: Wed, 3 Sep 2025 11:39:46 +0100 Subject: [PATCH 28/28] from pop --- src/client/content/chatbot.py | 254 +++------- src/client/content/chatbot.py.mcp | 300 ++++++++++++ src/client/content/config/tabs/mcp.py | 18 +- src/hello_world.py | 81 ++++ src/launch_server.py | 11 + src/server/agents/chatbot.py | 521 +++++++++------------ src/server/api/core/mcp.py | 8 +- src/server/api/core/models.py | 10 +- src/server/api/core/settings.py | 21 +- src/server/api/utils/mcp.py | 130 ++--- src/server/api/v1/chat.py | 14 +- src/server/api/v1/mcp.py | 21 +- src/server/bootstrap/mcp.py | 26 +- src/server/bootstrap/oci.py | 2 +- src/server/mcp/graph.py | 171 +++++++ src/server/mcp/proxies/sqlcl.py | 13 +- tests/unit/server/api/utils/test_models.py | 36 ++ 17 files changed, 1036 insertions(+), 601 deletions(-) create mode 100644 src/client/content/chatbot.py.mcp create mode 100644 src/hello_world.py create mode 100644 src/server/mcp/graph.py create mode 100644 tests/unit/server/api/utils/test_models.py diff --git a/src/client/content/chatbot.py b/src/client/content/chatbot.py index c79146f7..d1f7f6b1 100644 --- a/src/client/content/chatbot.py +++ b/src/client/content/chatbot.py @@ -2,11 +2,11 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. -This file merges the Streamlit Chatbot GUI with the MCPClient for a complete, -runnable example demonstrating their integration. +Session States Set: +- user_client: Stores the Client """ -# spell-checker:ignore streamlit, oraclevs, selectai, langgraph, prebuilt +# spell-checker:ignore streamlit, oraclevs, selectai import asyncio import inspect import json @@ -16,14 +16,9 @@ from streamlit import session_state as state from client.content.config.tabs.models import get_models - -import client.utils.st_common as st_common -import client.utils.api_call as api_call - +from client.utils import st_common, api_call, client from client.utils.st_footer import render_chat_footer -import common.logging_config as logging_config -from client.mcp.client import MCPClient -from pathlib import Path +from common import logging_config logger = logging_config.logging.getLogger("client.content.chatbot") @@ -68,224 +63,89 @@ async def main() -> None: ######################################################################### # Sidebar Settings ######################################################################### + # Get a list of available language models, if none, then stop ll_models_enabled = st_common.enabled_models_lookup("ll") if not ll_models_enabled: st.error("No language models are configured and/or enabled. Disabling Client.", icon="🛑") st.stop() + # the sidebars will set this to False if not everything is configured. 
state.enable_client = True st_common.tools_sidebar() st_common.history_sidebar() st_common.ll_sidebar() st_common.selectai_sidebar() st_common.vector_search_sidebar() + # Stop when sidebar configurations not set if not state.enable_client: st.stop() ######################################################################### # Chatty-Bot Centre ######################################################################### - - if "messages" not in state: - state.messages = [] - + # Establish the Client + if "user_client" not in state: + state.user_client = client.Client( + server=state.server, + settings=state.client_settings, + timeout=1200, + ) + user_client: client.Client = state.user_client + + history = await user_client.get_history() st.chat_message("ai").write("Hello, how can I help you?") - - for message in state.messages: - role = message.get("role") - display_role = "" - if role in ("human", "user"): - display_role = "human" - elif role in ("ai", "assistant"): - if not message.get("content") and not message.get("tool_trace"): - continue - display_role = "assistant" - else: + vector_search_refs = [] + for message in history or []: + if not message["content"]: continue - - with st.chat_message(display_role): - if "tool_trace" in message and message["tool_trace"]: - for tool_call in message["tool_trace"]: - with st.expander(f"🛠️ **Tool Call:** `{tool_call['name']}`", expanded=False): - st.text("Arguments:") - st.code(json.dumps(tool_call.get("args", {}), indent=2), language="json") - if "error" in tool_call: - st.text("Error:") - st.error(tool_call["error"]) - else: - st.text("Result:") - st.code(tool_call.get("result", ""), language="json") - if message.get("content"): - # Display file attachments if present - if "attachments" in message and message["attachments"]: - for file in message["attachments"]: - # Show appropriate icon based on file type - if file["type"].startswith("image/"): - st.image(file["preview"], use_container_width=True) - st.markdown(f"🖼️ **{file['name']}** ({file['size'] // 1024} KB)") - elif file["type"] == "application/pdf": - st.markdown(f"📄 **{file['name']}** ({file['size'] // 1024} KB)") - elif file["type"] in ("text/plain", "text/markdown"): - st.markdown(f"📝 **{file['name']}** ({file['size'] // 1024} KB)") - else: - st.markdown(f"📎 **{file['name']}** ({file['size'] // 1024} KB)") - - # Display message content - handle both string and list formats - content = message.get("content") + if message["role"] == "tool" and message["name"] == "oraclevs_tool": + vector_search_refs = json.loads(message["content"]) + if message["role"] in ("ai", "assistant"): + with st.chat_message("ai"): + st.markdown(message["content"]) + if vector_search_refs: + show_vector_search_refs(vector_search_refs) + vector_search_refs = [] + elif message["role"] in ("human", "user"): + with st.chat_message("human"): + content = message["content"] if isinstance(content, list): - # Extract and display only text parts - text_parts = [part["text"] for part in content if part["type"] == "text"] - st.markdown("\n".join(text_parts)) + for part in content: + if part["type"] == "text": + st.write(part["text"]) + elif part["type"] == "image_url" and part["image_url"]["url"].startswith("data:image"): + st.image(part["image_url"]["url"]) else: - st.markdown(content) + st.write(content) sys_prompt = state.client_settings["prompts"]["sys"] render_chat_footer() - if human_request := st.chat_input( f"Ask your question here... 
(current prompt: {sys_prompt})", accept_file=True, - file_type=["jpg", "jpeg", "png", "pdf", "txt", "docx"], - key=f"chat_input_{len(state.messages)}", + file_type=["jpg", "jpeg", "png"], ): - # Process message with potential file attachments - message = {"role": "user", "content": human_request.text} - - # Handle file attachments - if hasattr(human_request, "files") and human_request.files: - # Store file information separately from content - message["attachments"] = [] - for file in human_request.files: - file_bytes = file.read() - file_b64 = base64.b64encode(file_bytes).decode("utf-8") - message["attachments"].append( - { - "name": file.name, - "type": file.type, - "size": len(file_bytes), - "data": file_b64, - "preview": f"data:{file.type};base64,{file_b64}" if file.type.startswith("image/") else None, - } - ) - - state.messages.append(message) - st.rerun() - if state.messages and state.messages[-1]["role"] == "user": + st.chat_message("human").write(human_request.text) + file_b64 = None + if human_request["files"]: + file = human_request["files"][0] + file_bytes = file.read() + file_b64 = base64.b64encode(file_bytes).decode("utf-8") try: - with st.chat_message("ai"): - with st.spinner("Thinking..."): - client_settings_for_request = state.client_settings.copy() - model_id = client_settings_for_request.get("ll_model", {}).get("model") - if model_id: - all_model_configs = st_common.enabled_models_lookup("ll") - model_config = all_model_configs.get(model_id, {}) - if "api_key" in model_config: - if "ll_model" not in client_settings_for_request: - client_settings_for_request["ll_model"] = {} - client_settings_for_request["ll_model"]["api_key"] = model_config["api_key"] - - # Prepare message history for backend - message_history = [] - for msg in state.messages: - # Create a copy of the message - processed_msg = msg.copy() - - # If there are attachments, include them in the content - if "attachments" in msg and msg["attachments"]: - # Start with the text content - text_content = msg["content"] - - # Handle list content format (from OpenAI API) - if isinstance(text_content, list): - text_parts = [part["text"] for part in text_content if part["type"] == "text"] - text_content = "\n".join(text_parts) - - # Create a list to hold structured content parts - content_list = [{"type": "text", "text": text_content}] - - non_image_references = [] - for attachment in msg["attachments"]: - if attachment["type"].startswith("image/"): - # Only add image URLs for user messages - if msg["role"] in ("human", "user"): - # Normalize image MIME types for compatibility - mime_type = attachment["type"] - if mime_type == "image/jpg": - mime_type = "image/jpeg" - - content_list.append( - { - "type": "image_url", - "image_url": { - "url": f"data:{mime_type};base64,{attachment['data']}", - "detail": "low", - }, - } - ) - else: - # Handle non-image files as text references - non_image_references.append( - f"\n[File: {attachment['name']} ({attachment['size'] // 1024} KB)]" - ) - - # If there were non-image files, append their references to the main text part - if non_image_references: - content_list[0]["text"] += "".join(non_image_references) - - processed_msg["content"] = content_list - # Convert list content to string format - elif isinstance(msg.get("content"), list): - text_parts = [part["text"] for part in msg["content"] if part["type"] == "text"] - processed_msg["content"] = str("\n".join(text_parts)) - # Otherwise, ensure content is a string - else: - processed_msg["content"] = str(msg.get("content", "")) - - 
message_history.append(processed_msg) - - async with MCPClient(client_settings=client_settings_for_request) as mcp_client: - final_text, tool_trace, new_history = await mcp_client.invoke(message_history=message_history) - - # Update the history for display. - # Keep the original message structure with attachments - for i in range(len(new_history) - 1, -1, -1): - if new_history[i].get("role") == "assistant": - # Preserve any attachments from the user message - user_message = state.messages[-1] - if "attachments" in user_message: - new_history[-1]["attachments"] = user_message["attachments"] - - new_history[i]["content"] = final_text - new_history[i]["tool_trace"] = tool_trace - break - - state.messages = new_history - st.rerun() - - except Exception as e: - logger.error("Exception during invoke call:", exc_info=True) - # Extract just the error message - error_msg = str(e) - - # Check if it's a file-related error - if "file" in error_msg.lower() or "image" in error_msg.lower() or "content" in error_msg.lower(): - st.error(f"Error: {error_msg}") - - # Add a button to remove files and retry - if st.button("Remove files and retry", key="remove_files_retry"): - # Remove attachments from the latest message - if state.messages and "attachments" in state.messages[-1]: - del state.messages[-1]["attachments"] - st.rerun() - else: - st.error(f"Error: {error_msg}") - - if st.button("Retry", key="reload_chatbot_error"): - if state.messages and state.messages[-1]["role"] == "user": - state.messages.pop() + message_placeholder = st.chat_message("ai").empty() + full_answer = "" + async for chunk in user_client.stream(message=human_request.text, image_b64=file_b64): + full_answer += chunk + message_placeholder.markdown(full_answer) + # Stream until we hit the end then refresh to replace with history + st.rerun() + except Exception: + message_placeholder.markdown("An unexpected error occurred, please retry your request.") + if st.button("Retry", key="reload_chatbot"): + st_common.clear_state_key("user_client") st.rerun() -if __name__ == "__main__" or ("page" in inspect.stack()[1].filename if inspect.stack() else False): +if __name__ == "__main__" or "page.py" in inspect.stack()[1].filename: try: asyncio.run(main()) except ValueError as ex: diff --git a/src/client/content/chatbot.py.mcp b/src/client/content/chatbot.py.mcp new file mode 100644 index 00000000..c79146f7 --- /dev/null +++ b/src/client/content/chatbot.py.mcp @@ -0,0 +1,300 @@ +""" +Copyright (c) 2024, 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. + +This file merges the Streamlit Chatbot GUI with the MCPClient for a complete, +runnable example demonstrating their integration. 
+""" + +# spell-checker:ignore streamlit, oraclevs, selectai, langgraph, prebuilt +import asyncio +import inspect +import json +import base64 + +import streamlit as st +from streamlit import session_state as state + +from client.content.config.tabs.models import get_models + +import client.utils.st_common as st_common +import client.utils.api_call as api_call + +from client.utils.st_footer import render_chat_footer +import common.logging_config as logging_config +from client.mcp.client import MCPClient +from pathlib import Path + +logger = logging_config.logging.getLogger("client.content.chatbot") + + +############################################################################# +# Functions +############################################################################# +def show_vector_search_refs(context): + """When Vector Search Content Found, show the references""" + st.markdown("**References:**") + ref_src = set() + ref_cols = st.columns([3, 3, 3]) + # Create a button in each column + for i, (ref_col, chunk) in enumerate(zip(ref_cols, context[0])): + with ref_col.popover(f"Reference: {i + 1}"): + chunk = context[0][i] + logger.debug("Chunk Content: %s", chunk) + st.subheader("Reference Text", divider="red") + st.markdown(chunk["page_content"]) + try: + ref_src.add(chunk["metadata"]["filename"]) + st.subheader("Metadata", divider="red") + st.markdown(f"File: {chunk['metadata']['source']}") + st.markdown(f"Chunk: {chunk['metadata']['page']}") + except KeyError: + logger.error("Chunk Metadata NOT FOUND!!") + + for link in ref_src: + st.markdown("- " + link) + st.markdown(f"**Notes:** Vector Search Query - {context[1]}") + + +############################################################################# +# MAIN +############################################################################# +async def main() -> None: + """Streamlit GUI""" + try: + get_models() + except api_call.ApiError: + st.stop() + ######################################################################### + # Sidebar Settings + ######################################################################### + ll_models_enabled = st_common.enabled_models_lookup("ll") + if not ll_models_enabled: + st.error("No language models are configured and/or enabled. 
Disabling Client.", icon="🛑") + st.stop() + state.enable_client = True + st_common.tools_sidebar() + st_common.history_sidebar() + st_common.ll_sidebar() + st_common.selectai_sidebar() + st_common.vector_search_sidebar() + if not state.enable_client: + st.stop() + + ######################################################################### + # Chatty-Bot Centre + ######################################################################### + + if "messages" not in state: + state.messages = [] + + st.chat_message("ai").write("Hello, how can I help you?") + + for message in state.messages: + role = message.get("role") + display_role = "" + if role in ("human", "user"): + display_role = "human" + elif role in ("ai", "assistant"): + if not message.get("content") and not message.get("tool_trace"): + continue + display_role = "assistant" + else: + continue + + with st.chat_message(display_role): + if "tool_trace" in message and message["tool_trace"]: + for tool_call in message["tool_trace"]: + with st.expander(f"🛠️ **Tool Call:** `{tool_call['name']}`", expanded=False): + st.text("Arguments:") + st.code(json.dumps(tool_call.get("args", {}), indent=2), language="json") + if "error" in tool_call: + st.text("Error:") + st.error(tool_call["error"]) + else: + st.text("Result:") + st.code(tool_call.get("result", ""), language="json") + if message.get("content"): + # Display file attachments if present + if "attachments" in message and message["attachments"]: + for file in message["attachments"]: + # Show appropriate icon based on file type + if file["type"].startswith("image/"): + st.image(file["preview"], use_container_width=True) + st.markdown(f"🖼️ **{file['name']}** ({file['size'] // 1024} KB)") + elif file["type"] == "application/pdf": + st.markdown(f"📄 **{file['name']}** ({file['size'] // 1024} KB)") + elif file["type"] in ("text/plain", "text/markdown"): + st.markdown(f"📝 **{file['name']}** ({file['size'] // 1024} KB)") + else: + st.markdown(f"📎 **{file['name']}** ({file['size'] // 1024} KB)") + + # Display message content - handle both string and list formats + content = message.get("content") + if isinstance(content, list): + # Extract and display only text parts + text_parts = [part["text"] for part in content if part["type"] == "text"] + st.markdown("\n".join(text_parts)) + else: + st.markdown(content) + + sys_prompt = state.client_settings["prompts"]["sys"] + render_chat_footer() + + if human_request := st.chat_input( + f"Ask your question here... 
(current prompt: {sys_prompt})", + accept_file=True, + file_type=["jpg", "jpeg", "png", "pdf", "txt", "docx"], + key=f"chat_input_{len(state.messages)}", + ): + # Process message with potential file attachments + message = {"role": "user", "content": human_request.text} + + # Handle file attachments + if hasattr(human_request, "files") and human_request.files: + # Store file information separately from content + message["attachments"] = [] + for file in human_request.files: + file_bytes = file.read() + file_b64 = base64.b64encode(file_bytes).decode("utf-8") + message["attachments"].append( + { + "name": file.name, + "type": file.type, + "size": len(file_bytes), + "data": file_b64, + "preview": f"data:{file.type};base64,{file_b64}" if file.type.startswith("image/") else None, + } + ) + + state.messages.append(message) + st.rerun() + if state.messages and state.messages[-1]["role"] == "user": + try: + with st.chat_message("ai"): + with st.spinner("Thinking..."): + client_settings_for_request = state.client_settings.copy() + model_id = client_settings_for_request.get("ll_model", {}).get("model") + if model_id: + all_model_configs = st_common.enabled_models_lookup("ll") + model_config = all_model_configs.get(model_id, {}) + if "api_key" in model_config: + if "ll_model" not in client_settings_for_request: + client_settings_for_request["ll_model"] = {} + client_settings_for_request["ll_model"]["api_key"] = model_config["api_key"] + + # Prepare message history for backend + message_history = [] + for msg in state.messages: + # Create a copy of the message + processed_msg = msg.copy() + + # If there are attachments, include them in the content + if "attachments" in msg and msg["attachments"]: + # Start with the text content + text_content = msg["content"] + + # Handle list content format (from OpenAI API) + if isinstance(text_content, list): + text_parts = [part["text"] for part in text_content if part["type"] == "text"] + text_content = "\n".join(text_parts) + + # Create a list to hold structured content parts + content_list = [{"type": "text", "text": text_content}] + + non_image_references = [] + for attachment in msg["attachments"]: + if attachment["type"].startswith("image/"): + # Only add image URLs for user messages + if msg["role"] in ("human", "user"): + # Normalize image MIME types for compatibility + mime_type = attachment["type"] + if mime_type == "image/jpg": + mime_type = "image/jpeg" + + content_list.append( + { + "type": "image_url", + "image_url": { + "url": f"data:{mime_type};base64,{attachment['data']}", + "detail": "low", + }, + } + ) + else: + # Handle non-image files as text references + non_image_references.append( + f"\n[File: {attachment['name']} ({attachment['size'] // 1024} KB)]" + ) + + # If there were non-image files, append their references to the main text part + if non_image_references: + content_list[0]["text"] += "".join(non_image_references) + + processed_msg["content"] = content_list + # Convert list content to string format + elif isinstance(msg.get("content"), list): + text_parts = [part["text"] for part in msg["content"] if part["type"] == "text"] + processed_msg["content"] = str("\n".join(text_parts)) + # Otherwise, ensure content is a string + else: + processed_msg["content"] = str(msg.get("content", "")) + + message_history.append(processed_msg) + + async with MCPClient(client_settings=client_settings_for_request) as mcp_client: + final_text, tool_trace, new_history = await mcp_client.invoke(message_history=message_history) + + # Update the history for 
display. + # Keep the original message structure with attachments + for i in range(len(new_history) - 1, -1, -1): + if new_history[i].get("role") == "assistant": + # Preserve any attachments from the user message + user_message = state.messages[-1] + if "attachments" in user_message: + new_history[-1]["attachments"] = user_message["attachments"] + + new_history[i]["content"] = final_text + new_history[i]["tool_trace"] = tool_trace + break + + state.messages = new_history + st.rerun() + + except Exception as e: + logger.error("Exception during invoke call:", exc_info=True) + # Extract just the error message + error_msg = str(e) + + # Check if it's a file-related error + if "file" in error_msg.lower() or "image" in error_msg.lower() or "content" in error_msg.lower(): + st.error(f"Error: {error_msg}") + + # Add a button to remove files and retry + if st.button("Remove files and retry", key="remove_files_retry"): + # Remove attachments from the latest message + if state.messages and "attachments" in state.messages[-1]: + del state.messages[-1]["attachments"] + st.rerun() + else: + st.error(f"Error: {error_msg}") + + if st.button("Retry", key="reload_chatbot_error"): + if state.messages and state.messages[-1]["role"] == "user": + state.messages.pop() + st.rerun() + + +if __name__ == "__main__" or ("page" in inspect.stack()[1].filename if inspect.stack() else False): + try: + asyncio.run(main()) + except ValueError as ex: + logger.exception("Bug detected: %s", ex) + st.error("It looks like you found a bug; please open an issue", icon="🛑") + st.stop() + except IndexError as ex: + logger.exception("Unable to contact the server: %s", ex) + st.error("Unable to contact the server, is it running?", icon="🚨") + if st.button("Retry", key="reload_chatbot"): + st_common.clear_state_key("user_client") + st.rerun() diff --git a/src/client/content/config/tabs/mcp.py b/src/client/content/config/tabs/mcp.py index 51d9a446..75621ba5 100644 --- a/src/client/content/config/tabs/mcp.py +++ b/src/client/content/config/tabs/mcp.py @@ -2,16 +2,16 @@ Copyright (c) 2024, 2025, Oracle and/or its affiliates. Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl. 
""" + # spell-checker:ignore selectbox healthz import json import streamlit as st from streamlit import session_state as state -import client.utils.api_call as api_call -import client.utils.st_common as st_common +from client.utils import api_call, st_common -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("client.content.config.tabs.mcp") @@ -27,16 +27,18 @@ def get_mcp_status() -> dict: logger.error("Unable to get MCP Status: %s", ex) return {} + def get_mcp_client() -> dict: """Get MCP Client Configuration""" try: - params = {"server": {state.server['url']} , "port": {state.server['port']}} - mcp_client = api_call.get(endpoint="v1/mcp/client", params=params) + params = {"server": {state.server["url"]}, "port": {state.server["port"]}} + mcp_client = api_call.get(endpoint="v1/mcp/client", params=params) return json.dumps(mcp_client, indent=2) except api_call.ApiError as ex: - logger.error("Unable to get MCP Status: %s", ex) + logger.error("Unable to get MCP Client: %s", ex) return {} + def get_mcp(force: bool = False) -> list[dict]: """Get MCP configs from API Server""" if force or "mcp_configs" not in state or not state.mcp_configs: @@ -145,8 +147,8 @@ def display_mcp() -> None: mcp_status = get_mcp_status() if mcp_status.get("status") == "ready": st.markdown(f""" - The {mcp_status['name']} is running. - **Version**: {mcp_status['version']} + The {mcp_status["name"]} is running. + **Version**: {mcp_status["version"]} """) with st.expander("Client Configuration"): st.code(get_mcp_client(), language="json") diff --git a/src/hello_world.py b/src/hello_world.py new file mode 100644 index 00000000..2028682b --- /dev/null +++ b/src/hello_world.py @@ -0,0 +1,81 @@ +import asyncio +from mcp import ClientSession +from mcp.client.streamable_http import streamablehttp_client + +from langgraph.prebuilt import create_react_agent +from langchain_mcp_adapters.client import MultiServerMCPClient +from langchain_mcp_adapters.tools import load_mcp_tools + + +client = MultiServerMCPClient( + { + "optimizer": { + "transport": "streamable_http", + "url": "http://localhost:8000/mcp/", + "headers": {"Authorization": "Bearer demo_api_key"}, + } + } +) +async def call_tool(name: str): + tools = await client.get_tools() + agent = create_react_agent("openai:gpt-4o-mini", tools) + math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"}) + print(math_response) + +# async def call_tool(name: str): +# async with client.session("optimizer") as session: + # tools = await load_mcp_tools(session) + # agent = create_react_agent("openai:gpt-4o-mini", tools) + # # math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"}) + # # weather_response = await agent.ainvoke({"messages": "what is the weather in nyc?"}) + # database_response = await agent.ainvoke({"messages": "connect to OPTIMIZER_DEFAULT"}) + # database_response = await agent.ainvoke({"messages": "show me a list of table names"}) + # print(database_response) + # # print(weather_response) + +asyncio.run(call_tool("Ford")) + +# async def call_tool(name: str): +# async with streamablehttp_client(config) as (read, write, _): +# async with ClientSession(read, write) as session: +# # Initialize the connection +# await session.initialize() + +# # Get tools +# tools = await load_mcp_tools(session) +# agent = create_react_agent("openai:gpt-4o-mini", tools) +# math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"}) +# print(math_response) + + +# 
asyncio.run(call_tool("Ford"))
+
+# client = Client(config)
+# async def call_tool(name: str):
+#     async with client:
+#         print(f"Connected: {client.is_connected()}")
+#         tools = await client.load_mcp_tools(client)
+#         # agent = create_react_agent("openai:gpt-4o-mini", tools)
+#         # math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"})
+#         # print(math_response)
+#         # result = await client.call_tool("optimizer_greet", {"name": name})
+#         # print(result)
+#         # result = await client.call_tool("optimizer_multiply", {"a": 5, "b": 3})
+#         # print(result)
+
+
+# from mcp import ClientSession
+# from mcp.client.streamable_http import streamablehttp_client
+
+# from langgraph.prebuilt import create_react_agent
+# from langchain_mcp_adapters.tools import load_mcp_tools
+
+# async with streamablehttp_client("http://localhost:8000/mcp/") as (read, write, _):
+#     async with ClientSession(read, write) as session:
+#         # Initialize the connection
+#         await session.initialize()
+
+#         # Get tools
+#         tools = await load_mcp_tools(session)
+#         agent = create_react_agent("openai:gpt-4.1", tools)
+#         math_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"})
diff --git a/src/launch_server.py b/src/launch_server.py
index fa6da21e..a14d8ca4 100644
--- a/src/launch_server.py
+++ b/src/launch_server.py
@@ -31,6 +31,15 @@ import subprocess
 import sys
 from typing import Annotated
+from pathlib import Path
+import uvicorn
+
+from fastapi import FastAPI, APIRouter, Depends, HTTPException, status
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+from fastmcp import FastMCP, settings
+from fastmcp.server.auth import StaticTokenVerifier
+from langgraph.checkpoint.memory import InMemorySaver
+
 
 # Third Party
 import psutil
@@ -49,6 +58,8 @@
 logger = logging_config.logging.getLogger("launch_server")
 
+# Establish LangGraph Short-Term Memory (thread-level persistence)
+graph_memory = InMemorySaver()
 
 ##########################################
 # Client Process Control
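The InMemorySaver established above provides LangGraph's thread-level (short-term) persistence: checkpoints are held in process memory and keyed by the thread_id supplied in the run config. A minimal, self-contained sketch of the pattern using a hypothetical echo graph (illustrative only, not code from this patch):

    from langgraph.checkpoint.memory import InMemorySaver
    from langgraph.graph import StateGraph, START, END, MessagesState

    def echo(state: MessagesState) -> dict:
        # Reply by echoing the latest user message
        return {"messages": [("assistant", f"echo: {state['messages'][-1].content}")]}

    builder = StateGraph(MessagesState)
    builder.add_node("echo", echo)
    builder.add_edge(START, "echo")
    builder.add_edge("echo", END)
    graph = builder.compile(checkpointer=InMemorySaver())

    cfg = {"configurable": {"thread_id": "chat-1"}}
    graph.invoke({"messages": [("user", "hello")]}, cfg)
    graph.invoke({"messages": [("user", "again")]}, cfg)
    # Same thread_id, so the full history is retained across invocations
    print(len(graph.get_state(cfg).values["messages"]))  # 4 messages

A different thread_id starts from an empty state; nothing survives a process restart, which is why this is "short-term" memory.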
""" -# spell-checker:ignore langgraph, oraclevs, checkpointer, ainvoke -# spell-checker:ignore vectorstore, vectorstores, oraclevs, mult, selectai +# spell-checker:ignore acompletion checkpointer litellm mult oraclevs vectorstores selectai -from datetime import datetime, timezone -from typing import Literal -import json import copy import decimal +import json +from typing import Literal + +from langgraph.checkpoint.memory import MemorySaver +from langgraph.config import get_stream_writer +from langgraph.graph import StateGraph, START, END, MessagesState from langchain_core.documents.base import Document -from langchain_core.messages import SystemMessage, ToolMessage -from langchain_core.output_parsers import StrOutputParser, PydanticOutputParser +from langchain_core.messages import AIMessage, SystemMessage, ToolMessage +from langchain_core.messages.utils import convert_to_openai_messages from langchain_core.prompts import PromptTemplate from langchain_core.runnables import RunnableConfig + from langchain_community.vectorstores.oraclevs import OracleVS -from langgraph.checkpoint.memory import MemorySaver -from langgraph.graph import MessagesState, StateGraph, START, END +from litellm import acompletion, completion +from litellm.exceptions import APIConnectionError -from pydantic import BaseModel, Field +from server.api.utils.databases import execute_sql -from server.api.core.databases import execute_sql -from common.schema import ChatResponse, ChatUsage, ChatChoices, ChatMessage from common import logging_config logger = logging_config.logging.getLogger("server.agents.chatbot") -############################################################################# -# AGENT STATE -############################################################################# -class AgentState(MessagesState): +class DecimalEncoder(json.JSONEncoder): + """Used with json.dumps to encode decimals""" + + def default(self, o): + if isinstance(o, decimal.Decimal): + return str(o) + return super().default(o) + + +class OptimizerState(MessagesState): """Establish our Agent State Machine""" - logger.info("Establishing Agent State") - final_response: ChatResponse # OpenAI Response cleaned_messages: list # Messages w/o VS Results - context_input: str # Contextualized User Input + context_input: str # Contextualized User Input (for VS) documents: dict # VectorStore documents + final_response: dict # OpenAI Response ############################################################################# # Functions ############################################################################# -def get_messages(state: AgentState, config: RunnableConfig) -> list: +def clean_messages(state: OptimizerState, config: RunnableConfig) -> list: """Return a list of messages that will be passed to the model for completion Filter out old VS documents to avoid blowing-out the context window - Leave the state as is for GUI functionality""" + Leave the state as is (deepcopy) for GUI functionality""" + use_history = config["metadata"]["use_history"] - # If user decided for no history, only take the last message - state_messages = state["messages"] if use_history else state["messages"][-1:] + state_messages = copy.deepcopy(state.get("messages", [])) + if state_messages: + # If user decided for no history, only take the last message + state_messages = state_messages if use_history else state_messages[-1:] - messages = [] - for msg in state_messages: - if isinstance(msg, SystemMessage): - continue - if isinstance(msg, ToolMessage): - if messages: # Check if 
there are any messages in the list - messages.pop() # Remove the last appended message - continue - messages.append(msg) + # Remove System Prompt from top + if isinstance(state_messages[0], SystemMessage): + state_messages.pop(0) - # insert the system prompt; remaining messages cleaned - if config["metadata"]["sys_prompt"].prompt: - messages.insert(0, SystemMessage(content=config["metadata"]["sys_prompt"].prompt)) + # Remove ToolCalls + state_messages = [msg for msg in state_messages if not isinstance(msg, ToolMessage)] - return messages + return state_messages -def document_formatter(rag_context) -> str: - """Extract the Vector Search Documents and format into a string""" - logger.info("Extracting chunks from Vector Search Retrieval") - logger.debug("Vector Search Context: %s", rag_context) - chunks = "\n\n".join([doc["page_content"] for doc in rag_context]) - return chunks +def use_tool(_, config: RunnableConfig) -> Literal["vs_retrieve", "selectai_completion", "stream_completion"]: + """Conditional edge to determine if using SelectAI, Vector Search or not""" + selectai_enabled = config["metadata"]["selectai"].enabled + if selectai_enabled: + logger.info("Invoking Chatbot with SelectAI: %s", selectai_enabled) + return "selectai_completion" + enabled = config["metadata"]["vector_search"].enabled + if enabled: + logger.info("Invoking Chatbot with Vector Search: %s", enabled) + return "vs_retrieve" -class DecimalEncoder(json.JSONEncoder): - """Used with json.dumps to encode decimals""" + return "stream_completion" - def default(self, o): - if isinstance(o, decimal.Decimal): - return str(o) - return super().default(o) +def rephrase(state: OptimizerState, config: RunnableConfig) -> str: + """Take our contextualization prompt and reword the last user prompt""" + ctx_prompt = config.get("metadata", {}).get("ctx_prompt") + retrieve_question = state["messages"][-1].content -############################################################################# -# NODES and EDGES -############################################################################# -def respond(state: AgentState, config: RunnableConfig) -> ChatResponse: - """Respond in OpenAI Compatible return""" - ai_message = state["messages"][-1] - logger.debug("Formatting Response to OpenAI compatible message: %s", repr(ai_message)) - model_id = config["metadata"]["model_id"] - if "model_id" in ai_message.response_metadata: - ai_metadata = ai_message - else: - ai_metadata = state["messages"][1] - logger.debug("Using Metadata from: %s", repr(ai_metadata)) - - finish_reason = ai_metadata.response_metadata.get("finish_reason", "stop") - if finish_reason == "COMPLETE": - finish_reason = "stop" - elif finish_reason == "MAX_TOKENS": - finish_reason = "length" - - openai_response = ChatResponse( - id=ai_message.id, - created=int(datetime.now(timezone.utc).timestamp()), - model=model_id, - usage=ChatUsage( - prompt_tokens=ai_metadata.response_metadata.get("token_usage", {}).get("prompt_tokens", -1), - completion_tokens=ai_metadata.response_metadata.get("token_usage", {}).get("completion_tokens", -1), - total_tokens=ai_metadata.response_metadata.get("token_usage", {}).get("total_tokens", -1), - ), - choices=[ - ChatChoices( - index=0, - message=ChatMessage( - role="ai", - content=ai_message.content, - additional_kwargs=ai_metadata.additional_kwargs, - response_metadata=ai_metadata.response_metadata, - ), - finish_reason=finish_reason, - logprobs=None, - ) - ], - ) - return {"final_response": openai_response} - - -def vs_retrieve(state: AgentState, 
config: RunnableConfig) -> AgentState:
-    """Search and return information using Vector Search"""
-    ## Note that this should be a tool call; but some models (Perplexity/OCI GenAI)
-    ## have limited or no tools support. Instead we'll call as part of the pipeline
-    ## and fake a tools call. This can be later reverted to a tool without much code change.
-    logger.info("Perform Vector Search")
-    # Take our contextualization prompt and reword the question
-    # before doing the vector search; do only if history is turned on
-    history = copy.deepcopy(state["cleaned_messages"])
-    retrieve_question = history.pop().content
-    if config["metadata"]["use_history"] and config["metadata"]["ctx_prompt"].prompt and len(history) > 1:
-        model = config["configurable"].get("ll_client", None)
+    if config["metadata"]["use_history"] and ctx_prompt and len(state["messages"]) > 2:
         ctx_template = """
-            {ctx_prompt}
+            {prompt}
             Here is the context and history:
             -------
             {history}
@@ -159,22 +107,115 @@ def vs_retrieve(state: AgentState, config: RunnableConfig) -> AgentState:
             -------
             Return ONLY the rephrased query without any explanation or additional text.
         """
-        rephrase = PromptTemplate(
+        rephrase_template = PromptTemplate(
             template=ctx_template,
-            input_variables=["ctx_prompt", "history", "question"],
+            input_variables=["prompt", "history", "question"],
         )
-        chain = rephrase | model
-        logger.info("Retrieving Rephrased Input for VS")
-        result = chain.invoke(
-            {
-                "ctx_prompt": config["metadata"]["ctx_prompt"].prompt,
-                "history": history,
-                "question": retrieve_question,
-            }
+        formatted_prompt = rephrase_template.format(
+            prompt=ctx_prompt.prompt, history=state["messages"], question=retrieve_question
+        )
+        ll_raw = config["configurable"]["ll_config"]
+        context_question = retrieve_question  # fall back to the original question if rephrasing fails
+        try:
+            response = completion(messages=[{"role": "system", "content": formatted_prompt}], stream=False, **ll_raw)
+            context_question = response.choices[0].message.content
+        except APIConnectionError as ex:
+            logger.error("Failed to rephrase: %s", str(ex))
+
+        if context_question != retrieve_question:
+            logger.info(
+                "**** Replacing User Question: %s with contextual one: %s", retrieve_question, context_question
+            )
+            retrieve_question = context_question
+
+    return retrieve_question
+
+
+def document_formatter(rag_context) -> str:
+    """Extract the Vector Search Documents and format into a string"""
+    logger.info("Extracting chunks from Vector Search Retrieval")
+    chunks = "\n\n".join([doc["page_content"] for doc in rag_context])
+    return chunks
+
+
+#############################################################################
+# NODES and EDGES
+#############################################################################
+async def initialise(state: OptimizerState, config: RunnableConfig) -> OptimizerState:
+    """Initialise our chatbot"""
+    logger.debug("Initializing Chatbot")
+    cleaned_messages = clean_messages(state, config)
+    return {"cleaned_messages": cleaned_messages}
+
+
+async def vs_grade(state: OptimizerState, config: RunnableConfig) -> OptimizerState:
+    """Determines whether the retrieved documents are relevant to the question."""
+    logger.info("Grading Vector Search Response using %i retrieved documents", len(state["documents"]))
+    # Initialise the grade as relevant
+    relevant = "yes"
+    documents_dict = document_formatter(state["documents"])
+    if config["metadata"]["vector_search"].grading and state.get("documents"):
+        grade_template = """
+    You are a Grader assessing the relevance of retrieved text to the user's input.
+    You MUST respond with only a binary score of 'yes' or 'no'. 
+    If you DO find ANY relevant retrieved text to the user's input, return 'yes' immediately and stop grading.
+    If you DO NOT find relevant retrieved text to the user's input, return 'no'.
+    Here is the user input:
+    -------
+    {question}
+    -------
+    Here is the retrieved text:
+    -------
+    {documents}
+    """
+        grade_template = PromptTemplate(
+            template=grade_template,
+            input_variables=["question", "documents"],
+        )
+        question = state["context_input"]
+        formatted_prompt = grade_template.format(question=question, documents=documents_dict)
+        logger.debug("Grading Prompt: %s", formatted_prompt)
+        ll_raw = config["configurable"]["ll_config"]
+
+        # Grade
+        try:
+            response = await acompletion(
+                messages=[{"role": "system", "content": formatted_prompt}], stream=False, **ll_raw
+            )
+            relevant = response["choices"][0]["message"]["content"]
+            logger.info("Grading completed. Relevant: %s", relevant)
+            if relevant not in ("yes", "no"):
+                logger.error("LLM did not return a binary grade; assuming all results relevant.")
+                relevant = "yes"
+        except APIConnectionError as ex:
+            logger.error("Failed to grade; marking all results relevant: %s", str(ex))
+    else:
+        logger.info("Vector Search Grading disabled; assuming all results relevant.")
+
+    if relevant.lower() == "yes":
+        # This is where we fake a tools response before the completion.
+        logger.debug("Creating ToolMessage Documents: %s", state["documents"])
+        logger.debug("Creating ToolMessage ContextQ: %s", state["context_input"])
+
+        state["messages"].append(
+            ToolMessage(
+                content=json.dumps([state["documents"], state["context_input"]], cls=DecimalEncoder),
+                name="oraclevs_tool",
+                tool_call_id="tool_placeholder",
+            )
+        )
+        logger.debug("ToolMessage Created")
+        return {"documents": documents_dict}
+
+    return {"documents": {}}
+
+
+async def vs_retrieve(state: OptimizerState, config: RunnableConfig) -> OptimizerState:
+    """Search and return information using Vector Search"""
+    ## Note that this should be a tool call; but some models (Perplexity/OCI GenAI)
+    ## have limited or no tools support. Instead we'll call as part of the pipeline
+    ## and fake a tools call. This can be later reverted to a tool without much code change. 
+ retrieve_question = rephrase(state, config) + logger.info("Perform Vector Search with: %s", retrieve_question) + try: logger.info("Connecting to VectorStore") db_conn = config["configurable"]["db_conn"] @@ -182,7 +223,7 @@ def vs_retrieve(state: AgentState, config: RunnableConfig) -> AgentState: vector_search = config["metadata"]["vector_search"] logger.info("Initializing Vector Store: %s", vector_search.vector_store) try: - vectorstore = OracleVS(db_conn, embed_client, vector_search.vector_store, vector_search.distance_metric) + vectorstores = OracleVS(db_conn, embed_client, vector_search.vector_store, vector_search.distance_metric) except Exception as ex: logger.exception("Failed to initialize the Vector Store") raise ex @@ -192,10 +233,10 @@ def vs_retrieve(state: AgentState, config: RunnableConfig) -> AgentState: search_kwargs = {"k": vector_search.top_k} if search_type == "Similarity": - retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs=search_kwargs) + retriever = vectorstores.as_retriever(search_type="similarity", search_kwargs=search_kwargs) elif search_type == "Similarity Score Threshold": search_kwargs["score_threshold"] = vector_search.score_threshold - retriever = vectorstore.as_retriever( + retriever = vectorstores.as_retriever( search_type="similarity_score_threshold", search_kwargs=search_kwargs ) elif search_type == "Maximal Marginal Relevance": @@ -205,7 +246,7 @@ def vs_retrieve(state: AgentState, config: RunnableConfig) -> AgentState: "lambda_mult": vector_search.lambda_mult, } ) - retriever = vectorstore.as_retriever(search_type="mmr", search_kwargs=search_kwargs) + retriever = vectorstores.as_retriever(search_type="mmr", search_kwargs=search_kwargs) else: raise ValueError(f"Unsupported search_type: {search_type}") logger.info("Invoking retriever on: %s", retrieve_question) @@ -222,108 +263,9 @@ def vs_retrieve(state: AgentState, config: RunnableConfig) -> AgentState: return {"context_input": retrieve_question, "documents": documents_dict} -def grade_documents(state: AgentState, config: RunnableConfig) -> Literal["generate_response", "vs_generate"]: - """Determines whether the retrieved documents are relevant to the question.""" - logger.info("Grading Vector Search Response using %i retrieved documents", len(state["documents"])) - - # Data model - class Grade(BaseModel): - """Binary score for relevance check.""" - - binary_score: str = Field(description="Relevance score 'yes' or 'no'") - - if config["metadata"]["vector_search"].grading: - # LLM (Bound to Tool) - model = config["configurable"].get("ll_client", None) - try: - llm_with_grader = model.with_structured_output(Grade) - except NotImplementedError: - logger.error("Model does not support structured output") - parser = PydanticOutputParser(pydantic_object=Grade) - llm_with_grader = model | parser - - # Prompt - grade_template = """ - You are a Grader assessing the relevance of retrieved text to the user's input. - You MUST respond with a only a binary score of 'yes' or 'no'. - If you DO find ANY relevant retrieved text to the user's input, return 'yes' immediately and stop grading. - If you DO NOT find relevant retrieved text to the user's input, return 'no'. 
- Here is the user input:
-    -------
-    {question}
-    -------
-    Here is the retrieved text:
-    -------
-    {context}
-    """
-    grader = PromptTemplate(
-        template=grade_template,
-        input_variables=["context", "question"],
-    )
-    documents = document_formatter(state["documents"])
-    question = state["context_input"]
-    logger.debug("Grading %s against Documents: %s", question, documents)
-    chain = grader | llm_with_grader
-    try:
-        scored_result = chain.invoke({"question": question, "context": documents})
-        logger.info("Grading completed.")
-        score = scored_result.binary_score
-    except Exception:
-        logger.error("LLM is not returning binary score in grader; marking all results relevant.")
-        score = "yes"
-    else:
-        logger.info("Vector Search Grading disabled; marking all results relevant.")
-        score = "yes"
-
-    logger.info("Grading Decision: Vector Search Relevant: %s", score)
-    if score == "yes":
-        # This is where we fake a tools response before the completion.
-        logger.debug("Creating ToolsMessage Documents: %s", state["documents"])
-        logger.debug("Creating ToolsMessage ContextQ: %s", state["context_input"])
-
-        state["messages"].append(
-            ToolMessage(
-                content=json.dumps([state["documents"], state["context_input"]], cls=DecimalEncoder),
-                name="oraclevs_tool",
-                tool_call_id="tool_placeholder",
-            )
-        )
-        logger.debug("ToolsMessage Created")
-        return "vs_generate"
-    else:
-        return "generate_response"
-
-
-async def vs_generate(state: AgentState, config: RunnableConfig) -> None:
-    """Generate answer when Vector Search enabled; modify state with response"""
-    logger.info("Generating Vector Search Response")
-
-    # Generate prompt with Vector Search context
-    generate_template = "SystemMessage(content='{sys_prompt}\n {context}'), HumanMessage(content='{question}')"
-    prompt_template = PromptTemplate(
-        template=generate_template,
-        input_variables=["sys_prompt", "context", "question"],
-    )
-
-    # Chain and Run
-    llm = config["configurable"].get("ll_client", None)
-    generate_chain = prompt_template | llm | StrOutputParser()
-    documents = document_formatter(state["documents"])
-    logger.debug("Completing: '%s' against relevant VectorStore documents", state["context_input"])
-    chain = {
-        "sys_prompt": config["metadata"]["sys_prompt"].prompt,
-        "question": state["context_input"],
-        "context": documents,
-    }
-
-    response = await generate_chain.ainvoke(chain)
-    return {"messages": ("assistant", response)}
-
-
-async def selectai_generate(state: AgentState, config: RunnableConfig) -> None:
+async def selectai_completion(state: OptimizerState, config: RunnableConfig) -> OptimizerState:
     """Generate answer when SelectAI enabled; modify state with response"""
-    history = copy.deepcopy(state["cleaned_messages"])
-    selectai_prompt = history.pop().content
+    selectai_prompt = state["cleaned_messages"][-1].content
     logger.info("Generating SelectAI Response on %s", selectai_prompt)
 
     sql = """
@@ -341,86 +283,87 @@
     # Execute the SQL using the connection
     db_conn = config["configurable"]["db_conn"]
     try:
-        completion = execute_sql(db_conn, sql, binds)
+        response = execute_sql(db_conn, sql, binds)
     except Exception as ex:
         logger.error("SelectAI has hit an issue: %s", ex)
-        completion = [{sql: "I'm sorry, I have no information related to your query."}]
+        response = [{sql: f"I'm sorry, I ran into an error: {ex}"}]
 
     # Response will be [{sql:, completion}]; return the completion
-    logger.debug("SelectAI Responded: %s", completion)
-    response = 
list(completion[0].values())[0]
-
-    return {"messages": ("assistant", response)}
+    logger.debug("SelectAI Responded: %s", response)
+    response = list(response[0].values())[0]
+    return {"messages": [AIMessage(content=response)]}
 
 
-async def agent(state: AgentState, config: RunnableConfig) -> AgentState:
-    """Invokes the chatbot with messages to be used"""
-    logger.debug("Initializing Agent")
-    messages = get_messages(state, config)
-    return {"cleaned_messages": messages}
+async def stream_completion(state: OptimizerState, config: RunnableConfig) -> OptimizerState:
+    """LiteLLM streaming wrapper"""
+    writer = get_stream_writer()
+    full_response = []
+    collected_content = []
+    full_text = ""
 
-
-def use_tool(_, config: RunnableConfig) -> Literal["selectai_generate", "vs_retrieve", "generate_response"]:
-    """Conditional edge to determine if using SelectAI, Vector Search or not"""
-    selectai_enabled = config["metadata"]["selectai"].enabled
-    if selectai_enabled:
-        logger.info("Invoking Chatbot with SelectAI: %s", selectai_enabled)
-        return "selectai_generate"
-
-    enabled = config["metadata"]["vector_search"].enabled
-    if enabled:
-        logger.info("Invoking Chatbot with Vector Search: %s", enabled)
-        return "vs_retrieve"
-
-    return "generate_response"
-
-
-async def generate_response(state: AgentState, config: RunnableConfig) -> AgentState:
-    """Invoke the model"""
-    model = config["configurable"].get("ll_client", None)
-    logger.debug("Invoking on: %s", state["cleaned_messages"])
+    messages = state["cleaned_messages"]
     try:
-        response = await model.ainvoke(state["cleaned_messages"])
-    except Exception as ex:
-        if hasattr(ex, "message"):
-            response = ("assistant", f"I'm sorry: {ex.message}")
+        # Get our Prompt
+        sys_prompt = config.get("metadata", {}).get("sys_prompt")
+        if state.get("context_input") and state.get("documents"):
+            documents = state["documents"]
+            new_prompt = SystemMessage(content=f"{sys_prompt.prompt}\n {documents}")
         else:
-            raise
-    return {"messages": [response]}
-
+            new_prompt = SystemMessage(content=f"{sys_prompt.prompt}")
+
+        # Insert Prompt into cleaned_messages
+        messages.insert(0, new_prompt)
+        # Await the asynchronous completion with streaming enabled
+        logger.info("Streaming completion...")
+        ll_raw = config["configurable"]["ll_config"]
+        response = await acompletion(messages=convert_to_openai_messages(messages), stream=True, **ll_raw)
+        async for chunk in response:
+            content = chunk.choices[0].delta.content
+            if content is not None:
+                writer({"stream": content})
+                collected_content.append(content)
+            full_response.append(chunk)
+
+        # After loop: update last chunk to a full completion with usage details
+        if full_response:
+            last_chunk = full_response[-1]
+            full_text = "".join(collected_content)
+            last_chunk.object = "chat.completion"
+            last_chunk.choices[0].message = {"role": "assistant", "content": full_text}
+            delattr(last_chunk.choices[0], "delta")
+            last_chunk.choices[0].finish_reason = "stop"
+            final_response = last_chunk.model_dump()
+            writer({"completion": final_response})
+    except APIConnectionError as ex:
+        logger.error(ex)
+        full_text = "I'm not able to contact the model API; please validate its configuration/availability." 
+    except Exception as ex:
+        logger.error(ex)
+        full_text = f"I'm sorry, an unknown completion problem occurred: {str(ex).split('Traceback', 1)[0]}"
+    return {"messages": [AIMessage(content=full_text)]}
 
-#############################################################################
-# GRAPH
-#############################################################################
-workflow = StateGraph(AgentState)
 
-# Define the nodes
-workflow.add_node("agent", agent)
+# Build the state graph
+workflow = StateGraph(OptimizerState)
+workflow.add_node("initialise", initialise)
 workflow.add_node("vs_retrieve", vs_retrieve)
-workflow.add_node("vs_generate", vs_generate)
-workflow.add_node("selectai_generate", selectai_generate)
-workflow.add_node("generate_response", generate_response)
-workflow.add_node("respond", respond)
-
-# Start the agent with clean messages
-workflow.add_edge(START, "agent")
+workflow.add_node("vs_grade", vs_grade)
+workflow.add_node("selectai_completion", selectai_completion)
+workflow.add_node("stream_completion", stream_completion)
 
-# Branch to either "selectai_generate", "vs_retrieve", or "generate_response"
-workflow.add_conditional_edges("agent", use_tool)
-workflow.add_edge("generate_response", "respond")
+# Start the chatbot with clean messages
+workflow.add_edge(START, "initialise")
 
-# If selectAI
-workflow.add_edge("selectai_generate", "respond")
+# Branch to either "selectai_completion", "vs_retrieve", or "stream_completion"
+workflow.add_conditional_edges("initialise", use_tool)
+workflow.add_edge("vs_retrieve", "vs_grade")
+workflow.add_edge("vs_grade", "stream_completion")
+workflow.add_edge("selectai_completion", END)
 
-# If retrieving, grade the documents returned and either generate (not relevant) or vs_generate (relevant)
-workflow.add_conditional_edges("vs_retrieve", grade_documents)
-workflow.add_edge("vs_generate", "respond")
+# End the workflow
+workflow.add_edge("stream_completion", END)
 
-# Finish with OpenAI Compatible Response
-workflow.add_edge("respond", END)
-
-# Compile
+# Compile the graph
 memory = MemorySaver()
 chatbot_graph = workflow.compile(checkpointer=memory)
-
-## This will output the Graph in ascii; don't deliver uncommented
-# chatbot_graph.get_graph(xray=True).print_ascii()
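For reference, a graph compiled this way is normally driven with a thread-scoped config (so the MemorySaver checkpointer can key its state) and consumed with stream_mode="custom", which yields exactly the payloads the nodes hand to get_stream_writer() ({"stream": ...} chunks, then a {"completion": ...} event). A minimal sketch, not part of this patch; the model name, prompts, and SimpleNamespace stand-ins for the settings objects are assumptions, and an OPENAI_API_KEY is presumed available to litellm:

    import asyncio
    from types import SimpleNamespace

    from server.agents.chatbot import chatbot_graph

    async def run_once(question: str) -> None:
        config = {
            "configurable": {
                "thread_id": "demo-thread",  # keys the MemorySaver checkpoint
                "ll_config": {"model": "openai/gpt-4o-mini"},  # unpacked into litellm acompletion()
            },
            "metadata": {
                "use_history": True,
                "sys_prompt": SimpleNamespace(prompt="You are a helpful assistant."),
                "ctx_prompt": SimpleNamespace(prompt=""),
                "vector_search": SimpleNamespace(enabled=False, grading=False),
                "selectai": SimpleNamespace(enabled=False),
            },
        }
        # stream_mode="custom" yields whatever the nodes pass to get_stream_writer()
        async for event in chatbot_graph.astream(
            {"messages": [("user", question)]}, config=config, stream_mode="custom"
        ):
            if "stream" in event:
                print(event["stream"], end="", flush=True)

    asyncio.run(run_once("What is (3 + 5) x 12?"))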
     """Used in direct call from list_models and agents.models"""
     model_objects = bootstrap.MODEL_OBJECTS
-
-    logger.debug("%i models are defined", len(model_objects))
-
+    logger.debug(
+        "Filtering %i models for id: %s; type: %s; disabled: %s",
+        len(model_objects),
+        model_id,
+        model_type,
+        include_disabled,
+    )
     model_filtered = [
         model
         for model in model_objects
diff --git a/src/server/api/core/settings.py b/src/server/api/core/settings.py
index 9e60c45c..81de678e 100644
--- a/src/server/api/core/settings.py
+++ b/src/server/api/core/settings.py
@@ -43,27 +43,26 @@ def get_client_settings(client: ClientIdType) -> Settings:
 def get_server_config() -> Configuration:
     """Return server configuration"""
     database_objects = bootstrap.DATABASE_OBJECTS
-    database_configs = [db for db in database_objects]
+    database_configs = list(database_objects)
 
     model_objects = bootstrap.MODEL_OBJECTS
-    model_configs = [model for model in model_objects]
+    model_configs = list(model_objects)
 
     oci_objects = bootstrap.OCI_OBJECTS
-    oci_configs = [oci for oci in oci_objects]
+    oci_configs = list(oci_objects)
 
     prompt_objects = bootstrap.PROMPT_OBJECTS
-    prompt_configs = [prompt for prompt in prompt_objects]
+    prompt_configs = list(prompt_objects)
 
-    # Add MCP configs as a list (similar to other configs)
-    mcp_objects = bootstrap.mcp.MCP_MODELS  # Get list of models from bootstrap
-    mcp_configs = [model for model in mcp_objects]  # Convert to list like other configs
+    # mcp_objects = bootstrap.MCP_OBJECTS
+    # mcp_configs = list(mcp_objects)
 
     full_config = {
         "database_configs": database_configs,
         "model_configs": model_configs,
         "oci_configs": oci_configs,
         "prompt_configs": prompt_configs,
-        "mcp_configs": mcp_configs,  # Now it's a list like other configs
+        # "mcp_configs": mcp_configs,
     }
 
     return full_config
@@ -96,11 +95,9 @@ def update_server_config(config_data: dict) -> None:
 
     if "prompt_configs" in config_data:
         bootstrap.PROMPT_OBJECTS = config.prompt_configs or []
-
-    # Add MCP config handling (similar to other configs)
+
     if "mcp_configs" in config_data:
-        from server.bootstrap import mcp
-        mcp.MCP_MODELS = config.mcp_configs or []  # Store as list like other configs
+        bootstrap.MCP_OBJECTS = config.mcp_configs or []
 
 
 def load_config_from_json_data(config_data: dict, client: ClientIdType = None) -> None:
diff --git a/src/server/api/utils/mcp.py b/src/server/api/utils/mcp.py
index 13cffcfa..e70f72e5 100644
--- a/src/server/api/utils/mcp.py
+++ b/src/server/api/utils/mcp.py
@@ -4,8 +4,11 @@
 """
 # spell-checker:ignore astream selectai
+import os
 import time
 from typing import Literal, AsyncGenerator
+import json
+import oci
 
 from langchain_core.messages import HumanMessage
 from langchain_core.runnables import RunnableConfig
@@ -21,21 +24,37 @@
 import server.api.core.mcp as core_mcp
 import server.mcp.graph as graph
-import common.schema as schema
-import common.logging_config as logging_config
+from common import logging_config, schema
 
 logger = logging_config.logging.getLogger("api.utils.mcp")
 
+def get_client(server: str = "http://127.0.0.1", port: int = 8000) -> dict:
+    """Get the MCP Client Configuration"""
+    mcp_client = {
+        "mcpServers": {
+            "optimizer": {
+                "type": "streamableHttp",
+                "transport": "streamable_http",
+                "url": f"{server}:{port}/mcp/",
+                "headers": {"Authorization": f"Bearer {os.getenv('API_SERVER_KEY')}"},
+            }
+        }
+    }
+
+    return mcp_client
 
-def error_response(message: str, model: str) -> dict:
+def error_response(call: str, message: str, model: dict) -> dict:
     """Send the error as a response"""
-    response = {
-        "id": "error",
-        "choices": [{"message": {"role": "assistant", "content": message}, "index": 0, "finish_reason": "stop"}],
-        "created": int(time.time()),
-        "model": model,
-        "object": "chat.completion",
-    }
+    response = message
+    if call != "streams":
+        response = {
+            "id": "error",
+            "choices": [{"message": {"role": "assistant", "content": message}, "index": 0, "finish_reason": "stop"}],
+            "created": int(time.time()),
+            "model": model["model"],
+            "object": "chat.completion",
+        }
+    logger.debug("Returning Error Response: %s", response)
 
     return response
 
 
@@ -69,66 +88,53 @@ async def completion_generator(
-    # Build our Graph
-    graph.set_node("tools_node", ToolNode(tools))
-    agent: CompiledStateGraph = graph.mcp_graph
+    # Setup MCP and bind tools
+    mcp_client = MultiServerMCPClient(
+        {"optimizer": core_mcp.get_client(client="langgraph")["mcpServers"]["optimizer"]}
+    )
+    tools = await mcp_client.get_tools()
+    try:
+        ll_model_with_tools = ll_model.bind_tools(tools)
+    except NotImplementedError as ex:
+        yield error_response(call, str(ex), model)
+        raise
+
+    # Build our Graph
+    agent: CompiledStateGraph = graph.main(tools)
 
     kwargs = {
         "input": {"messages": [HumanMessage(content=request.messages[0].content)]},
         "config": RunnableConfig(
             configurable={"thread_id": client, "ll_model": ll_model_with_tools, "tools": tools},
+            metadata={"use_history": client_settings.ll_model.chat_history},
        ),
     }
 
-    yield "End"
-    # # Get Prompts
-    # try:
-    #     user_sys_prompt = getattr(client_settings.prompts, "sys", "Basic Example")
-    #     sys_prompt = core_prompts.get_prompts(category="sys", name=user_sys_prompt)
-    # except AttributeError as ex:
-    #     # schema.Settings not on server-side
-    #     logger.error("A settings exception occurred: %s", ex)
-    #     raise
-
-    # db_conn = None
-    # # Setup selectai
-    # if client_settings.selectai.enabled:
-    #     db_conn = util_databases.get_client_db(client).connection
-    #     util_selectai.set_profile(db_conn, client_settings.selectai.profile, "temperature", model["temperature"])
-    #     util_selectai.set_profile(
-    #         db_conn, client_settings.selectai.profile, "max_tokens", model["max_completion_tokens"]
-    #     )
-
-    # # Setup vector_search
-    # embed_client, ctx_prompt = None, None
-    # if client_settings.vector_search.enabled:
-    #     db_conn = util_databases.get_client_db(client).connection
-    #     embed_client = util_models.get_client(client_settings.vector_search.model_dump(), oci_config)
-
-    #     user_ctx_prompt = getattr(client_settings.prompts, "ctx", "Basic Example")
-    #     ctx_prompt = core_prompts.get_prompts(category="ctx", name=user_ctx_prompt)
-
-
-    # try:
-    #     async for chunk in agent.astream_events(**kwargs, version="v2"):
-    #         # The below will produce A LOT of output; uncomment when desperate
-    #         # logger.debug("Streamed Chunk: %s", chunk)
-    #         if chunk["event"] == "on_chat_model_stream":
-    #             if "tools_condition" in str(chunk["metadata"]["langgraph_triggers"]):
-    #                 continue  # Skip Tool Call messages
-    #             if "vs_retrieve" in str(chunk["metadata"]["langgraph_node"]):
-    #                 continue  # Skip Fake-Tool Call messages
-    #             content = chunk["data"]["chunk"].content
-    #             if content != "" and call == "streams":
-    #                 yield content.encode("utf-8")
-    #         last_response = chunk["data"]
-    #     if call == "streams":
-    #         yield "[stream_finished]"  # This will break the Chatbot loop
-    #     elif call == "completions":
-    #         final_response = last_response["output"]["final_response"]
-    #         yield final_response  # This will be captured for ChatResponse
-    # except Exception as ex:
-    #     logger.error("An invoke exception occurred: %s", ex)
-    #     # yield f"I'm sorry; {ex}"
-    #     # TODO(gotsysdba) - If a message is returned;
-    #     # format and return (this should be done in the agent)
-    #     raise
+
+    last_response = None  # populated per-chunk; consumed by the "completions" branch
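+    # astream_events(version="v2") yields event dicts shaped roughly like (an
+    # illustrative sketch; only the fields used below are shown):
+    #   {"event": "on_chat_model_stream",
+    #    "data": {"chunk": AIMessageChunk(...)},
+    #    "metadata": {"langgraph_node": "...", "langgraph_triggers": [...]}}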
{ex}" - # # TODO(gotsysdba) - If a message is returned; - # # format and return (this should be done in the agent) - # raise + + try: + async for chunk in agent.astream_events(**kwargs, version="v2"): + # The below will produce A LOT of output; uncomment when desperate + # logger.debug("Streamed Chunk: %s", chunk) + if chunk["event"] == "on_chat_model_stream": + if "tools_condition" in str(chunk["metadata"]["langgraph_triggers"]): + continue # Skip Tool Call messages + if "vs_retrieve" in str(chunk["metadata"]["langgraph_node"]): + continue # Skip Fake-Tool Call messages + content = chunk["data"]["chunk"].content + if content != "" and call == "streams": + yield content.encode("utf-8") + last_response = chunk["data"] + except oci.exceptions.ServiceError as ex: + error_details = json.loads(ex.message).get("message", "") + yield error_response(call, error_details, model) + raise + + # Clean Up + if call == "streams": + yield "[stream_finished]" # This will break the Chatbot loop + elif call == "completions": + final_response = last_response["output"]["final_response"] + yield final_response # This will be captured for ChatResponse diff --git a/src/server/api/v1/chat.py b/src/server/api/v1/chat.py index 28a4706c..b9b8bbee 100644 --- a/src/server/api/v1/chat.py +++ b/src/server/api/v1/chat.py @@ -18,8 +18,8 @@ from langgraph.graph.state import CompiledStateGraph from langgraph.graph.message import REMOVE_ALL_MESSAGES -from server.api.utils import chat -from server.agents import chatbot +import server.api.utils.mcp as utils_mcp +import server.mcp.graph as graph from common import schema, logging_config @@ -38,7 +38,7 @@ async def chat_post( ) -> ModelResponse: """Full Completion Requests""" last_message = None - async for chunk in chat.completion_generator(client, request, "completions"): + async for chunk in utils_mcp.completion_generator(client, request, "completions"): last_message = chunk return last_message @@ -54,7 +54,7 @@ async def chat_stream( ) -> StreamingResponse: """Completion Requests""" return StreamingResponse( - chat.completion_generator(client, request, "streams"), + utils_mcp.completion_generator(client, request, "streams"), media_type="application/octet-stream", ) @@ -66,7 +66,8 @@ async def chat_stream( ) async def chat_history_clean(client: schema.ClientIdType = Header(default="server")) -> list[ChatMessage]: """Delete all Chat History""" - agent: CompiledStateGraph = chatbot.chatbot_graph + agent: CompiledStateGraph = graph.main(list()) + # agent: CompiledStateGraph = chatbot.chatbot_graph try: _ = agent.update_state( config=RunnableConfig( @@ -88,7 +89,8 @@ async def chat_history_clean(client: schema.ClientIdType = Header(default="serve ) async def chat_history_return(client: schema.ClientIdType = Header(default="server")) -> list[ChatMessage]: """Return Chat History""" - agent: CompiledStateGraph = chatbot.chatbot_graph + agent: CompiledStateGraph = graph.main(list()) + # agent: CompiledStateGraph = chatbot.chatbot_graph try: state_snapshot = agent.get_state( config=RunnableConfig( diff --git a/src/server/api/v1/mcp.py b/src/server/api/v1/mcp.py index 6a3f2fe9..68e40b9a 100644 --- a/src/server/api/v1/mcp.py +++ b/src/server/api/v1/mcp.py @@ -8,6 +8,14 @@ from fastapi import APIRouter, Request, Depends from fastmcp import FastMCP, Client +import server.api.utils.mcp as utils_mcp + +from common import logging_config +from fastapi import APIRouter, Request, Depends +from fastmcp import FastMCP, Client + +import server.api.core.mcp as core_mcp + import common.logging_config 
+
+
 @auth.get(
     "/tools",
     description="List available MCP tools",
     response_model=list[dict],
 )
-async def mcp_get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]:
+async def get_tools(mcp_engine: FastMCP = Depends(get_mcp)) -> list[dict]:
     """List MCP tools"""
     tools_info = []
     try:
diff --git a/src/server/bootstrap/mcp.py b/src/server/bootstrap/mcp.py
index 95e2e34a..c958e102 100644
--- a/src/server/bootstrap/mcp.py
+++ b/src/server/bootstrap/mcp.py
@@ -2,12 +2,13 @@
 Copyright (c) 2024, 2025, Oracle and/or its affiliates.
 Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
 """
+
 from typing import List, Optional
 import os
 
 from server.bootstrap.configfile import ConfigStore
 from common.schema import MCPSettings, MCPModelConfig, MCPToolConfig
-import common.logging_config as logging_config
+from common import logging_config
 
 logger = logging_config.logging.getLogger("bootstrap.mcp")
 
@@ -16,18 +17,19 @@
 MCP_MODELS: List[MCPModelConfig] = []
 MCP_TOOLS: List[MCPToolConfig] = []
 
+
 def load_mcp_settings(config: dict) -> None:
     """Load MCP configuration from config file"""
     global MCP_SETTINGS, MCP_MODELS, MCP_TOOLS
-    
+
     # Convert to settings object first
     mcp_settings = MCPSettings(
         models=[MCPModelConfig(**model) for model in config.get("models", [])],
         tools=[MCPToolConfig(**tool) for tool in config.get("tools", [])],
         default_model=config.get("default_model"),
-        enabled=config.get("enabled", True)
+        enabled=config.get("enabled", True),
     )
-    
+
     # Set globals
     MCP_SETTINGS = mcp_settings
     MCP_MODELS = mcp_settings.models
@@ -35,6 +37,7 @@ def load_mcp_settings(config: dict) -> None:
 
     logger.info("Loaded %i MCP Models and %i Tools", len(MCP_MODELS), len(MCP_TOOLS))
 
+
 def main() -> MCPSettings:
     """Bootstrap MCP Configuration"""
     logger.debug("*** Bootstrapping MCP - Start")
@@ -48,7 +51,7 @@ def main() -> MCPSettings:
             models=configuration.mcp_configs,
             tools=[],  # No tools in the current schema
             default_model=configuration.mcp_configs[0].model_id if configuration.mcp_configs else None,
-            enabled=True
+            enabled=True,
         )
     else:
         # Default MCP configuration
@@ -61,22 +64,19 @@ def main() -> MCPSettings:
                     enabled=True,
                     streaming=False,
                     temperature=1.0,
-                    max_tokens=2048
+                    max_tokens=2048,
                 )
             ],
             tools=[
                 MCPToolConfig(
                     name="file_reader",
                     description="Read contents of files",
-                    parameters={
-                        "path": "string",
-                        "encoding": "string"
-                    },
-                    enabled=True
+                    parameters={"path": "string", "encoding": "string"},
+                    enabled=True,
                 )
             ],
             default_model=None,
-            enabled=True
+            enabled=True,
         )
 
     logger.info("Loaded %i MCP Models and %i Tools", len(mcp_settings.models), len(mcp_settings.tools))
@@ -86,4 +86,4 @@ def main() -> MCPSettings:
 
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/src/server/bootstrap/oci.py b/src/server/bootstrap/oci.py
index c40c297f..dd2d7470 100644
--- a/src/server/bootstrap/oci.py
+++ b/src/server/bootstrap/oci.py
@@ -10,9 +10,9 @@
 
 from server.bootstrap.configfile import ConfigStore
 
-from common import logging_config
 from common.schema import OracleCloudSettings
 
+from common import logging_config
 logger = logging_config.logging.getLogger("bootstrap.oci")
diff --git a/src/server/mcp/graph.py b/src/server/mcp/graph.py
new file mode 100644
index 00000000..00664032
--- /dev/null
+++ b/src/server/mcp/graph.py
@@ -0,0 +1,171 @@
+"""
+Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
+"""
+# spell-checker:ignore ainvoke checkpointer
+
+from datetime import datetime, timezone
+
+from langchain_core.messages import AIMessage, SystemMessage, ToolMessage
+from langchain_core.runnables import RunnableConfig
+
+from langgraph.graph import StateGraph, MessagesState, START, END
+from langgraph.prebuilt import ToolNode
+
+from common.schema import ChatResponse, ChatUsage, ChatChoices, ChatMessage
+from launch_server import graph_memory
+
+import common.logging_config as logging_config
+
+logger = logging_config.logging.getLogger("mcp.graph")
+
+
+#############################################################################
+# AGENT STATE
+#############################################################################
+class OptimizerState(MessagesState):
+    """Establish our Agent State Machine"""
+
+    final_response: ChatResponse  # OpenAI Response
+    cleaned_messages: list  # Messages w/o VS Results
+
+
+#############################################################################
+# NODES and EDGES
+#############################################################################
+def respond(state: OptimizerState, config: RunnableConfig) -> dict:
+    """Respond in OpenAI Compatible return"""
+    ai_message = state["messages"][-1]
+    logger.debug("Formatting to OpenAI compatible response: %s", repr(ai_message))
+    if "model_name" in ai_message.response_metadata:
+        model_id = ai_message.response_metadata["model_name"]
+        ai_metadata = ai_message
+    else:
+        model_id = config["metadata"]["ll_model"]
+        ai_metadata = state["messages"][1]
+        logger.debug("Using Metadata from: %s", repr(ai_metadata))
+
+    finish_reason = ai_metadata.response_metadata.get("finish_reason", "stop")
+    if finish_reason == "COMPLETE":
+        finish_reason = "stop"
+    elif finish_reason == "MAX_TOKENS":
+        finish_reason = "length"
+
+    openai_response = ChatResponse(
+        id=ai_message.id,
+        created=int(datetime.now(timezone.utc).timestamp()),
+        model=model_id,
+        usage=ChatUsage(
+            prompt_tokens=ai_metadata.response_metadata.get("token_usage", {}).get("prompt_tokens", -1),
+            completion_tokens=ai_metadata.response_metadata.get("token_usage", {}).get("completion_tokens", -1),
+            total_tokens=ai_metadata.response_metadata.get("token_usage", {}).get("total_tokens", -1),
+        ),
+        choices=[
+            ChatChoices(
+                index=0,
+                message=ChatMessage(
+                    role="ai",
+                    content=ai_message.content,
+                    additional_kwargs=ai_metadata.additional_kwargs,
+                    response_metadata=ai_metadata.response_metadata,
+                ),
+                finish_reason=finish_reason,
+                logprobs=None,
+            )
+        ],
+    )
+    return {"final_response": openai_response}
+
+
+async def client(state: OptimizerState, config: RunnableConfig) -> OptimizerState:
+    """Get messages from state based on Thread ID"""
+    logger.debug("Initializing OptimizerState")
+    messages = get_messages(state, config)
+
+    return {"cleaned_messages": messages}
+
+
+#############################################################################
+def get_messages(state: OptimizerState, config: RunnableConfig) -> list:
+    """Return a list of messages that will be passed to the model for completion
+    Leave the state as is for GUI functionality"""
+    use_history = config["metadata"]["use_history"]
+
+    # If user decided for no history, only take the last message
+    state_messages = state["messages"] if use_history else state["messages"][-1:]
+
+    messages = []
+    for msg in state_messages:
+        if isinstance(msg, SystemMessage):
+            continue
+        if isinstance(msg, ToolMessage):
+            if messages:  # Check if there are any messages in the list
+                messages.pop()  # Remove the last appended message (the tool-call request)
+            continue
+        messages.append(msg)
+
+    # # insert the system prompt; remaining messages cleaned
+    # if config["metadata"]["sys_prompt"].prompt:
+    #     messages.insert(0, SystemMessage(content=config["metadata"]["sys_prompt"].prompt))
+
+    return messages
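+
+
+# Illustration (a sketch): with use_history=True and state messages
+#   [System, Human, AI(tool_calls), Tool, AI("answer")]
+# get_messages returns [Human, AI("answer")]: the System prompt is dropped here
+# (the completion step re-inserts its own), and each Tool result plus the AI
+# message that requested it are pruned before the next completion.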
config["metadata"]["use_history"] + + # If user decided for no history, only take the last message + state_messages = state["messages"] if use_history else state["messages"][-1:] + + messages = [] + for msg in state_messages: + if isinstance(msg, SystemMessage): + continue + if isinstance(msg, ToolMessage): + if messages: # Check if there are any messages in the list + messages.pop() # Remove the last appended message + continue + messages.append(msg) + + # # insert the system prompt; remaining messages cleaned + # if config["metadata"]["sys_prompt"].prompt: + # messages.insert(0, SystemMessage(content=config["metadata"]["sys_prompt"].prompt)) + + return messages + + +def should_continue(state: OptimizerState): + """Determine if graph should continue to tools""" + messages = state["messages"] + last_message = messages[-1] + if last_message.tool_calls: + return "tools" + return END + + +# Define call_model function +async def call_model(state: OptimizerState, config: RunnableConfig): + """Invoke the model""" + try: + model = config["configurable"].get("ll_model", None) + messages = state["messages"] + response = await model.ainvoke(messages) + return {"messages": [response]} + except AttributeError as ex: + # The model is not in our RunnableConfig + return {"messages": [f"I'm sorry; {ex}"]} + + +# ############################################################################# +# # GRAPH +# ############################################################################# +def main(tools: list): + """Define the graph with MCP tool nodes""" + # Build the graph + workflow = StateGraph(OptimizerState) + + # Define the nodes + workflow.add_node("client", client) + workflow.add_node("call_model", call_model) + workflow.add_node("tools", ToolNode(tools)) + workflow.add_node("respond", respond) + + # Add Edges + workflow.add_edge(START, "client") + workflow.add_edge("client", "call_model") + workflow.add_conditional_edges( + "call_model", + should_continue, + ) + workflow.add_edge("tools", "call_model") + workflow.add_edge("call_model", "respond") + workflow.add_edge("respond", END) + + # Compile the graph and return it + mcp_graph = workflow.compile(checkpointer=graph_memory) + logger.debug("Chatbot Graph Built with tools: %s", tools) + ## This will output the Graph in ascii; don't deliver uncommented + # mcp_graph.get_graph(xray=True).print_ascii() + + return mcp_graph + + +if __name__ == "__main__": + main(list()) diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py index a8a67848..a9f3e445 100644 --- a/src/server/mcp/proxies/sqlcl.py +++ b/src/server/mcp/proxies/sqlcl.py @@ -8,10 +8,9 @@ import shutil import subprocess -from fastmcp.server.proxy import ProxyToolManager +import server.api.utils.databases as utils_databases -import server.api.core.databases as core_databases -import common.logging_config as logging_config +from common import logging_config logger = logging_config.logging.getLogger("mcp.proxies.sqlcl") @@ -34,7 +33,7 @@ async def register(mcp): } } } - databases = core_databases.get_databases(validate=False) + databases = utils_databases.get_databases(validate=False) for database in databases: # Start sql in no-login mode try: @@ -50,7 +49,11 @@ async def register(mcp): # Prepare commands: connect, then exit commands = [ f"connmgr delete -conn OPTIMIZER_{database.name}", - f"conn -savepwd -save OPTIMIZER_{database.name} -user {database.user} -password {database.password} -url {database.dsn}", + ( + f"conn -savepwd -save OPTIMIZER_{database.name} " + f"-user 
diff --git a/src/server/mcp/proxies/sqlcl.py b/src/server/mcp/proxies/sqlcl.py
index a8a67848..a9f3e445 100644
--- a/src/server/mcp/proxies/sqlcl.py
+++ b/src/server/mcp/proxies/sqlcl.py
@@ -8,10 +8,9 @@
 import shutil
 import subprocess
 
-from fastmcp.server.proxy import ProxyToolManager
+import server.api.utils.databases as utils_databases
 
-import server.api.core.databases as core_databases
-import common.logging_config as logging_config
+from common import logging_config
 
 logger = logging_config.logging.getLogger("mcp.proxies.sqlcl")
 
@@ -34,7 +33,7 @@ async def register(mcp):
             }
         }
     }
-    databases = core_databases.get_databases(validate=False)
+    databases = utils_databases.get_databases(validate=False)
     for database in databases:
         # Start sql in no-login mode
         try:
@@ -50,7 +49,11 @@ async def register(mcp):
         # Prepare commands: connect, then exit
         commands = [
             f"connmgr delete -conn OPTIMIZER_{database.name}",
-            f"conn -savepwd -save OPTIMIZER_{database.name} -user {database.user} -password {database.password} -url {database.dsn}",
+            (
+                f"conn -savepwd -save OPTIMIZER_{database.name} "
+                f"-user {database.user} -password {database.password} "
+                f"-url {database.dsn}"
+            ),
             "exit",
         ]
 
diff --git a/tests/unit/server/api/utils/test_models.py b/tests/unit/server/api/utils/test_models.py
new file mode 100644
index 00000000..04728f13
--- /dev/null
+++ b/tests/unit/server/api/utils/test_models.py
@@ -0,0 +1,36 @@
+"""
+Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
+"""
+
+# spell-checker: disable
+import os
+import pytest
+
+import server.api.core.models as core_models
+import server.api.utils.models as utils_models
+
+os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+os.environ["LITELLM_DISABLE_SPEND_LOGS"] = "True"
+os.environ["LITELLM_DISABLE_SPEND_UPDATES"] = "True"
+os.environ["LITELLM_DISABLE_END_USER_COST_TRACKING"] = "True"
+os.environ["LITELLM_DROP_PARAMS"] = "True"
+
+
+@pytest.fixture(name="models_list")
+def _models_list():
+    model_objects = core_models.get_model()
+    for obj in model_objects:
+        obj.enabled = True
+    return model_objects
+
+
+def test_get_litellm_client(models_list):
+    """Testing LiteLLM Functionality"""
+    assert isinstance(models_list, list)
+    assert len(models_list) > 0
+
+    for model in models_list:
+        print(f"My Model: {model}")
+        if model.id == "mxbai-embed-large":
+            utils_models.get_litellm_client(model.model_dump())