
Commit 2d9c075

fix: reduce func complexity
1 parent: c930476

File tree: 1 file changed (+37 −22)

litellm/proxy/proxy_server.py

Lines changed: 37 additions & 22 deletions
@@ -653,28 +653,11 @@ async def proxy_startup_event(app: FastAPI):
     )
 
     ### PRELOAD USERS INTO CACHE ###
-    if prisma_client is not None and general_settings is not None:
-        preload_limit = general_settings.get("preload_users_limit", 0)
-        if preload_limit > 0:
-            from litellm.proxy.utils import preload_users_into_cache
-            verbose_proxy_logger.info(
-                f"Starting background user preload into cache: limit={preload_limit}"
-            )
-
-            async def _preload_users_background():
-                try:
-                    await preload_users_into_cache(
-                        prisma_client=prisma_client,  # type: ignore
-                        user_api_key_cache=user_api_key_cache,
-                        limit=preload_limit
-                    )
-                    verbose_proxy_logger.info("User preload background task completed")
-                except Exception as e:
-                    verbose_proxy_logger.error(f"Failed to preload users in background: {e}")
-                    # Don't fail startup if preload fails
-                    pass
-
-            asyncio.create_task(_preload_users_background())
+    ProxyStartupEvent._start_user_preload_background_task(
+        prisma_client=prisma_client,
+        general_settings=general_settings,
+        user_api_key_cache=user_api_key_cache
+    )
 
     ### START BATCH WRITING DB + CHECKING NEW MODELS###
     if prisma_client is not None:
@@ -3921,6 +3904,37 @@ def _init_dd_tracer(cls):
         prof.start()
         verbose_proxy_logger.debug("Datadog Profiler started......")
 
+    @classmethod
+    def _start_user_preload_background_task(
+        cls,
+        prisma_client: Optional[PrismaClient],
+        general_settings: Optional[dict],
+        user_api_key_cache: DualCache,
+    ):
+        """Start background task to preload users into cache"""
+        if prisma_client is not None and general_settings is not None:
+            preload_limit = general_settings.get("preload_users_limit", 0)
+            if preload_limit > 0:
+                from litellm.proxy.utils import preload_users_into_cache
+                verbose_proxy_logger.info(
+                    f"Starting background user preload into cache: limit={preload_limit}"
+                )
+
+                async def _preload_users_background():
+                    try:
+                        await preload_users_into_cache(
+                            prisma_client=prisma_client,  # type: ignore
+                            user_api_key_cache=user_api_key_cache,
+                            limit=preload_limit
+                        )
+                        verbose_proxy_logger.info("User preload background task completed")
+                    except Exception as e:
+                        verbose_proxy_logger.error(f"Failed to preload users in background: {e}")
+                        # Don't fail startup if preload fails
+                        pass
+
+                asyncio.create_task(_preload_users_background())
+
 
 #### API ENDPOINTS ####
 @router.get(
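
For readers unfamiliar with the pattern, the extracted helper schedules the preload as a fire-and-forget asyncio task: the coroutine wraps its work in try/except so a failure is logged rather than propagated, and startup continues without awaiting it. The sketch below is a minimal, self-contained illustration of that pattern under stated assumptions; the stub names (fake_preload_users_into_cache, the plain-dict cache, the object() stand-in for the Prisma client) are illustrative and not litellm APIs.

import asyncio
import logging

logger = logging.getLogger("preload-sketch")
logging.basicConfig(level=logging.INFO)


async def fake_preload_users_into_cache(prisma_client, user_api_key_cache, limit):
    # Stand-in for the real preload helper: pretend to page users out of the
    # database and warm the in-memory cache with them.
    await asyncio.sleep(0.1)
    user_api_key_cache.update({f"user-{i}": {"budget": 100} for i in range(limit)})


def start_user_preload_background_task(prisma_client, general_settings, user_api_key_cache):
    # Mirrors the guard logic of the new classmethod: only schedule work when a
    # DB client exists and preload_users_limit is positive.
    if prisma_client is None or general_settings is None:
        return None
    preload_limit = general_settings.get("preload_users_limit", 0)
    if preload_limit <= 0:
        return None

    async def _preload_users_background():
        try:
            await fake_preload_users_into_cache(
                prisma_client=prisma_client,
                user_api_key_cache=user_api_key_cache,
                limit=preload_limit,
            )
            logger.info("User preload background task completed")
        except Exception as e:
            # Swallow errors so a preload failure never breaks startup.
            logger.error(f"Failed to preload users in background: {e}")

    # Fire-and-forget: schedule on the running loop and return immediately.
    return asyncio.create_task(_preload_users_background())


async def fake_startup_event():
    cache = {}
    task = start_user_preload_background_task(
        prisma_client=object(),  # any non-None value enables the preload path
        general_settings={"preload_users_limit": 5},
        user_api_key_cache=cache,
    )
    logger.info("startup continues while preload runs in the background")
    await task  # awaited here only so the demo can report the result
    logger.info(f"cache now holds {len(cache)} users")


if __name__ == "__main__":
    asyncio.run(fake_startup_event())

Running the sketch prints the "startup continues" line before the preload finishes, which is the point of pushing the work into a background task rather than blocking proxy_startup_event.
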
@@ -4082,6 +4096,7 @@ async def model_info(
     tags=["chat/completions"],
     responses={200: {"description": "Successful response"}, **ERROR_RESPONSES},
 ) # azure compatible endpoint
+@profile_endpoint(0.8)
 async def chat_completion( # noqa: PLR0915
     request: Request,
     fastapi_response: Response,
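
The @profile_endpoint(0.8) decorator applied to chat_completion is defined elsewhere in the codebase and its body is not part of this diff. Assuming the float argument is a per-request sampling rate, a decorator of that general shape could look like the sketch below; the cProfile-based body and the debug-log output are assumptions for illustration, not litellm's actual implementation.

import cProfile
import functools
import io
import logging
import pstats
import random

logger = logging.getLogger("profile-sketch")


def profile_endpoint(sample_rate: float):
    """Hypothetical sketch: profile roughly `sample_rate` of the calls to an
    async endpoint and log the hottest functions."""

    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            if random.random() >= sample_rate:
                # Unsampled calls pass straight through with no overhead.
                return await func(*args, **kwargs)
            profiler = cProfile.Profile()
            profiler.enable()
            try:
                # Note: enabling the profiler across awaits also captures other
                # tasks sharing the event loop thread; a limitation of this sketch.
                return await func(*args, **kwargs)
            finally:
                profiler.disable()
                buf = io.StringIO()
                pstats.Stats(profiler, stream=buf).sort_stats("cumulative").print_stats(10)
                logger.debug("profile for %s:\n%s", func.__name__, buf.getvalue())

        return wrapper

    return decorator

With sample_rate=0.8, roughly four out of five requests would be profiled and the rest would be served without instrumentation.
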
