Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 45 additions & 10 deletions fastdeploy/cache_manager/cache_messager.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,12 +143,18 @@ def __init__(

self.gpu_id = gpu_id
self.cache_info = dict()
self.dp_rank_id = self.rank + local_data_parallel_id * self.nranks
self.rank_id = (
self.rank + local_data_parallel_id * self.nranks
) # align with engine worker rank (paddle.distributed.launch)

layerwise_send_cache_thread = threading.Thread(target=self._prefill_layerwise_send_cache_thread)
layerwise_send_cache_thread.daemon = True
layerwise_send_cache_thread.start()

connect_rdma_thread = threading.Thread(target=self._handle_connect_task)
connect_rdma_thread.daemon = True
connect_rdma_thread.start()

logger.info(f"cache messager init finished, use {transfer_protocol}")

def _prefill_layerwise_send_cache_thread(self):
Expand All @@ -161,29 +167,29 @@ def _prefill_layerwise_send_cache_thread(self):
prefilled_layer_idx_data = np.zeros(shape=[1], dtype=np.int32)
try:
step_shm_value = IPCSignal(
name=f"splitwise_complete_prefilled_step_{self.dp_rank_id}",
name=f"splitwise_complete_prefilled_step_{self.rank_id}",
array=prefilled_step_idx_data,
dtype=np.int32,
suffix=self.gpu_id,
create=True,
)
layer_shm_value = IPCSignal(
name=f"splitwise_complete_prefilled_layer_{self.dp_rank_id}",
name=f"splitwise_complete_prefilled_layer_{self.rank_id}",
array=prefilled_layer_idx_data,
dtype=np.int32,
suffix=self.gpu_id,
create=True,
)
except:
step_shm_value = IPCSignal(
name=f"splitwise_complete_prefilled_step_{self.dp_rank_id}",
name=f"splitwise_complete_prefilled_step_{self.rank_id}",
array=prefilled_step_idx_data,
dtype=np.int32,
suffix=self.gpu_id,
create=False,
)
layer_shm_value = IPCSignal(
name=f"splitwise_complete_prefilled_layer_{self.dp_rank_id}",
name=f"splitwise_complete_prefilled_layer_{self.rank_id}",
array=prefilled_layer_idx_data,
dtype=np.int32,
suffix=self.gpu_id,
Expand All @@ -196,6 +202,9 @@ def _prefill_layerwise_send_cache_thread(self):
self.last_step_idx = -1
self.last_layer_idx = -1 # int32

max_step_idx = 100003
engine_recycled_count = 0

while True:

cache_info = self.engine_worker_queue.get_cache_info()
Expand All @@ -215,7 +224,6 @@ def _prefill_layerwise_send_cache_thread(self):
current_info["status"] = "init"
logger.info(f"start cache_infos: {current_info}")
self.cache_info[info["request_id"]] = current_info
self.last_step_idx = min(self.last_step_idx, current_info["current_id"])
else:
self.cache_info[info["request_id"]] = info
prefilled_layer_idx = layer_shm_value.value[0]
Expand All @@ -231,7 +239,17 @@ def _prefill_layerwise_send_cache_thread(self):
if not self.cache_info:
time.sleep(0.001)
continue
logger.debug(f"prefilled_layer_idx: {prefilled_layer_idx}, prefilled_step_idx: {prefilled_step_idx}")
if self.last_step_idx > prefilled_step_idx:
engine_recycled_count += 1
self.last_step_idx = prefilled_step_idx # only copy value read from shm memory
prefilled_step_idx = (
prefilled_step_idx + max_step_idx * engine_recycled_count
) # remap prefilled_step_idx for comparison

logger.debug(
f"prefilled_layer_idx: {prefilled_layer_idx}, prefilled_step_idx in shm: {self.last_step_idx},"
f"prefilled_step_idx: {prefilled_step_idx} engine_recycled_count {engine_recycled_count}"
)
for req_id, item in list(self.cache_info.items()):
if "status" not in item:
continue
Expand Down Expand Up @@ -305,9 +323,26 @@ def _prefill_layerwise_send_cache_thread(self):
self.engine_worker_queue.put_finished_req([(item["request_id"], "finished")])
logger.info(f"put write cache {item['request_id']}")
del self.cache_info[req_id]

self.last_step_idx = prefilled_step_idx
self.last_layer_idx = prefilled_layer_idx
self.last_layer_idx = prefilled_layer_idx

except Exception as e:
logger.error(f"prefill layerwise send cache thread has exception: {e}, {str(traceback.format_exc())}")

def _handle_connect_task(self):
    """
    Background service loop that handles RDMA connect requests.

    Continuously pulls connect tasks from the engine worker queue; for each
    task, asks the RDMA messager to connect to the requested ``ip``/``rdma_port``
    and posts a ``{"task_id": ..., "success": ...}`` response back onto the
    queue. Runs forever — intended as the target of a daemon thread
    (started in ``__init__``), so it must never raise out of the loop.
    """
    while True:
        try:
            task = self.engine_worker_queue.get_connect_rdma_task()
            if task is None:
                # Queue empty: short sleep to avoid busy-polling.
                time.sleep(0.001)
                continue
            logger.info(f"_handle_connect_task recv task: {task}")
            task_id = task["task_id"]
            ip, rdma_port = task["ip"], task["rdma_port"]
            status = self.messager["rdma"].connect(ip, rdma_port)
            # Normalize to a strict bool so the response payload is unambiguous
            # regardless of what the messager returns (e.g. 0/1, truthy object).
            response = {"task_id": task_id, "success": bool(status)}
            self.engine_worker_queue.put_connect_rdma_task_response(response)
        except Exception as e:
            # Log with the full traceback, matching the sibling
            # _prefill_layerwise_send_cache_thread handler, and back off
            # briefly so a persistently failing queue cannot spin the CPU.
            logger.error(f"handle_connect_task has exception: {e}, {str(traceback.format_exc())}")
            time.sleep(0.001)
Original file line number Diff line number Diff line change
Expand Up @@ -61,18 +61,12 @@ def connect(self, ip, port):
Connect to remote gpu and write cache.
"""
assert self.splitwise_role == "prefill", "only prefill can call this method"
addr = f"{ip}:{port!s}"
if addr in self.connected_rdma:
return True
ret = self.messager.is_connected(ip, str(port))
if ret:
self.connected_rdma.add(addr)
return True

ret = self.messager.connect(ip, str(port))
logger.info(f"connect to remote rdma address {ip}:{port} status is {ret}")
if ret == 0:
self.connected_rdma.add(addr)
return ret == 0

def write_cache(self, ip, port, local_block_ids, remote_block_ids, layer_idx):
Expand Down
1 change: 1 addition & 0 deletions fastdeploy/engine/args_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -884,6 +884,7 @@ def create_scheduler_config(self) -> SchedulerConfig:
"max_num_partial_prefills",
"max_long_partial_prefills",
"long_prefill_token_threshold",
"splitwise_role",
]

all = asdict(self)
Expand Down
49 changes: 36 additions & 13 deletions fastdeploy/engine/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,14 @@
EngineCacheQueue,
EngineWorkerQueue,
IPCSignal,
ZmqClient,
ZmqIpcServer,
ZmqTcpServer,
)
from fastdeploy.metrics.metrics import main_process_metrics
from fastdeploy.metrics.trace_util import start_span, start_span_request
from fastdeploy.model_executor.guided_decoding import schema_checker
from fastdeploy.output.token_processor import TokenProcessor, WarmUpTokenProcessor
from fastdeploy.splitwise.internal_adapter_utils import InternalAdapter
from fastdeploy.splitwise.splitwise_connector import SplitwiseConnector
from fastdeploy.utils import EngineError, console_logger, envs, llm_logger

Expand Down Expand Up @@ -180,9 +182,19 @@ def start(self, api_server_pid=None):
self.data_processor = self.input_processor.create_processor()

if api_server_pid is not None:
self.zmq_server = ZmqClient(name=api_server_pid, mode=zmq.PULL)
self.zmq_server.start_server()
self.zmq_server.create_router()
if envs.FD_ENABLE_INTERNAL_ADAPTER:
self.recv_request_server = ZmqTcpServer(port=envs.FD_ZMQ_RECV_REQUEST_SERVER_PORT, mode=zmq.PULL)
self.send_response_server = ZmqTcpServer(port=envs.FD_ZMQ_SEND_RESPONSE_SERVER_PORT, mode=zmq.ROUTER)
self.external_adapter = InternalAdapter(
cfg=self.cfg, engine=self, dp_rank=self.cfg.node_rank * self.cfg.worker_num_per_node
)
else:
self.recv_request_server = ZmqIpcServer(name=api_server_pid, mode=zmq.PULL)
self.send_response_server = ZmqIpcServer(name=api_server_pid, mode=zmq.ROUTER)
self.recv_result_handle_thread = threading.Thread(
target=self.send_response_server.recv_result_handle, daemon=True
)
self.recv_result_handle_thread.start()
time.sleep(3)

if self.do_profile == 0 and (
Expand Down Expand Up @@ -259,7 +271,7 @@ def _zmq_send_generated_tokens(self):
time.sleep(0.005)
continue
for request_id, contents in results.items():
self.zmq_server.send_multipart(request_id, contents)
self.send_response_server.send_response(request_id, contents)

except Exception as e:
llm_logger.error(f"Unexcepted error happend: {e}, {traceback.format_exc()!s}")
Expand All @@ -276,7 +288,7 @@ def _insert_task_to_worker(self):
Insert task to engine thread, monitor scheduler request queue.
if the engine has resource, insert task to engine
"""
current_id = -1
current_id = 0
while self.running:
try:
if self.resource_manager.available_batch() == 0:
Expand Down Expand Up @@ -314,12 +326,15 @@ def _insert_task_to_worker(self):
time.sleep(0.001)
continue

current_id = (current_id + 1) % 100003
if self.cfg.splitwise_role != "mixed":
llm_logger.info("Inserting splitwise tasks")
self.split_connector.send_splitwise_tasks(tasks, current_id)

self.insert_tasks(tasks, current_id)
insert_successful = self.insert_tasks(tasks, current_id)
if insert_successful:
current_id = current_id + 1
else:
continue

main_process_metrics.num_requests_waiting.dec(len(tasks))
main_process_metrics.num_requests_running.inc(len(tasks))
Expand Down Expand Up @@ -383,14 +398,18 @@ def _insert_zmq_task_to_scheduler(self):
if self.api_server_pid is None:
return

if envs.FD_ENABLE_INTERNAL_ADAPTER:
if self.cfg.splitwise_role == "decode":
return

added_requests: Dict[str, int] = dict()
while self.running:
try:
block = True if len(added_requests) == 0 else False
if not self.cfg.model_config.enable_mm:
err, data = self.zmq_server.receive_json_once(block)
err, data = self.recv_request_server.receive_json_once(block)
else:
err, data = self.zmq_server.receive_pyobj_once(block)
err, data = self.recv_request_server.receive_pyobj_once(block)
if err is not None:
llm_logger.error("Engine stops inserting zmq task into scheduler, err:{err}")
break
Expand Down Expand Up @@ -438,7 +457,7 @@ def _insert_zmq_task_to_scheduler(self):
)
# Since the request is not in scheduler
# Send result by zmq directly
self.zmq_server.send_multipart(request_id, error_result)
self.send_response_server.send_response(request_id, [error_result])
except Exception as e:
llm_logger.error(
f"Error happend while receving new request from zmq, details={e}, "
Expand Down Expand Up @@ -1003,8 +1022,12 @@ def _exit_sub_services(self):
console_logger.error(f"Error extracting sub services: {e}, {str(traceback.format_exc())}")

self.engine_worker_queue.cleanup()
if hasattr(self, "zmq_server") and self.zmq_server is not None:
self.zmq_server.close()
if hasattr(self, "send_response_server") and self.send_response_server is not None:
self.send_response_server.close()
if hasattr(self, "recv_request_server") and self.recv_request_server is not None:
self.recv_request_server.close()
if hasattr(self, "recv_control_cmd_server") and self.recv_control_cmd_server is not None:
self.recv_control_cmd_server.close()
if hasattr(self, "dp_processed"):
for p in self.dp_processed:
p.join()
Expand Down
39 changes: 29 additions & 10 deletions fastdeploy/engine/expert_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

from __future__ import annotations

import copy
import os
import signal
import threading
Expand All @@ -29,8 +30,9 @@
from fastdeploy.inter_communicator import EngineWorkerQueue, IPCSignal
from fastdeploy.metrics.metrics import main_process_metrics
from fastdeploy.output.token_processor import TokenProcessor
from fastdeploy.splitwise.internal_adapter_utils import InternalAdapter
from fastdeploy.splitwise.splitwise_connector import SplitwiseConnector
from fastdeploy.utils import EngineError, console_logger, llm_logger
from fastdeploy.utils import EngineError, console_logger, envs, llm_logger


class ExpertService:
Expand Down Expand Up @@ -59,7 +61,7 @@ def __init__(self, cfg, local_data_parallel_id):
self.cfg.disaggregate_info = None

self.scheduler = cfg.scheduler_config.scheduler()
if cfg.splitwise_role != "mixed":
if self.cfg.scheduler_config.name == "splitwise":
self.scheduler.reset_nodeid(f"{self.scheduler.infer.nodeid}_{local_data_parallel_id!s}")

self.cfg.parallel_config.local_data_parallel_id = local_data_parallel_id
Expand Down Expand Up @@ -111,8 +113,12 @@ def __init__(self, cfg, local_data_parallel_id):
)

self._finalizer = weakref.finalize(self, self._exit_sub_services)
if envs.FD_ENABLE_INTERNAL_ADAPTER:
self.external_adapter = InternalAdapter(cfg=self.cfg, engine=self, dp_rank=local_data_parallel_id)

def start(self, ipc_signal_suffix, local_data_parallel_id):
def start(
self, ipc_signal_suffix, local_data_parallel_id, request_queues_for_dp_ipc=None, result_queue_for_dp_ipc=None
):
"""
Initializes the engine and starts its sub-services.
If `api_server_pid` is defined, will launch a thread
Expand Down Expand Up @@ -145,7 +151,11 @@ def start(self, ipc_signal_suffix, local_data_parallel_id):
role = self.cfg.splitwise_role
host_ip = self.cfg.host_ip
disaggregate = self.cfg.disaggregate_info
self.scheduler.start(role, host_ip, disaggregate)
if self.cfg.scheduler_config.name == "dp":
assert (request_queues_for_dp_ipc is not None) and (result_queue_for_dp_ipc is not None)
self.scheduler.start(local_data_parallel_id, request_queues_for_dp_ipc, result_queue_for_dp_ipc)
elif self.cfg.scheduler_config.name == "splitwise":
self.scheduler.start(role, host_ip, disaggregate)
self.cfg.print()

launched_expert_service_signal_data = np.zeros(
Expand All @@ -171,7 +181,7 @@ def _insert_task_to_worker(self):
Insert task to engine thread, monitor scheduler request queue.
if the engine has resource, insert task to engine
"""
current_id = -1
current_id = 0
while True:
try:
if self.resource_manager.available_batch() == 0:
Expand Down Expand Up @@ -206,9 +216,11 @@ def _insert_task_to_worker(self):
llm_logger.info("Inserting splitwise tasks")
self.split_connector.send_splitwise_tasks(tasks, current_id)

current_id = (current_id + 1) % 100003

self.insert_tasks(tasks, current_id)
insert_successful = self.insert_tasks(tasks, current_id)
if insert_successful:
current_id = current_id + 1
else:
continue

main_process_metrics.num_requests_waiting.dec(len(tasks))
main_process_metrics.num_requests_running.inc(len(tasks))
Expand Down Expand Up @@ -283,6 +295,9 @@ def insert_tasks(self, tasks, current_id=-1, allocated=False):
cur_task_idx = self.resource_manager.req_dict[task.request_id]
del self.resource_manager.req_dict[task.request_id]
cur_task = self.resource_manager.tasks_list[cur_task_idx]
cur_task.prompt_token_ids[0] = task.outputs.token_ids[0]
if self.cfg.speculative_config.method in ["mtp"] and self.cfg.splitwise_role == "decode":
cur_task.draft_token_ids = copy.deepcopy(task.outputs.draft_token_ids)
if task.error_code != 200:
self.resource_manager.stop_flags[cur_task_idx] = True
self.resource_manager.tasks_list[cur_task_idx] = None
Expand Down Expand Up @@ -369,13 +384,17 @@ def _exit_sub_services(self):
self.zmq_server.close()


def start_expert_service(cfg, local_data_parallel_id, ipc_signal_suffix):
def start_expert_service(
cfg, local_data_parallel_id, ipc_signal_suffix, request_queues_for_dp_ipc=None, result_queue_for_dp_ipc=None
):
"""
Start expert service
"""
expert_service = ExpertService(cfg, local_data_parallel_id)
try:
expert_service.start(ipc_signal_suffix, local_data_parallel_id)
expert_service.start(
ipc_signal_suffix, local_data_parallel_id, request_queues_for_dp_ipc, result_queue_for_dp_ipc
)
expert_service.split_connector.start_receiver()
except Exception as e:
llm_logger.exception(f"Expert service failed to start: {e}, {str(traceback.format_exc())}")
Loading
Loading