31 changes: 15 additions & 16 deletions model.cpp
@@ -2024,6 +2024,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
 const size_t total_tensors_to_process = processed_tensor_storages.size();
 const int64_t t_start = ggml_time_ms();
 int last_n_threads = 1;
+std::mutex tensor_backend_mutex;
 
 for (size_t file_index = 0; file_index < file_paths_.size(); file_index++) {
     std::string file_path = file_paths_[file_index];
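The first hunk adds a single `std::mutex` at function scope, ahead of the per-file loop, so every loader thread spawned during loading synchronizes on the same lock. A minimal sketch of that shape, assuming a worker-thread layout like the loader's (the `load_all` wrapper and the lambda body are illustrative stand-ins, not the real call structure):

```cpp
#include <mutex>
#include <thread>
#include <vector>

void load_all(int n_threads) {
    std::mutex tensor_backend_mutex;  // one lock, shared by every worker below
    std::vector<std::thread> workers;
    for (int i = 0; i < n_threads; i++) {
        workers.emplace_back([&tensor_backend_mutex]() {
            // file reads and type conversions run unlocked, in parallel ...
            std::lock_guard<std::mutex> lock(tensor_backend_mutex);
            // ... only the copy into backend memory is serialized
        });
    }
    for (auto& w : workers) {
        w.join();
    }
}
```

Declaring the mutex before the loop (rather than inside it) is what makes the serialization global: one lock for all files and all tensors, for the lifetime of `load_tensors`.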
@@ -2211,26 +2212,24 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
     i64_to_i32_vec((int64_t*)read_buffer.data(), (int32_t*)read_buffer.data(), tensor_storage.nelements());
 }
 
-if (tensor_storage.type == dst_tensor->type) {
-    // copy to device memory
-    t1 = ggml_time_ms();
-    convert_time_ms.fetch_add(t1 - t0);
-    t0 = ggml_time_ms();
-    ggml_backend_tensor_set(dst_tensor, read_buffer.data(), 0, ggml_nbytes(dst_tensor));
-    t1 = ggml_time_ms();
-    copy_to_backend_time_ms.fetch_add(t1 - t0);
-} else {
-    // convert first, then copy to device memory
+auto* tensor_buffer = &read_buffer;
+
+if (tensor_storage.type != dst_tensor->type) {
+    // convert first
     convert_buffer.resize(ggml_nbytes(dst_tensor));
     convert_tensor((void*)read_buffer.data(), tensor_storage.type, (void*)convert_buffer.data(), dst_tensor->type, (int)tensor_storage.nelements() / (int)tensor_storage.ne[0], (int)tensor_storage.ne[0]);
-    t1 = ggml_time_ms();
-    convert_time_ms.fetch_add(t1 - t0);
-    t0 = ggml_time_ms();
-    ggml_backend_tensor_set(dst_tensor, convert_buffer.data(), 0, ggml_nbytes(dst_tensor));
-    t1 = ggml_time_ms();
-    copy_to_backend_time_ms.fetch_add(t1 - t0);
+    tensor_buffer = &convert_buffer;
 }
 
+t1 = ggml_time_ms();
+convert_time_ms.fetch_add(t1 - t0);
+
+// copy to device memory
+std::lock_guard<std::mutex> lock(tensor_backend_mutex);
+t0 = ggml_time_ms();
+ggml_backend_tensor_set(dst_tensor, tensor_buffer->data(), 0, ggml_nbytes(dst_tensor));
+t1 = ggml_time_ms();
+copy_to_backend_time_ms.fetch_add(t1 - t0);
 }
 }
 if (zip != NULL) {
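The second hunk removes the timing-and-upload code that was duplicated across both branches of the old if/else: `tensor_buffer` starts out pointing at `read_buffer`, is redirected to `convert_buffer` only when the on-disk type differs from the destination type, and a single `ggml_backend_tensor_set` call then handles both cases. That call is now guarded by the new mutex, serializing uploads across loader threads, presumably because concurrent uploads are not safe on every backend. A self-contained sketch of the same buffer-pointer pattern, with plain byte vectors and a hypothetical `upload`/`convert_bytes` pair standing in for the ggml calls:

```cpp
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

static std::mutex backend_mutex;  // counterpart of tensor_backend_mutex

// Stand-ins for ggml_backend_tensor_set and convert_tensor.
static void upload(const uint8_t* /*data*/, size_t /*nbytes*/) { /* device copy */ }
static void convert_bytes(const std::vector<uint8_t>& src, std::vector<uint8_t>& dst) {
    dst.assign(src.begin(), src.end());  // placeholder for a real type conversion
}

static void store_tensor(std::vector<uint8_t>& read_buffer,
                         std::vector<uint8_t>& convert_buffer,
                         bool needs_convert) {
    // Default to uploading the raw bytes; convert only when the types differ.
    auto* tensor_buffer = &read_buffer;
    if (needs_convert) {
        convert_bytes(read_buffer, convert_buffer);
        tensor_buffer = &convert_buffer;
    }
    // Serialize only the device copy; reads and conversions stay parallel.
    std::lock_guard<std::mutex> lock(backend_mutex);
    upload(tensor_buffer->data(), tensor_buffer->size());
}
```

One consequence of where the `lock_guard` is declared in the diff: it is released only at the closing brace of the enclosing block, so the timing bookkeeping after `ggml_backend_tensor_set` also runs under the lock. The two `fetch_add` calls are cheap, so this keeps the change minimal, but an inner `{ ... }` scope around just the upload would shave a little off the serialized region.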