6 changes: 3 additions & 3 deletions modules/llama_cpp_plugin/src/compiled_model.cpp
@@ -27,11 +27,11 @@ LlamaCppModel::LlamaCppModel(const std::string& gguf_fname,
: ICompiledModel(nullptr, plugin),
m_gguf_fname(gguf_fname),
m_num_threads(num_threads) {
-OPENVINO_DEBUG << "llama_cpp_plugin: loading llama model directly from GGUF... " << std::endl;
+OPENVINO_DEBUG("llama_cpp_plugin: loading llama model directly from GGUF... ");
llama_model_params mparams = llama_model_default_params();
mparams.n_gpu_layers = 99;
m_llama_model_ptr = llama_load_model_from_file(gguf_fname.c_str(), mparams);
-OPENVINO_DEBUG << "llama_cpp_plugin: llama model loaded successfully from GGUF..." << std::endl;
+OPENVINO_DEBUG("llama_cpp_plugin: llama model loaded successfully from GGUF...");

auto input_ids = std::make_shared<ov::opset13::Parameter>(ov::element::Type_t::i64, ov::PartialShape({-1, -1}));
auto fake_convert = std::make_shared<ov::opset13::Convert>(input_ids->output(0), ov::element::Type_t::f32);
@@ -71,7 +71,7 @@ std::shared_ptr<const ov::Model> LlamaCppModel::get_runtime_model() const {
}

void LlamaCppModel::set_property(const ov::AnyMap& properties) {
-OPENVINO_DEBUG << "llama_cpp_plugin: attempted to set_property (did nothing)";
+OPENVINO_DEBUG("llama_cpp_plugin: attempted to set_property (did nothing)");
}

ov::Any LlamaCppModel::get_property(const std::string& name) const {
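The edits above switch the call sites from the old stream-style `OPENVINO_DEBUG << ...` macro to the newer function-style `OPENVINO_DEBUG(...)` form. Below is a minimal sketch of a function-style logger that accepts the same call syntax; `debug_log` is a hypothetical stand-in for illustration only, and OpenVINO's actual macro may format and gate messages differently.

```cpp
#include <iostream>
#include <sstream>
#include <utility>

// Hypothetical stand-in for a function-style debug macro: joins all arguments
// into one message and emits it in a single call, which is why call sites
// change from `MACRO << x << std::endl;` to `MACRO(x);`.
template <typename... Args>
void debug_log(Args&&... args) {
    std::ostringstream oss;
    (oss << ... << std::forward<Args>(args));  // C++17 fold expression
    std::cerr << oss.str() << '\n';
}

int main() {
    debug_log("llama_cpp_plugin: loading llama model directly from GGUF... ");
    debug_log("requested ", 99, " GPU layers");
    return 0;
}
```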
8 changes: 4 additions & 4 deletions modules/llama_cpp_plugin/src/infer_request.cpp
@@ -28,7 +28,7 @@ void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
LlamaCppSyncInferRequest::LlamaCppSyncInferRequest(const std::shared_ptr<const LlamaCppModel>& compiled_model,
size_t num_threads)
: ov::ISyncInferRequest(compiled_model) {
-OPENVINO_DEBUG << "llama_cpp_plugin: infer request ctor called\n";
+OPENVINO_DEBUG("llama_cpp_plugin: infer request ctor called");
llama_context_params cparams = llama_context_default_params();
cparams.n_threads = num_threads ? num_threads : std::thread::hardware_concurrency();
cparams.n_ctx = 0; // this means that the actual n_ctx will be taken equal to the model's train-time value
@@ -51,7 +51,7 @@ LlamaCppSyncInferRequest::LlamaCppSyncInferRequest(const std::shared_ptr<const L
}
void LlamaCppSyncInferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
-OPENVINO_DEBUG << "llama_cpp_plugin: set_tensors_impl called\n";
+OPENVINO_DEBUG("llama_cpp_plugin: set_tensors_impl called");
}

void llama_batch_add_reimpl(struct llama_batch& batch,
@@ -131,12 +131,12 @@ void LlamaCppSyncInferRequest::infer() {
llama_batch_free(batch);
};
std::vector<ov::ProfilingInfo> LlamaCppSyncInferRequest::get_profiling_info() const {
-OPENVINO_DEBUG << "llama_cpp_plugin: get_profiling_info() called\n";
+OPENVINO_DEBUG("llama_cpp_plugin: get_profiling_info() called");
return std::vector<ov::ProfilingInfo>{};
};

std::vector<ov::SoPtr<ov::IVariableState>> LlamaCppSyncInferRequest::query_state() const {
-OPENVINO_DEBUG << "llama_cpp_plugin: query_state() called\n";
+OPENVINO_DEBUG("llama_cpp_plugin: query_state() called");
return {std::static_pointer_cast<ov::IVariableState>(std::make_shared<LlamaCppState>(m_llama_ctx))};
}

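For context, the constructor at the top of this file picks the llama.cpp thread count with a hardware fallback and leaves `n_ctx` at 0 so the model's train-time context length is used. A rough sketch of that setup follows, assuming the classic llama.cpp C API (`llama_new_context_with_model`; newer llama.cpp releases may name the factory differently). This is a sketch, not the plugin's exact code.

```cpp
#include <thread>
#include "llama.h"

// Sketch only: create a llama.cpp context with a caller-provided thread count,
// falling back to all hardware threads when the caller passes 0.
llama_context* make_context(llama_model* model, size_t num_threads) {
    llama_context_params cparams = llama_context_default_params();
    cparams.n_threads = num_threads ? num_threads : std::thread::hardware_concurrency();
    cparams.n_ctx = 0;  // 0 means "use the model's train-time context length"
    // Exact factory name depends on the llama.cpp version in use.
    return llama_new_context_with_model(model, cparams);
}
```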
15 changes: 1 addition & 14 deletions modules/nvidia_plugin/src/ops/result.cpp
@@ -62,21 +62,8 @@ std::optional<std::size_t> ResultOp::GetOutputTensorSubIndex(const ov::Output<ov
}

std::vector<std::string> ResultOp::GetOutputTensorName(const ov::op::v0::Result& node) {
-std::vector<std::string> outputNames;
-
const auto& input = node.input_value(0);
-auto name = ov::op::util::get_ie_output_name(input);
-outputNames.push_back(name);
-
-auto resultName = node.get_friendly_name();
-
-// NOTE: New way of getting the fused names for OpenVINO 2.0 API
-// TODO: When support for old OpenVINO API will be stopped, consider using only this approach.
-// Also see any issues with Tacatron2 network
-const auto& fusedResults = ov::getFusedNamesVector(input.get_node()->shared_from_this());
-outputNames.insert(outputNames.end(), fusedResults.begin(), fusedResults.end());
-
-return outputNames;
+return ov::getFusedNamesVector(input.get_node()->shared_from_this());
}

void ResultOp::Capture(InferenceRequestContext& context,
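Reading the hunk above back together, `GetOutputTensorName` reduces to roughly the two lines below (my reconstruction of the post-change function). Note the behavior change: the legacy `ov::op::util::get_ie_output_name()` entry is no longer prepended, so callers now see only the fused names of the Result's producer node.

```cpp
std::vector<std::string> ResultOp::GetOutputTensorName(const ov::op::v0::Result& node) {
    const auto& input = node.input_value(0);
    // Only the fused names attached to the producing node are reported now.
    return ov::getFusedNamesVector(input.get_node()->shared_from_this());
}
```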