diff --git a/paddle/fluid/framework/new_executor/interpreter/static_build.cc b/paddle/fluid/framework/new_executor/interpreter/static_build.cc
index 9e530761b76245..cbbe36191452bd 100644
--- a/paddle/fluid/framework/new_executor/interpreter/static_build.cc
+++ b/paddle/fluid/framework/new_executor/interpreter/static_build.cc
@@ -348,7 +348,7 @@ void FakeInitializeTensor(const phi::DeviceContext& dev_ctx,
   PADDLE_ENFORCE_EQ(place,
                     dev_ctx.GetPlace(),
                     common::errors::Unavailable(
-                        "The place %s for fack alloc is not equal to "
+                        "The place %s for fake alloc is not equal to "
                         "the place %s of DeviceContext.",
                         place,
                         dev_ctx.GetPlace()));
@@ -525,7 +525,7 @@ void RunWhileBlockPreStaticBuild(const framework::Scope& scope,
   // note(lvyongkang): The assign op in while loop may change the place of
   // variable. However, InterpreterCore fix the kernel of every ops during its
   // first run. A cpu tensor may become gpu tensor after first run. This will
-  // lead to segmetation fault when it's used in a cpu kernel. Here we record
+  // lead to segmentation fault when it's used in a cpu kernel. Here we record
   // the place of every inputs and restore their place after
   // InterpreterCore.run().
   std::map input_var_original_places;
@@ -696,7 +696,7 @@ void FakeInitializeOutputsForOperatorBase(
     if (following_input_vars.count(var_name)) {
       PADDLE_THROW(common::errors::PreconditionNotMet(
           "The output %s s' dtype/place of %s is "
-          "changed after static build. Befer static build, the "
+          "changed after static build. Before static build, the "
           "dtype is %s, place is %s. After static "
           "build, the dtype is %s, place is %s.",
           op_type,
diff --git a/paddle/fluid/framework/new_executor/pir_adaptor/pir_adaptor_util.cc b/paddle/fluid/framework/new_executor/pir_adaptor/pir_adaptor_util.cc
index 7ecef0a54751f5..11ed55043c723b 100644
--- a/paddle/fluid/framework/new_executor/pir_adaptor/pir_adaptor_util.cc
+++ b/paddle/fluid/framework/new_executor/pir_adaptor/pir_adaptor_util.cc
@@ -450,8 +450,8 @@ void BuildValue(pir::Value value,
   } else {
     PADDLE_THROW(common::errors::PreconditionNotMet(
         "Output only support DenseTensorType "
-        "or SelectedRowsType or VectorType or StackType or SpasrCooTensorType "
-        "or SpasreCsrTensorType"));
+        "or SelectedRowsType or VectorType or StackType or SparseCooTensorType "
+        "or SparseCsrTensorType"));
   }
 }
 
@@ -710,9 +710,9 @@ bool IsNeedVarInplace(pir::Operation* op,
 
 // NOTE(chenxi67): Here, we only perform inplace processing for variables that
 // need to be inplaced by var (mostly, whose type is TensorArray or re-Allocated
-// Densetensor). For other types of variables, we only share the holder of
+// DenseTensor). For other types of variables, we only share the holder of
 // DenseTensor but not the var*. The reason is that vector in
-// TensorArray (or re-Allocated Densetensor) cannot be shared totally.
+// TensorArray (or re-Allocated DenseTensor) cannot be shared totally.
 void HandleForInplaceVarOp(pir::Operation* op,
                            const std::string& var_name_prefix,
                            ValueExecutionInfo* value_exe_info) {
diff --git a/paddle/fluid/framework/new_executor/workqueue/nonblocking_threadpool.h b/paddle/fluid/framework/new_executor/workqueue/nonblocking_threadpool.h
index ae0125f8204089..11d45d7d9560e7 100644
--- a/paddle/fluid/framework/new_executor/workqueue/nonblocking_threadpool.h
+++ b/paddle/fluid/framework/new_executor/workqueue/nonblocking_threadpool.h
@@ -50,7 +50,7 @@ class ThreadPoolTempl {
   // and NonEmptyQueueIndex. Iteration is based on the fact that if we take
   // a random starting thread index t and calculate num_threads - 1 subsequent
   // indices as (t + coprime) % num_threads, we will cover all threads without
-  // repetitions (effectively getting a presudo-random permutation of thread
+  // repetitions (effectively getting a pseudo-random permutation of thread
   // indices).
   assert(num_threads_ >= 1 && num_threads_ < kMaxThreads);
   all_coprimes_.reserve(num_threads_);
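
The nonblocking_threadpool.h hunk above only fixes a typo, but the surrounding comment describes the coprime-stepping trick used for work stealing: starting from a random index t and repeatedly adding a value coprime with num_threads (mod num_threads) visits every queue exactly once. Below is a minimal standalone sketch of that idea; it is not Paddle code, and the constants and names are made up for illustration.

```cpp
// Standalone sketch (not Paddle code): stepping an index by a value that is
// coprime with num_threads visits every thread/queue index exactly once.
#include <cassert>
#include <iostream>
#include <numeric>  // std::gcd (C++17)
#include <vector>

int main() {
  const unsigned num_threads = 6;
  const unsigned coprime = 5;  // gcd(5, 6) == 1
  assert(std::gcd(coprime, num_threads) == 1u);

  const unsigned t = 2;  // pseudo-random starting index
  std::vector<bool> visited(num_threads, false);
  unsigned index = t;
  for (unsigned i = 0; i < num_threads; ++i) {
    visited[index] = true;  // e.g. probe this thread's queue for work
    std::cout << index << ' ';
    index = (index + coprime) % num_threads;
  }
  std::cout << '\n';  // prints: 2 1 0 5 4 3

  // Because coprime and num_threads share no common factor, every index
  // was visited exactly once.
  for (bool v : visited) assert(v);
  return 0;
}
```

This is presumably why the constructor pre-computes all_coprimes_ for each possible thread count: any entry can serve as the stride for a full, pseudo-random traversal of the queues.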