From 8a85fbcc60374a6dd7522ebbe74ed17c2715d60c Mon Sep 17 00:00:00 2001 From: Zeref996 <825276847@qq.com> Date: Thu, 4 Jan 2024 16:49:50 +0800 Subject: [PATCH 1/5] apibm multi, test=model --- framework/e2e/api_benchmark_new/runner_ci_multipro.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/e2e/api_benchmark_new/runner_ci_multipro.py b/framework/e2e/api_benchmark_new/runner_ci_multipro.py index a6697e575c..6b03408882 100644 --- a/framework/e2e/api_benchmark_new/runner_ci_multipro.py +++ b/framework/e2e/api_benchmark_new/runner_ci_multipro.py @@ -168,7 +168,7 @@ def _run_ci(self): :return: """ - multiprocess_cases = self.split_list(self.all_cases) + multiprocess_cases = self.split_list(lst=self.all_cases, n=self.multiprocess_num) processes = [] result_queue = multiprocessing.Queue() From 33479a81ed6e894a9bd736d9708c91061a60210c Mon Sep 17 00:00:00 2001 From: Zeref996 <825276847@qq.com> Date: Thu, 4 Jan 2024 18:12:55 +0800 Subject: [PATCH 2/5] update apibm multi, test=model --- framework/e2e/api_benchmark_new/runner_ci_multipro.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/e2e/api_benchmark_new/runner_ci_multipro.py b/framework/e2e/api_benchmark_new/runner_ci_multipro.py index 6b03408882..3ca2db710c 100644 --- a/framework/e2e/api_benchmark_new/runner_ci_multipro.py +++ b/framework/e2e/api_benchmark_new/runner_ci_multipro.py @@ -168,7 +168,7 @@ def _run_ci(self): :return: """ - multiprocess_cases = self.split_list(lst=self.all_cases, n=self.multiprocess_num) + multiprocess_cases = self.split_list(lst=list(self.all_cases), n=self.multiprocess_num) processes = [] result_queue = multiprocessing.Queue() From 8c949c5a293d64115f5eddb22e673f0a4386d60d Mon Sep 17 00:00:00 2001 From: Zeref996 <825276847@qq.com> Date: Thu, 4 Jan 2024 19:31:10 +0800 Subject: [PATCH 3/5] update apibm multi, test=model --- framework/e2e/api_benchmark_new/runner_ci_multipro.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) 
diff --git a/framework/e2e/api_benchmark_new/runner_ci_multipro.py b/framework/e2e/api_benchmark_new/runner_ci_multipro.py index 3ca2db710c..aaf365ae87 100644 --- a/framework/e2e/api_benchmark_new/runner_ci_multipro.py +++ b/framework/e2e/api_benchmark_new/runner_ci_multipro.py @@ -4,7 +4,7 @@ # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python """ 注意!!!!!!! -修复 self.comment 和 self.wheel_link +修复 self.core_index、self.comment 和 self.wheel_link """ import os @@ -60,8 +60,9 @@ def __init__(self, yaml_path, python): :param baseline: 性能baseline键值对, key为case名, value为性能float """ # 测试控制项 - self.core_index = args.core_index # 第一个cpu核序号 - self.multiprocess_num = 3 # 并行进程数 + # self.core_index = args.core_index # 第一个cpu核序号 + self.core_index = 7 + self.multiprocess_num = 4 # 并行进程数 self.loops = 50 # 循环次数 self.base_times = 1000 # timeit 基础运行时间 self.default_dtype = "float32" From 8b0e945cbd552b61f4b25f975b093a13870c8800 Mon Sep 17 00:00:00 2001 From: Zeref996 <825276847@qq.com> Date: Tue, 9 Jan 2024 10:38:45 +0800 Subject: [PATCH 4/5] add apibm sort yaml, test=model --- .../e2e/yaml/sort_api_benchmark_fp32.yml | 9714 +++++++++++++++++ 1 file changed, 9714 insertions(+) create mode 100644 framework/e2e/yaml/sort_api_benchmark_fp32.yml diff --git a/framework/e2e/yaml/sort_api_benchmark_fp32.yml b/framework/e2e/yaml/sort_api_benchmark_fp32.yml new file mode 100644 index 0000000000..f1e5082512 --- /dev/null +++ b/framework/e2e/yaml/sort_api_benchmark_fp32.yml @@ -0,0 +1,9714 @@ +""" +['Transformer_0', 'TripletMarginWithDistanceLoss_0', +'triplet_margin_with_distance_loss_0', 'pinv_0', +'quantile_0', 'corrcoef_0', 'nanquantile_0', 'linalg_cov_0', 'LSTMCell_0', +'HingeEmbeddingLoss_0', 'hinge_embedding_loss_0', 'MultiHeadAttention_0', +'cosine_embedding_loss_0', 'CosineEmbeddingLoss_0', 'istft_0', 'GRUCell_0', +'stft_0', 'lcm_0', 'SimpleRNNCell_0', 'std_0', 'median_0', 'SimpleRNN_0', 'var_0', +'Conv1D_0', 'tensordot_0', 'LSTM_0', 'logspace_0', 'local_response_norm_0', +'fftfreq_0', 
'PairwiseDistance_0', 'CrossEntropyLoss_0', 'AlphaDropout_0', 'conv1d_0', +'cross_entropy_0', 'rfftfreq_0', 'cosine_similarity_0', 'CosineSimilarity_0', +'Conv1DTranspose_0', 'linspace_0', 'Tensor_lerp__0', 'GRU_0', 'arange_0', 'where_0', +'normalize_0', 'StickBreakingTransform_0', 'Dropout3D_0', 'Dropout2D_0', 'randint_like_0', +'ifft2_0', 'nanmean_0', 'ifftn_0', 'Multinomial_0', 'Conv3DTranspose_0', 'Conv2DTranspose_0', +'CTCLoss_0', 'gcd_0', 'hfft2_0', 'irfft2_0', 'ctc_loss_0', 'irfftn_0', 'hfftn_0', 'ihfft2_0', +'rfft2_0', 'conv1d_transpose_0', 'rfftn_0', 'lstsq_0', 'ihfftn_0', 'Uniform_0', +'conv2d_transpose_0', 'conv3d_transpose_0', 'AdaptiveAvgPool1D_0', 'ifft_0', +'adaptive_avg_pool1d_0', 'cond_0', 'Normal_0', 'Conv2D_0', 'Conv3D_0', 'fft2_0', +'conv3d_0', 'conv2d_0', 'SoftmaxTransform_0', 'Categorical_0', 'MaxUnPool1D_0', +'irfft_0', 'hfft_0', 'max_unpool1d_0', 'AvgPool1D_0', +'affine_grid_0', 'ReflectionPad3d_0', 'ihfft_0', 'Pad1D_0', 'ReflectionPad2d_0', +'rfft_0', 'ZeroPad2D_0', 'ReflectionPad1d_0', 'MaxPool1D_0', 'avg_pool1d_0', 'Pad2D_0', +'ReplicationPad1d_0', 'diff_0', 'nansum_0', 'ReplicationPad2d_0', 'BatchNorm1D_0', +'pad_0', 'UpsamplingNearest2D_0', 'fftn_0', 'BatchNorm3D_0', 'max_pool1d_0', 'UpsamplingBilinear2D_0', +'BatchNorm2D_0', 'Upsample_0', 'Beta_0', 'KLDivLoss_0', 'fft_0', 'glu_0', 'AdaptiveMaxPool1D_0', +'upsample_0', 'ifftshift_0', 'AdaptiveAvgPool2D_0', 'take_along_axis_0', 'interpolate_0', +'MarginRankingLoss_0', 'grid_sample_0', 'adaptive_max_pool1d_0', 'kl_div_0', 'adaptive_avg_pool2d_0', +'margin_ranking_loss_0', 'Fold_0', 'AdaptiveAvgPool3D_0', 'fftshift_0', 'rot90_0', +'fold_0', 'LayerNorm_0', 'adaptive_avg_pool3d_0', 'eig_0', 'pixel_unshuffle_0', +'outer_0', 'MSELoss_0', 'BCEWithLogitsLoss_0', 'Unfold_0', 'mse_loss_0', 'InstanceNorm1D_0', +'InstanceNorm3D_0', 'L1Loss_0', 'matrix_rank_0', 'binary_cross_entropy_with_logits_0', +'InstanceNorm2D_0', 'inner_0', 'lu_unpack_0', 'unfold_0', 'Linear_0', +'cholesky_solve_0', 'lerp_0', 
'l1_loss_0', 'frac_0', 'svd_0', 'SmoothL1Loss_0', +'avg_pool3d_0', 'Bilinear_0', 'nanmedian_0', 'Pad3D_0', 'GroupNorm_0', 'ReplicationPad3d_0', +'max_pool3d_0', 'MaxUnPool2D_0', 'Tensor_subtract__0', 'AvgPool2D_0', 'Tensor_add__0', 'clip_0', +'linear_0', 'avg_pool2d_0', 'MaxUnPool3D_0', 'AvgPool3D_0', 'smooth_l1_loss_0', 'Tensor_clip__0', +'solve_0', 'unique_consecutive_0', 'instance_norm_0', 'BCELoss_0', 'kthvalue_0', 'MaxPool2D_0', +'concat_0', 'Tensor_fill_diagonal__0', 'diagflat_0', 'max_pool2d_0', 'Tensor_squeeze__0', +'linalg_triangular_solve_0', 'Dirichlet_0', 'MaxPool3D_0', 'max_unpool2d_0', 'lu_0', +'Embedding_0', 'empty_like_0', 'rad2deg_0', 'eigvalsh_0', 'AdaptiveMaxPool3D_0', +'max_unpool3d_0', 'eigh_0', 'Tensor_uniform__0', 'NLLLoss_0', 'AffineTransform_0', +'AdaptiveMaxPool2D_0', 'deg2rad_0', 'binary_cross_entropy_0', 'PReLU_0', 'elu__0', +'addmm_0', '__getitem___0', '__rtruediv___0', 'Dropout_0', '__rdiv___0', 'eigvals_0', +'repeat_interleave_0', '__rpow___0', 'bilinear_0', 'adaptive_max_pool3d_0', 'logsumexp_0', +'unique_0', 'adaptive_max_pool2d_0', 'zeros_0', 'ones_0', 'Tensor_erfinv__0', +'linalg_norm_0', 'hflip_0', 'slice_0', 'Tensor_ceil__0', 'full_0', 'ChannelShuffle_0', +'nll_loss_0', 'Tensor_sqrt__0', 'Tensor_exp__0', 'Tensor_floor__0', 'Tensor_rsqrt__0', +'normal_0', 'Tensor_round__0', 'Tensor_tanh__0', 'multi_dot_0', 'RReLU_0', 'Flatten_0', +'__matmul___0', 'Tensor_reciprocal__0', 'empty_0', 'relu__0', 'stack_0', 'index_select_0', +'argsort_0', 'sort_0', 'roll_0', 'broadcast_to_0', 'tile_0', 'PixelShuffle_0', +'Tensor_fill__0', 'split_0', 'chunk_0', 'LogSoftmax_0', 'expand_as_0', 'topk_0', 'prod_0', +'mode_0', 'bmm_0', 'dropout_0', 'randn_0', 'Softplus_0', 'LogSigmoid_0', 'rand_0', +'PowerTransform_0', 'dist_0', 'expand_0', 'Mish_0', 'qr_0', '__mul___0', '__truediv___0', +'Softmax_0', '__div___0', 'Softshrink_0', '__sub___0', 'ELU_0', '__neg___0', 'CELU_0', +'Hardtanh_0', '__pow___0', '__add___0', 'prelu_0', 'Tensor_zero__0', 'trace_0', 
+'Hardshrink_0', 'ThresholdedReLU_0', 'Silu_0', 'linalg_qr_0', +'slogdet_0', '__pow__scalar_0', 'Hardswish_0', 'Tanhshrink_0', +'eye_0', 'scatter_0', 'Hardsigmoid_0', '__radd___0', 'renorm_0', 'mm_0', +'matmul_0', '__rsub___0', 'bincount_0', '__truediv__scalar_0', '__xor___0', +'__rmul___0', '__sub__scalar_0', '__eq___0', '__mul__scalar_0', 'LeakyReLU_0', +'Softsign_0', '__le___0', 'flatten_0', '__ge___0', '__div__scalar_0', 'add_0', +'__add__scalar_0', 'subtract_0', 'GELU_0', 'rrelu_0', '__and___0', '__or___0', +'__ne___0', '__gt___0', '__lt___0', 'SELU_0', 'Sigmoid_0', 'pixel_shuffle_0', +'__floordiv___0', 'cross_0', 'ReLU6_0', 'moveaxis_0', 'mv_0', 'sum_0', 'Tanh_0', +'__mod___0', 'ReLU_0', 'unbind_0', 'log_softmax_0', 'det_0', 'TanhTransform_0', +'kron_0', 'SigmoidTransform_0', 'pow_0', 'amin_0', 'ExpTransform_0', 'divide_0', +'matrix_power_0', 'mean_0', 'softplus_0', 'cumsum_0', 'gumbel_softmax_0', +'multiply_0', 'argmax_0', '__invert___0', 'logcumsumexp_0', 'min_0', +'AbsTransform_0', 'amax_0', 'masked_select_0', 'celu_0', 'elu_0', 'max_0', +'argmin_0', 'log_sigmoid_0', 'mish_0', 'softshrink_0', 'softmax_0', 'flip_0', +'diag_0', 'heaviside_0', 'hardtanh_0', 'log1p_0', 'dot_0', 'fmax_0', +'thresholded_relu_0', 'hardshrink_0', 'fmin_0', 'complex_0', 'minimum_0', +'maximum_0', 'hardsigmoid_0', 'gelu_0', 'neg_0', 'logit_0', 'inv_0', 'tanhshrink_0', +'hardswish_0', 'log10_0', 'atan2_0', 'embedding_0', 'silu_0', 'cos_0', 'cosh_0', 'sin_0', +'atanh_0', 'log2_0', 'unsqueeze_0', 'zeros_like_0', 'acosh_0', 'ones_like_0', 'sinh_0', +'softsign_0', 'erf_0', 'atan_0', 'asin_0', 'randint_0', 'selu_0', 'full_like_0', +'cumprod_0', 'sigmoid_0', 'acos_0', 'tan_0', 'asinh_0', 'log_0', 'relu6_0', 'square_0', +'squeeze_0', 'tanh_0', 'sqrt_0', 'exp_0', 'tril_indices_0', 'erfinv_0', 'digamma_0', 'relu_0', +'abs_0', 'lgamma_0', 'expm1_0', 'cholesky_0', 'randperm_0', 'rsqrt_0', 'angle_0', +'reciprocal_0', 'poisson_0', 'sign_0', 'ceil_0', 'floor_0', 'all_0', 'round_0', +'equal_0', 
'tril_0', 'triu_0', 'trunc_0', 'conj_0', 'diagonal_0', 'diag_embed_0', +'clone_0', 'any_0', 'equal_all_0', 'nonzero_0', 'one_hot_0', 'isclose_0', 'transpose_0', +'searchsorted_0', 'remainder_0', 'allclose_0', 'logical_and_0', 'logical_xor_0', +'histogram_0', 'as_complex_0', 'multinomial_0', +'logical_or_0', 'greater_equal_0', 'mod_0', 'greater_than_0', 'bitwise_or_0', +'as_real_0', 'less_equal_0', 'not_equal_0', 'floor_divide_0', 'less_than_0', +'bitwise_xor_0', 'bitwise_and_0', 'imag_0', 'real_0', 'logical_not_0', 'isinf_0', +'bitwise_not_0', 'isnan_0', 'isfinite_0', 'numel_0', 'ReshapeTransform_0', 'Tensor_exponential__0', +'dropout2d_0', 'dropout3d_0', 'reshape_0', 'broadcast_shape_0', 't_0', 'alpha_dropout_0'] +""" + +Transformer_0: + desc: "Transformer模型" + paddle: + api_name: "paddle.nn.Transformer" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + data1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + d_model: 1 + nhead: 1 + num_encoder_layers: 1 + num_decoder_layers: 1 + dim_feedforward: 1 + dropout: 0.1 + activation: 'relu' + pytorch: + api_name: "torch.nn.Transformer" + mapping: + ins: { data0: src, data1: tgt, + d_model: d_model, nhead: nhead, num_encoder_layers: num_encoder_layers, num_decoder_layers: num_decoder_layers, + dim_feedforward: dim_feedforward, dropout: dropout, activation: activation } + +TripletMarginWithDistanceLoss_0: + desc: "计算输入 input 和 positive 和 negative 间的 triplet margin loss 损失" + paddle: + api_name: "paddle.nn.TripletMarginWithDistanceLoss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + positive: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + negative: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + margin: 1. 
+ reduction: 'mean' + pytorch: + api_name: "torch.nn.TripletMarginWithDistanceLoss" + mapping: + ins: { input: input, positive: positive, negative: negative, + margin: margin, reduction: reduction } + +triplet_margin_with_distance_loss_0: + desc: "输入 input 和 positive 和 negative 间的 triplet margin loss 损失" + paddle: + api_name: "paddle.nn.functional.triplet_margin_with_distance_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + positive: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + negative: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.triplet_margin_with_distance_loss" + mapping: + ins: { input: anchor, positive: positive, negative: negative, reduction: reduction } + +pinv_0: + desc: "该 API 通过奇异值分解(svd)来计算伪逆矩阵,支持单个矩阵或批量矩阵" + paddle: + api_name: "paddle.linalg.pinv" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + hermitian: False + pytorch: + api_name: "torch.linalg.pinv" + mapping: + ins: { x: input } + +quantile_0: + desc: "沿给定的轴 axis 计算 x 中元素的分位数 0" + paddle: + api_name: "paddle.quantile" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + q: 0.25 + axis: + keepdim: False + pytorch: + api_name: "torch.quantile" + mapping: + ins: {x: input, q: q, axis: dim, keepdim: keepdim} + +corrcoef_0: + desc: "相关系数矩阵 0" + paddle: + api_name: "paddle.linalg.corrcoef" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.corrcoef" + mapping: + ins: {x: input} + +nanquantile_0: + desc: "paddle.nanquantile计算" + paddle: + api_name: "paddle.nanquantile" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + q: 
0.5 + pytorch: + api_name: "torch.nanquantile" + mapping: + ins: { x: input, q: q } + +linalg_cov_0: + desc: "计算输入Tensor的协方差矩阵 0" + paddle: + api_name: "paddle.linalg.cov" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + params: + rowvar: True + ddof: True + fweights: + aweights: + pytorch: + api_name: "torch.cov" + mapping: + ins: {x: input} + +LSTMCell_0: + desc: "LSTM单元基类" + enable_backward: False + paddle: + api_name: "paddle.nn.LSTMCell" + inputs: + inputs: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 3 ] + range: [ -1, 1 ] + params: + input_size: 3 + hidden_size: 12 + pytorch: + api_name: "torch.nn.LSTMCell" + mapping: + ins: { inputs: input, input_size: input_size, hidden_size: hidden_size } + +HingeEmbeddingLoss_0: + desc: "hinge embedding loss 损失" + paddle: + api_name: "paddle.nn.HingeEmbeddingLoss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[1]] + params: + margin: 1.0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.HingeEmbeddingLoss" + mapping: + ins: { input: input, label: target, margin: margin, reduction: reduction, } + +hinge_embedding_loss_0: + desc: "计算输入 input 和标签 label(包含 1 和 -1) 间的 hinge embedding loss 损失" + paddle: + api_name: "paddle.nn.functional.hinge_embedding_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[1]] + params: + margin: 1.0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.hinge_embedding_loss" + mapping: + ins: { input: input, label: target, margin: margin, reduction: reduction } + +MultiHeadAttention_0: + desc: "多头注意力机制" + enable_backward: false + paddle: + api_name: "paddle.nn.MultiHeadAttention" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: 
[ -1, 1] + data1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + data2: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + embed_dim: 1 + num_heads: 1 + dropout: 0.0 + pytorch: + api_name: "torch.nn.MultiheadAttention" + mapping: + ins: { data0: query, data1: key, data2: value, embed_dim: embed_dim, num_heads: num_heads, dropout: dropout } + +cosine_embedding_loss_0: + desc: "该函数计算输入 input1, input2 和 label 之间的 CosineEmbedding 损失" + paddle: + api_name: "paddle.nn.functional.cosine_embedding_loss" + inputs: + input1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + input2: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + params: + margin: 0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.cosine_embedding_loss" + mapping: + ins: { input1: input1, input2: input2, label: target, margin: margin, reduction: reduction } + +CosineEmbeddingLoss_0: + desc: "该函数计算给定的输入 input1, input2 和 label 之间的 CosineEmbedding 损失" + paddle: + api_name: "paddle.nn.CosineEmbeddingLoss" + inputs: + input1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + input2: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + params: + margin: 0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.CosineEmbeddingLoss" + mapping: + ins: { input1: input, input2: input2, label: label, margin: margin, reduction: reduction } + +istft_0: + desc: "逆短时傅里叶变换" + paddle: + api_name: "paddle.signal.istft" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex64" + shape: [1, 2, 4] + range: [-1, 1] + params: + n_fft: 2 + hop_length: 1 +# win_length: 2 +# window: +# random: false 
+# type: "Tensor" +# dtype: "float32" +# value: [1.0, 1.0] +# center: True +# normalized: False +# length: 2 +# return_complex: False + pytorch: + api_name: "torch.istft" + mapping: + ins: { x: input, n_fft: n_fft, hop_length: hop_length } +# mapping: +# ins: { x: input, n_fft: n_fft, hop_length: hop_length, win_length: win_length, window: window, +# center: center, normalized: normalized, length: length, return_complex: return_complex } + +GRUCell_0: + desc: "门控循环单元Cell" + enable_backward: false + paddle: + api_name: "paddle.nn.GRUCell" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + data1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + input_size: 1 + hidden_size: 1 + pytorch: + api_name: "torch.nn.GRUCell" + mapping: + ins: { data0: input, data1: hidden, input_size: input_size, hidden_size: hidden_size } + +stft_0: + desc: "短时傅里叶变换将输入的信号先进行分帧,然后逐帧进行离散傅的里叶变换计算" + paddle: + api_name: "paddle.signal.stft" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [4, 4] + range: [ -1, 1] + params: + n_fft: 4 + pytorch: + api_name: "torch.stft" + mapping: + ins: { x: input, n_fft: n_fft } + +lcm_0: + desc: "计算两个输入的按元素绝对值的最小公倍数,输入必须是整型" + enable_backward: false + paddle: + api_name: "paddle.lcm" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.lcm" + mapping: + ins: { x: input, y: other } + +SimpleRNNCell_0: + desc: "循环神经网络单元基类" + enable_backward: False + paddle: + api_name: "paddle.nn.SimpleRNNCell" + inputs: + inputs: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 3 ] + range: [ -1, 1 ] + params: + input_size: 3 + hidden_size: 3 + activation: "tanh" + pytorch: + api_name: "torch.nn.RNNCell" + mapping: + ins: { inputs: input, input_size: input_size, 
hidden_size: hidden_size, activation: nonlinearity } + +std_0: + desc: "沿给定的轴 axis 计算 x 中元素的标准差" + paddle: + api_name: "paddle.std" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: 0 + unbiased: True + keepdim: False + pytorch: + api_name: "torch.std" + mapping: + ins: { x: input, axis: dim, unbiased: unbiased, keepdim: keepdim } + +median_0: + desc: "沿给定的轴 axis 计算 x 中元素的中位数 0" + enable_backward: false + paddle: + api_name: "paddle.median" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: 0 + keepdim: False + pytorch: + api_name: "torch.median" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +SimpleRNN_0: + desc: "循环神经网络单元基类" + enable_backward: False + paddle: + api_name: "paddle.nn.SimpleRNN" + inputs: + inputs: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 3 ] + range: [ -1, 1 ] + params: + input_size: 3 + hidden_size: 3 + num_layers: 1 + activation: "tanh" + dropout: 0.0 + direction: "bidirectional" + time_major: False + pytorch: + api_name: "torch.nn.RNN" + mapping: + ins: { inputs: input, input_size: input_size, hidden_size: hidden_size, num_layers: num_layers, + activation: nonlinearity, dropout: dropout} + +var_0: + desc: "沿给定的轴 axis 计算 x 中元素的方差" + paddle: + api_name: "paddle.var" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [2, 2, 2, 2] + range: [ -1, 1] + pytorch: + api_name: "torch.var" + mapping: + ins: { x: input } + +Conv1D_0: + desc: "一维卷积层 0" + paddle: + api_name: "paddle.nn.Conv1D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1 ] + range: [ -1, 1 ] + params: + in_channels: 1 + out_channels: 1 + kernel_size: 1 + stride: 1 + padding: 0 + dilation: 1 + groups: 1 + padding_mode: 'zeros' + pytorch: + api_name: "torch.nn.Conv1d" + mapping: + ins: {x: input, in_channels: in_channels, out_channels: out_channels, kernel_size: kernel_size, 
stride: stride, padding: padding, dilation: dilation, groups: groups, padding_mode: padding_mode} + +tensordot_0: + desc: "张量缩并运算,即沿着 axes 给定的多个轴对两个张量对应元素的乘积进行加和操作。" + paddle: + api_name: "paddle.tensordot" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axes: 2 + pytorch: + api_name: "torch.tensordot" + mapping: + ins: { x: a, y: b, axes: dims } + +LSTM_0: + desc: "LSTM长短期记忆网络" + enable_backward: False + paddle: + api_name: "paddle.nn.LSTM" + inputs: + inputs: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 3 ] + range: [ -1, 1 ] + params: + input_size: 3 + hidden_size: 12 + num_layers: 1 + direction: "forward" + dropout: 0 + time_major: False + pytorch: + api_name: "torch.nn.LSTM" + mapping: + ins: { inputs: input, input_size: input_size, hidden_size: hidden_size, num_layers: num_layers, + dropout: dropout } + +logspace_0: + desc: "返回一个 Tensor,Tensor 的值为在区间 [bases^tart,base^stop] 上按对数均匀间隔的 num 个值,输出 Tensor 的长度为 num" + enable_backward: false + paddle: + api_name: "paddle.logspace" + params: + start: -1.0 + stop: 1.0 + num: 1 + base: 10.0 + pytorch: + api_name: "torch.logspace" + mapping: + ins: { start: start, stop: end, num: steps, base: base } + +local_response_norm_0: + desc: "局部响应正则化" + paddle: + api_name: "paddle.nn.functional.local_response_norm" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + size: 1 + alpha: 0.0001 + beta: 0.75 + k: 1.0 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.local_response_norm" + mapping: + ins: { x: input, size: size, alpha: alpha, beta: beta, k: k } + +fftfreq_0: + desc: "离散傅里叶变换的频率窗口(frequency bins)中心序列" + enable_backward: false + paddle: + api_name: "paddle.fft.fftfreq" + params: + n: 1 + d: 1.0 + pytorch: + api_name: "torch.fft.fftfreq" + mapping: + 
ins: { n: n, d: d } + +PairwiseDistance_0: + desc: "计算两个tensor之间pairwise的距离" + paddle: + api_name: "paddle.nn.PairwiseDistance" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + data1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + p: 2. + pytorch: + api_name: "torch.nn.PairwiseDistance" + mapping: + ins: { data0: input1, data1: input2, p: p } + +CrossEntropyLoss_0: + desc: "交叉熵损失 0" + paddle: + api_name: "paddle.nn.CrossEntropyLoss" + inputs: + input: + random: true + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + label: + random: false + dtype: "float32" + value: [[0.]] + params: + soft_label: True + weight: + ignore_index: -100 + reduction: 'mean' + pytorch: + api_name: "torch.nn.CrossEntropyLoss" + mapping: + ins: {input: input, label: target, weight: weight, ignore_index: ignore_index, reduction: reduction} + +AlphaDropout_0: + desc: "AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过AlphaDropout计算之后,输出的均值和方差与输入保持一致。" + paddle: + api_name: "paddle.nn.AlphaDropout" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + p: 0.5 + pytorch: + api_name: "torch.nn.AlphaDropout" + mapping: + ins: { data: input, p: p } + +conv1d_0: + desc: "1维卷积" + paddle: + api_name: "paddle.nn.functional.conv1d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + bias: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.functional.conv1d" + mapping: + ins: { x: input, weight: weight, bias: bias, stride: stride, padding: padding } + +cross_entropy_0: + desc: "实现了 softmax 交叉熵损失函数" + paddle: + api_name: "paddle.nn.functional.cross_entropy" + inputs: + input: + random: true 
+ type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[0]] + soft_label: True + ignore_index: -100 + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.cross_entropy" + mapping: + ins: { input: input, label: target, ignore_index: ignore_index, reduction: reduction } + +rfftfreq_0: + desc: "返回离散傅里叶变换的频率窗口(frequency bins)中心,以 循环/采样间隔 为单位" + enable_backward: false + paddle: + api_name: "paddle.fft.rfftfreq" + params: + n: 1 + d: 1.0 + pytorch: + api_name: "torch.fft.rfftfreq" + mapping: + ins: { n: n, d: d } + +cosine_similarity_0: + desc: "计算x1与x2沿axis维度的余弦相似度" + paddle: + api_name: "paddle.nn.functional.cosine_similarity" + inputs: + x1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + x2: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: 1 + eps: 0.00000001 + pytorch: + api_name: "torch.nn.functional.cosine_similarity" + mapping: + ins: { x1: x1, x2: x2, axis: dim, eps: eps } + +CosineSimilarity_0: + desc: "比较两个tensor的余弦相似度" + paddle: + api_name: "paddle.nn.CosineSimilarity" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + data1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + axis: 1 + eps: 0.00000001 + pytorch: + api_name: "torch.nn.CosineSimilarity" + mapping: + ins: { data0: input1, data1: input2, axis: dim, eps: eps } + +Conv1DTranspose_0: + desc: "1维反卷积" + paddle: + api_name: "paddle.nn.Conv1DTranspose" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + in_channels: 1 + out_channels: 1 + kernel_size: [1] + stride: 1 + padding: 0 + dilation: 1 + pytorch: + api_name: "torch.nn.ConvTranspose1d" + mapping: + ins: { data: input, in_channels: in_channels, out_channels: out_channels, 
kernel_size: kernel_size, stride: stride, padding: padding, dilation: dilation } + +linspace_0: + desc: "返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num" + enable_backward: False + paddle: + api_name: "paddle.linspace" + params: + start: 1 + stop: 1 + num: 1 + dtype: "float32" + pytorch: + api_name: "torch.linspace" + mapping: + ins: { start: start, stop: end, num: steps } + +Tensor_lerp__0: + desc: "基于给定的 weight 计算 x 与 y 的线性插值" + enable_backward: false + paddle: + api_name: "paddle.Tensor.lerp_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + weight: 0.5 +# pytorch: +# api_name: "torch.Tensor.lerp_" +# mapping: +# ins: { x: input, y: end, weight: weight } + +GRU_0: + desc: "门控循环单元网络 0" + enable_backward: false + paddle: + api_name: "paddle.nn.GRU" + inputs: + inputs: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + initial_states: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + input_size: 1 + hidden_size: 1 + pytorch: + api_name: "torch.nn.GRU" + mapping: + ins: {inputs: input, initial_states: h_0, input_size: input_size, hidden_size: hidden_size} + +arange_0: + desc: "该OP返回以步长 step 均匀分隔给定数值区间[start, end)的1-D Tensor" + enable_backward: False + paddle: + api_name: "paddle.arange" + params: + start: 1.0 + end: 2.0 + step: 1.0 + dtype: "float32" + pytorch: + api_name: "torch.arange" + mapping: + ins: { start: start, end: end, step: step } + +where_0: + desc: "根据 condition 来选择 x 或 y 中的对应元素来组成新的 Tensor" + paddle: + api_name: "paddle.where" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + condition: + random: false + type: "Tensor" + dtype: 
"bool" + value: True + pytorch: + api_name: "torch.where" + mapping: + ins: { condition: condition, x: input, y: other } + +normalize_0: + desc: "使用 Lp 范数沿维度 axis 对 x 进行归一化" + paddle: + api_name: "paddle.nn.functional.normalize" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 3] + params: + p: 2 + axis: 1 + epsilon: 0.000000000001 + pytorch: + api_name: "torch.nn.functional.normalize" + mapping: + ins: { x: input, axis: dim, p: p, epsilon: eps } + +StickBreakingTransform_0: + desc: "将一个长度为 K 的向量通过 StackBreaking 构造过程变换为标准 K-单纯形 " + enable_backward: false + paddle: + api_name: "paddle.distribution.StickBreakingTransform" + params: + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.StickBreakingTransform" + mapping: + ins: { } + method: + __call__: + x: + value: [1.] + +Dropout3D_0: + desc: "3维Dropout" + paddle: + api_name: "paddle.nn.Dropout3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + p: 0.5 + pytorch: + api_name: "torch.nn.Dropout3d" + mapping: + ins: { data: input, p: p } + +Dropout2D_0: + desc: "根据给定的丢弃概率 p ,在训练过程中随机将一些神经元输出设置为0, 2D 0" + enable_backward: false + paddle: + api_name: "paddle.nn.Dropout2D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + p: 0.5 + pytorch: + api_name: "torch.nn.Dropout2d" + mapping: + ins: {x: input, p: p} + +randint_like_0: + desc: "返回服从均匀分布的、范围在[low, high)的随机Tensor,输出的形状与x的形状一致" + enable_backward: false + paddle: + api_name: "paddle.randint_like" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + high: 5 + pytorch: + api_name: "torch.randint_like" + mapping: + ins: { x: input, high: high } + +ifft2_0: + desc: "二维傅里叶变换(fft2)的逆变换" + paddle: + api_name: "paddle.fft.ifft2" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" 
+ shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.ifft2" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +nanmean_0: + desc: "沿 axis 计算 x 的平均值, 且忽略掉 NaNs 值 0" + paddle: + api_name: "paddle.nanmean" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: + keepdim: False + pytorch: + api_name: "torch.nanmean" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +ifftn_0: + desc: "N 维离散傅里叶变换的逆变换" + paddle: + api_name: "paddle.fft.ifftn" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.ifftn" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +Multinomial_0: + desc: "Multinomial 表示实验次数为 total_count,概率为 probs 的多项分布" + enable_backward: false + paddle: + api_name: "paddle.distribution.Multinomial" + params: + total_count: 1 + probs: + random: false + type: "Tensor" + dtype: "float32" + value: [0.5] + method: + sample: + shape: [1] + pytorch: + api_name: "torch.distributions.multinomial.Multinomial" + mapping: + ins: { total_count: total_count, probs: probs } + method: + sample: + sample_shape: [1] + +Conv3DTranspose_0: + desc: "1维反卷积" + paddle: + api_name: "paddle.nn.Conv3DTranspose" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + in_channels: 1 + out_channels: 1 + kernel_size: 1 + stride: 1 + padding: 0 + dilation: 1 + pytorch: + api_name: "torch.nn.ConvTranspose3d" + mapping: + ins: { data: input, in_channels: in_channels, out_channels: out_channels, kernel_size: kernel_size, stride: stride, padding: padding, dilation: dilation } + +Conv2DTranspose_0: + desc: "二维转置卷积层 0" + paddle: + api_name: "paddle.nn.Conv2DTranspose" + 
inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + in_channels: 1 + out_channels: 1 + kernel_size: 1 + stride: 1 + padding: 0 + output_padding: 0 + groups: 1 + dilation: 1 + pytorch: + api_name: "torch.nn.ConvTranspose2d" + mapping: + ins: {x: input, in_channels: in_channels, out_channels: out_channels, kernel_size: kernel_size, stride: stride, padding: padding, output_padding: output_padding, groups: groups, dilation: dilation} + +CTCLoss_0: + desc: "比较两个tensor的余弦相似度" + enable_backward: false + paddle: + api_name: "paddle.nn.CTCLoss" + inputs: + logits: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + labels: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1] + range: [ -1, 1] + input_lengths: + random: false + type: "Tensor" + dtype: "int64" + value: [1] + label_lengths: + random: false + type: "Tensor" + dtype: "int64" + value: [1] + params: + blank: 0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.CTCLoss" + mapping: + ins: { logits: Log_probs, labels: Targets, input_lengths: Input_lengths, label_lengths: Target_lengths, blank: blank, reduction: reduction } + +gcd_0: + desc: "计算两个输入的按元素绝对值的最大公约数,输入必须是整型" + enable_backward: false + paddle: + api_name: "paddle.gcd" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.gcd" + mapping: + ins: { x: input, y: other } + +hfft2_0: + desc: "通过快速傅里叶变换(FFT)算法计算二维厄米特(Hermitian)傅里叶变换" + paddle: + api_name: "paddle.fft.hfft2" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.hfft2" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +irfft2_0: + desc: 
"通过快速傅里叶变换(FFT)算法计算二维实数傅里叶变换(rfft)的逆变换" + paddle: + api_name: "paddle.fft.irfft2" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.irfft2" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +ctc_loss_0: + desc: "比较两个tensor的余弦相似度" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.ctc_loss" + inputs: + log_probs: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + labels: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1] + range: [ -1, 1] + input_lengths: + random: false + type: "Tensor" + dtype: "int64" + value: [1] + label_lengths: + random: false + type: "Tensor" + dtype: "int64" + value: [1] + params: + blank: 0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.ctc_loss" + mapping: + ins: { log_probs: log_probs, labels: targets, input_lengths: input_lengths, label_lengths: target_lengths, blank: blank, reduction: reduction } + +irfftn_0: + desc: "通过快速傅里叶变换(FFT)算法计算二维实数傅里叶变换(rfft)的逆变换" + paddle: + api_name: "paddle.fft.irfftn" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.irfftn" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +hfftn_0: + desc: "通过快速傅里叶变换(FFT)算法计算N维厄米特(Hermitian)傅里叶变换" + paddle: + api_name: "paddle.fft.hfftn" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.hfftn" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +ihfft2_0: + desc: "使用快速傅里叶变换(FFT)算法计算二维厄米特(Hermitian)傅里叶变换的逆变换" + paddle: + 
api_name: "paddle.fft.ihfft2" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.ihfft2" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +rfft2_0: + desc: "通过快速傅里叶变换(FFT)算法计算二维实数傅里叶变换" + paddle: + api_name: "paddle.fft.rfft2" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.rfft2" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +conv1d_transpose_0: + desc: "1维反卷积" + paddle: + api_name: "paddle.nn.functional.conv1d_transpose" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1 ] + stride: 1 + padding: 0 + output_padding: 0 + dilation: 1 + groups: 1 + data_format: 'NCL' + pytorch: + api_name: "torch.nn.functional.conv_transpose1d" + mapping: + ins: { x: input, weight: weight, stride: stride, padding: padding, output_padding: output_padding, + dilation: dilation, groups: groups } + +rfftn_0: + desc: "通过快速傅里叶变换(FFT)算法计算N维实数傅里叶变换" + paddle: + api_name: "paddle.fft.rfftn" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + s: !!python/tuple [3, 3] + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.rfftn" + mapping: + ins: { x: input, s: s, axes: dim, norm: norm } + +lstsq_0: + desc: "求解线性方程组的最小二乘问题" + enable_backward: false + paddle: + api_name: "paddle.linalg.lstsq" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] 
+        range: [ -1, 1]
+  pytorch:
+    api_name: "torch.linalg.lstsq"
+    mapping:
+      ins: { x: input, y: b }
+
+ihfftn_0:
+  desc: "使用快速傅里叶变换(FFT)算法计算N维厄米特(Hermitian)傅里叶变换的逆变换"
+  paddle:
+    api_name: "paddle.fft.ihfftn"
+    inputs:
+      x:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1, 1, 1, 1]
+        range: [ -1, 1]
+    params:
+      s: !!python/tuple [3, 3]
+      axes: !!python/tuple [-2, -1]
+      norm: 'backward'
+  pytorch:
+    api_name: "torch.fft.ihfftn"
+    mapping:
+      ins: { x: input, s: s, axes: dim, norm: norm }
+
+Uniform_0:
+  desc: "均匀分布是定义在区间 [low, high) 上的连续概率分布"
+  enable_backward: false
+  paddle:
+    api_name: "paddle.distribution.Uniform"
+    params:
+      low: -0.5
+      high: 0.5
+    method:
+      sample:
+        shape: [1, 1, 1]
+  pytorch:
+    api_name: "torch.distributions.uniform.Uniform"
+    mapping:
+      ins: { low: low, high: high }
+    method:
+      sample:
+        sample_shape: [1, 1, 1]
+
+conv2d_transpose_0:
+  desc: "2维反卷积"
+  paddle:
+    api_name: "paddle.nn.functional.conv2d_transpose"
+    inputs:
+      x:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1, 1, 1, 1]
+        range: [ -1, 1]
+    params:
+      weight:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1, 1, 1, 1]
+        range: [ -1, 1 ]
+      bias:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1]
+        range: [ -1, 1]
+      stride: 1
+      padding: 0
+      output_padding: 0
+      dilation: 1
+      data_format: 'NCHW'
+  pytorch:
+    api_name: "torch.nn.functional.conv_transpose2d"
+    mapping:
+      ins: { x: input, weight: weight, bias: bias, stride: stride, padding: padding, output_padding: output_padding, dilation: dilation }
+
+conv3d_transpose_0:
+  desc: "3维反卷积"
+  paddle:
+    api_name: "paddle.nn.functional.conv3d_transpose"
+    inputs:
+      x:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1, 1, 1, 1, 1]
+        range: [ -1, 1]
+    params:
+      weight:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1, 1, 1, 1, 1]
+        range: [ -1, 1 ]
+      bias:
+        random: true
+        type: "Tensor"
+        dtype: "float32"
+        shape: [1]
+        range: [ -1, 1]
+      stride: 1
+      padding:
0 + output_padding: 0 + dilation: 1 + data_format: 'NCDHW' + pytorch: + api_name: "torch.nn.functional.conv_transpose3d" + mapping: + ins: { x: input, weight: weight, bias: bias, stride: stride, padding: padding, output_padding: output_padding, dilation: dilation } + +AdaptiveAvgPool1D_0: + desc: "1维自适应池化" + paddle: + api_name: "paddle.nn.AdaptiveAvgPool1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.AdaptiveAvgPool1d" + mapping: + ins: { data: input, output_size: output_size } + +ifft_0: + desc: "一维傅里叶变换(fft)的逆变换" + paddle: + api_name: "paddle.fft.ifft" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + n: 3 + axis: -1 + norm: 'backward' + pytorch: + api_name: "torch.fft.ifft" + mapping: + ins: { x: input, n: n, axis: dim, norm: norm } + +adaptive_avg_pool1d_0: + desc: "1维自适应平均池化" + paddle: + api_name: "paddle.nn.functional.adaptive_avg_pool1d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.functional.adaptive_avg_pool1d" + mapping: + ins: { x: input, output_size: output_size } + +cond_0: + desc: "根据范数种类 p 计算一个或一批矩阵的条件数" + paddle: + api_name: "paddle.linalg.cond" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + p: 2 + pytorch: + api_name: "torch.linalg.cond" + mapping: + ins: { x: input, p: p } + +Normal_0: + desc: "正态分布" + enable_backward: false + paddle: + api_name: "paddle.distribution.Normal" + params: + loc: 0. 
+ scale: 0.1 + method: + sample: + shape: [1] + pytorch: + api_name: "torch.distributions.normal.Normal" + mapping: + ins: { loc: loc, scale: scale } + method: + sample: + sample_shape: [1] + +Conv2D_0: + desc: "二维卷积 0" + paddle: + api_name: "paddle.nn.Conv2D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + in_channels: 1 + out_channels: 1 + kernel_size: 1 + stride: 1 + padding: 0 + dilation: 1 + groups: 1 + pytorch: + api_name: "torch.nn.Conv2d" + mapping: + ins: {x: input, in_channels: in_channels, out_channels: out_channels, kernel_size: kernel_size, stride: stride, padding: padding, dilation: dilation, groups: groups} + +Conv3D_0: + desc: "三维卷积层 0" + paddle: + api_name: "paddle.nn.Conv3D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + in_channels: 1 + out_channels: 1 + kernel_size: 1 + stride: 1 + padding: 0 + dilation: 1 + groups: 1 + padding_mode: 'zeros' + pytorch: + api_name: "torch.nn.Conv3d" + mapping: + ins: {x: input, in_channels: in_channels, out_channels: out_channels, kernel_size: kernel_size, stride: stride, padding: padding, dilation: dilation, groups: groups, padding_mode: padding_mode} + +fft2_0: + desc: "二维离散傅里叶变换" + paddle: + api_name: "paddle.fft.fft2" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axes: !!python/tuple [-2, -1] + norm: 'backward' + pytorch: + api_name: "torch.fft.fft2" + mapping: + ins: { x: input, axes: dim, norm: norm } + +conv3d_0: + desc: "3维卷积" + paddle: + api_name: "paddle.nn.functional.conv3d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1 ] + bias: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1 ] + stride: 1 + padding: 0 + 
pytorch: + api_name: "torch.nn.functional.conv3d" + mapping: + ins: { x: input, weight: weight, bias: bias, stride: stride, padding: padding } + +conv2d_0: + desc: "2维卷积" + paddle: + api_name: "paddle.nn.functional.conv2d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + bias: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.functional.conv2d" + mapping: + ins: { x: input, weight: weight, bias: bias, stride: stride, padding: padding } + +SoftmaxTransform_0: + desc: "Softmax 变换,首先进行 y=exp(x) 变换,然后归一化 " + enable_backward: false + paddle: + api_name: "paddle.distribution.SoftmaxTransform" + params: + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.SoftmaxTransform" + mapping: + ins: { } + method: + __call__: + x: + value: [1.] 
+ +Categorical_0: + desc: "类别分布是一种离散概率分布,其随机变量可以取 K 个相互独立类别的其中一个" + enable_backward: false + paddle: + api_name: "paddle.distribution.Categorical" + params: + logits: + random: false + type: "Tensor" + dtype: "float32" + value: [0.1253, 0.5213] + method: + sample: + shape: [1, 1] + pytorch: + api_name: "torch.distributions.categorical.Categorical" + mapping: + ins: { logits: logits } + method: + sample: + sample_shape: [1, 1] + +MaxUnPool1D_0: + desc: "1D 最大反池化 操作" + enable_backward: false + paddle: + api_name: "paddle.nn.MaxUnPool1D" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + indices: + random: false + type: "Tensor" + dtype: "int32" + value: [[[0]]] + params: + kernel_size: 1 + stride: 1 + padding: 0 +# pytorch: +# api_name: "torch.nn.MaxUnpool1d" +# mapping: +# ins: { x: input, indices: indices, kernel_size: kernel_size, stride: stride, padding: padding } + +irfft_0: + desc: "通过快速傅里叶变换(FFT)算法计算一维实数傅里叶变换(rfft)的逆变换" + paddle: + api_name: "paddle.fft.irfft" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + n: 3 + axis: -1 + norm: 'backward' + pytorch: + api_name: "torch.fft.irfft" + mapping: + ins: { x: input, n: n, axis: dim, norm: norm } + +hfft_0: + desc: "通过快速傅里叶变换(FFT)算法计算一维厄米特(Hermitian)傅里叶变换" + paddle: + api_name: "paddle.fft.hfft" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + n: 3 + axis: -1 + norm: 'backward' + pytorch: + api_name: "torch.fft.hfft" + mapping: + ins: { x: input, n: n, axis: dim, norm: norm } + +max_unpool1d_0: + desc: "1D 最大反池化 操作" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.max_unpool1d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + indices: + random: false + type: "Tensor" + dtype: "int32" + value: [[[0]]] + params: + kernel_size: 1 + stride: 1 + padding: 0 
+# pytorch: +# api_name: "torch.nn.functional.max_unpool1d" +# mapping: +# ins: { x: input, indices: indices, kernel_size: kernel_size, stride: stride, padding: padding } + +AvgPool1D_0: + desc: "1维平均池化" + paddle: + api_name: "paddle.nn.AvgPool1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + kernel_size: 1 + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.AvgPool1d" + mapping: + ins: { data: input, kernel_size: kernel_size, stride: stride, padding: padding } + +affine_grid_0: + desc: "生成仿射变换前后的feature maps的坐标映射关系" + paddle: + api_name: "paddle.nn.functional.affine_grid" + inputs: + theta: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 2, 3] + range: [ -1, 1] + params: + out_shape: [1, 1, 1, 1] + align_corners: True + pytorch: + api_name: "torch.nn.functional.affine_grid" + mapping: + ins: { theta: theta, out_shape: size, align_corners: align_corners } + +ReflectionPad3d_0: + desc: "3维pad填充" + paddle: + api_name: "paddle.nn.Pad3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 2, 3] + range: [ -1, 1] + params: + padding: [1, 2] + mode: "reflect" + data_format: 'NCL' + pytorch: + api_name: "torch.nn.ReflectionPad3d" + mapping: + ins: { data: input, padding: padding } + +ihfft_0: + desc: "使用快速傅里叶变换(FFT)算法计算一维厄米特(Hermitian)傅里叶变换的逆变换" + paddle: + api_name: "paddle.fft.ihfft" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + n: 3 + axis: -1 + norm: 'backward' + pytorch: + api_name: "torch.fft.ihfft" + mapping: + ins: { x: input, n: n, axis: dim, norm: norm } + +Pad1D_0: + desc: "1维pad填充" + paddle: + api_name: "paddle.nn.Pad1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + padding: [1, 1] + mode: "constant" + value: 0 + data_format: 'NCL' + pytorch: + api_name: "torch.nn.ConstantPad1d" + mapping: + ins: 
{ data: input, padding: padding, value: value } + +ReflectionPad2d_0: + desc: "2维pad填充" + paddle: + api_name: "paddle.nn.Pad2D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 2, 3] + range: [ -1, 1] + params: + padding: [1, 2] + mode: "reflect" + data_format: 'NCL' + pytorch: + api_name: "torch.nn.ReflectionPad2d" + mapping: + ins: { data: input, padding: padding } + +rfft_0: + desc: "通过快速傅里叶变换(FFT)算法计算一维实数傅里叶变换" + paddle: + api_name: "paddle.fft.rfft" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + n: 3 + axis: -1 + norm: 'backward' + pytorch: + api_name: "torch.fft.rfft" + mapping: + ins: { x: input, n: n, axis: dim, norm: norm } + +ZeroPad2D_0: + desc: "按照 padding 属性对输入进行零填充" + paddle: + api_name: "paddle.nn.ZeroPad2D" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + padding: 1 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.ZeroPad2d" + mapping: + ins: { x: input, padding: padding } + +ReflectionPad1d_0: + desc: "1维pad填充" + paddle: + api_name: "paddle.nn.Pad1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 2, 3] + range: [ -1, 1] + params: + padding: [1, 2] + mode: "reflect" + data_format: 'NCL' + pytorch: + api_name: "torch.nn.ReflectionPad1d" + mapping: + ins: { data: input, padding: padding } + +MaxPool1D_0: + desc: "1维最大池化" + paddle: + api_name: "paddle.nn.MaxPool1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + kernel_size: 1 + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.MaxPool1d" + mapping: + ins: { data: input, kernel_size: kernel_size, stride: stride, padding: padding } + +avg_pool1d_0: + desc: "1维平均池化" + paddle: + api_name: "paddle.nn.functional.avg_pool1d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 
1] + params: + kernel_size: 1 + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.functional.avg_pool1d" + mapping: + ins: { x: input, kernel_size: kernel_size, stride: stride, padding: padding } + +Pad2D_0: + desc: "2维pad填充" + paddle: + api_name: "paddle.nn.Pad2D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + padding: [1, 1, 1, 1] + mode: "constant" + value: 0 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.ConstantPad2d" + mapping: + ins: { data: input, padding: padding, value: value } + +ReplicationPad1d_0: + desc: "按照 padding、mode 和 value 属性对输入进行填充 0" + paddle: + api_name: "paddle.nn.Pad1D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1] + range: [ -1, 1 ] + params: + padding: [1, 1] + mode: 'replicate' + pytorch: + api_name: "torch.nn.ReplicationPad1d" + mapping: + ins: {x: input, padding: padding} + +diff_0: + desc: "沿着指定轴计算输入Tensor的n阶前向差值 0" + enable_backward: False + paddle: + api_name: "paddle.diff" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + n: 1 + axis: -1 + prepend: + append: + pytorch: + api_name: "torch.diff" + mapping: + ins: {x: input, n: n, axis: dim, prepend: prepend, append: append} + +nansum_0: + desc: "对指定维度上的Tensor元素求和, 将nan视为0 0" + paddle: + api_name: "paddle.nansum" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: + keepdim: False + pytorch: + api_name: "torch.nansum" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +ReplicationPad2d_0: + desc: "按照 padding、mode 和 value 属性对输入进行填充 0" + paddle: + api_name: "paddle.nn.Pad2D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + padding: [1, 0, 1, 1] + mode: 'replicate' + pytorch: + api_name: "torch.nn.ReplicationPad2d" + mapping: + ins: {x: input, padding: padding} + +BatchNorm1D_0: + desc: "1维BN批归一化" + 
paddle: + api_name: "paddle.nn.BatchNorm1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 2] + range: [ -1, 1] + params: + num_features: 1 + pytorch: + api_name: "torch.nn.BatchNorm1d" + mapping: + ins: { data: input, num_features: num_features } + excess: + device: + +pad_0: + desc: "该OP依照 pad 和 mode 属性对 x 进行 pad" + paddle: + api_name: "paddle.nn.functional.pad" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + pad: [2, 1, 3, 1] + mode: 'constant' + value: 0.0 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.pad" + mapping: + ins: { x: input, pad: pad, mode: mode, value: value } + +UpsamplingNearest2D_0: + desc: "该OP用于最近邻插值插值调整一个batch中图片的大小" + paddle: + api_name: "paddle.nn.UpsamplingNearest2D" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + size: [1, 1] + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.UpsamplingNearest2d" + mapping: + ins: { data0: input, size: size } + +fftn_0: + desc: "N维离散傅里叶变换" + paddle: + api_name: "paddle.fft.fftn" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + norm: 'backward' + pytorch: + api_name: "torch.fft.fftn" + mapping: + ins: { x: input, norm: norm } + +BatchNorm3D_0: + desc: "3维BN批归一化" + paddle: + api_name: "paddle.nn.BatchNorm3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 2, 2, 2, 2] + range: [ -1, 1] + params: + num_features: 2 + momentum: 0.9 + epsilon: 0.00001 + data_format: 'NCDHW' + pytorch: + api_name: "torch.nn.BatchNorm3d" + mapping: + ins: { data: input, num_features: num_features, momentum: momentum, epsilon: eps } + excess: + device: + +max_pool1d_0: + desc: "1维最大池化" + paddle: + api_name: "paddle.nn.functional.max_pool1d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + 
range: [ -1, 1] + params: + kernel_size: 1 + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.functional.max_pool1d" + mapping: + ins: { x: input, kernel_size: kernel_size, stride: stride, padding: padding } + +UpsamplingBilinear2D_0: + desc: "该OP用于双线性插值插值调整一个batch中图片的大小" + paddle: + api_name: "paddle.nn.UpsamplingBilinear2D" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + size: [1, 1] + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.UpsamplingBilinear2d" + mapping: + ins: { data0: input, size: size } + +BatchNorm2D_0: + desc: "2维BN批归一化" + paddle: + api_name: "paddle.nn.BatchNorm2D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 2, 2] + range: [ -1, 1] + params: + num_features: 1 + momentum: 0.9 + epsilon: 0.00001 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.BatchNorm2d" + mapping: + ins: { data: input, num_features: num_features, momentum: momentum, epsilon: eps } + excess: + device: + +Upsample_0: + desc: "调整一个batch中图片的大小 0" + paddle: + api_name: "paddle.nn.Upsample" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + size: [2, 2] + scale_factor: + mode: 'nearest' + pytorch: + api_name: "torch.nn.Upsample" + mapping: + ins: {x: input, size: size, scale_factor: scale_factor, mode: mode } + +Beta_0: + desc: "在概率论中,Beta 分布是指一组定义在 [0,1] 区间的连续概率分布" + enable_backward: false + paddle: + api_name: "paddle.distribution.Beta" + params: + alpha: 0.5 + beta: 0.5 + method: + sample: + shape: [1, 1, 1] + pytorch: + api_name: "torch.distributions.beta.Beta" + mapping: + ins: { alpha: concentration1, beta: concentration0 } + method: + sample: + sample_shape: [1, 1, 1] + +KLDivLoss_0: + desc: "计算输入(Input)和输入(Label)之间的 Kullback-Leibler 散度损失" + paddle: + api_name: "paddle.nn.KLDivLoss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + 
random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.KLDivLoss" + mapping: + ins: { input: input, label: target, reduction: reduction } + +fft_0: + desc: "一维离散傅里叶变换" + paddle: + api_name: "paddle.fft.fft" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: -1 + norm: 'backward' + pytorch: + api_name: "torch.fft.fft" + mapping: + ins: { x: input, axis: dim, norm: norm } + +glu_0: + desc: "门控线性单元。输入按照给定的维度二等分,其中第一部分被用作内容,第二部分经过一个 sigmoid 函数之后被用作门限" + paddle: + api_name: "paddle.nn.functional.glu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 2] + range: [ -1, 1] + params: + axis: -1 + pytorch: + api_name: "torch.nn.functional.glu" + mapping: + ins: { x: input, axis: dim } + +AdaptiveMaxPool1D_0: + desc: "1维自适应最大值池化" + paddle: + api_name: "paddle.nn.AdaptiveMaxPool1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.AdaptiveMaxPool1d" + mapping: + ins: { data: input, output_size: output_size } + +upsample_0: + desc: "调整一个batch中图片的大小" + paddle: + api_name: "paddle.nn.functional.upsample" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + size: [1, 1] + mode: 'nearest' + align_corners: False + align_mode: 0 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.upsample" + mapping: + ins: { x: input, size: size } + +ifftshift_0: + desc: "fftshift 的逆变换" + paddle: + api_name: "paddle.fft.ifftshift" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.fft.ifftshift" + mapping: + ins: { x: input } + +AdaptiveAvgPool2D_0: + desc: "2D的自适应平均池化 0" + paddle: + api_name: "paddle.nn.AdaptiveAvgPool2D" + 
inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.AdaptiveAvgPool2d" + mapping: + ins: {x: input, output_size: output_size} + +take_along_axis_0: + desc: "基于输入索引矩阵,沿着指定 axis 从 arr 矩阵里选取 1d 切片。" + enable_backward: false + paddle: + api_name: "paddle.take_along_axis" + inputs: + arr: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + indices: + random: true + type: "Tensor" + dtype: "int" + shape: [ 1, 1, 1, 1 ] + range: [ 0, 1 ] + params: + axis: 0 + pytorch: + api_name: "torch.take_along_dim" + mapping: + ins: { arr: input, indices: indices, axis: dim } + +interpolate_0: + desc: "调整一个batch中图片的大小" + paddle: + api_name: "paddle.nn.functional.interpolate" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + size: [12, 12] + mode: 'nearest' + align_corners: False + align_mode: 0 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.interpolate" + mapping: + ins: { x: input, size: size, mode: mode, } + +MarginRankingLoss_0: + desc: "计算输入 input,other 和 标签 label 间的 margin rank loss 损失" + paddle: + api_name: "paddle.nn.MarginRankingLoss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 1] + other: + random: false + type: "Tensor" + dtype: "float32" + value: [[1]] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[1]] + params: + margin: 0.0 + reduction: 'mean' + pytorch: + api_name: "torch.nn.MarginRankingLoss" + mapping: + ins: { input: input1, other: input2, label: target, margin: margin, reduction: reduction } + +grid_sample_0: + desc: "基于flow field网格的对输入X进行双线性插值采样" + paddle: + api_name: "paddle.nn.functional.grid_sample" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 2] + range: [ -1, 1] + params: + grid: + random: true + type: "Tensor" + 
dtype: "float32" + shape: [1, 1, 1, 2] + range: [ -1, 1] + mode: 'bilinear' + padding_mode: 'zeros' + align_corners: True + pytorch: + api_name: "torch.nn.functional.grid_sample" + mapping: + ins: { x: input, grid: grid, mode: mode, padding_mode: padding_mode, align_corners: align_corners } + +adaptive_max_pool1d_0: + desc: "1维自适应平均池化" + paddle: + api_name: "paddle.nn.functional.adaptive_max_pool1d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.functional.adaptive_max_pool1d" + mapping: + ins: { x: input, output_size: output_size } + +kl_div_0: + desc: "计算输入(Input)和输入(Label)之间的 Kullback-Leibler 散度损失" + paddle: + api_name: "paddle.nn.functional.kl_div" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[1]] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.kl_div" + mapping: + ins: { input: input, label: target, reduction: reduction } + +adaptive_avg_pool2d_0: + desc: "2维自适应平均池化" + paddle: + api_name: "paddle.nn.functional.adaptive_avg_pool2d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: [1, 1] + pytorch: + api_name: "torch.nn.functional.adaptive_avg_pool2d" + mapping: + ins: { x: input, output_size: output_size } + +margin_ranking_loss_0: + desc: "计算输入 input,other 和 标签 label 间的 margin rank loss 损失" + paddle: + api_name: "paddle.nn.functional.margin_ranking_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + other: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + margin: 0.0 + reduction: 'mean' + pytorch: + api_name: 
"torch.nn.functional.margin_ranking_loss" + mapping: + ins: { input: input1, other: input2, label: target, margin: margin, reduction: reduction } + +Fold_0: + desc: "将一个滑动局部块组合成一个大的张量" + paddle: + api_name: "paddle.nn.Fold" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [3, 12, 9] + range: [ -1, 1] + params: + output_sizes: 4 + kernel_sizes: 2 + pytorch: + api_name: "torch.nn.Fold" + mapping: + ins: { data: input, output_sizes: output_size, kernel_sizes: kernel_size } + +AdaptiveAvgPool3D_0: + desc: "3维自适应池化" + paddle: + api_name: "paddle.nn.AdaptiveAvgPool3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: [1, 1, 1] + pytorch: + api_name: "torch.nn.AdaptiveAvgPool3d" + mapping: + ins: { data: input, output_size: output_size } + +fftshift_0: + desc: "将零频率项移动到频谱的中心" + paddle: + api_name: "paddle.fft.fftshift" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.fft.fftshift" + mapping: + ins: { x: input } + +rot90_0: + desc: "沿axes指定的平面将n维tensor旋转90度 0" + paddle: + api_name: "paddle.rot90" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + k: 1 + axes: [2, 3] + pytorch: + api_name: "torch.rot90" + mapping: + ins: {x: input, k: k, axes: dims} + +fold_0: + desc: "将一个滑动局部块组合成一个大的张量" + paddle: + api_name: "paddle.nn.functional.fold" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [3, 12, 9] + range: [ -1, 1] + params: + output_sizes: 4 + kernel_sizes: 2 + pytorch: + api_name: "torch.nn.functional.fold" + mapping: + ins: { x: input, output_sizes: output_size, kernel_sizes: kernel_size } + +LayerNorm_0: + desc: "层归一化" + paddle: + api_name: "paddle.nn.LayerNorm" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + 
normalized_shape: !!python/tuple [1, 1, 1] + pytorch: + api_name: "torch.nn.LayerNorm" + mapping: + ins: { data: input, normalized_shape: normalized_shape } + excess: + device: + +adaptive_avg_pool3d_0: + desc: "3维自适应平均池化" + paddle: + api_name: "paddle.nn.functional.adaptive_avg_pool3d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: [1, 1, 1] + pytorch: + api_name: "torch.nn.functional.adaptive_avg_pool3d" + mapping: + ins: { x: input, output_size: output_size } + +eig_0: + desc: "计算一般方阵 x 的的特征值和特征向量" + enable_backward: false + paddle: + api_name: "paddle.linalg.eig" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.linalg.eig" + mapping: + ins: { x: input } + +pixel_unshuffle_0: + desc: "将一个形为 [N,C,H,W] 或 [N,H,W,C] 的 Tensor 重新排列成形为 [N,r2C,H/r,W/r] 或 [N,H/r,W/r,r2C] 的 Tensor" + paddle: + api_name: "paddle.nn.functional.pixel_unshuffle" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + downscale_factor: 1 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.pixel_unshuffle" + mapping: + ins: { x: input, downscale_factor: downscale_factor } + +outer_0: + desc: "计算两个Tensor的外积 0" + paddle: + api_name: "paddle.outer" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1 ] + range: [ -1, 1 ] + params: + y: + random: true + dtype: "float32" + shape: [ 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.outer" + mapping: + ins: {x: input, y: vec2} + +MSELoss_0: + desc: "计算预测值和目标值的均方差误差 0" + paddle: + api_name: "paddle.nn.MSELoss" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + label: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.MSELoss" + mapping: + ins: {x: input, label: 
target, reduction: reduction} + +BCEWithLogitsLoss_0: + desc: "计算输入 logit 和标签 label 间的 binary cross entropy with logits loss 损失 0" + paddle: + api_name: "paddle.nn.BCEWithLogitsLoss" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + label: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.BCEWithLogitsLoss" + mapping: + ins: {x: input, label: target, reduction: reduction} + +Unfold_0: + desc: "通被称作为im2col过程. 对于每一个输入形状为[N, C, H, W]的 x ,都将计算出一个形状为[N, Cout, Lout]的输出" + paddle: + api_name: "paddle.nn.Unfold" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + params: + kernel_sizes: [1, 1] + strides: 1 + paddings: 0 + dilations: 1 + pytorch: + api_name: "torch.nn.Unfold" + mapping: + ins: { x: input, kernel_sizes: kernel_size, strides: stride, paddings: padding, dilations: dilation } + +mse_loss_0: + desc: "用于计算预测值和目标值的均方差误差" + paddle: + api_name: "paddle.nn.functional.mse_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.mse_loss" + mapping: + ins: { input: input, label: target, reduction: reduction } + +InstanceNorm1D_0: + desc: "1维实例归一化" + paddle: + api_name: "paddle.nn.InstanceNorm1D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [2, 2, 2] + range: [ -1, 1] + params: + num_features: 2 + epsilon: 0.00001 + momentum: 0.9 + data_format: 'NCL' + pytorch: + api_name: "torch.nn.InstanceNorm1d" + mapping: + ins: { data: input, num_features: num_features, epsilon: eps, momentum: momentum } + +InstanceNorm3D_0: + desc: "3维实例归一化" + paddle: + api_name: "paddle.nn.InstanceNorm3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + 
shape: [2, 2, 2, 2, 2] + range: [ -1, 1] + params: + num_features: 2 + epsilon: 0.00001 + momentum: 0.9 + data_format: 'NCL' + pytorch: + api_name: "torch.nn.InstanceNorm3d" + mapping: + ins: { data: input, num_features: num_features, epsilon: eps, momentum: momentum } + +L1Loss_0: + desc: "L1 loss 损失 0" + paddle: + api_name: "paddle.nn.L1Loss" + inputs: + input: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + label: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.L1Loss" + mapping: + ins: {input: input, label: Target} + +matrix_rank_0: + desc: "计算矩阵的秩" + enable_backward: false + paddle: + api_name: "paddle.linalg.matrix_rank" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + hermitian: False + pytorch: + api_name: "torch.linalg.matrix_rank" + mapping: + ins: { x: input, hermitian: hermitian } + +binary_cross_entropy_with_logits_0: + desc: "计算输入 logit 和标签 label 间的 binary cross entropy with logits loss 损失" + paddle: + api_name: "paddle.nn.functional.binary_cross_entropy_with_logits" + inputs: + logit: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[[1]]] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.binary_cross_entropy_with_logits" + mapping: + ins: { logit: input, label: target, reduction: reduction } + +InstanceNorm2D_0: + desc: "2维实例归一化" + paddle: + api_name: "paddle.nn.InstanceNorm2D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 2, 2] + range: [ -1, 1] + params: + num_features: 1 + pytorch: + api_name: "torch.nn.InstanceNorm2d" + mapping: + ins: { data: input, num_features: num_features } + excess: + device: + +inner_0: + desc: "计算两个Tensor的内积 0" + paddle: + api_name: "paddle.inner" + inputs: + x: + 
random: true + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + pytorch: + api_name: "torch.inner" + mapping: + ins: {x: input, y: other} + +lu_unpack_0: + desc: "对 paddle.linalg.lu 返回结果的 LU、pivot 进行展开得到原始的单独矩阵 L、U、P" + enable_backward: False + paddle: + api_name: "paddle.linalg.lu_unpack" + inputs: + x: + random: false + type: "Tensor" + dtype: "float32" + value: [[1., 3.], [3., 2.], [5., 6.]] + y: + random: false + type: "Tensor" + dtype: "int32" + value: [[3, 3]] + params: + unpack_ludata: True + unpack_pivots: True + pytorch: + api_name: "torch.lu_unpack" + mapping: + ins: { x: LU_data, y: LU_pivots, unpack_ludata: unpack_data, unpack_pivots: unpack_pivots } + +unfold_0: + desc: "通被称作为im2col过程. 对于每一个输入形状为[N, C, H, W]的 x ,都将计算出一个形状为[N, Cout, Lout]的输出" + paddle: + api_name: "paddle.nn.functional.unfold" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + params: + kernel_sizes: [1, 1] + strides: 1 + paddings: 0 + dilations: 1 + pytorch: + api_name: "torch.nn.functional.unfold" + mapping: + ins: { x: input, kernel_sizes: kernel_size, strides: stride, paddings: padding, dilations: dilation } + +Linear_0: + desc: "线性层 0" + paddle: + api_name: "paddle.nn.Linear" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1] + range: [ -1, 1 ] + params: + in_features: 1 + out_features: 1 + pytorch: + api_name: "torch.nn.Linear" + mapping: + ins: {x: input, in_features: in_features, out_features: out_features} + +cholesky_solve_0: + desc: "对输入的N维(N>=2)矩阵x进行LU分解" + paddle: + api_name: "paddle.linalg.cholesky_solve" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 10] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 10] + pytorch: + api_name: "torch.cholesky_solve" + mapping: + ins: { x: input, y: input2, upper: upper } + +lerp_0: + desc: "基于给定的 weight 计算 x 
与 y 的线性插值" + enable_backward: false + paddle: + api_name: "paddle.lerp" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + weight: 0.5 + pytorch: + api_name: "torch.lerp" + mapping: + ins: { x: input, y: end, weight: weight } + +l1_loss_0: + desc: "计算输入 input 和标签 label 间的 L1 loss 损失" + paddle: + api_name: "paddle.nn.functional.l1_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[1]] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.l1_loss" + mapping: + ins: { input: input, label: target, reduction: reduction } + +frac_0: + desc: "得到输入 Tensor 的小数部分" + paddle: + api_name: "paddle.frac" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.frac" + mapping: + ins: { x: input } + +svd_0: + desc: "计算一个或一批矩阵的奇异值分解" + enable_backward: false + paddle: + api_name: "paddle.linalg.svd" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + full_matrices: False + pytorch: + api_name: "torch.linalg.svd" + mapping: + ins: { x: A, full_matrices: full_matrices } + +SmoothL1Loss_0: + desc: "如果逐个元素的绝对误差低于 1,则创建使用平方项的条件, 否则为 L1 损失" + paddle: + api_name: "paddle.nn.SmoothL1Loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + reduction: 'mean' + delta: 1.0 + pytorch: + api_name: "torch.nn.SmoothL1Loss" + mapping: + ins: { input: input, label: target, reduction: reduction, delta: beta } + +avg_pool3d_0: + desc: "3维平均池化" + paddle: + api_name: "paddle.nn.functional.avg_pool3d" + inputs: 
+ x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + kernel_size: [1, 1, 1] + stride: [1, 1, 1] + padding: [0, 0, 0] + pytorch: + api_name: "torch.nn.functional.avg_pool3d" + mapping: + ins: { x: input, kernel_size: kernel_size, stride: stride, padding: padding } + +Bilinear_0: + desc: "该层对两个输入执行双线性张量积" + paddle: + api_name: "paddle.nn.Bilinear" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + data1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + in1_features: 1 + in2_features: 1 + out_features: 1 + pytorch: + api_name: "torch.nn.Bilinear" + mapping: + ins: { data0: input1, data1: input2, in1_features: in1_features, in2_features: in2_features, out_features: out_features } + +nanmedian_0: + desc: "忽略nan返回Tensor中位数 0" + enable_backward: false + paddle: + api_name: "paddle.nanmedian" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: -1 + keepdim: False + pytorch: + api_name: "torch.nanmedian" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +Pad3D_0: + desc: "3维pad填充" + paddle: + api_name: "paddle.nn.Pad3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + padding: [1, 1, 1, 1, 1, 1] + mode: "constant" + value: 0 + data_format: 'NCDHW' + pytorch: + api_name: "torch.nn.ConstantPad3d" + mapping: + ins: { data: input, padding: padding, value: value } + +GroupNorm_0: + desc: "分组归一化" + paddle: + api_name: "paddle.nn.GroupNorm" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 2, 1, 1] + range: [ -1, 1] + params: + num_groups: 1 + num_channels: 2 + pytorch: + api_name: "torch.nn.GroupNorm" + mapping: + ins: { data: input, num_groups: num_groups, num_channels: num_channels } + excess: + device: + +ReplicationPad3d_0: + desc: "按照 
padding、mode 和 value 属性对输入进行填充 0" + paddle: + api_name: "paddle.nn.Pad3D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + padding: [1, 0, 1, 1, 1, 1] + mode: 'replicate' + pytorch: + api_name: "torch.nn.ReplicationPad3d" + mapping: + ins: {x: input, padding: padding} + +max_pool3d_0: + desc: "3维最大池化" + paddle: + api_name: "paddle.nn.functional.max_pool3d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + kernel_size: [1, 1, 1] + stride: [1, 1, 1] + padding: [0, 0, 0] + pytorch: + api_name: "torch.nn.functional.max_pool3d" + mapping: + ins: { x: input, kernel_size: kernel_size, stride: stride, padding: padding } + +MaxUnPool2D_0: + desc: "2D 最大反池化 操作" + enable_backward: false + paddle: + api_name: "paddle.nn.MaxUnPool2D" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + indices: + random: false + type: "Tensor" + dtype: "int32" + value: [[[[0]]]] + params: + kernel_size: 1 + stride: 1 + padding: 0 +# pytorch: +# api_name: "torch.nn.MaxUnpool2d" +# mapping: +# ins: { x: input, indices: indices, kernel_size: kernel_size, stride: stride, padding: padding } + +Tensor_subtract__0: + desc: "逐元素相减算子,输入 x 与输入 y 逐元素相减,并将各个位置的输出元素保存到返回结果中。" + enable_backward: false + paddle: + api_name: "paddle.Tensor.subtract_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] +# pytorch: +# api_name: "torch.Tensor.subtract_" +# mapping: +# ins: { x: input, y: other } + +AvgPool2D_0: + desc: "构建一个二维平均池化层 0" + paddle: + api_name: "paddle.nn.AvgPool2D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + kernel_size: 1 + stride: + padding: 0 + ceil_mode: False + exclusive: True + 
divisor_override: + pytorch: + api_name: "torch.nn.AvgPool2d" + mapping: + ins: {x: input, kernel_size: kernel_size, stride: stride, padding: padding, ceil_mode: ceil_mode, exclusive: count_include_pad, divisor_override: divisor_override} + +Tensor_add__0: + desc: "逐元素相加算子,输入 x 与输入 y 逐元素相加,并将各个位置的输出元素保存到返回结果中。" + enable_backward: false + paddle: + api_name: "paddle.Tensor.add_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] +# pytorch: +# api_name: "torch.Tensor.add_" +# mapping: +# ins: { x: input, y: other } + +clip_0: + desc: "向上取整运算函数" + paddle: + api_name: "paddle.clip" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + params: + min: -5.0 + max: 5.0 + pytorch: + api_name: "torch.clip" + mapping: + ins: { x: input , min: min, max: max} + +linear_0: + desc: "线性变换" + paddle: + api_name: "paddle.nn.functional.linear" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + bias: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.linear" + mapping: + ins: { x: input, weight: weight, bias: bias } + +avg_pool2d_0: + desc: "2维平均池化" + paddle: + api_name: "paddle.nn.functional.avg_pool2d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + kernel_size: [1, 1] + pytorch: + api_name: "torch.nn.functional.avg_pool2d" + mapping: + ins: { x: input, kernel_size: kernel_size } + +MaxUnPool3D_0: + desc: "3D 最大反池化 操作" + enable_backward: false + paddle: + api_name: "paddle.nn.MaxUnPool3D" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] 
+ range: [ -1, 1] + indices: + random: false + type: "Tensor" + dtype: "int32" + value: [[[[[0]]]]] + params: + kernel_size: 1 + stride: 1 + padding: 0 +# pytorch: +# api_name: "torch.nn.MaxUnpool3d" +# mapping: +# ins: { x: input, indices: indices, kernel_size: kernel_size, stride: stride, padding: padding } + +AvgPool3D_0: + desc: "3维平均池化" + paddle: + api_name: "paddle.nn.AvgPool3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + kernel_size: 1 + stride: 1 + padding: 0 + pytorch: + api_name: "torch.nn.AvgPool3d" + mapping: + ins: { data: input, kernel_size: kernel_size, stride: stride, padding: padding } + +smooth_l1_loss_0: + desc: "如果逐个元素的绝对误差低于 1,则创建使用平方项的条件, 否则为 L1 损失" + paddle: + api_name: "paddle.nn.functional.smooth_l1_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + label: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + reduction: 'mean' + delta: 1.0 + pytorch: + api_name: "torch.nn.functional.smooth_l1_loss" + mapping: + ins: { input: input, label: target, reduction: reduction, delta: beta } + +Tensor_clip__0: + desc: "将输入的所有元素进行剪裁,使得输出元素限制在[min, max]内" + enable_backward: false + paddle: + api_name: "paddle.Tensor.clip_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + min: -1.0 + max: 1.0 +# pytorch: +# api_name: "torch.Tensor.clip_" +# mapping: +# ins: { x: input, min: min, max: max } + +solve_0: + desc: "计算线性方程组的解 0" + paddle: + api_name: "paddle.linalg.solve" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.linalg.solve" + mapping: + ins: {x: A, y: B} + +unique_consecutive_0: + desc: "将 Tensor 中连续重复的元素进行去重" + enable_backward: false + paddle: + api_name: 
"paddle.unique_consecutive" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + return_inverse: False + return_counts: False + axis: 0 + pytorch: + api_name: "torch.unique_consecutive" + mapping: + ins: { x: input, return_inverse: return_inverse, return_counts: return_counts, axis: dim} + +instance_norm_0: + desc: "InstanceNorm计算" + paddle: + api_name: "paddle.nn.functional.instance_norm" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 2, 2] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.instance_norm" + mapping: + ins: { x: input } + +BCELoss_0: + desc: "二值交叉熵损失值 0" + paddle: + api_name: "paddle.nn.BCELoss" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ 0, 1 ] + label: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ 0, 1 ] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.BCELoss" + mapping: + ins: {x: input, label: target, reduction: reduction} + +kthvalue_0: + desc: "Tensor的kthvalue求值" + enable_backward: false + paddle: + api_name: "paddle.kthvalue" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + k: 1 + axis: 0 + pytorch: + api_name: "torch.kthvalue" + mapping: + ins: { x: input, k: k, axis: dim } + +MaxPool2D_0: + desc: "二维最大池化层 0" + paddle: + api_name: "paddle.nn.MaxPool2D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + kernel_size: 1 + stride: 1 + padding: 0 + ceil_mode: False + return_mask: False + pytorch: + api_name: "torch.nn.MaxPool2d" + mapping: + ins: {x: input, kernel_size: kernel_size, stride: stride, padding: padding, ceil_mode: ceil_mode, return_mask: return_indices} + +concat_0: + desc: "对输入沿参数 axis 轴进行联结,返回一个新的 Tensor" + paddle: + api_name: "paddle.concat" + inputs: + x: + - + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 
1 ] + range: [ -1, 1 ] + - + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + params: + axis: 0 + pytorch: + api_name: "torch.concat" + mapping: + ins: { x: tensors, axis: dim } + +Tensor_fill_diagonal__0: + desc: "以 value 值填充输入 Tensor x 的对角线元素值。" + enable_backward: false + paddle: + api_name: "paddle.Tensor.fill_diagonal_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + value: 2 + offset: 0 + wrap: False +# pytorch: +# api_name: "torch.Tensor.fill_diagonal_" +# mapping: +# ins: { x: input, value: fill_value, wrap: wrap } + +diagflat_0: + desc: "如果 x 是一维张量,则返回带有 x 元素作为对角线的二维方阵. 如果 x 是大于等于二维的张量,则返回一个二维方阵" + paddle: + api_name: "paddle.diagflat" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.diagflat" + mapping: + ins: { x: input } + +max_pool2d_0: + desc: "2维最大池化" + paddle: + api_name: "paddle.nn.functional.max_pool2d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + kernel_size: [1, 1] + pytorch: + api_name: "torch.nn.functional.max_pool2d" + mapping: + ins: { x: input, kernel_size: kernel_size } + +Tensor_squeeze__0: + desc: "删除输入 Tensor 的 Shape 中尺寸为 1 的维度。" + enable_backward: false + paddle: + api_name: "paddle.Tensor.squeeze_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1, 1 ] + range: [ -1, 1 ] +# pytorch: +# api_name: "torch.Tensor.squeeze_" +# mapping: +# ins: { x: input } + +linalg_triangular_solve_0: + desc: "计算具有唯一解的线性方程组解,torch.triangular_solve替换为linalg.solve_triangular" + paddle: + api_name: "paddle.linalg.triangular_solve" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + upper: True 
+ transpose: False + unitriangular: False + pytorch: + api_name: "torch.linalg.solve_triangular" + mapping: + ins: { x: input, y: B, upper: upper, transpose: left, unitriangular: unitriangular } + +Dirichlet_0: + desc: "狄利克雷分布(Dirichlet distribution)是一类在实数域以正单纯形(standard simplex)为支撑集的高维连续概率分布,是 Beta 分布在高维情形的推广" + enable_backward: false + paddle: + api_name: "paddle.distribution.Dirichlet" + params: + concentration: + random: false + type: "Tensor" + dtype: "float32" + value: [1.] + method: + sample: + shape: [1] + pytorch: + api_name: "torch.distributions.dirichlet.Dirichlet" + mapping: + ins: { concentration: concentration } + method: + sample: + sample_shape: [1] + +MaxPool3D_0: + desc: "三维最大池化层 0" + paddle: + api_name: "paddle.nn.MaxPool3D" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + kernel_size: 1 + stride: 1 + padding: 0 + ceil_mode: False + return_mask: False + pytorch: + api_name: "torch.nn.MaxPool3d" + mapping: + ins: {x: input, kernel_size: kernel_size, stride: stride, padding: padding, ceil_mode: ceil_mode, return_mask: return_indices} + +max_unpool2d_0: + desc: "2D 最大反池化 操作" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.max_unpool2d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + indices: + random: false + type: "Tensor" + dtype: "int32" + value: [[[[0]]]] + params: + kernel_size: 1 + stride: 1 + padding: 0 +# pytorch: +# api_name: "torch.nn.functional.max_unpool2d" +# mapping: +# ins: { x: input, indices: indices, kernel_size: kernel_size, stride: stride, padding: padding } + +lu_0: + desc: "对输入的 N 维(N>=2)矩阵 x 进行 LU 分解" + enable_backward: false + paddle: + api_name: "paddle.linalg.lu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + pivot: True + pytorch: + api_name: "torch.linalg.lu" + mapping: + ins: { x: A, pivot: pivot } + +Embedding_0: + 
desc: "embedding嵌入层 0" + enable_backward: False + paddle: + api_name: "paddle.nn.Embedding" + inputs: + x: + random: true + dtype: "int32" + shape: [ 1, 1, 1, 1 ] + range: [ 0, 3 ] + params: + num_embeddings: 4 + embedding_dim: 4 + padding_idx: + sparse: False + pytorch: + api_name: "torch.nn.Embedding" + mapping: + ins: {x: input, num_embeddings: num_embeddings, embedding_dim: embedding_dim, padding_idx: padding_idx, sparse: sparse} + +empty_like_0: + desc: "按照 padding、mode 和 value 属性对输入进行填充 0" + enable_backward: false + paddle: + api_name: "paddle.empty_like" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.empty_like" + mapping: + ins: {x: input, padding: padding} + +rad2deg_0: + desc: "将元素从弧度的角度转换为度" + paddle: + api_name: "paddle.rad2deg" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.rad2deg" + mapping: + ins: { x: input } + +eigvalsh_0: + desc: "计算一个(或一批)普通方阵的特征值" + paddle: + api_name: "paddle.linalg.eigvalsh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + UPLO: 'L' + pytorch: + api_name: "torch.linalg.eigvalsh" + mapping: + ins: { x: input, UPLO: UPLO } + +AdaptiveMaxPool3D_0: + desc: "3维自适应最大值池化" + paddle: + api_name: "paddle.nn.AdaptiveMaxPool3D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.AdaptiveMaxPool3d" + mapping: + ins: { data: input, output_size: output_size } + +max_unpool3d_0: + desc: "3D 最大反池化 操作" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.max_unpool3d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + indices: + random: false + type: "Tensor" + dtype: "int32" + value: [[[[[0]]]]] + params: + kernel_size: 1 + 
stride: 1 + padding: 0 +# pytorch: +# api_name: "torch.nn.functional.max_unpool3d" +# mapping: +# ins: { x: input, indices: indices, kernel_size: kernel_size, stride: stride, padding: padding } + +eigh_0: + desc: "计算厄米特矩阵或者实数对称矩阵的特征值和特征向量" + enable_backward: false + paddle: + api_name: "paddle.linalg.eigh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + UPLO: 'L' + pytorch: + api_name: "torch.linalg.eigh" + mapping: + ins: { x: input, UPLO: UPLO } + +Tensor_uniform__0: + desc: "均匀分布采样的随机数 0" + enable_backward: false + paddle: + api_name: "paddle.Tensor.uniform_" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + min: -1.0 + max: 1.0 + seed: 0 +# pytorch: +# api_name: "torch.Tensor.uniform_" +# mapping: +# ins: {min: min, max: max, seed: seed} + +NLLLoss_0: + desc: "NLLLoss损失函数 0" + enable_backward: False + paddle: + api_name: "paddle.nn.NLLLoss" + inputs: + input: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + label: + random: true + dtype: "int64" + shape: [ 1, 1, 1 ] + range: [ 0, 3 ] + params: + weight: + ignore_index: -100 + reduction: 'mean' + pytorch: + api_name: "torch.nn.NLLLoss" + mapping: + ins: {input: input, label: target, weight: weight, ignore_index: ignore_index, reduction: reduction} + +AffineTransform_0: + desc: "仿射变换 y=loc+scale×x" + enable_backward: false + paddle: + api_name: "paddle.distribution.AffineTransform" + params: + loc: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + scale: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + method: + forward: + x: + value: [1.] 
+ pytorch: + api_name: "torch.distributions.transforms.AffineTransform" + mapping: + ins: { loc: loc, scale: scale } + method: + __call__: + x: + value: [1.0] + +AdaptiveMaxPool2D_0: + desc: "2维自适应最大值池化" + paddle: + api_name: "paddle.nn.AdaptiveMaxPool2D" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.AdaptiveMaxPool2d" + mapping: + ins: { data: input, output_size: output_size } + +deg2rad_0: + desc: "将元素从弧度的角度转换为度" + paddle: + api_name: "paddle.deg2rad" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.deg2rad" + mapping: + ins: { x: input } + +binary_cross_entropy_0: + desc: "该函数用于计算输入 input 和标签 label 之间的二值交叉熵损失值" + paddle: + api_name: "paddle.nn.functional.binary_cross_entropy" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "float32" + value: [[[1]]] + params: + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.binary_cross_entropy" + mapping: + ins: { input: input, label: target, reduction: reduction, } + +PReLU_0: + desc: "PReLU激活层 0" + paddle: + api_name: "paddle.nn.PReLU" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + params: + num_parameters: 1 + init: 0.25 + pytorch: + api_name: "torch.nn.PReLU" + mapping: + ins: {x: input, num_parameters: num_parameters, init: init} + +elu__0: + desc: "elu_激活层" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.elu_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + alpha: 1.0 + pytorch: + api_name: "torch.nn.functional.elu_" + mapping: + ins: { x: input, alpha: alpha } + +addmm_0: + desc: "计算x和y的乘积,将结果乘以标量alpha,再加上input与beta的乘积,得到输出" + paddle: + api_name: 
"paddle.addmm" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -10, 10] + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -10, 10] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -10, 10] + params: + alpha: 5.0 + beta: 0.5 + pytorch: + api_name: "torch.addmm" + mapping: + ins: { input: input, x: mat1, y: mat2, alpha: alpha, beta: beta } + +__getitem___0: + desc: "其调用形式为:Tensor[Index]" + enable_backward: False + paddle: + api_name: "__getitem__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 2, 2 ] + range: [ -1, 1 ] + pytorch: + api_name: "__getitem__" + mapping: + ins: { x: x } + +__rtruediv___0: + desc: "真除法scalar / Tensor" + enable_backward: False + paddle: + api_name: "__rtruediv__" + inputs: + x: 1.0 + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__rtruediv__" + mapping: + ins: { x: x, y: y } + +Dropout_0: + desc: "根据给定的丢弃概率 p ,在训练过程中随机将一些神经元输出设置为0 0" + enable_backward: false + paddle: + api_name: "paddle.nn.Dropout" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + p: 0.5 + pytorch: + api_name: "torch.nn.Dropout" + mapping: + ins: {x: input, p: p} + +__rdiv___0: + desc: "除法scalar/Tensor" + enable_backward: False + paddle: + api_name: "__rdiv__" + inputs: + x: 1.0 + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__rdiv__" + mapping: + ins: { x: x, y: y } + +eigvals_0: + desc: "计算厄米特矩阵或者实数对称矩阵的特征值" + paddle: + api_name: "paddle.linalg.eigvals" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.linalg.eigvals" + mapping: + ins: { x: input } + +repeat_interleave_0: + desc: "沿着指定轴 axis 对输入 x 进行复制 0" + paddle: + api_name: 
"paddle.repeat_interleave" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + repeats: 1 + axis: 2 + pytorch: + api_name: "torch.repeat_interleave" + mapping: + ins: {x: input, repeats: repeats, axis: dim} + +__rpow___0: + desc: "指数scalar**Tensor" + enable_backward: False + paddle: + api_name: "__rpow__" + inputs: + x: 1.0 + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__rpow__" + mapping: + ins: { x: x, y: y } + +bilinear_0: + desc: "对两个输入执行双线性张量积" + paddle: + api_name: "paddle.nn.functional.bilinear" + inputs: + x1: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + x2: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.bilinear" + mapping: + ins: { x1: input1, x2: input2, weight: weight, } + +adaptive_max_pool3d_0: + desc: "3维自适应平均池化" + paddle: + api_name: "paddle.nn.functional.adaptive_max_pool3d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.functional.adaptive_max_pool3d" + mapping: + ins: { x: input, output_size: output_size } + +logsumexp_0: + desc: "沿着 axis 计算 x 的以e为底的指数的和的自然对数 0" + paddle: + api_name: "paddle.logsumexp" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: !!python/tuple [0, 1] + keepdim: False + pytorch: + api_name: "torch.logsumexp" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +unique_0: + desc: "返回Tensor按升序排序后的独有元素" + enable_backward: false + paddle: + api_name: "paddle.unique" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [3, 4, 5, 5] + range: [ -5, 5] + pytorch: + 
api_name: "torch.unique" + mapping: + ins: { x: input } + +adaptive_max_pool2d_0: + desc: "2维自适应平均池化" + paddle: + api_name: "paddle.nn.functional.adaptive_max_pool2d" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + output_size: 1 + pytorch: + api_name: "torch.nn.functional.adaptive_max_pool2d" + mapping: + ins: { x: input, output_size: output_size } + +zeros_0: + desc: "创建形状为 shape 、数据类型为 dtype 且值全为0的Tensor" + enable_backward: False + paddle: + api_name: "paddle.zeros" + params: + shape: [1, 1, 1, 1] + pytorch: + api_name: "torch.zeros" + mapping: + ins: { shape: size } + +ones_0: + desc: "创建形状为 shape 、数据类型为 dtype 且值全为1的Tensor" + enable_backward: False + paddle: + api_name: "paddle.ones" + params: + shape: [1, 1, 1, 1] + pytorch: + api_name: "torch.ones" + mapping: + ins: { shape: size } + +Tensor_erfinv__0: + desc: "计算输入矩阵 x 的逆误差函数。 " + enable_backward: false + paddle: + api_name: "paddle.Tensor.erfinv_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] +# pytorch: +# api_name: "torch.Tensor.erfinv_" +# mapping: +# ins: { x: input } + +linalg_norm_0: + desc: "计算给定 Tensor 的矩阵范数(Frobenius 范数)和向量范数 0" + paddle: + api_name: "paddle.linalg.norm" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + p: 'fro' + axis: !!python/tuple [0, 1] + keepdim: False + pytorch: + api_name: "torch.linalg.norm" + mapping: + ins: {x: input, p: ord, axis: dim, keepdim: keepdim} + +hflip_0: + desc: "对输入图像进行水平翻转" + paddle: + api_name: "paddle.vision.transforms.hflip" + inputs: + img: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.fliplr" + mapping: + ins: { img: input } + +slice_0: + desc: "沿多个轴生成 input 的切片 0" + paddle: + api_name: "paddle.slice" + inputs: + input: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + 
params: + axes: [2] + starts: [0] + ends: [1] +# pytorch: +# api_name: "torch.narrow" +# mapping: +# ins: {input: input, axes: dim, starts: start, ends: length} + +Tensor_ceil__0: + desc: "向上取整运算函数。" + enable_backward: False + paddle: + api_name: "paddle.Tensor.ceil_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] +# pytorch: +# api_name: "torch.Tensor.ceil_" +# mapping: +# ins: { x: input } + +full_0: + desc: "创建形状大小为 shape 并且数据类型为 dtype 的Tensor,其中元素值均为 fill_value" + enable_backward: False + paddle: + api_name: "paddle.full" + params: + shape: [1, 1, 1, 1] + fill_value: 3.0 + dtype: "float32" + pytorch: + api_name: "torch.full" + mapping: + ins: { shape: size, fill_value: fill_value } + +ChannelShuffle_0: + desc: "将一个形为 [N, C, H, W] 或是 [N, H, W, C] 的 Tensor 按通道分成 g 组,得到形为 [N, g, C/g, H, W] 或 [N, H, W, g, C/g] 的 Tensor,然后转置为 [N, C/g, g, H, W] 或 [N, H, W, C/g, g] 的形状,最后重塑为原来的形状" + enable_backward: false + paddle: + api_name: "paddle.nn.ChannelShuffle" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + groups: 1 + pytorch: + api_name: "torch.nn.ChannelShuffle" + mapping: + ins: { data: input, groups: groups } + +nll_loss_0: + desc: "返回 negative log likelihood" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.nll_loss" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 1] + label: + random: false + type: "Tensor" + dtype: "int64" + value: [0] + params: + ignore_index: -100 + reduction: 'mean' + pytorch: + api_name: "torch.nn.functional.nll_loss" + mapping: + ins: { input: input, label: target, ignore_index: ignore_index, reduction: reduction } + +Tensor_sqrt__0: + desc: "计算输入的算数平方根" + enable_backward: false + paddle: + api_name: "paddle.Tensor.sqrt_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] +# pytorch: +# 
api_name: "torch.Tensor.sqrt_" +# mapping: +# ins: { x: input } + +Tensor_exp__0: + desc: "对输入,逐元素进行以自然数 e 为底指数运算。" + enable_backward: false + paddle: + api_name: "paddle.Tensor.exp_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] +# pytorch: +# api_name: "torch.Tensor.exp_" +# mapping: +# ins: { x: input } + +Tensor_floor__0: + desc: "向下取整函数。" + enable_backward: false + paddle: + api_name: "paddle.Tensor.floor_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] +# pytorch: +# api_name: "torch.Tensor.floor_" +# mapping: +# ins: { x: input } + +Tensor_rsqrt__0: + desc: "rsqrt激活函数" + enable_backward: false + paddle: + api_name: "paddle.Tensor.rsqrt_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ 1, 2] +# pytorch: +# api_name: "torch.Tensor.rsqrt_" +# mapping: +# ins: { x: input } + +normal_0: + desc: "二值交叉熵损失值 0" + enable_backward: false + paddle: + api_name: "paddle.normal" + params: + mean: 0.0 + std: 1.0 + shape: [1, 1, 1, 1] + pytorch: + api_name: "torch.normal" + mapping: + ins: {mean: mean, std: std, shape: size} + +Tensor_round__0: + desc: "将输入中的数值四舍五入到最接近的整数数值" + enable_backward: false + paddle: + api_name: "paddle.Tensor.round_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] +# pytorch: +# api_name: "torch.Tensor.round_" +# mapping: +# ins: { x: input } + +Tensor_tanh__0: + desc: "tanh 激活函数" + enable_backward: false + paddle: + api_name: "paddle.Tensor.tanh_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] +# pytorch: +# api_name: "torch.Tensor.tanh_" +# mapping: +# ins: { x: input } + +multi_dot_0: + desc: "Multi_dot 是一个计算多个矩阵乘法的算子" + paddle: + api_name: "paddle.linalg.multi_dot" + inputs: + x: + - + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1 ] + 
range: [ -1, 1 ] + - + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.linalg.multi_dot" + mapping: + ins: { x: tensors } + +RReLU_0: + desc: "rrelu激活函数" + paddle: + api_name: "paddle.nn.RReLU" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.RReLU" + mapping: + ins: { x: input } + +Flatten_0: + desc: "将一个连续维度的 Tensor 展平成一维 0" + paddle: + api_name: "paddle.nn.Flatten" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + start_axis: 1 + stop_axis: -1 + pytorch: + api_name: "torch.nn.Flatten" + mapping: + ins: {x: input, start_axis: start_dim, stop_axis: end_dim} + +__matmul___0: + desc: "@" + enable_backward: False + paddle: + api_name: "__matmul__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__matmul__" + mapping: + ins: { x: x, y: y } + +Tensor_reciprocal__0: + desc: "对输入 Tensor 取倒数" + enable_backward: false + paddle: + api_name: "paddle.Tensor.reciprocal_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] +# pytorch: +# api_name: "torch.Tensor.reciprocal_" +# mapping: +# ins: { x: input } + +empty_0: + desc: "大小为shape并且数据类型为dtype的Tensor 0" + enable_backward: false + paddle: + api_name: "paddle.empty" + params: + shape: [ 1, 1, 1, 1 ] + dtype: + pytorch: + api_name: "torch.empty" + mapping: + ins: {shape: size, dtype: dtype} + +relu__0: + desc: "relu_激活函数, inplace策略" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.relu_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.relu_" + mapping: + ins: { x: 
input } + +stack_0: + desc: "沿 axis 轴对输入 x 进行堆叠操作" + paddle: + api_name: "paddle.stack" + inputs: + x: + - + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + - + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1 ] + range: [ -1, 1 ] + params: + axis: 0 + pytorch: + api_name: "torch.stack" + mapping: + ins: { x: tensors, axis: dim } + +index_select_0: + desc: "返回的Tensor其余维度大小和输入 x 相等, axis 维度的大小等于 index 的大小 0" + enable_backward: false + paddle: + api_name: "paddle.index_select" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + index: + random: true + dtype: "int32" + shape: [ 2 ] + range: [ 0, 1 ] + axis: 0 + pytorch: + api_name: "torch.index_select" + mapping: + ins: {x: input, index: index, axis: dim} + +argsort_0: + desc: "对输入变量沿给定轴进行排序,输出排序好的数据的相应索引,其维度和输入相同。默认升序排列" + enable_backward: False + paddle: + api_name: "paddle.argsort" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.argsort" + mapping: + ins: { x: input } + +sort_0: + desc: "该OP根据perm对输入的多维Tensor进行数据重排 0" + enable_backward: false + paddle: + api_name: "paddle.sort" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: -1 + descending: False + pytorch: + api_name: "torch.sort" + mapping: + ins: {x: input, axis: dim, descending: descending} + +roll_0: + desc: "沿着指定维度 axis 对输入 x 进行循环滚动,当元素移动到最后位置时,会从第一个位置重新插入" + paddle: + api_name: "paddle.roll" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + shifts: 2 + pytorch: + api_name: "torch.roll" + mapping: + ins: { x: input, shifts: shifts } + +broadcast_to_0: + desc: "根据 shape 指定的形状广播 x ,广播后, x 的形状和 shape 指定的形状一致" + paddle: + api_name: "paddle.broadcast_to" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] 
+ range: [ -1, 1] + params: + shape: [1, 1, 1, 1, 1] + pytorch: + api_name: "torch.broadcast_to" + mapping: + ins: { x: input, shape: size } + +tile_0: + desc: "根据参数 repeat_times 对输入 x 的各维度进行复制" + paddle: + api_name: "paddle.tile" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + repeat_times: [2, 1] + pytorch: + api_name: "torch.tile" + mapping: + ins: { x: input, repeat_times: dims } + +PixelShuffle_0: + desc: "该算子将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的Tensor" + paddle: + api_name: "paddle.nn.PixelShuffle" + inputs: + data0: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + upscale_factor: 1 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.PixelShuffle" + mapping: + ins: { data0: input, upscale_factor: upscale_factor } + +Tensor_fill__0: + desc: "以 value 值填充 Tensor x 中所有数据。对 x 的原地 Inplace 修改" + enable_backward: false + paddle: + api_name: "paddle.Tensor.fill_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + value: 1. 
+# pytorch: +# api_name: "torch.Tensor.fill_" +# mapping: +# ins: { x: input } + +split_0: + desc: "tensor分割 0" + enable_backward: false + paddle: + api_name: "paddle.split" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + params: + num_or_sections: 1 + axis: 2 + pytorch: + api_name: "torch.split" + mapping: + ins: {x: tensor, num_or_sections: split_size_or_sections, axis: dim} + +chunk_0: + desc: "将输入Tensor分割成多个子Tensor" + enable_backward: false + paddle: + api_name: "paddle.chunk" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + chunks: 1 + pytorch: + api_name: "torch.chunk" + mapping: + ins: { x: input, chunks: chunks } + +LogSoftmax_0: + desc: "LogSoftmax激活层" + paddle: + api_name: "paddle.nn.LogSoftmax" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [[[1]]] + params: + axis: 0 + pytorch: + api_name: "torch.nn.LogSoftmax" + mapping: + ins: { data: input, axis: dim } + +expand_as_0: + desc: "根据 y 的形状扩展 x ,扩展后, x 的形状和 y 的形状相同" + paddle: + api_name: "paddle.expand_as" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] +# pytorch: +# api_name: "torch.Tensor.expand_as" +# mapping: +# ins: { x: input, y: other } + +topk_0: + desc: "沿着可选的 axis 查找topk最大或者最小的结果和结果所在的索引信息" + enable_backward: false + paddle: + api_name: "paddle.topk" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + k: 1 + pytorch: + api_name: "torch.topk" + mapping: + ins: { x: input, k: k } + +prod_0: + desc: "对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果" + paddle: + api_name: "paddle.prod" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.prod" + mapping: + ins: { x: 
input } + +mode_0: + desc: "沿着可选的 axis 查找对应轴上的众数和结果所在的索引信息 0" + enable_backward: false + paddle: + api_name: "paddle.mode" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: -1 + keepdim: False + pytorch: + api_name: "torch.mode" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +bmm_0: + desc: "对输入x及输入y进行矩阵相乘" + paddle: + api_name: "paddle.bmm" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.bmm" + mapping: + ins: { x: input, y: mat2 } + +dropout_0: + desc: "根据给定的丢弃概率 p ,在训练过程中随机将一些神经元输出设置为0 0" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.dropout" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + p: 0.5 + pytorch: + api_name: "torch.nn.functional.dropout" + mapping: + ins: {x: input, p: p} + +randn_0: + desc: "标准正态分布 0" + enable_backward: false + paddle: + api_name: "paddle.randn" + params: + shape: [ 1, 1, 1, 1 ] + pytorch: + api_name: "torch.randn" + mapping: + ins: {x: input, shape: size} + +Softplus_0: + desc: "Softplus激活层" + paddle: + api_name: "paddle.nn.Softplus" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [-0.4] + params: + beta: 1 + threshold: 15 + pytorch: + api_name: "torch.nn.Softplus" + mapping: + ins: { data: input, beta: beta, threshold: threshold } + +LogSigmoid_0: + desc: "LogSigmoid激活层" + paddle: + api_name: "paddle.nn.LogSigmoid" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.LogSigmoid" + mapping: + ins: { data: input } + +rand_0: + desc: "均匀分布 0" + enable_backward: false + paddle: + api_name: "paddle.rand" + params: + shape: [ 1, 1, 1, 1 ] + pytorch: + api_name: "torch.rand" + mapping: + ins: {x: 
input, shape: size} + +PowerTransform_0: + desc: "幂变换 y=x^power " + enable_backward: false + paddle: + api_name: "paddle.distribution.PowerTransform" + params: + power: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -5, 5] + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.PowerTransform" + mapping: + ins: { power: exponent } + method: + __call__: + x: + value: [1.] + +dist_0: + desc: "计算 (x-y) 的 p 范数(p-norm)" + paddle: + api_name: "paddle.dist" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.dist" + mapping: + ins: { x: input, y: other } + +expand_0: + desc: "根据 shape 指定的形状扩展 x ,扩展后, x 的形状和 shape 指定的形状一致" + paddle: + api_name: "paddle.expand" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + shape: [1, 1, 1, 1, 1] +# pytorch: +# api_name: "torch.Tensor.expand" +# mapping: +# ins: { x: input, shape: size } + +Mish_0: + desc: "mish激活函数" + paddle: + api_name: "paddle.nn.Mish" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.Mish" + mapping: + ins: { x: input } + +qr_0: + desc: "计算一个或一批矩阵的正交三角分解,也称 QR 分解" + enable_backward: false + paddle: + api_name: "paddle.linalg.qr" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + mode: 'reduced' + pytorch: + api_name: "torch.linalg.qr" + mapping: + ins: { x: A, mode: mode } + +__mul___0: + desc: "乘法 tensor * tensor" + enable_backward: False + paddle: + api_name: "__mul__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 
1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__mul__" + mapping: + ins: { x: x, y: y } + +__truediv___0: + desc: "除法Tensor / Tensor" + enable_backward: False + paddle: + api_name: "__truediv__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__truediv__" + mapping: + ins: { x: x, y: y } + +Softmax_0: + desc: "Softmax激活层" + paddle: + api_name: "paddle.nn.Softmax" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: 1 + pytorch: + api_name: "torch.nn.Softmax" + mapping: + ins: { data: input, axis: dim } + +__div___0: + desc: "除法 tensor / tensor" + enable_backward: False + paddle: + api_name: "__div__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__div__" + mapping: + ins: { x: x, y: y } + +Softshrink_0: + desc: "Softshrink激活层" + paddle: + api_name: "paddle.nn.Softshrink" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [-0.9] + params: + threshold: 0.5 + pytorch: + api_name: "torch.nn.Softshrink" + mapping: + ins: { data: input, threshold: lambd } + +__sub___0: + desc: "减法 tensor - tensor" + enable_backward: False + paddle: + api_name: "__sub__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__sub__" + mapping: + ins: { x: x, y: y } + +ELU_0: + desc: "ELU激活层 0" + paddle: + api_name: "paddle.nn.ELU" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + alpha: 1.0 
+ pytorch: + api_name: "torch.nn.ELU" + mapping: + ins: {x: input, alpha: alpha} + +__neg___0: + desc: "取负" + enable_backward: False + paddle: + api_name: "__neg__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__neg__" + mapping: + ins: { x: x, } + +CELU_0: + desc: "CELU激活层" + paddle: + api_name: "paddle.nn.CELU" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.CELU" + mapping: + ins: { data: input } + +Hardtanh_0: + desc: "Hardtanh激活层" + paddle: + api_name: "paddle.nn.Hardtanh" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [-1.5, 0.3, 2.5] + pytorch: + api_name: "torch.nn.Hardtanh" + mapping: + ins: { data: input } + +__pow___0: + desc: "幂次 tensor ** tensor" + enable_backward: False + paddle: + api_name: "__pow__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__pow__" + mapping: + ins: { x: x, y: y } + +__add___0: + desc: "加法tensor + tensor" + enable_backward: False + paddle: + api_name: "__add__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__add__" + mapping: + ins: { x: x, y: y } + +prelu_0: + desc: "prelu激活函数" + paddle: + api_name: "paddle.nn.functional.prelu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ 0, 1] + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.prelu" + mapping: + ins: { x: input, weight: weight } + 
+Tensor_zero__0: + desc: "以 0 值填充 Tensor x 中所有数据。对 x 的原地 Inplace 修改" + enable_backward: false + paddle: + api_name: "paddle.Tensor.zero_" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] +# pytorch: +# api_name: "torch.Tensor.zero_" +# mapping: +# ins: { x: input } + +trace_0: + desc: "计算输入 Tensor 在指定平面上的对角线元素之和,并输出相应的计算结果" + paddle: + api_name: "paddle.trace" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + params: + offset: 0 + axis1: 0 + axis2: 1 + pytorch: + api_name: "torch.trace" + mapping: + ins: { x: input } + +Hardshrink_0: + desc: "Hardshrink激活层" + paddle: + api_name: "paddle.nn.Hardshrink" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [-1, 0.3, 2.5] + pytorch: + api_name: "torch.nn.Hardshrink" + mapping: + ins: { data: input } + +ThresholdedReLU_0: + desc: "ThresholdedReLU激活层" + paddle: + api_name: "paddle.nn.ThresholdedReLU" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] +# pytorch: +# api_name: "torch.nn.Threshold" +# mapping: +# ins: { data: input } + +Silu_0: + desc: "Silu激活层" + paddle: + api_name: "paddle.nn.Silu" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.nn.SiLU" + mapping: + ins: { data: input } + +linalg_qr_0: + desc: "计算一个或一批矩阵的正交三角分解 0" + enable_backward: false + paddle: + api_name: "paddle.linalg.qr" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + mode: 'reduced' + pytorch: + api_name: "torch.linalg.qr" + mapping: + ins: {x: A, mode: mode} + +slogdet_0: + desc: "计算批量矩阵的行列式值的符号值和行列式值绝对值的自然对数值" + enable_backward: false + paddle: + api_name: "paddle.linalg.slogdet" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: 
"torch.linalg.slogdet" + mapping: + ins: { x: A } + +__pow__scalar_0: + desc: "幂次 tensor ** scalar" + enable_backward: False + paddle: + api_name: "__pow__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: 1.0 + pytorch: + api_name: "__pow__" + mapping: + ins: { x: x, y: y } + +Hardswish_0: + desc: "Hardswish激活层" + paddle: + api_name: "paddle.nn.Hardswish" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.Hardswish" + mapping: + ins: { data: input } + +Tanhshrink_0: + desc: "Tanhshrink激活层" + paddle: + api_name: "paddle.nn.Tanhshrink" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [-0.4] + pytorch: + api_name: "torch.nn.Tanhshrink" + mapping: + ins: { data: input } + +eye_0: + desc: "创建形状大小为shape并且数据类型为dtype的Tensor,其中元素值是未初始化的" + enable_backward: False + paddle: + api_name: "paddle.eye" + params: + num_rows: 1 + pytorch: + api_name: "torch.eye" + mapping: + ins: { num_rows: n } + +scatter_0: + desc: "为 x 中的每个元素计算由 y 中相对应元素决定的赫维赛德阶跃函数" + enable_backward: false + paddle: + api_name: "paddle.scatter" + inputs: + x: + random: false + type: "Tensor" + dtype: "float32" + value: [[1, 1], [2, 2], [3, 3]] + index: + random: false + type: "Tensor" + dtype: "int64" + value: [2, 1, 0, 1] + updates: + random: false + type: "Tensor" + dtype: "float32" + value: [[1, 1], [2, 2], [3, 3], [4, 4]] +# pytorch: +# api_name: "torch.index_copy" +# mapping: +# ins: { x: input, index: index, updates: source } + +Hardsigmoid_0: + desc: "Hardsigmoid激活层" + paddle: + api_name: "paddle.nn.Hardsigmoid" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.Hardsigmoid" + mapping: + ins: { data: input } + +__radd___0: + desc: "加法" + enable_backward: False + paddle: + api_name: "__radd__" + inputs: + x: 1.0 + y: + random: true + type: 
"Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__radd__" + mapping: + ins: { x: x, y: y } + +renorm_0: + desc: "求Tensor的renorm值" + paddle: + api_name: "paddle.renorm" + inputs: + x: + random: False + type: "Tensor" + dtype: "float32" + value: [[[1.0, 1.0, 1.0]]] + params: + p: 1 + axis: -1 + max_norm: 2 + pytorch: + api_name: "torch.renorm" + mapping: + ins: { x: input, p: p, axis: dim, max_norm: maxnorm } + +mm_0: + desc: "用于两个输入矩阵的相乘, 两个输入的形状可为任意维度, 但当任一输入维度大于3时, 两个输入的维度必须相等" + paddle: + api_name: "paddle.mm" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + mat2: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.mm" + mapping: + ins: { input: input, mat2: mat2 } + +matmul_0: + desc: "计算两个Tensor的乘积,遵循完整的广播规则" + paddle: + api_name: "paddle.matmul" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.matmul" + mapping: + ins: { x: input, y: other } + +__rsub___0: + desc: "减法scalar - Tensor" + enable_backward: False + paddle: + api_name: "__rsub__" + inputs: + x: 1.0 + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__rsub__" + mapping: + ins: { x: x, y: y } + +bincount_0: + desc: "统计输入张量中每个元素出现的次数,如果传入weights张量则每次计数加一时会乘以weights张量对应的值" + enable_backward: false + paddle: + api_name: "paddle.bincount" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1] + range: [ 1, 20] + pytorch: + api_name: "torch.bincount" + mapping: + ins: { x: input } + +__truediv__scalar_0: + desc: "除法Tensor / scalar" + enable_backward: False + paddle: + api_name: "__truediv__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 
1, 1, 1 ] + range: [ -1, 1 ] + y: 1.0 + pytorch: + api_name: "__truediv__" + mapping: + ins: { x: x, y: y } + +__xor___0: + desc: "^" + enable_backward: false + paddle: + api_name: "__xor__" + inputs: + x: + random: true + type: "Tensor" + dtype: "bool" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "bool" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__xor__" + mapping: + ins: { x: x, y: y } + +__rmul___0: + desc: "乘法scalar*Tensor" + enable_backward: False + paddle: + api_name: "__rmul__" + inputs: + x: 1.0 + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__rmul__" + mapping: + ins: { x: x, y: y } + +__sub__scalar_0: + desc: "减法 tensor - scalar" + enable_backward: False + paddle: + api_name: "__sub__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: 1.0 + pytorch: + api_name: "__sub__" + mapping: + ins: { x: x, y: y } + +__eq___0: + desc: "==" + enable_backward: false + paddle: + api_name: "__eq__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__eq__" + mapping: + ins: { x: x, y: y } + +__mul__scalar_0: + desc: "乘法 tensor * scalar" + enable_backward: False + paddle: + api_name: "__mul__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: 1.0 + pytorch: + api_name: "__mul__" + mapping: + ins: { x: x, y: y } + +LeakyReLU_0: + desc: "LeakyReLU 激活层 0" + paddle: + api_name: "paddle.nn.LeakyReLU" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + negative_slope: 0.01 + pytorch: + api_name: "torch.nn.LeakyReLU" + mapping: + ins: {x: input, negative_slope: negative_slope} + 
+Softsign_0: + desc: "Softsign激活层" + paddle: + api_name: "paddle.nn.Softsign" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [-0.4] + pytorch: + api_name: "torch.nn.Softsign" + mapping: + ins: { data: input } + +__le___0: + desc: "<=" + enable_backward: false + paddle: + api_name: "__le__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__le__" + mapping: + ins: { x: x, y: y } + +flatten_0: + desc: "根据给定的start_axis 和 stop_axis 将连续的维度展平" + paddle: + api_name: "paddle.flatten" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.flatten" + mapping: + ins: { x: input } + +__ge___0: + desc: ">=" + enable_backward: false + paddle: + api_name: "__ge__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__ge__" + mapping: + ins: { x: x, y: y } + +__div__scalar_0: + desc: "除法 tensor / scalar" + enable_backward: False + paddle: + api_name: "__div__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: 1.0 + pytorch: + api_name: "__div__" + mapping: + ins: { x: x, y: y } + +add_0: + desc: "逐元素相加算子" + paddle: + api_name: "paddle.add" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.add" + mapping: + ins: { x: input, y: other } + +__add__scalar_0: + desc: "加法tensor + scalar" + enable_backward: False + paddle: + api_name: "__add__" + inputs: + x: + random: true + type: 
"Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: 1.0 + pytorch: + api_name: "__add__" + mapping: + ins: { x: x, y: y } + +subtract_0: + desc: "逐元素相减算子,输入 x 与输入 y 逐元素相减,并将各个位置的输出元素保存到返回结果中" + paddle: + api_name: "paddle.subtract" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.subtract" + mapping: + ins: { x: input, y: other } + +GELU_0: + desc: "GELU激活层 0" + paddle: + api_name: "paddle.nn.GELU" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.nn.GELU" + mapping: + ins: {x: input, approximate: approximate} + +rrelu_0: + desc: "rrelu激活函数" + paddle: + api_name: "paddle.nn.functional.rrelu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + training: True + pytorch: + api_name: "torch.nn.functional.rrelu" + mapping: + ins: { x: input, training: training } + +__and___0: + desc: "&" + enable_backward: false + paddle: + api_name: "__and__" + inputs: + x: + random: true + type: "Tensor" + dtype: "int64" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "int64" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__and__" + mapping: + ins: { x: x, y: y } + +__or___0: + desc: "|" + enable_backward: false + paddle: + api_name: "__or__" + inputs: + x: + random: true + type: "Tensor" + dtype: "bool" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "bool" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__or__" + mapping: + ins: { x: x, y: y } + +__ne___0: + desc: "!=" + enable_backward: false + paddle: + api_name: "__ne__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 
] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__ne__" + mapping: + ins: { x: x, y: y } + +__gt___0: + desc: ">" + enable_backward: false + paddle: + api_name: "__gt__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__gt__" + mapping: + ins: { x: x, y: y } + +__lt___0: + desc: "<" + enable_backward: false + paddle: + api_name: "__lt__" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__lt__" + mapping: + ins: { x: x, y: y } + +SELU_0: + desc: "SELU激活层" + paddle: + api_name: "paddle.nn.SELU" + inputs: + data: + random: False + type: "Tensor" + dtype: "float32" + value: [[0.1]] + pytorch: + api_name: "torch.nn.SELU" + mapping: + ins: { data: input } + +Sigmoid_0: + desc: "Sigmoid激活函数" + paddle: + api_name: "paddle.nn.Sigmoid" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.nn.Sigmoid" + mapping: + ins: {x: input} + +pixel_shuffle_0: + desc: "将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的Tensor" + paddle: + api_name: "paddle.nn.functional.pixel_shuffle" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + upscale_factor: 1 + data_format: 'NCHW' + pytorch: + api_name: "torch.nn.functional.pixel_shuffle" + mapping: + ins: { x: input, upscale_factor: upscale_factor } + +__floordiv___0: + desc: "//" + enable_backward: false + paddle: + api_name: "__floordiv__" + inputs: + x: + random: true + type: "Tensor" + dtype: "int64" + shape: [ 1, 1, 1, 1] + range: 
[ 1, 10 ] + y: + random: true + type: "Tensor" + dtype: "int64" + shape: [ 1, 1, 1, 1 ] + range: [ 1, 10 ] + pytorch: + api_name: "__floordiv__" + mapping: + ins: { x: x, y: y } + +cross_0: + desc: "计算张量 x 和 y 在 axis 维度上的向量积(叉积)" + paddle: + api_name: "paddle.cross" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 3, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 3, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.cross" + mapping: + ins: { x: input, y: other } + +ReLU6_0: + desc: "ReLU6激活层 0" + paddle: + api_name: "paddle.nn.ReLU6" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 10 ] + pytorch: + api_name: "torch.nn.ReLU6" + mapping: + ins: {x: input} + +moveaxis_0: + desc: "将输入Tensor x 的轴从 source 位置移动到 destination 位置 0" + paddle: + api_name: "paddle.moveaxis" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + source: 2 + destination: 0 + pytorch: + api_name: "torch.moveaxis" + mapping: + ins: {x: input, source: source, destination: destination} + +mv_0: + desc: "计算矩阵 x 和向量 vec 的乘积" + paddle: + api_name: "paddle.mv" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + vec: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + pytorch: + api_name: "torch.mv" + mapping: + ins: { x: input, vec: vec } + +sum_0: + desc: "对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果" + paddle: + api_name: "paddle.sum" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.sum" + mapping: + ins: { x: input } + +Tanh_0: + desc: "Tanh激活层" + paddle: + api_name: "paddle.nn.Tanh" + inputs: + data: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -4, 4] + pytorch: + api_name: "torch.nn.Tanh" + mapping: + ins: { data: input } + 
+__mod___0: + desc: "%" + enable_backward: false + paddle: + api_name: "__mod__" + inputs: + x: + random: true + type: "Tensor" + dtype: "int64" + shape: [ 1, 1, 1, 1] + range: [ 1, 2 ] + y: + random: true + type: "Tensor" + dtype: "int64" + shape: [ 1, 1, 1, 1 ] + range: [ 1, 2] + pytorch: + api_name: "__mod__" + mapping: + ins: { x: x, y: y } + +ReLU_0: + desc: "ReLU激活层 0" + paddle: + api_name: "paddle.nn.ReLU" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + pytorch: + api_name: "torch.nn.ReLU" + mapping: + ins: {x: input} + +unbind_0: + desc: "将输入Tensor按照指定的维度分割成多个子Tensor" + enable_backward: False + paddle: + api_name: "paddle.unbind" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.unbind" + mapping: + ins: { input: input } + +log_softmax_0: + desc: "log_softmax激活函数" + paddle: + api_name: "paddle.nn.functional.log_softmax" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 5] + params: + axis: -1 + pytorch: + api_name: "torch.nn.functional.log_softmax" + mapping: + ins: { x: input, axis: dim } + +det_0: + desc: "计算批量矩阵的行列式值" + paddle: + api_name: "paddle.linalg.det" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.linalg.det" + mapping: + ins: { x: A } + +TanhTransform_0: + desc: "Tanh 变换 " + enable_backward: false + paddle: + api_name: "paddle.distribution.TanhTransform" + params: + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.TanhTransform" + mapping: + ins: { } + method: + __call__: + x: + value: [1.] 
+ +kron_0: + desc: "计算两个张量的克罗内克积, 结果是一个合成的张量, 由第二个张量经过第一个张量中的元素缩放 后的组块构成。" + paddle: + api_name: "paddle.kron" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -10, 10] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.kron" + mapping: + ins: { x: input, y: other } + +SigmoidTransform_0: + desc: "Sigmoid 变换 " + enable_backward: false + paddle: + api_name: "paddle.distribution.SigmoidTransform" + params: + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.SigmoidTransform" + mapping: + ins: { } + method: + __call__: + x: + value: [1.] + +pow_0: + desc: "指数算子,逐元素计算 x 的 y 次幂指数算子,逐元素计算 x 的 y 次幂" + paddle: + api_name: "paddle.pow" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 6] + pytorch: + api_name: "torch.pow" + mapping: + ins: { x: input, y: exponent } + +amin_0: + desc: "对指定维度上的Tensor元素求最小值运算 0" + paddle: + api_name: "paddle.amin" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: + keepdim: False + pytorch: + api_name: "torch.amin" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +ExpTransform_0: + desc: "指数变换 y=exp(x) " + enable_backward: false + paddle: + api_name: "paddle.distribution.ExpTransform" + params: + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.ExpTransform" + mapping: + ins: { } + method: + __call__: + x: + value: [1.] 
+ +divide_0: + desc: "输入 x 与输入 y 逐元素相除,并将各个位置的输出元素保存到返回结果中" + paddle: + api_name: "paddle.divide" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2 ] + pytorch: + api_name: "torch.divide" + mapping: + ins: { x: input, y: other } + +matrix_power_0: + desc: "计算一个或一批方阵的 n 次幂" + paddle: + api_name: "paddle.linalg.matrix_power" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1] + range: [ -1, 1] + params: + n: 1 + pytorch: + api_name: "torch.linalg.matrix_power" + mapping: + ins: { x: input, n: n } + +mean_0: + desc: "沿参数 axis 计算 x 的平均值" + paddle: + api_name: "paddle.mean" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.mean" + mapping: + ins: { x: input } + +softplus_0: + desc: "softplus激活函数" + paddle: + api_name: "paddle.nn.functional.softplus" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + beta: 1 + threshold: 0 + pytorch: + api_name: "torch.nn.functional.softplus" + mapping: + ins: { x: input, beta: beta, threshold: threshold } + +cumsum_0: + desc: "沿给定 axis 计算张量 x 的累加和" + paddle: + api_name: "paddle.cumsum" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: -1 + pytorch: + api_name: "torch.cumsum" + mapping: + ins: { x: input, axis: dim } + +gumbel_softmax_0: + desc: "该OP实现了按Gumbel-Softmax分布进行采样的功能,通过hard可选择是否离散化" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.gumbel_softmax" + params: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + temperature: 1.0 + hard: False + axis: -1 + pytorch: + api_name: "torch.nn.functional.gumbel_softmax" + mapping: + ins: { x: logits, temperature: tau, 
hard: hard, axis: dim } + +multiply_0: + desc: "逐元素相乘算子,输入 x 与输入 y 逐元素相乘,并将各个位置的输出元素保存到返回结果中" + paddle: + api_name: "paddle.multiply" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.multiply" + mapping: + ins: { x: input, y: other } + +argmax_0: + desc: "沿参数``axis`` 计算输入 x 的最大元素的索引" + enable_backward: False + paddle: + api_name: "paddle.argmax" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.argmax" + mapping: + ins: { x: input } + +__invert___0: + desc: "对tensor取非" + enable_backward: False + paddle: + api_name: "__invert__" + inputs: + x: + random: true + type: "Tensor" + dtype: "bool" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "__invert__" + mapping: + ins: { x: x } + +logcumsumexp_0: + desc: "计算 x 的指数的前缀和的对数" + enable_backward: false + paddle: + api_name: "paddle.logcumsumexp" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: 0 + pytorch: + api_name: "torch.logcumsumexp" + mapping: + ins: { x: input, axis: dim } + +min_0: + desc: "对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果" + paddle: + api_name: "paddle.min" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.min" + mapping: + ins: { x: input } + +AbsTransform_0: + desc: "取绝对值变换 y=|x| " + enable_backward: false + paddle: + api_name: "paddle.distribution.AbsTransform" + params: + method: + forward: + x: + value: [1.] + pytorch: + api_name: "torch.distributions.transforms.AbsTransform" + mapping: + ins: { } + method: + __call__: + x: + value: [1.] 
+ +amax_0: + desc: "对指定维度上的Tensor元素求最大值运算 0" + paddle: + api_name: "paddle.amax" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + axis: + keepdim: False + pytorch: + api_name: "torch.amax" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +masked_select_0: + desc: "返回一个1-D 的Tensor, Tensor的值是根据 mask 对输入 x 进行选择的" + enable_backward: False + paddle: + api_name: "paddle.masked_select" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + mask: + random: true + type: "Tensor" + dtype: "bool" + shape: [1, 1, 1, 1] + pytorch: + api_name: "torch.masked_select" + mapping: + ins: { x: input, mask: mask } + +celu_0: + desc: "celu激活层" + paddle: + api_name: "paddle.nn.functional.celu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + alpha: 1.0 + pytorch: + api_name: "torch.nn.functional.celu" + mapping: + ins: { x: input, alpha: alpha } + +elu_0: + desc: "elu激活层" + paddle: + api_name: "paddle.nn.functional.elu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + alpha: 1.0 + pytorch: + api_name: "torch.nn.functional.elu" + mapping: + ins: { x: input, alpha: alpha } + +max_0: + desc: "对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果" + paddle: + api_name: "paddle.max" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.max" + mapping: + ins: { x: input } + +argmin_0: + desc: "沿参数``axis`` 计算输入 x 的最小元素的索引" + enable_backward: False + paddle: + api_name: "paddle.argmin" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.argmin" + mapping: + ins: { x: input } + +log_sigmoid_0: + desc: "log_sigmoid激活函数" + paddle: + api_name: "paddle.nn.functional.log_sigmoid" + inputs: + x: + random: 
true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.logsigmoid" + mapping: + ins: { x: input } + +mish_0: + desc: "mish激活函数" + paddle: + api_name: "paddle.nn.functional.mish" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.mish" + mapping: + ins: { x: input } + +softshrink_0: + desc: "softshrink激活函数" + paddle: + api_name: "paddle.nn.functional.softshrink" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + threshold: 0.5 + pytorch: + api_name: "torch.nn.functional.softshrink" + mapping: + ins: { x: input, threshold: lambd } + +softmax_0: + desc: "Softmax激活层" + paddle: + api_name: "paddle.nn.functional.softmax" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: 1 + pytorch: + api_name: "torch.nn.functional.softmax" + mapping: + ins: { x: input, axis: dim } + +flip_0: + desc: "沿指定轴反转n维tensor" + paddle: + api_name: "paddle.flip" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: [2, 0, 1] + pytorch: + api_name: "torch.flip" + mapping: + ins: { x: input, axis: dims } + +diag_0: + desc: "如果 x 是向量(1-D张量),则返回带有 x 元素作为对角线的2-D方阵. 
如果 x 是矩阵(2-D张量),则提取 x 的对角线元素,以1-D张量返回" + paddle: + api_name: "paddle.diag" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -10, 10] + pytorch: + api_name: "torch.diag" + mapping: + ins: { x: input } + +heaviside_0: + desc: "为 x 中的每个元素计算由 y 中相对应元素决定的赫维赛德阶跃函数" + enable_backward: false + paddle: + api_name: "paddle.heaviside" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.heaviside" + mapping: + ins: { x: input, y: values } + +hardtanh_0: + desc: "hardtanh激活函数" + paddle: + api_name: "paddle.nn.functional.hardtanh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + min: -1.0 + max: 1.0 + pytorch: + api_name: "torch.nn.functional.hardtanh" + mapping: + ins: { x: input, min: min_val, max: max_val } + +log1p_0: + desc: "计算Log1p(加一的自然对数)结果" + paddle: + api_name: "paddle.log1p" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.log1p" + mapping: + ins: { x: input } + +dot_0: + desc: "计算向量的内积" + paddle: + api_name: "paddle.dot" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1] + range: [ -1, 1] + pytorch: + api_name: "torch.dot" + mapping: + ins: { x: input, y: tensor } + +fmax_0: + desc: "比较两个 Tensor 对应位置的元素,返回一个包含该元素最大值的新 Tensor" + paddle: + api_name: "paddle.fmax" + inputs: + x: + random: False + type: "Tensor" + dtype: "float32" + value: [2, 3, 4] + y: + random: False + type: "Tensor" + dtype: "float32" + value: [1, 5, 2] + pytorch: + api_name: "torch.fmax" + mapping: + ins: { x: input, y: other } + +thresholded_relu_0: + desc: "thresholded_relu激活函数" + paddle: + api_name: 
"paddle.nn.functional.thresholded_relu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + threshold: 0 +# pytorch: +# api_name: "torch.nn.functional.threshold" +# mapping: +# ins: { x: input, threshold: threshold } + +hardshrink_0: + desc: "hardshrink激活函数" + paddle: + api_name: "paddle.nn.functional.hardshrink" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + threshold: 0.75 + pytorch: + api_name: "torch.nn.functional.hardshrink" + mapping: + ins: { x: input, threshold: lambd } + +fmin_0: + desc: "比较两个Tensor对应位置的元素,返回一个包含该元素最小值的新Tensor 0" + paddle: + api_name: "paddle.fmin" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.fmin" + mapping: + ins: {x: input, y: other} + +complex_0: + desc: "给定实部和虚部,返回一个复数 Tensor" + paddle: + api_name: "paddle.complex" + inputs: + real: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + imag: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.complex" + mapping: + ins: { real: real, imag: imag } + +minimum_0: + desc: "逐元素对比输入的两个Tensor,并且把各个位置更小的元素保存到返回结果中" + paddle: + api_name: "paddle.minimum" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.minimum" + mapping: + ins: { x: input, y: other } + +maximum_0: + desc: "逐元素对比输入的两个Tensor,并且把各个位置更大的元素保存到返回结果中" + paddle: + api_name: "paddle.maximum" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] 
+ range: [ -1, 1] + pytorch: + api_name: "torch.maximum" + mapping: + ins: { x: input, y: other } + +hardsigmoid_0: + desc: "hardsigmoid激活函数" + paddle: + api_name: "paddle.nn.functional.hardsigmoid" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 5] + params: + slope: 0.1666667 + offset: 0.5 + pytorch: + api_name: "torch.nn.functional.hardsigmoid" + mapping: + ins: { x: input } + +gelu_0: + desc: "gelu激活层" + paddle: + api_name: "paddle.nn.functional.gelu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 5] + params: + approximate: False + pytorch: + api_name: "torch.nn.functional.gelu" + mapping: + ins: { x: input } + +neg_0: + desc: "计算输入 x 的相反数并返回" + paddle: + api_name: "paddle.neg" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.neg" + mapping: + ins: { x: input } + +logit_0: + desc: "实现了logit层" + paddle: + api_name: "paddle.logit" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + eps: 0.02 + pytorch: + api_name: "torch.logit" + mapping: + ins: { x: input } + +inv_0: + desc: "计算方阵的逆" + paddle: + api_name: "paddle.linalg.inv" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.linalg.inv" + mapping: + ins: { x: A } + +tanhshrink_0: + desc: "tanhshrink激活函数" + paddle: + api_name: "paddle.nn.functional.tanhshrink" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.tanhshrink" + mapping: + ins: { x: input } + +hardswish_0: + desc: "hardswish激活函数" + paddle: + api_name: "paddle.nn.functional.hardswish" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 5] + pytorch: + api_name: 
"torch.nn.functional.hardswish" + mapping: + ins: { x: input } + +log10_0: + desc: "Log10激活函数(计算底为10对数)" + paddle: + api_name: "paddle.log10" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ 0.001, 10] + pytorch: + api_name: "torch.log10" + mapping: + ins: { x: input } + +atan2_0: + desc: "对x/y进行逐元素的arctangent运算,通过符号确定象限" + paddle: + api_name: "paddle.atan2" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -5, 5] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -5, 5] + pytorch: + api_name: "torch.atan2" + mapping: + ins: { x: input, y: other } + +embedding_0: + desc: "嵌入层(Embedding Layer)" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.embedding" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1] + range: [ 0, 1] + params: + weight: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ -1, 1] + padding_idx: -1 + sparse: False + pytorch: + api_name: "torch.nn.functional.embedding" + mapping: + ins: { x: input, weight: weight, padding_idx: padding_idx, sparse: sparse } + +silu_0: + desc: "silu激活函数" + paddle: + api_name: "paddle.nn.functional.silu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.silu" + mapping: + ins: { x: input } + +cos_0: + desc: "余弦函数" + paddle: + api_name: "paddle.cos" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.cos" + mapping: + ins: { x: input } + +cosh_0: + desc: "双曲余弦函数" + paddle: + api_name: "paddle.cosh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.cosh" + mapping: + ins: { x: input } + +sin_0: + desc: "计算输入的正弦值" + paddle: + api_name: "paddle.sin" + inputs: + 
x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.sin" + mapping: + ins: { x: input } + +atanh_0: + desc: "arctanh函数" + paddle: + api_name: "paddle.atanh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.atanh" + mapping: + ins: { x: input } + +log2_0: + desc: "Log2激活函数(计算底为2对数)" + paddle: + api_name: "paddle.log2" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ 0.001, 10] + pytorch: + api_name: "torch.log2" + mapping: + ins: { x: input } + +unsqueeze_0: + desc: "向输入Tensor的Shape中一个或多个位置(axis)插入尺寸为1的维度" + paddle: + api_name: "paddle.unsqueeze" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + axis: 1 + pytorch: + api_name: "torch.unsqueeze" + mapping: + ins: { x: input, axis: dim } + +zeros_like_0: + desc: "返回一个和 x 具有相同的形状的全零Tensor,数据类型为 dtype 或者和 x 相同" + enable_backward: False + paddle: + api_name: "paddle.zeros_like" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.zeros_like" + mapping: + ins: { x: input } + +acosh_0: + desc: "arccosh函数" + paddle: + api_name: "paddle.acosh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.acosh" + mapping: + ins: { x: input } + +ones_like_0: + desc: "返回一个和输入参数 x 具有相同形状的数值都为1的Tensor" + enable_backward: false + paddle: + api_name: "paddle.ones_like" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.ones_like" + mapping: + ins: { x: input } + +sinh_0: + desc: "双曲正弦函数" + enable_backward: True + paddle: + api_name: "paddle.sinh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: 
[1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.sinh" + mapping: + ins: { x: input } + +softsign_0: + desc: "softsign激活函数" + paddle: + api_name: "paddle.nn.functional.softsign" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.softsign" + mapping: + ins: { x: input } + +erf_0: + desc: "逐元素计算 Erf 激活函数" + paddle: + api_name: "paddle.erf" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.erf" + mapping: + ins: { x: input } + +atan_0: + desc: "arctangent函数" + paddle: + api_name: "paddle.atan" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.atan" + mapping: + ins: { x: input } + +asin_0: + desc: "arcsin函数" + paddle: + api_name: "paddle.asin" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.asin" + mapping: + ins: { x: input } + +randint_0: + desc: "返回服从均匀分布的、范围在[low, high)的随机Tensor 0" + enable_backward: false + paddle: + api_name: "paddle.randint" + params: + low: -100 + high: 100 + shape: [1, 1, 1, 1] + pytorch: + api_name: "torch.randint" + mapping: + ins: {low: low, high: high, shape: size} + +selu_0: + desc: "selu激活函数" + paddle: + api_name: "paddle.nn.functional.selu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + scale: 1.0507 + alpha: 1.6732 + pytorch: + api_name: "torch.nn.functional.selu" + mapping: + ins: { x: input } + +full_like_0: + desc: "创建一个和 x 具有相同的形状并且数据类型为 dtype 的Tensor,其中元素值均为 fill_value" + enable_backward: False + paddle: + api_name: "paddle.full_like" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + fill_value: 3. 
+ pytorch: + api_name: "torch.full_like" + mapping: + ins: { x: input, fill_value: fill_value } + +cumprod_0: + desc: "沿给定 axis 计算张量 x 的累乘" + paddle: + api_name: "paddle.cumprod" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + params: + dim: 0 + pytorch: + api_name: "torch.cumprod" + mapping: + ins: { x: input, dim: dim } + +sigmoid_0: + desc: "sigmoid激活函数" + paddle: + api_name: "paddle.nn.functional.sigmoid" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.sigmoid" + mapping: + ins: { x: input } + +acos_0: + desc: "arccosine函数" + paddle: + api_name: "paddle.acos" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.acos" + mapping: + ins: { x: input } + +tan_0: + desc: "三角函数tangent" + paddle: + api_name: "paddle.tan" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.tan" + mapping: + ins: { x: input } + +asinh_0: + desc: "arcsinh函数" + paddle: + api_name: "paddle.asinh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.asinh" + mapping: + ins: { x: input } + +log_0: + desc: "Log激活函数(计算自然对数)" + paddle: + api_name: "paddle.log" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 10] + pytorch: + api_name: "torch.log" + mapping: + ins: { x: input } + +relu6_0: + desc: "relu6激活函数" + paddle: + api_name: "paddle.nn.functional.relu6" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.relu6" + mapping: + ins: { x: input } + +square_0: + desc: "对输入参数``x``进行逐元素取平方运算" + paddle: + api_name: "paddle.square" + inputs: + x: 
+ random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.square" + mapping: + ins: { x: input } + +squeeze_0: + desc: "删除输入Tensor的Shape中尺寸为1的维度" + paddle: + api_name: "paddle.squeeze" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.squeeze" + mapping: + ins: { x: input } + +tanh_0: + desc: "tanh 激活函数" + paddle: + api_name: "paddle.tanh" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.tanh" + mapping: + ins: { x: input } + +sqrt_0: + desc: "计算输入的算数平方根" + paddle: + api_name: "paddle.sqrt" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.sqrt" + mapping: + ins: { x: input } + +exp_0: + desc: "对输入, 逐元素进行以自然数e为底指数运算" + paddle: + api_name: "paddle.exp" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.exp" + mapping: + ins: { x: input } + +tril_indices_0: + desc: "返回行数和列数已知的二维矩阵中下三角矩阵元素的行列坐标" + enable_backward: false + paddle: + api_name: "paddle.tril_indices" + params: + row: 1 + col: 1 + offset: 0 + pytorch: + api_name: "torch.tril_indices" + mapping: + ins: { row: row, col: col, offset: offset } + +erfinv_0: + desc: "计算输入矩阵x的逆误差函数" + paddle: + api_name: "paddle.erfinv" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.erfinv" + mapping: + ins: { x: input } + +digamma_0: + desc: "逐元素计算输入Tensor的digamma函数值" + paddle: + api_name: "paddle.digamma" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.digamma" + mapping: + ins: { x: input } + +relu_0: + desc: "relu激活函数" + paddle: + api_name: 
"paddle.nn.functional.relu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.nn.functional.relu" + mapping: + ins: { x: input } + +abs_0: + desc: "求绝对值" + paddle: + api_name: "paddle.abs" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.abs" + mapping: + ins: { x: input } + +lgamma_0: + desc: "计算输入 x 的 gamma 函数的自然对数并返回" + paddle: + api_name: "paddle.lgamma" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.lgamma" + mapping: + ins: { x: input } + +expm1_0: + desc: "对输入,逐元素进行以自然数e为底指数运算并减1" + paddle: + api_name: "paddle.expm1" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.expm1" + mapping: + ins: { x: input } + +cholesky_0: + desc: "计算一个对称正定矩阵或一批对称正定矩阵的 Cholesky 分解" + paddle: + api_name: "paddle.linalg.cholesky" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1] + range: [ 0, 10] + params: + upper: False + pytorch: + api_name: "torch.linalg.cholesky" + mapping: + ins: { x: input, upper: upper } + +randperm_0: + desc: "返回一个数值在0到n-1、随机排列的1-D Tensor 0" + enable_backward: false + paddle: + api_name: "paddle.randperm" + params: + n: 1 + pytorch: + api_name: "torch.randperm" + mapping: + ins: {n: n} + +rsqrt_0: + desc: "rsqrt激活函数" + paddle: + api_name: "paddle.rsqrt" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ 1, 2] + pytorch: + api_name: "torch.rsqrt" + mapping: + ins: { x: input } + +angle_0: + desc: "逐元素计算复数的相位角 0" + paddle: + api_name: "paddle.angle" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.angle" + mapping: + ins: {x: input} + +reciprocal_0: + desc: 
"对输入Tensor取倒数" + paddle: + api_name: "paddle.reciprocal" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, -2] + pytorch: + api_name: "torch.reciprocal" + mapping: + ins: { x: input } + +poisson_0: + desc: "以输入参数 x 为泊松分布的 lambda 参数" + paddle: + api_name: "paddle.poisson" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ 0, 1] + pytorch: + api_name: "torch.poisson" + mapping: + ins: { x: input } + +sign_0: + desc: "对输入参数 ``x``中每个元素进行正负判断,并且输出正负判断值:1代表正,-1代表负,0代表零" + paddle: + api_name: "paddle.sign" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.sign" + mapping: + ins: { x: input } + +ceil_0: + desc: "向上取整运算函数" + paddle: + api_name: "paddle.ceil" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.ceil" + mapping: + ins: { x: input } + +floor_0: + desc: "向下取整函数" + paddle: + api_name: "paddle.floor" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.floor" + mapping: + ins: { x: input } + +all_0: + desc: "对指定维度上的Tensor元素进行逻辑与运算 0" + enable_backward: False + paddle: + api_name: "paddle.all" + inputs: + x: + random: true + dtype: "bool" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + params: + axis: 2 + keepdim: False + pytorch: + api_name: "torch.all" + mapping: + ins: {x: input, axis: dim, keepdim: keepdim} + +round_0: + desc: "将输入中的数值四舍五入到最接近的整数数值" + paddle: + api_name: "paddle.round" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.round" + mapping: + ins: { x: input } + +equal_0: + desc: "x: : y 逐元素比较x和y是否相等 0" + enable_backward: false + paddle: + api_name: "paddle.equal" + params: + x: + random: true + dtype: "float32" + 
shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.eq" + mapping: + ins: {x: input, y: other} + +tril_0: + desc: "返回输入矩阵 input 的下三角部分,其余部分被设为0。 矩形的下三角部分被定义为对角线上和下方的元素" + paddle: + api_name: "paddle.tril" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.tril" + mapping: + ins: { x: input } + +triu_0: + desc: "返回输入矩阵 input 的上三角部分,其余部分被设为0。 矩形的上三角部分被定义为对角线上和上方的元素" + paddle: + api_name: "paddle.triu" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.triu" + mapping: + ins: { x: input } + +trunc_0: + desc: "将输入 Tensor 的小数部分置0,返回置0后的 Tensor ,如果输入 Tensor 的数据类型为整数,则不做处理" + paddle: + api_name: "paddle.trunc" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -10, 10] + pytorch: + api_name: "torch.trunc" + mapping: + ins: { input: input } + +conj_0: + desc: "是逐元素计算Tensor的共轭运算" + paddle: + api_name: "paddle.conj" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2] + pytorch: + api_name: "torch.conj" + mapping: + ins: { x: input } + +diagonal_0: + desc: "如果输入是 2D Tensor,则返回对角线元素. 
如果输入的维度大于 2D,则返回由对角线元素组成的数组" + paddle: + api_name: "paddle.diagonal" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.diagonal" + mapping: + ins: { x: input } + +diag_embed_0: + desc: "其在指定的 2D 平面(由 dim1 和 dim2 指定)上的对角线由输入 input 填充" + paddle: + api_name: "paddle.diag_embed" + inputs: + input: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1, 1] + range: [ -1, 1] + params: + offset: 0 + dim1: -2 + dim2: -1 + pytorch: + api_name: "torch.diag_embed" + mapping: + ins: { input: input, offset: offset, dim1: dim1, dim2: dim2 } + +clone_0: + desc: "对输入Tensor x 进行拷贝 0" + enable_backward: false + paddle: + api_name: "paddle.clone" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1] + range: [ -1, 1 ] + pytorch: + api_name: "torch.clone" + mapping: + ins: {x: input} + +any_0: + desc: "对指定维度上的Tensor元素进行逻辑或运算,并输出相应的计算结果" + enable_backward: False + paddle: + api_name: "paddle.any" + inputs: + x: + random: True + type: "Tensor" + dtype: "bool" + shape: [1] + range: [-4, 4] + pytorch: + api_name: "torch.any" + mapping: + ins: { x: input } + +equal_all_0: + desc: "如果所有相同位置的元素相同返回True,否则返回False" + enable_backward: false + paddle: + api_name: "paddle.equal_all" + inputs: + x: + random: True + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [-10, 10] + y: + random: True + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [-10, 10] + pytorch: + api_name: "torch.equal" + mapping: + ins: { x: input, y: other } + +nonzero_0: + desc: "返回输入 x 中非零元素的坐标" + enable_backward: false + paddle: + api_name: "paddle.nonzero" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 2] + pytorch: + api_name: "torch.nonzero" + mapping: + ins: { x: input } + +one_hot_0: + desc: "该OP将输入'x'中的每个id转换为一个one-hot向量,其长度为 num_classes" + enable_backward: false + paddle: + api_name: "paddle.nn.functional.one_hot" + 
inputs: + x: + random: true + type: "Tensor" + dtype: "int64" + shape: [1, 1, 1, 1] + range: [ 0, 1] + params: + num_classes: 1 + pytorch: + api_name: "torch.nn.functional.one_hot" + mapping: + ins: { x: input, num_classes: num_classes } + +isclose_0: + desc: "逐个检查x和y的所有元素是否均相近 0" + enable_backward: false + paddle: + api_name: "paddle.isclose" + params: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + rtol: 0.00001 + atol: 0.00000001 + equal_nan: false + pytorch: + api_name: "torch.isclose" + mapping: + ins: {x: input, y: other, rtol: rtol, atol: atol, equal_nan: equal_nan} + +transpose_0: + desc: "该OP根据perm对输入的多维Tensor进行数据重排 0" + enable_backward: false + paddle: + api_name: "paddle.transpose" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + perm: [3, 0, 1, 2] + pytorch: + api_name: "torch.permute" + mapping: + ins: {x: input, perm: dims} + +searchsorted_0: + desc: "根据给定的 values 在 sorted_sequence 的最后一个维度查找合适的索引" + enable_backward: false + paddle: + api_name: "paddle.searchsorted" + inputs: + sorted_sequence: + random: False + type: "Tensor" + dtype: "float32" + value: [1.0, 2.0, 3.0] + values: + random: False + type: "Tensor" + dtype: "float32" + value: [1.0, 2.0, 3.0] + params: + out_int32: False + right: False + pytorch: + api_name: "torch.searchsorted" + mapping: + ins: { sorted_sequence: sorted_sequence, values: input, out_int32: out_int32, right: right } + +remainder_0: + desc: "逐元素取模算子" + paddle: + api_name: "paddle.remainder" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.remainder" + mapping: + ins: { x: input, y: other } + +allclose_0: + desc: "逐个检查x和y的所有元素是否均满足∣x−y∣∣≤atol+rtol×∣∣y∣∣" + enable_backward: 
False + paddle: + api_name: "paddle.allclose" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + params: + rtol: 0.00001 + atol: 0.00000001 + pytorch: + api_name: "torch.allclose" + mapping: + ins: { x: input, y: other, rtol: rtol, atol: atol } + +logical_and_0: + desc: "逐元素的对 x 和 y 进行逻辑与运算" + enable_backward: false + paddle: + api_name: "paddle.logical_and" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.logical_and" + mapping: + ins: { x: input, y: other } + +logical_xor_0: + desc: "逐元素的对 X 和 Y 进行逻辑异或运算" + enable_backward: false + paddle: + api_name: "paddle.logical_xor" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.logical_xor" + mapping: + ins: { x: input, y: other } + +histogram_0: + desc: "计算输入张量的直方图 0" + enable_backward: False + paddle: + api_name: "paddle.histogram" + inputs: + input: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + params: + bins: 1 + min: 0 + max: 1 + pytorch: + api_name: "torch.histogram" + mapping: + ins: {input: input, bins: bins, range: !!python/tuple [0, 1]} + +as_complex_0: + desc: "将实数 Tensor 转为复数 Tensor" + paddle: + api_name: "paddle.as_complex" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [ 1, 1, 1, 2 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.view_as_complex" + mapping: + ins: {x: input} + +multinomial_0: + desc: "以输入 x 为概率,生成一个多项分布的Tensor 0" + enable_backward: False + paddle: + api_name: "paddle.multinomial" + inputs: + x: + random: true + dtype: "float32" 
+ shape: [ 1, 1 ] + range: [ 0, 1 ] + params: + num_samples: 1 + replacement: False + pytorch: + api_name: "torch.multinomial" + mapping: + ins: {x: input, num_samples: num_samples, replacement: replacement} + +logical_or_0: + desc: "逐元素的对 X 和 Y 进行逻辑或运算" + enable_backward: False + paddle: + api_name: "paddle.logical_or" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + y: + random: true + type: "Tensor" + dtype: "int32" + shape: [1, 1, 1, 1] + range: [ -1, 1] + pytorch: + api_name: "torch.logical_or" + mapping: + ins: { x: input, y: other} + +greater_equal_0: + desc: "逐元素地返回 x>=y 的逻辑值,相同位置前者输入大于等于后者输入则返回True,否则返回False" + enable_backward: False + paddle: + api_name: "paddle.greater_equal" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1 ] + pytorch: + api_name: "torch.greater_equal" + mapping: + ins: { x: input, y: other } + +mod_0: + desc: "逐元素取模 0" + enable_backward: false + paddle: + api_name: "paddle.mod" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.fmod" + mapping: + ins: {x: input, y: other} + +greater_than_0: + desc: "返回 x>y 逐元素比较x和y是否相等 0" + enable_backward: false + paddle: + api_name: "paddle.greater_than" + inputs: + x: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + y: + random: true + dtype: "float32" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.greater" + mapping: + ins: {x: input, y: other} + +bitwise_or_0: + desc: "逐元素的对 X 和 Y 进行按位或运算" + enable_backward: false + paddle: + api_name: "paddle.bitwise_or" + inputs: + x: + random: true + type: "Tensor" + dtype: "int32" + shape: [2, 3, 4, 4] + range: [ -1, 1] + y: + random: true + type: 
"Tensor" + dtype: "int32" + shape: [2, 3, 4, 4] + range: [ -1, 1] + pytorch: + api_name: "torch.bitwise_or" + mapping: + ins: { x: input, y: other } + +as_real_0: + desc: "复数 Tensor 转为实数 Tensor" + paddle: + api_name: "paddle.as_real" + inputs: + x: + random: true + type: "Tensor" + dtype: "complex64" + shape: [ 1, 1, 1, 1 ] + range: [ -1, 1 ] + pytorch: + api_name: "torch.view_as_real" + mapping: + ins: { x: input } + +less_equal_0: + desc: "逐元素地返回 x<=y 的逻辑值,相同位置前者输入小于等于后者输入则返回True,否则返回False" + enable_backward: False + paddle: + api_name: "paddle.less_equal" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -1, 1 ] + pytorch: + api_name: "torch.less_equal" + mapping: + ins: { x: input, y: other } + +not_equal_0: + desc: "返回 x!=y 逐元素比较x和y是否相等,相同位置的元素不相同则返回True,否则返回False" + enable_backward: False + paddle: + api_name: "paddle.not_equal" + inputs: + x: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2 ] + y: + random: true + type: "Tensor" + dtype: "float32" + shape: [1, 1, 1, 1] + range: [ -2, 2 ] + pytorch: + api_name: "torch.not_equal" + mapping: + ins: { x: input, y: other } + +floor_divide_0: + desc: "输入 x 与输入 y 逐元素整除,并将各个位置的输出元素保存到返回结果中" + enable_backward: false + paddle: + api_name: "paddle.floor_divide" + inputs: + x: + random: False + type: "Tensor" + dtype: "int32" + value: [2, 3, 4] + y: + random: False + type: "Tensor" + dtype: "int32" + value: [1, 5, 2] + pytorch: + api_name: "torch.floor_divide" + mapping: + ins: { x: input, y: other } + +less_than_0: + desc: "逐元素地返回 x Date: Tue, 9 Jan 2024 10:44:40 +0800 Subject: [PATCH 5/5] update runner ci apibm, test=model --- .../api_benchmark_new/runner_ci_multipro.py | 43 +++++++++++++------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/framework/e2e/api_benchmark_new/runner_ci_multipro.py 
b/framework/e2e/api_benchmark_new/runner_ci_multipro.py index aaf365ae87..df5ceb11df 100644 --- a/framework/e2e/api_benchmark_new/runner_ci_multipro.py +++ b/framework/e2e/api_benchmark_new/runner_ci_multipro.py @@ -133,28 +133,47 @@ def __init__(self, yaml_path, python): # 邮件报警 # self.email = Alarm(storage=self.storage) - def split_list(self, lst, n): + # def split_list(self, lst, n): + # """ + # 将列表均分为n份 + # Args: + # lst (list): 待划分的列表 + # n (int): 划分的份数 + # Returns: + # res (list): 划分后的列表,其中每个元素为原列表的1/n部分 + # """ + # if not isinstance(lst, list) or not isinstance(n, int) or len(lst) == 0 or n <= 0: + # return [] + + # quotient, remainder = divmod(len(lst), n) + # res = [] + # start = 0 + # for i in range(n): + # if i < remainder: + # end = start + quotient + 1 + # else: + # end = start + quotient + # res.append(lst[start:end]) + # start = end + # return res + + def split_list(self, lst, n): """ - 将列表均分为n份 + 将列表按顺序划分为 n 份 Args: lst (list): 待划分的列表 n (int): 划分的份数 Returns: - res (list): 划分后的列表,其中每个元素为原列表的1/n部分 + res (list): 划分后的列表,其中每个元素为原列表的 1/n 部分 """ if not isinstance(lst, list) or not isinstance(n, int) or len(lst) == 0 or n <= 0: return [] quotient, remainder = divmod(len(lst), n) - res = [] - start = 0 - for i in range(n): - if i < remainder: - end = start + quotient + 1 - else: - end = start + quotient - res.append(lst[start:end]) - start = end + res = [[] for _ in range(n)] + for i, value in enumerate(lst): + index = i % n + res[index].append(value) return res def _multi_run_main(self, all_cases, loops, base_times, result_queue):