@@ -35,50 +35,6 @@ at::Tensor AtenIpexJITDev::dil_convolution_base(
   return convolution_impl(input, weight, bias, stride, padding, dilation, groups, ideep::attr_t());
 }
 
-at::Tensor AtenIpexJITDev::dil_convolution_swish(
-    const at::Tensor& input,
-    const at::Tensor& weight,
-    const at::Tensor& bias,
-    at::IntArrayRef stride,
-    at::IntArrayRef padding,
-    at::IntArrayRef dilation,
-    int64_t groups) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_convolution_swish", std::vector<c10::IValue>({}));
-#endif
-  return convolution_impl(
-      input,
-      weight,
-      bias,
-      stride,
-      padding,
-      dilation,
-      groups,
-      ideep::attr_t::fuse_swish());
-}
-
-at::Tensor AtenIpexJITDev::dil_convolution_sigmoid(
-    const at::Tensor& input,
-    const at::Tensor& weight,
-    const at::Tensor& bias,
-    at::IntArrayRef stride,
-    at::IntArrayRef padding,
-    at::IntArrayRef dilation,
-    int64_t groups) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_convolution_sigmoid", std::vector<c10::IValue>({}));
-#endif
-  return convolution_impl(
-      input,
-      weight,
-      bias,
-      stride,
-      padding,
-      dilation,
-      groups,
-      ideep::attr_t::fuse_sigmoid());
-}
-
 /**
  * Dispatch at::matmul + at::div pattern to ipex for jit inference, but only
  * one-element tensor and channel dim boadcast is enabled in oneDNN 2.2.0 now.
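The context comment above refers to `dil_matmul_div`, which replaces an eager `at::matmul` followed by `at::div` when the divisor is a one-element tensor or broadcasts over the channel dim. A minimal libtorch sketch of that eager pattern, assuming illustrative names `q`, `k`, and `scale` that are not part of the patch:

```cpp
#include <iostream>
#include <torch/torch.h>

int main() {
  // Typical scaled-dot-product shape: a matmul followed by division by a
  // one-element tensor, which is the pattern the fused kernel targets.
  auto q = torch::randn({2, 8, 16, 64});
  auto k = torch::randn({2, 8, 16, 64});
  auto scale = torch::tensor(8.0);  // one-element divisor
  auto scores = at::div(at::matmul(q, k.transpose(-2, -1)), scale);
  std::cout << scores.sizes() << std::endl;
  return 0;
}
```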
@@ -139,30 +95,6 @@ at::Tensor AtenIpexJITDev::dil_matmul_div(
   }
 }
 
-at::Tensor AtenIpexJITDev::dil_convolution_clamp(
-    const at::Tensor& input,
-    const at::Tensor& weight,
-    const at::Tensor& bias,
-    at::IntArrayRef stride,
-    at::IntArrayRef padding,
-    at::IntArrayRef dilation,
-    int64_t groups,
-    float lower_bound,
-    float upper_bound) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_convolution_clamp", std::vector<c10::IValue>({}));
-#endif
-  return convolution_impl(
-      input,
-      weight,
-      bias,
-      stride,
-      padding,
-      dilation,
-      groups,
-      ideep::attr_t::fuse_clamp(lower_bound, upper_bound));
-}
-
 at::Tensor AtenIpexJITDev::dil_convolution_relu(
     const at::Tensor& input,
     const at::Tensor& weight,
@@ -185,33 +117,6 @@ at::Tensor AtenIpexJITDev::dil_convolution_relu(
       ideep::attr_t::fuse_relu());
 }
 
-at::Tensor AtenIpexJITDev::dil_convolution_elu(
-    const at::Tensor& input,
-    const at::Tensor& weight,
-    const at::Tensor& bias,
-    at::IntArrayRef stride,
-    at::IntArrayRef padding,
-    at::IntArrayRef dilation,
-    int64_t groups,
-    float alpha,
-    at::Scalar scale,
-    at::Scalar input_scale) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_convolution_elu", std::vector<c10::IValue>({}));
-#endif
-  auto scale_value = scale.to<float>();
-  auto input_scale_value = input_scale.to<float>();
-  return convolution_impl(
-      input,
-      weight,
-      bias,
-      stride,
-      padding,
-      dilation,
-      groups,
-      ideep::attr_t::fuse_elu(scale_value, alpha, input_scale_value));
-}
-
 at::Tensor AtenIpexJITDev::dil_conv_transpose2d(
     const at::Tensor& input,
     const at::Tensor& weight,
@@ -305,52 +210,6 @@ at::Tensor AtenIpexJITDev::dil_max_pool2d(
       ideep::algorithm::pooling_max);
 }
 
-at::Tensor AtenIpexJITDev::dil_linear(
-    const at::Tensor& self,
-    const at::Tensor& weight,
-    const at::Tensor& bias) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_linear", std::vector<c10::IValue>({}));
-#endif
-  return linear_impl(self, weight, bias, ideep::attr_t());
-}
-
-at::Tensor AtenIpexJITDev::dil_linear_fuse_eltwise(
-    const at::Tensor& self,
-    const at::Tensor& weight,
-    const at::Tensor& bias,
-    const ideep::attr_t& attr) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_linear_fuse_eltwise", std::vector<c10::IValue>({}));
-#endif
-  return linear_impl(self, weight, bias, attr);
-}
-
-/**
- *Dispatch Linear + Add fusion pattern to ipex oneDNN kernel for inference mode.
- *This feature might improve performance for cases like residual learning blocks
- *Pattern: accum = accum * alpha + Linear(self, weight, bias)
- *
- *@param self Activatin input for Linear
- *@param weight Weight for Linear
- *@param bias Bias for Linear
- *@param accum One input for add operation, another is the output of Linear
- *@param alpha Scale for accum when doing add operation.
- *
- *@return Value for the fusion pattern output.
- */
-at::Tensor AtenIpexJITDev::dil_linear_add(const at::Tensor &self,
-                                          const at::Tensor &weight,
-                                          const at::Tensor &bias,
-                                          at::Tensor &accumu,
-                                          at::Scalar alpha) {
-#if defined(IPEX_PROFILE_OP)
-  RECORD_FUNCTION("AtenIpexJITDev::dil_linear_add", std::vector<c10::IValue>({}));
-#endif
-  auto scale = alpha.to<float>();
-  return linear_inplace_impl(self, weight, bias, accumu, ideep::attr_t::fuse_sum(scale));
-}
-
 // Dispatch softmax to oneDNN path for jit inference
 at::Tensor AtenIpexJITDev::dil_softmax(
     const at::Tensor& input,
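The doc comment removed above describes the pattern `accum = accum * alpha + Linear(self, weight, bias)` that `dil_linear_add` fused into a single oneDNN call with a post-op sum. For reference, a minimal unfused libtorch sketch of that computation; the function name `linear_add_reference` is illustrative and not part of the patch:

```cpp
#include <torch/torch.h>

// Unfused reference for the pattern the removed dil_linear_add covered:
//   accum = accum * alpha + linear(self, weight, bias)
// The fused kernel instead accumulated into `accum` in place via a
// oneDNN post-op sum scaled by `alpha`.
at::Tensor linear_add_reference(const at::Tensor& self,
                                const at::Tensor& weight,
                                const at::Tensor& bias,
                                at::Tensor& accum,
                                at::Scalar alpha) {
  return at::linear(self, weight, bias) + accum * alpha;
}
```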