@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir};
+use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
-use super::place::{PlaceRef, codegen_tag_value};
+use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
@@ -180,7 +180,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue));
                 let temp = self.codegen_rvalue_operand(bx, rvalue);
                 temp.val.store(bx, dest);
             }
@@ -218,17 +217,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     /// Transmutes an `OperandValue` to another `OperandValue`.
     ///
-    /// This is supported only for cases where [`Self::rvalue_creates_operand`]
-    /// returns `true`, and will ICE otherwise. (In particular, anything that
-    /// would need to `alloca` in order to return a `PlaceValue` will ICE,
-    /// expecting those to go via [`Self::codegen_transmute`] instead where
-    /// the destination place is already allocated.)
+    /// This is supported for all cases where the `cast` type is SSA,
+    /// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> OperandValue<Bx::Value> {
+        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
+            && !cast.is_zst()
+        {
+            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
+        }
+
+        // `Layout` is interned, so we can do a cheap check for things that are
+        // exactly the same and thus don't need any handling.
+        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
+            return operand.val;
+        }
+
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
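
For intuition, the two new early exits correspond to source-level transmutes like the following. This is an illustrative sketch, not part of the patch: that `[u32; 4]` gets `BackendRepr::Memory` and that a `#[repr(transparent)]` wrapper shares its field's interned `Layout` are the typical outcomes, not guarantees made by this code.

    use std::mem::transmute;

    #[repr(transparent)]
    struct Wrapped(u64);

    // `[u32; 4]` is normally Memory-repr, so a transmute *to* it should arrive
    // via `codegen_transmute` with a destination place already allocated;
    // reaching `codegen_transmute_operand` here would trip the `span_bug!`.
    fn to_memory(x: [u16; 8]) -> [u32; 4] {
        unsafe { transmute(x) }
    }

    // `Wrapped` should share `u64`'s interned `Layout`, so the new
    // `abi::Layout::eq` fast path hands back the operand unchanged.
    fn same_layout(x: u64) -> Wrapped {
        unsafe { transmute(x) }
    }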
@@ -241,11 +249,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return OperandValue::poison(bx, cast);
         }
 
+        // To or from pointers takes different methods, so we use this to restrict
+        // the SimdVector case to types which can be `bitcast` between each other.
+        #[inline]
+        fn vector_can_bitcast(x: abi::Scalar) -> bool {
+            matches!(
+                x,
+                abi::Scalar::Initialized {
+                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
+                    ..
+                }
+            )
+        }
+
+        let cx = bx.cx();
         match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (_, _, abi::BackendRepr::Memory { .. }) => {
-                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
-            }
             (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
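
As a sketch of what `vector_can_bitcast` admits, consider a vector transmute whose elements are plain ints/floats on both sides (nightly portable SIMD is used here purely for illustration; it is not part of this patch):

    #![feature(portable_simd)]
    use std::simd::{f32x4, u32x4};

    // Int/float elements on both sides, so the SimdVector arm added below can
    // lower this to a single vector `bitcast` of the SSA immediate. A vector
    // of pointers would fail the guard and take the memory fallback instead.
    fn f32x4_bits(v: f32x4) -> u32x4 {
        unsafe { std::mem::transmute(v) }
    }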
@@ -256,16 +275,46 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandValue::Immediate(imm),
                 abi::BackendRepr::Scalar(from_scalar),
                 abi::BackendRepr::Scalar(to_scalar),
-            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
+            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
+                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
+            }
+            (
+                OperandValue::Immediate(imm),
+                abi::BackendRepr::SimdVector { element: from_scalar, .. },
+                abi::BackendRepr::SimdVector { element: to_scalar, .. },
+            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
+                let to_backend_ty = bx.cx().immediate_backend_type(cast);
+                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
+            }
             (
                 OperandValue::Pair(imm_a, imm_b),
                 abi::BackendRepr::ScalarPair(in_a, in_b),
                 abi::BackendRepr::ScalarPair(out_a, out_b),
-            ) => OperandValue::Pair(
-                transmute_scalar(bx, imm_a, in_a, out_a),
-                transmute_scalar(bx, imm_b, in_b, out_b),
-            ),
-            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
+                OperandValue::Pair(
+                    transmute_scalar(bx, imm_a, in_a, out_a),
+                    transmute_scalar(bx, imm_b, in_b, out_b),
+                )
+            }
+            _ => {
+                // For any other potentially-tricky cases, make a temporary instead.
+                // If anything else wants the target local to be in memory this won't
+                // be hit, as `codegen_transmute` will get called directly. Thus this
+                // is only for places where everything else wants the operand form,
+                // and thus it's not worth making those places get it from memory.
+                //
+                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
+                // and endianness issues, as do SimdVector ones to avoid worrying
+                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
+                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
+                let size = Ord::max(operand.layout.size, cast.size);
+                let temp = PlaceValue::alloca(bx, size, align);
+                bx.lifetime_start(temp.llval, size);
+                operand.val.store(bx, temp.with_type(operand.layout));
+                let val = bx.load_operand(temp.with_type(cast)).val;
+                bx.lifetime_end(temp.llval, size);
+                val
+            }
         }
     }
 
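
To see what the new fallback arm buys, consider a Scalar → ScalarPair transmute. Assuming the usual layouts (`u128` as a single `Scalar`, `(u64, u64)` as a `ScalarPair`), something like the following could previously not be handled in operand form at all; now it round-trips through a short-lived `alloca` bracketed by lifetime markers:

    // Scalar -> ScalarPair: handled by the `_ => { ... }` arm via a stack
    // temporary (store, then load), sidestepping the endianness and padding
    // questions about which half of the `u128` becomes which tuple field.
    fn split(x: u128) -> (u64, u64) {
        unsafe { std::mem::transmute(x) }
    }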
@@ -326,8 +375,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
-        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
-
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                 let operand = self.codegen_operand(bx, source);
@@ -653,8 +700,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let ty = self.monomorphize(ty);
         let layout = self.cx.layout_of(ty);
 
-        // `rvalue_creates_operand` has arranged that we only get here if
-        // we can build the aggregate immediate from the field immediates.
         let mut builder = OperandRefBuilder::new(layout);
         for (field_idx, field) in fields.iter_enumerated() {
             let op = self.codegen_operand(bx, field);
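
For context on the comment removed above: the aggregate path builds the result operand directly from the field operands. A minimal illustration, assuming `(i32, bool)` lowers to a `ScalarPair` as it typically does:

    // No alloca is needed here: both fields are already immediates, and the
    // builder assembles the ScalarPair result operand from them directly.
    fn make_pair(a: i32, b: bool) -> (i32, bool) {
        (a, b)
    }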
@@ -955,69 +1000,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         OperandValue::Pair(val, of)
     }
-
-    /// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
-    /// rather than needing a full `PlaceRef` for the assignment destination.
-    ///
-    /// This is used by the [`super::analyze`] code to decide which MIR locals
-    /// can stay as SSA values (as opposed to generating `alloca` slots for them).
-    /// As such, some paths here return `true` even where the specific rvalue
-    /// will not actually take the operand path because the result type is such
-    /// that it always gets an `alloca`, but where it's not worth re-checking the
-    /// layout in this code when the right thing will happen anyway.
-    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
-        match *rvalue {
-            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
-                let operand_ty = operand.ty(self.mir, self.cx.tcx());
-                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
-                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-                match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
-                    // Otherwise (for a non-memory output) if the input is memory
-                    // then we can just read the value from the place.
-                    (abi::BackendRepr::Memory { .. }, _) => true,
-
-                    // When we have scalar immediates, we can only convert things
-                    // where the sizes match, to avoid endianness questions.
-                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
-                        a.size(self.cx) == b.size(self.cx),
-                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
-                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
-
-                    // Mixing Scalars and ScalarPairs can get quite complicated when
-                    // padding and undef get involved, so leave that to the memory path.
-                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
-                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
-
-                    // SIMD vectors aren't worth the trouble of dealing with complex
-                    // cases like from vectors of f32 to vectors of pointers or
-                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-                }
-            }
-            mir::Rvalue::Ref(..) |
-            mir::Rvalue::CopyForDeref(..) |
-            mir::Rvalue::RawPtr(..) |
-            mir::Rvalue::Len(..) |
-            mir::Rvalue::Cast(..) | // (*)
-            mir::Rvalue::ShallowInitBox(..) | // (*)
-            mir::Rvalue::BinaryOp(..) |
-            mir::Rvalue::UnaryOp(..) |
-            mir::Rvalue::Discriminant(..) |
-            mir::Rvalue::NullaryOp(..) |
-            mir::Rvalue::ThreadLocalRef(_) |
-            mir::Rvalue::Use(..) |
-            mir::Rvalue::Repeat(..) | // (*)
-            mir::Rvalue::Aggregate(..) | // (*)
-            mir::Rvalue::WrapUnsafeBinder(..) => // (*)
-                true,
-        }
-
-        // (*) this is only true if the type is suitable
-    }
 }
 
 /// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.