diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 99957c6770845..f543a62423a01 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -580,6 +580,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
     pub(crate) fn builder(
         layout: TyAndLayout<'tcx>,
     ) -> Option<OperandRef<'tcx, Result<V, abi::Scalar>>> {
+        // Uninhabited types are weird, because for example `Result<!, !>`
+        // shows up as `FieldsShape::Primitive` and we need to be able to write
+        // a field into `(u32, !)`. We'll do that in an `alloca` instead.
+        if layout.uninhabited {
+            return None;
+        }
+
         let val = match layout.backend_repr {
             BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
             BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
@@ -649,16 +656,46 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
         }
     }
 
+    /// Insert the immediate value `imm` for field `f` in the *type itself*,
+    /// rather than into one of the variants.
+    ///
+    /// Most things want [`OperandRef::insert_field`] instead, but this one is
+    /// necessary for writing things like enum tags that aren't in any variant.
+    pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
+        let field_offset = self.layout.fields.offset(f.as_usize());
+        let is_zero_offset = field_offset == Size::ZERO;
+        match &mut self.val {
+            OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
+                *val = Ok(imm);
+            }
+            OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
+                *fst = Ok(imm);
+            }
+            OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
+                *snd = Ok(imm);
+            }
+            _ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
+        }
+    }
+
     /// After having set all necessary fields, this converts the
     /// `OperandValue<Result<V, abi::Scalar>>` (as obtained from [`OperandRef::builder`])
     /// to the normal `OperandValue<V>`.
     ///
     /// ICEs if any required fields were not set.
-    pub fn build(&self) -> OperandRef<'tcx, V> {
+    pub fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
         let OperandRef { val, layout } = *self;
 
+        // For something like `Option::<u32>::None`, it's expected that the
+        // payload scalar will not actually have been set, so this converts
+        // unset scalars to corresponding `undef` values so long as the scalar
+        // from the layout allows uninit.
         let unwrap = |r: Result<V, abi::Scalar>| match r {
             Ok(v) => v,
+            Err(s) if s.is_uninit_valid() => {
+                let bty = cx.type_from_scalar(s);
+                cx.const_undef(bty)
+            }
             Err(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
         };
 
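To illustrate the `Err`-to-`undef` conversion in `build` above, here is a
minimal standalone sketch (not part of the patch; `Imm` and `build_slot` are
illustrative names, not rustc API): a slot that was never written still holds
its layout scalar, and it only lowers to `undef` if that scalar tolerates
uninitialized bits.

    // Model: Ok(v) is an inserted backend immediate; Err(uninit_valid)
    // stands in for the layout's `abi::Scalar` still sitting in the slot.
    #[derive(Debug, PartialEq)]
    enum Imm {
        Value(u64), // like the backend value `V`
        Undef,      // like `cx.const_undef(bty)`
    }

    fn build_slot(slot: Result<u64, bool>) -> Imm {
        match slot {
            Ok(v) => Imm::Value(v),
            Err(true) => Imm::Undef,               // `s.is_uninit_valid()` held
            Err(false) => panic!("field missing"), // the `bug!` path
        }
    }

    fn main() {
        // `Option::<u16>::None`: the tag was written, the payload was not;
        // the payload scalar allows uninit, so it becomes `undef`, matching
        // `ret { i16, i16 } { i16 0, i16 undef }` in the new tests below.
        assert_eq!(build_slot(Ok(0)), Imm::Value(0));
        assert_eq!(build_slot(Err(true)), Imm::Undef);
    }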
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 937063c24a63d..0090be9fdef06 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -1,4 +1,6 @@
-use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
+use rustc_abi::{
+    Align, BackendRepr, FieldIdx, FieldsShape, Size, TagEncoding, VariantIdx, Variants,
+};
 use rustc_middle::mir::PlaceTy;
 use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
@@ -239,53 +241,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         bx: &mut Bx,
         variant_index: VariantIdx,
     ) {
-        if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
-            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
-            // if that turns out to be helpful.
-            bx.abort();
-            return;
-        }
-        match self.layout.variants {
-            Variants::Empty => unreachable!("we already handled uninhabited types"),
-            Variants::Single { index } => assert_eq!(index, variant_index),
-
-            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
-                let ptr = self.project_field(bx, tag_field.as_usize());
-                let to =
-                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
-                bx.store_to_place(
-                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
-                    ptr.val,
-                );
+        match codegen_tag_value(bx.cx(), variant_index, self.layout) {
+            Err(UninhabitedVariantError) => {
+                // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+                // if that turns out to be helpful.
+                bx.abort();
             }
-            Variants::Multiple {
-                tag_encoding:
-                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
-                tag_field,
-                ..
-            } => {
-                if variant_index != untagged_variant {
-                    let niche = self.project_field(bx, tag_field.as_usize());
-                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
-                    let BackendRepr::Scalar(scalar) = niche.layout.backend_repr else {
-                        bug!("expected a scalar placeref for the niche");
-                    };
-                    // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
-                    // around the `niche`'s type.
-                    // The easiest way to do that is to do wrapping arithmetic on `u128` and then
-                    // masking off any extra bits that occur because we did the arithmetic with too many bits.
-                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
-                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
-                    let niche_value = niche_value & niche.layout.size.unsigned_int_max();
-
-                    let niche_llval = bx.cx().scalar_to_backend(
-                        Scalar::from_uint(niche_value, niche.layout.size),
-                        scalar,
-                        niche_llty,
-                    );
-                    OperandValue::Immediate(niche_llval).store(bx, niche);
-                }
+            Ok(Some((tag_field, imm))) => {
+                let tag_place = self.project_field(bx, tag_field.as_usize());
+                OperandValue::Immediate(imm).store(bx, tag_place);
             }
+            Ok(None) => {}
         }
     }
 
@@ -471,3 +437,73 @@ fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let offset = bx.and(neg_value, align_minus_1);
     bx.add(value, offset)
 }
+
+/// Calculates the value that needs to be stored to mark the discriminant.
+///
+/// This might be `None` for a `struct` or a niched variant (like `Some(&3)`).
+///
+/// If it's `Some`, it returns the value to store and the field in which to
+/// store it. Note that this value is *not* the same as the discriminant, in
+/// general, as it might be a niche value or have a different size.
+///
+/// It might also be an `Err` because the variant is uninhabited.
+pub(super) fn codegen_tag_value<'tcx, V>(
+    cx: &impl CodegenMethods<'tcx, Value = V>,
+    variant_index: VariantIdx,
+    layout: TyAndLayout<'tcx>,
+) -> Result<Option<(FieldIdx, V)>, UninhabitedVariantError> {
+    // By checking uninhabited-ness first we don't need to worry about types
+    // like `(u32, !)` which are single-variant but weird.
+    if layout.for_variant(cx, variant_index).is_uninhabited() {
+        return Err(UninhabitedVariantError);
+    }
+
+    Ok(match layout.variants {
+        Variants::Empty => unreachable!("we already handled uninhabited types"),
+        Variants::Single { index } => {
+            assert_eq!(index, variant_index);
+            None
+        }
+
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+            let discr = layout.ty.discriminant_for_variant(cx.tcx(), variant_index);
+            let to = discr.unwrap().val;
+            let tag_layout = layout.field(cx, tag_field.as_usize());
+            let tag_llty = cx.immediate_backend_type(tag_layout);
+            let imm = cx.const_uint_big(tag_llty, to);
+            Some((tag_field, imm))
+        }
+        Variants::Multiple {
+            tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
+            tag_field,
+            ..
+        } => {
+            if variant_index != untagged_variant {
+                let niche_layout = layout.field(cx, tag_field.as_usize());
+                let niche_llty = cx.immediate_backend_type(niche_layout);
+                let BackendRepr::Scalar(scalar) = niche_layout.backend_repr else {
+                    bug!("expected a scalar placeref for the niche");
+                };
+                // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
+                // around the `niche`'s type.
+                // The easiest way to do that is to do wrapping arithmetic on `u128` and then
+                // masking off any extra bits that occur because we did the arithmetic with too many bits.
+                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+                let niche_value = (niche_value as u128).wrapping_add(niche_start);
+                let niche_value = niche_value & niche_layout.size.unsigned_int_max();
+
+                let niche_llval = cx.scalar_to_backend(
+                    Scalar::from_uint(niche_value, niche_layout.size),
+                    scalar,
+                    niche_llty,
+                );
+                Some((tag_field, niche_llval))
+            } else {
+                None
+            }
+        }
+    })
+}
+
+#[derive(Debug)]
+pub(super) struct UninhabitedVariantError;
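For intuition on the `Niche` arm of `codegen_tag_value`, here is the same
arithmetic on plain integers (a minimal sketch, not part of the patch; the
function and parameter names are illustrative, not rustc API):

    /// Wrapping add on `u128`, then mask down to the tag's width, which
    /// emulates wrapping arithmetic done directly in the narrower tag type.
    fn niche_tag_value(
        variant_index: u32,
        niche_variants_start: u32,
        niche_start: u128,
        tag_size_bytes: u64,
    ) -> u128 {
        let relative = (variant_index - niche_variants_start) as u128;
        let max = if tag_size_bytes >= 16 {
            u128::MAX
        } else {
            (1u128 << (tag_size_bytes * 8)) - 1 // like `Size::unsigned_int_max`
        };
        relative.wrapping_add(niche_start) & max
    }

    fn main() {
        // `Option<bool>`: `None` is stored in the bool's niche, which starts
        // at 2 (0 and 1 are the valid bools); this reproduces the `ret i8 2`
        // expected by `make_none_bool` in the new test below.
        assert_eq!(niche_tag_value(0, 0, 2, 1), 2);
        // A hypothetical one-byte niche starting at 255: the next niche
        // variant wraps around to tag value 0.
        assert_eq!(niche_tag_value(1, 0, 255, 1), 0);
    }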
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index e1d8b7546cf42..690004b3c74ad 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -10,7 +10,7 @@ use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandValue};
-use super::place::PlaceRef;
+use super::place::{PlaceRef, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::IntPredicate;
 use crate::traits::*;
@@ -700,7 +700,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
             mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
-            mir::Rvalue::Aggregate(_, ref fields) => {
+            mir::Rvalue::Aggregate(ref kind, ref fields) => {
+                let (variant_index, active_field_index) = match **kind {
+                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+                        (variant_index, active_field_index)
+                    }
+                    _ => (FIRST_VARIANT, None),
+                };
+
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
                 let ty = self.monomorphize(ty);
                 let layout = self.cx.layout_of(ty);
@@ -712,10 +719,27 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 };
                 for (field_idx, field) in fields.iter_enumerated() {
                     let op = self.codegen_operand(bx, field);
-                    builder.insert_field(bx, FIRST_VARIANT, field_idx, op);
+                    let fi = active_field_index.unwrap_or(field_idx);
+                    builder.insert_field(bx, variant_index, fi, op);
                 }
 
-                builder.build()
+                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
+                match tag_result {
+                    Err(super::place::UninhabitedVariantError) => {
+                        // Like codegen_set_discr we use a sound abort, but could
+                        // potentially `unreachable` or just return the poison for
+                        // more optimizability, if that turns out to be helpful.
+                        bx.abort();
+                        let val = OperandValue::poison(bx, layout);
+                        OperandRef { val, layout }
+                    }
+                    Ok(maybe_tag_value) => {
+                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
+                            builder.insert_imm(tag_field, tag_imm);
+                        }
+                        builder.build(bx.cx())
+                    }
+                }
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                 let operand = self.codegen_operand(bx, operand);
@@ -1043,26 +1067,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // Arrays are always aggregates, so it's not worth checking anything here.
             // (If it's really `[(); N]` or `[T; 0]` and we use the place path, fine.)
             mir::Rvalue::Repeat(..) => false,
-            mir::Rvalue::Aggregate(ref kind, _) => {
-                let allowed_kind = match **kind {
-                    // This always produces a `ty::RawPtr`, so will be Immediate or Pair
-                    mir::AggregateKind::RawPtr(..) => true,
-                    mir::AggregateKind::Array(..) => false,
-                    mir::AggregateKind::Tuple => true,
-                    mir::AggregateKind::Adt(def_id, ..) => {
-                        let adt_def = self.cx.tcx().adt_def(def_id);
-                        adt_def.is_struct() && !adt_def.repr().simd()
-                    }
-                    mir::AggregateKind::Closure(..) => true,
-                    // FIXME: Can we do this for simple coroutines too?
-                    mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
-                };
-                allowed_kind && {
-                    let ty = rvalue.ty(self.mir, self.cx.tcx());
-                    let ty = self.monomorphize(ty);
-                    let layout = self.cx.spanned_layout_of(ty, span);
-                    OperandRef::<Bx::Value>::builder(layout).is_some()
-                }
+            mir::Rvalue::Aggregate(..) => {
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
+                let ty = self.monomorphize(ty);
+                let layout = self.cx.spanned_layout_of(ty, span);
+                OperandRef::<Bx::Value>::builder(layout).is_some()
             }
         }
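One subtlety in the `Aggregate` arm above: for union literals like
`MU { value: x }`, MIR supplies exactly one operand plus an
`active_field_index` naming which union field it initializes, so the loop
remaps operand 0 onto that field. A plain-Rust sketch of the remapping (not
part of the patch; `remap_fields` is an illustrative name):

    fn remap_fields(active_field_index: Option<usize>, num_operands: usize) -> Vec<usize> {
        // Mirrors `active_field_index.unwrap_or(field_idx)` in the loop above.
        (0..num_operands)
            .map(|field_idx| active_field_index.unwrap_or(field_idx))
            .collect()
    }

    fn main() {
        // Struct/enum literal: operands map to fields 0, 1, 2, ...
        assert_eq!(remap_fields(None, 3), vec![0, 1, 2]);
        // Union literal `MU { value: x }`: the single operand initializes
        // field 1 (`value`), not field 0 (`uninit`).
        assert_eq!(remap_fields(Some(1), 1), vec![1]);
    }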
diff --git a/tests/codegen/align-struct.rs b/tests/codegen/align-struct.rs
index 402a184d4c07e..d4cc65e9158cf 100644
--- a/tests/codegen/align-struct.rs
+++ b/tests/codegen/align-struct.rs
@@ -15,9 +15,11 @@ pub struct Nested64 {
     d: i8,
 }
 
+// This has the extra field in B to ensure it's not ScalarPair,
+// and thus that the test actually emits it via memory, not `insertvalue`.
 pub enum Enum4 {
     A(i32),
-    B(i32),
+    B(i32, i32),
 }
 
 pub enum Enum64 {
@@ -54,7 +56,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 // CHECK-LABEL: @enum4
 #[no_mangle]
 pub fn enum4(a: i32) -> Enum4 {
-    // CHECK: %e4 = alloca [8 x i8], align 4
+    // CHECK: %e4 = alloca [12 x i8], align 4
     let e4 = Enum4::A(a);
     e4
 }
diff --git a/tests/codegen/common_prim_int_ptr.rs b/tests/codegen/common_prim_int_ptr.rs
index a1d7a125f324c..53716adccbf21 100644
--- a/tests/codegen/common_prim_int_ptr.rs
+++ b/tests/codegen/common_prim_int_ptr.rs
@@ -11,9 +11,9 @@
 #[no_mangle]
 pub fn insert_int(x: usize) -> Result<usize, Box<()>> {
     // CHECK: start:
-    // CHECK-NEXT: inttoptr i{{[0-9]+}} %x to ptr
-    // CHECK-NEXT: insertvalue
-    // CHECK-NEXT: ret { i{{[0-9]+}}, ptr }
+    // CHECK-NEXT: %[[WO_PROV:.+]] = getelementptr i8, ptr null, [[USIZE:i[0-9]+]] %x
+    // CHECK-NEXT: %[[R:.+]] = insertvalue { [[USIZE]], ptr } { [[USIZE]] 0, ptr poison }, ptr %[[WO_PROV]], 1
+    // CHECK-NEXT: ret { [[USIZE]], ptr } %[[R]]
     Ok(x)
 }
 
diff --git a/tests/codegen/enum/enum-aggregate.rs b/tests/codegen/enum/enum-aggregate.rs
new file mode 100644
index 0000000000000..b6a9b8dd814db
--- /dev/null
+++ b/tests/codegen/enum/enum-aggregate.rs
@@ -0,0 +1,129 @@
+//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
+//@ min-llvm-version: 19
+//@ only-64bit
+
+#![crate_type = "lib"]
+
+use std::cmp::Ordering;
+use std::num::NonZero;
+use std::ptr::NonNull;
+
+#[no_mangle]
+fn make_some_bool(x: bool) -> Option<bool> {
+    // CHECK-LABEL: i8 @make_some_bool(i1 zeroext %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
+    // CHECK-NEXT: ret i8 %[[WIDER]]
+    Some(x)
+}
+
+#[no_mangle]
+fn make_none_bool() -> Option<bool> {
+    // CHECK-LABEL: i8 @make_none_bool()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i8 2
+    None
+}
+
+#[no_mangle]
+fn make_some_ordering(x: Ordering) -> Option<Ordering> {
+    // CHECK-LABEL: i8 @make_some_ordering(i8 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i8 %x
+    Some(x)
+}
+
+#[no_mangle]
+fn make_some_u16(x: u16) -> Option<u16> {
+    // CHECK-LABEL: { i16, i16 } @make_some_u16(i16 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { i16, i16 } { i16 1, i16 poison }, i16 %x, 1
+    // CHECK-NEXT: ret { i16, i16 } %0
+    Some(x)
+}
+
+#[no_mangle]
+fn make_none_u16() -> Option<u16> {
+    // CHECK-LABEL: { i16, i16 } @make_none_u16()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret { i16, i16 } { i16 0, i16 undef }
+    None
+}
+
+#[no_mangle]
+fn make_some_nzu32(x: NonZero<u32>) -> Option<NonZero<u32>> {
+    // CHECK-LABEL: i32 @make_some_nzu32(i32 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i32 %x
+    Some(x)
+}
+
+#[no_mangle]
+fn make_ok_ptr(x: NonNull<u16>) -> Result<NonNull<u16>, usize> {
+    // CHECK-LABEL: { i64, ptr } @make_ok_ptr(ptr %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %x, 1
+    // CHECK-NEXT: ret { i64, ptr } %0
+    Ok(x)
+}
+
+#[no_mangle]
+fn make_ok_int(x: usize) -> Result<usize, NonNull<u16>> {
+    // CHECK-LABEL: { i64, ptr } @make_ok_int(i64 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[NOPROV:.+]] = getelementptr i8, ptr null, i64 %x
+    // CHECK-NEXT: %[[R:.+]] = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %[[NOPROV]], 1
+    // CHECK-NEXT: ret { i64, ptr } %[[R]]
+    Ok(x)
+}
+
+#[no_mangle]
+fn make_some_ref(x: &u16) -> Option<&u16> {
+    // CHECK-LABEL: ptr @make_some_ref(ptr align 2 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr %x
+    Some(x)
+}
+
+#[no_mangle]
+fn make_none_ref<'a>() -> Option<&'a u16> {
+    // CHECK-LABEL: ptr @make_none_ref()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr null
+    None
+}
+
+#[inline(never)]
+fn make_err_generic<E>(e: E) -> Result<u32, E> {
+    // CHECK-LABEL: define{{.+}}make_err_generic
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: call void @llvm.trap()
+    // CHECK-NEXT: ret i32 poison
+    Err(e)
+}
+
+#[no_mangle]
+fn make_uninhabited_err_indirectly(n: Never) -> Result<u32, Never> {
+    // CHECK-LABEL: i32 @make_uninhabited_err_indirectly()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: call{{.+}}make_err_generic
    make_err_generic(n)
+}
+
+#[no_mangle]
+fn make_fully_uninhabited_result(v: u32, n: Never) -> Result<(u32, Never), (Never, u32)> {
+    // We don't try to do this in SSA form since the whole type is uninhabited.
+
+    // CHECK-LABEL: { i32, i32 } @make_fully_uninhabited_result(i32 %v)
+    // CHECK: %[[ALLOC_V:.+]] = alloca [4 x i8]
+    // CHECK: %[[RET:.+]] = alloca [8 x i8]
+    // CHECK: store i32 %v, ptr %[[ALLOC_V]]
+    // CHECK: %[[TEMP_V:.+]] = load i32, ptr %[[ALLOC_V]]
+    // CHECK: %[[INNER:.+]] = getelementptr inbounds i8, ptr %[[RET]]
+    // CHECK: store i32 %[[TEMP_V]], ptr %[[INNER]]
+    // CHECK: call void @llvm.trap()
+    // CHECK: unreachable
+    Ok((v, n))
+}
+
+enum Never {}
diff --git a/tests/codegen/set-discriminant-invalid.rs b/tests/codegen/set-discriminant-invalid.rs
index 0b7cb14880c98..dd584ef1c1420 100644
--- a/tests/codegen/set-discriminant-invalid.rs
+++ b/tests/codegen/set-discriminant-invalid.rs
@@ -16,10 +16,9 @@ impl IntoError<Error> for Api {
     type Source = ApiError;
     // CHECK-LABEL: @into_error
     // CHECK: llvm.trap()
-    // Also check the next two instructions to make sure we do not match against `trap`
+    // Also check the next instruction to make sure we do not match against `trap`
     // elsewhere in the code.
-    // CHECK-NEXT: load
-    // CHECK-NEXT: ret
+    // CHECK-NEXT: ret i8 poison
     #[no_mangle]
     fn into_error(self, error: Self::Source) -> Error {
         Error::Api { source: error }
diff --git a/tests/codegen/union-aggregate.rs b/tests/codegen/union-aggregate.rs
new file mode 100644
index 0000000000000..3c6053379fa3b
--- /dev/null
+++ b/tests/codegen/union-aggregate.rs
@@ -0,0 +1,85 @@
+//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
+//@ min-llvm-version: 19
+//@ only-64bit
+
+#![crate_type = "lib"]
+#![feature(transparent_unions)]
+
+#[repr(transparent)]
+union MU<T: Copy> {
+    uninit: (),
+    value: T,
+}
+
+use std::cmp::Ordering;
+use std::num::NonZero;
+use std::ptr::NonNull;
+
+#[no_mangle]
+fn make_mu_bool(x: bool) -> MU<bool> {
+    // CHECK-LABEL: i8 @make_mu_bool(i1 zeroext %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
+    // CHECK-NEXT: ret i8 %[[WIDER]]
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_bool_uninit() -> MU<bool> {
+    // CHECK-LABEL: i8 @make_mu_bool_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i8 undef
+    MU { uninit: () }
+}
+
+#[no_mangle]
+fn make_mu_ref(x: &u16) -> MU<&u16> {
+    // CHECK-LABEL: ptr @make_mu_ref(ptr align 2 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr %x
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_ref_uninit<'a>() -> MU<&'a u16> {
+    // CHECK-LABEL: ptr @make_mu_ref_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr undef
+    MU { uninit: () }
+}
+
+#[no_mangle]
+fn make_mu_str(x: &str) -> MU<&str> {
+    // CHECK-LABEL: { ptr, i64 } @make_mu_str(ptr align 1 %x.0, i64 %x.1)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { ptr, i64 } poison, ptr %x.0, 0
+    // CHECK-NEXT: %1 = insertvalue { ptr, i64 } %0, i64 %x.1, 1
+    // CHECK-NEXT: ret { ptr, i64 } %1
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_str_uninit<'a>() -> MU<&'a str> {
+    // CHECK-LABEL: { ptr, i64 } @make_mu_str_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret { ptr, i64 } undef
+    MU { uninit: () }
+}
+
+#[no_mangle]
+fn make_mu_pair(x: (u8, u32)) -> MU<(u8, u32)> {
+    // CHECK-LABEL: { i8, i32 } @make_mu_pair(i8 %x.0, i32 %x.1)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { i8, i32 } poison, i8 %x.0, 0
+    // CHECK-NEXT: %1 = insertvalue { i8, i32 } %0, i32 %x.1, 1
+    // CHECK-NEXT: ret { i8, i32 } %1
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_pair_uninit() -> MU<(u8, u32)> {
+    // CHECK-LABEL: { i8, i32 } @make_mu_pair_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret { i8, i32 } undef
+    MU { uninit: () }
+}