From 8c2832f4192501acb54547641c666b776e6ec90e Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Thu, 9 Feb 2017 18:03:21 +0100
Subject: [PATCH 1/7] add the full test from rust

---
 tests/run-pass/dst-field-align.rs | 47 +++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/tests/run-pass/dst-field-align.rs b/tests/run-pass/dst-field-align.rs
index ce3fb82cb3..5631b65ed9 100644
--- a/tests/run-pass/dst-field-align.rs
+++ b/tests/run-pass/dst-field-align.rs
@@ -23,8 +23,55 @@
 impl Bar for usize {
     fn get(&self) -> usize { *self }
 }

+struct Baz<T: ?Sized> {
+    a: T
+}
+
+struct HasDrop<T: ?Sized> {
+    ptr: Box<usize>,
+    data: T
+}
+
 fn main() {
+    // Test that zero-offset works properly
+    let b : Baz<usize> = Baz { a: 7 };
+    assert_eq!(b.a.get(), 7);
+    let b : &Baz<Bar> = &b;
+    assert_eq!(b.a.get(), 7);
+
+    // Test that the field is aligned properly
     let f : Foo<usize> = Foo { a: 0, b: 11 };
+    assert_eq!(f.b.get(), 11);
+    let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+    let f : &Foo<Bar> = &f;
+    let ptr2 : *const u8 = &f.b as *const _ as *const u8;
     assert_eq!(f.b.get(), 11);
+
+    // The pointers should be the same
+    assert_eq!(ptr1, ptr2);
+
+    // Test that nested DSTs work properly
+    let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+    assert_eq!(f.b.b.get(), 17);
+    let f : &Foo<Foo<Bar>> = &f;
+    assert_eq!(f.b.b.get(), 17);
+
+    // Test that get the pointer via destructuring works
+
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    let f : &Foo<Bar> = &f;
+    let &Foo { a: _, b: ref bar } = f;
+    assert_eq!(bar.get(), 11);
+
+    // Make sure that drop flags don't screw things up
+
+    let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+        ptr: Box::new(0),
+        data: Baz { a: [1,2,3,4] }
+    };
+    assert_eq!([1,2,3,4], d.data.a);
+
+    let d : &HasDrop<Baz<[i32]>> = &d;
+    assert_eq!(&[1,2,3,4], &d.data.a);
 }

From 06a02187babfbe5b6a5abdee9b5625bed7a15493 Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Thu, 9 Feb 2017 18:12:59 +0100
Subject: [PATCH 2/7] move drop code into its own file

---
 src/terminator/drop.rs | 236 +++++++++++++++++++++++++++++++++++++++++
 src/terminator/mod.rs  | 226 +--------------------------------------
 2 files changed, 240 insertions(+), 222 deletions(-)
 create mode 100644 src/terminator/drop.rs

diff --git a/src/terminator/drop.rs b/src/terminator/drop.rs
new file mode 100644
index 0000000000..32c5073421
--- /dev/null
+++ b/src/terminator/drop.rs
@@ -0,0 +1,236 @@
+use rustc::hir::def_id::DefId;
+use rustc::ty::layout::Layout;
+use rustc::ty::subst::{Substs, Kind};
+use rustc::ty::{self, Ty};
+use rustc::mir;
+use syntax::codemap::Span;
+
+use error::{EvalError, EvalResult};
+use eval_context::{EvalContext, monomorphize_field_ty, StackPopCleanup};
+use lvalue::{Lvalue, LvalueExtra};
+use memory::{Pointer, FunctionDefinition};
+use value::PrimVal;
+use value::Value;
+
+impl<'a, 'tcx> EvalContext<'a, 'tcx> {
+
+    /// Creates stack frames for all drop impls. See `drop` for the actual content.
+    pub fn eval_drop_impls(&mut self, drops: Vec<(DefId, Value, &'tcx Substs<'tcx>)>, span: Span) -> EvalResult<'tcx> {
+        // add them to the stack in reverse order, because the impl that needs to run the last
+        // is the one that needs to be at the bottom of the stack
+        for (drop_def_id, self_arg, substs) in drops.into_iter().rev() {
+            let mir = self.load_mir(drop_def_id)?;
+            trace!("substs for drop glue: {:?}", substs);
+            self.push_stack_frame(
+                drop_def_id,
+                span,
+                mir,
+                substs,
+                Lvalue::from_ptr(Pointer::zst_ptr()),
+                StackPopCleanup::None,
+                Vec::new(),
+            )?;
+            let mut arg_locals = self.frame().mir.args_iter();
+            let first = arg_locals.next().expect("drop impl has self arg");
+            assert!(arg_locals.next().is_none(), "drop impl should have only one arg");
+            let dest = self.eval_lvalue(&mir::Lvalue::Local(first))?;
+            let ty = self.frame().mir.local_decls[first].ty;
+            self.write_value(self_arg, dest, ty)?;
+        }
+        Ok(())
+    }
+
+    /// push DefIds of drop impls and their argument on the given vector
+    pub fn drop(
+        &mut self,
+        lval: Lvalue<'tcx>,
+        ty: Ty<'tcx>,
+        drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
+    ) -> EvalResult<'tcx> {
+        if !self.type_needs_drop(ty) {
+            debug!("no need to drop {:?}", ty);
+            return Ok(());
+        }
+        trace!("-need to drop {:?} at {:?}", ty, lval);
+
+        match ty.sty {
+            // special case `Box` to deallocate the inner allocation
+            ty::TyAdt(ref def, _) if def.is_box() => {
+                let contents_ty = ty.boxed_ty();
+                let val = self.read_lvalue(lval);
+                // we are going through the read_value path, because that already does all the
+                // checks for the trait object types. We'd only be repeating ourselves here.
+                let val = self.follow_by_ref_value(val, ty)?;
+                trace!("box dealloc on {:?}", val);
+                match val {
+                    Value::ByRef(_) => bug!("follow_by_ref_value can't result in ByRef"),
+                    Value::ByVal(ptr) => {
+                        assert!(self.type_is_sized(contents_ty));
+                        let contents_ptr = ptr.to_ptr()?;
+                        self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?;
+                    },
+                    Value::ByValPair(prim_ptr, extra) => {
+                        let ptr = prim_ptr.to_ptr()?;
+                        let extra = match self.tcx.struct_tail(contents_ty).sty {
+                            ty::TyDynamic(..) => LvalueExtra::Vtable(extra.to_ptr()?),
+                            ty::TyStr | ty::TySlice(_) => LvalueExtra::Length(extra.to_u64()?),
+                            _ => bug!("invalid fat pointer type: {}", ty),
+                        };
+                        self.drop(Lvalue::Ptr { ptr, extra }, contents_ty, drop)?;
+                    },
+                }
+                let box_free_fn = self.tcx.lang_items.box_free_fn().expect("no box_free lang item");
+                let substs = self.tcx.intern_substs(&[Kind::from(contents_ty)]);
+                // this is somewhat hacky, but hey, there's no representation difference between
+                // pointers and references, so
+                // #[lang = "box_free"] unsafe fn box_free<T>(ptr: *mut T)
+                // is the same as
+                // fn drop(&mut self) if Self is Box<T>
+                drop.push((box_free_fn, val, substs));
+            },
+
+            ty::TyAdt(adt_def, substs) => {
+                // FIXME: some structs are represented as ByValPair
+                let lval = self.force_allocation(lval)?;
+                let adt_ptr = match lval {
+                    Lvalue::Ptr { ptr, .. } => ptr,
+                    _ => bug!("force allocation can only yield Lvalue::Ptr"),
+                };
+                // run drop impl before the fields' drop impls
+                if let Some(drop_def_id) = adt_def.destructor() {
+                    drop.push((drop_def_id, Value::ByVal(PrimVal::Ptr(adt_ptr)), substs));
+                }
+                let layout = self.type_layout(ty)?;
+                let fields = match *layout {
+                    Layout::Univariant { ref variant, .. } => {
+                        adt_def.struct_variant().fields.iter().zip(&variant.offsets)
+                    },
+                    Layout::General { ref variants, .. } => {
+                        let discr_val = self.read_discriminant_value(adt_ptr, ty)? as u128;
+                        match adt_def.variants.iter().position(|v| discr_val == v.disr_val.to_u128_unchecked()) {
+                            // start at offset 1, to skip over the discriminant
+                            Some(i) => adt_def.variants[i].fields.iter().zip(&variants[i].offsets[1..]),
+                            None => return Err(EvalError::InvalidDiscriminant),
+                        }
+                    },
+                    Layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
+                        let discr = self.read_discriminant_value(adt_ptr, ty)?;
+                        if discr == nndiscr as u128 {
+                            assert_eq!(discr as usize as u128, discr);
+                            adt_def.variants[discr as usize].fields.iter().zip(&nonnull.offsets)
+                        } else {
+                            // FIXME: the zst variant might contain zst types that impl Drop
+                            return Ok(()); // nothing to do, this is zero sized (e.g. `None`)
+                        }
+                    },
+                    Layout::RawNullablePointer { nndiscr, .. } => {
+                        let discr = self.read_discriminant_value(adt_ptr, ty)?;
+                        if discr == nndiscr as u128 {
+                            assert_eq!(discr as usize as u128, discr);
+                            assert_eq!(adt_def.variants[discr as usize].fields.len(), 1);
+                            let field_ty = &adt_def.variants[discr as usize].fields[0];
+                            let field_ty = monomorphize_field_ty(self.tcx, field_ty, substs);
+                            // FIXME: once read_discriminant_value works with lvalue, don't force
+                            // alloc in the RawNullablePointer case
+                            self.drop(lval, field_ty, drop)?;
+                            return Ok(());
+                        } else {
+                            // FIXME: the zst variant might contain zst types that impl Drop
+                            return Ok(()); // nothing to do, this is zero sized (e.g. `None`)
+                        }
+                    },
+                    Layout::CEnum { .. } => return Ok(()),
+                    _ => bug!("{:?} is not an adt layout", layout),
+                };
+                let tcx = self.tcx;
+                self.drop_fields(
+                    fields.map(|(ty, &offset)| (monomorphize_field_ty(tcx, ty, substs), offset)),
+                    lval,
+                    drop,
+                )?;
+            },
+            ty::TyTuple(fields, _) => {
+                let offsets = match *self.type_layout(ty)? {
+                    Layout::Univariant { ref variant, .. } => &variant.offsets,
+                    _ => bug!("tuples must be univariant"),
+                };
+                self.drop_fields(fields.iter().cloned().zip(offsets.iter().cloned()), lval, drop)?;
+            },
+            ty::TyDynamic(..) => {
+                let (ptr, vtable) = match lval {
+                    Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => (ptr, vtable),
+                    _ => bug!("expected an lvalue with a vtable"),
+                };
+                let drop_fn = self.memory.read_ptr(vtable)?;
+                // some values don't need to call a drop impl, so the value is null
+                if drop_fn != Pointer::from_int(0) {
+                    let FunctionDefinition {def_id, substs, sig, ..} = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue()?;
+                    let real_ty = sig.inputs()[0];
+                    self.drop(Lvalue::from_ptr(ptr), real_ty, drop)?;
+                    drop.push((def_id, Value::ByVal(PrimVal::Ptr(ptr)), substs));
+                } else {
+                    // just a sanity check
+                    assert_eq!(drop_fn.offset, 0);
+                }
+            },
+            ty::TySlice(elem_ty) => {
+                let (ptr, len) = match lval {
+                    Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => (ptr, len),
+                    _ => bug!("expected an lvalue with a length"),
+                };
+                let size = self.type_size(elem_ty)?.expect("slice element must be sized");
+                // FIXME: this creates a lot of stack frames if the element type has
+                // a drop impl
+                for i in 0..len {
+                    self.drop(Lvalue::from_ptr(ptr.offset(i * size)), elem_ty, drop)?;
+                }
+            },
+            ty::TyArray(elem_ty, len) => {
+                let lval = self.force_allocation(lval)?;
+                let (ptr, extra) = match lval {
+                    Lvalue::Ptr { ptr, extra } => (ptr, extra),
+                    _ => bug!("expected an lvalue with optional extra data"),
+                };
+                let size = self.type_size(elem_ty)?.expect("array element cannot be unsized");
+                // FIXME: this creates a lot of stack frames if the element type has
+                // a drop impl
+                for i in 0..(len as u64) {
+                    self.drop(Lvalue::Ptr { ptr: ptr.offset(i * size), extra }, elem_ty, drop)?;
+                }
+            },
+            // FIXME: what about TyClosure and TyAnon?
+            // other types do not need to process drop
+            _ => {},
+        }
+
+        Ok(())
+    }
+
+    fn drop_fields<I>(
+        &mut self,
+        mut fields: I,
+        lval: Lvalue<'tcx>,
+        drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
+    ) -> EvalResult<'tcx>
+        where I: Iterator<Item=(Ty<'tcx>, ty::layout::Size)>,
+    {
+        // FIXME: some aggregates may be represented by Value::ByValPair
+        let (adt_ptr, extra) = self.force_allocation(lval)?.to_ptr_and_extra();
+        // manual iteration, because we need to be careful about the last field if it is unsized
+        while let Some((field_ty, offset)) = fields.next() {
+            let ptr = adt_ptr.offset(offset.bytes());
+            if self.type_is_sized(field_ty) {
+                self.drop(Lvalue::from_ptr(ptr), field_ty, drop)?;
+            } else {
+                self.drop(Lvalue::Ptr { ptr, extra }, field_ty, drop)?;
+                break; // if it is not sized, then this is the last field anyway
+            }
+        }
+        assert!(fields.next().is_none());
+        Ok(())
+    }
+
+    fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+        self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment())
+    }
+}
diff --git a/src/terminator/mod.rs b/src/terminator/mod.rs
index 2a9fe179fa..e6ed715280 100644
--- a/src/terminator/mod.rs
+++ b/src/terminator/mod.rs
@@ -3,19 +3,20 @@
 use rustc::mir;
 use rustc::traits::{self, Reveal};
 use rustc::ty::fold::TypeFoldable;
 use rustc::ty::layout::{Layout, Size};
-use rustc::ty::subst::{Substs, Kind};
+use rustc::ty::subst::Substs;
 use rustc::ty::{self, Ty, TyCtxt, BareFnTy};
 use syntax::codemap::{DUMMY_SP, Span};
 use syntax::{ast, attr, abi};

 use error::{EvalError, EvalResult};
-use eval_context::{EvalContext, IntegerExt, StackPopCleanup, monomorphize_field_ty, is_inhabited};
-use lvalue::{Lvalue, LvalueExtra};
+use eval_context::{EvalContext, IntegerExt, StackPopCleanup, is_inhabited};
+use lvalue::Lvalue;
 use memory::{Pointer, FunctionDefinition, Function};
 use value::PrimVal;
 use value::Value;

 mod intrinsic;
+mod drop;

 impl<'a, 'tcx> EvalContext<'a, 'tcx> {
@@ -161,31 +162,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         Ok(())
     }

-    pub fn eval_drop_impls(&mut self, drops: Vec<(DefId, Value, &'tcx Substs<'tcx>)>, span: Span) -> EvalResult<'tcx> {
-        // add them to the stack in reverse order, because the impl that needs to run the last
-        // is the one that needs to be at the bottom of the stack
-        for (drop_def_id, self_arg, substs) in drops.into_iter().rev() {
-            let mir = self.load_mir(drop_def_id)?;
-            trace!("substs for drop glue: {:?}", substs);
-            self.push_stack_frame(
-                drop_def_id,
-                span,
-                mir,
-                substs,
-                Lvalue::from_ptr(Pointer::zst_ptr()),
-                StackPopCleanup::None,
-                Vec::new(),
-            )?;
-            let mut arg_locals = self.frame().mir.args_iter();
-            let first = arg_locals.next().expect("drop impl has self arg");
-            assert!(arg_locals.next().is_none(), "drop impl should have only one arg");
-            let dest = self.eval_lvalue(&mir::Lvalue::Local(first))?;
-            let ty = self.frame().mir.local_decls[first].ty;
-            self.write_value(self_arg, dest, ty)?;
-        }
-        Ok(())
-    }
-
     fn eval_fn_call(
         &mut self,
         def_id: DefId,
@@ -699,200 +675,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             vtable => bug!("resolved vtable bad vtable {:?} in trans", vtable),
         }
     }
-
-    pub(super) fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
-        self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment())
-    }
-
-    /// push DefIds of drop impls and their argument on the given vector
-    pub fn drop(
-        &mut self,
-        lval: Lvalue<'tcx>,
-        ty: Ty<'tcx>,
-        drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
-    ) -> EvalResult<'tcx> {
-        if !self.type_needs_drop(ty) {
-            debug!("no need to drop {:?}", ty);
-            return Ok(());
-        }
-        trace!("-need to drop {:?} at {:?}", ty, lval);
-
-        match ty.sty {
-            // special case `Box` to deallocate the inner allocation
-            ty::TyAdt(ref def, _) if def.is_box() => {
-                let contents_ty = ty.boxed_ty();
-                let val = self.read_lvalue(lval);
-                // we are going through the read_value path, because that already does all the
-                // checks for the trait object types. We'd only be repeating ourselves here.
-                let val = self.follow_by_ref_value(val, ty)?;
-                trace!("box dealloc on {:?}", val);
-                match val {
-                    Value::ByRef(_) => bug!("follow_by_ref_value can't result in ByRef"),
-                    Value::ByVal(ptr) => {
-                        assert!(self.type_is_sized(contents_ty));
-                        let contents_ptr = ptr.to_ptr()?;
-                        self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?;
-                    },
-                    Value::ByValPair(prim_ptr, extra) => {
-                        let ptr = prim_ptr.to_ptr()?;
-                        let extra = match self.tcx.struct_tail(contents_ty).sty {
-                            ty::TyDynamic(..) => LvalueExtra::Vtable(extra.to_ptr()?),
-                            ty::TyStr | ty::TySlice(_) => LvalueExtra::Length(extra.to_u64()?),
-                            _ => bug!("invalid fat pointer type: {}", ty),
-                        };
-                        self.drop(Lvalue::Ptr { ptr, extra }, contents_ty, drop)?;
-                    },
-                }
-                let box_free_fn = self.tcx.lang_items.box_free_fn().expect("no box_free lang item");
-                let substs = self.tcx.intern_substs(&[Kind::from(contents_ty)]);
-                // this is somewhat hacky, but hey, there's no representation difference between
-                // pointers and references, so
-                // #[lang = "box_free"] unsafe fn box_free<T>(ptr: *mut T)
-                // is the same as
-                // fn drop(&mut self) if Self is Box<T>
-                drop.push((box_free_fn, val, substs));
-            },
-
-            ty::TyAdt(adt_def, substs) => {
-                // FIXME: some structs are represented as ByValPair
-                let lval = self.force_allocation(lval)?;
-                let adt_ptr = match lval {
-                    Lvalue::Ptr { ptr, .. } => ptr,
-                    _ => bug!("force allocation can only yield Lvalue::Ptr"),
-                };
-                // run drop impl before the fields' drop impls
-                if let Some(drop_def_id) = adt_def.destructor() {
-                    drop.push((drop_def_id, Value::ByVal(PrimVal::Ptr(adt_ptr)), substs));
-                }
-                let layout = self.type_layout(ty)?;
-                let fields = match *layout {
-                    Layout::Univariant { ref variant, .. } => {
-                        adt_def.struct_variant().fields.iter().zip(&variant.offsets)
-                    },
-                    Layout::General { ref variants, .. } => {
-                        let discr_val = self.read_discriminant_value(adt_ptr, ty)? as u128;
-                        match adt_def.variants.iter().position(|v| discr_val == v.disr_val.to_u128_unchecked()) {
-                            // start at offset 1, to skip over the discriminant
-                            Some(i) => adt_def.variants[i].fields.iter().zip(&variants[i].offsets[1..]),
-                            None => return Err(EvalError::InvalidDiscriminant),
-                        }
-                    },
-                    Layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
-                        let discr = self.read_discriminant_value(adt_ptr, ty)?;
-                        if discr == nndiscr as u128 {
-                            assert_eq!(discr as usize as u128, discr);
-                            adt_def.variants[discr as usize].fields.iter().zip(&nonnull.offsets)
-                        } else {
-                            // FIXME: the zst variant might contain zst types that impl Drop
-                            return Ok(()); // nothing to do, this is zero sized (e.g. `None`)
-                        }
-                    },
-                    Layout::RawNullablePointer { nndiscr, .. } => {
-                        let discr = self.read_discriminant_value(adt_ptr, ty)?;
-                        if discr == nndiscr as u128 {
-                            assert_eq!(discr as usize as u128, discr);
-                            assert_eq!(adt_def.variants[discr as usize].fields.len(), 1);
-                            let field_ty = &adt_def.variants[discr as usize].fields[0];
-                            let field_ty = monomorphize_field_ty(self.tcx, field_ty, substs);
-                            // FIXME: once read_discriminant_value works with lvalue, don't force
-                            // alloc in the RawNullablePointer case
-                            self.drop(lval, field_ty, drop)?;
-                            return Ok(());
-                        } else {
-                            // FIXME: the zst variant might contain zst types that impl Drop
-                            return Ok(()); // nothing to do, this is zero sized (e.g. `None`)
-                        }
-                    },
-                    Layout::CEnum { .. } => return Ok(()),
-                    _ => bug!("{:?} is not an adt layout", layout),
-                };
-                let tcx = self.tcx;
-                self.drop_fields(
-                    fields.map(|(ty, &offset)| (monomorphize_field_ty(tcx, ty, substs), offset)),
-                    lval,
-                    drop,
-                )?;
-            },
-            ty::TyTuple(fields, _) => {
-                let offsets = match *self.type_layout(ty)? {
-                    Layout::Univariant { ref variant, .. } => &variant.offsets,
-                    _ => bug!("tuples must be univariant"),
-                };
-                self.drop_fields(fields.iter().cloned().zip(offsets.iter().cloned()), lval, drop)?;
-            },
-            ty::TyDynamic(..) => {
-                let (ptr, vtable) = match lval {
-                    Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => (ptr, vtable),
-                    _ => bug!("expected an lvalue with a vtable"),
-                };
-                let drop_fn = self.memory.read_ptr(vtable)?;
-                // some values don't need to call a drop impl, so the value is null
-                if drop_fn != Pointer::from_int(0) {
-                    let FunctionDefinition {def_id, substs, sig, ..} = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue()?;
-                    let real_ty = sig.inputs()[0];
-                    self.drop(Lvalue::from_ptr(ptr), real_ty, drop)?;
-                    drop.push((def_id, Value::ByVal(PrimVal::Ptr(ptr)), substs));
-                } else {
-                    // just a sanity check
-                    assert_eq!(drop_fn.offset, 0);
-                }
-            },
-            ty::TySlice(elem_ty) => {
-                let (ptr, len) = match lval {
-                    Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => (ptr, len),
-                    _ => bug!("expected an lvalue with a length"),
-                };
-                let size = self.type_size(elem_ty)?.expect("slice element must be sized");
-                // FIXME: this creates a lot of stack frames if the element type has
-                // a drop impl
-                for i in 0..len {
-                    self.drop(Lvalue::from_ptr(ptr.offset(i * size)), elem_ty, drop)?;
-                }
-            },
-            ty::TyArray(elem_ty, len) => {
-                let lval = self.force_allocation(lval)?;
-                let (ptr, extra) = match lval {
-                    Lvalue::Ptr { ptr, extra } => (ptr, extra),
-                    _ => bug!("expected an lvalue with optional extra data"),
-                };
-                let size = self.type_size(elem_ty)?.expect("array element cannot be unsized");
-                // FIXME: this creates a lot of stack frames if the element type has
-                // a drop impl
-                for i in 0..(len as u64) {
-                    self.drop(Lvalue::Ptr { ptr: ptr.offset(i * size), extra }, elem_ty, drop)?;
-                }
-            },
-            // FIXME: what about TyClosure and TyAnon?
-            // other types do not need to process drop
-            _ => {},
-        }
-
-        Ok(())
-    }
-
-    fn drop_fields<I>(
-        &mut self,
-        mut fields: I,
-        lval: Lvalue<'tcx>,
-        drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
-    ) -> EvalResult<'tcx>
-        where I: Iterator<Item=(Ty<'tcx>, ty::layout::Size)>,
-    {
-        // FIXME: some aggregates may be represented by Value::ByValPair
-        let (adt_ptr, extra) = self.force_allocation(lval)?.to_ptr_and_extra();
-        // manual iteration, because we need to be careful about the last field if it is unsized
-        while let Some((field_ty, offset)) = fields.next() {
-            let ptr = adt_ptr.offset(offset.bytes());
-            if self.type_is_sized(field_ty) {
-                self.drop(Lvalue::from_ptr(ptr), field_ty, drop)?;
-            } else {
-                self.drop(Lvalue::Ptr { ptr, extra }, field_ty, drop)?;
-                break; // if it is not sized, then this is the last field anyway
-            }
-        }
-        assert!(fields.next().is_none());
-        Ok(())
-    }
 }

 #[derive(Debug)]

From d92085fd0e113d4e4cb7f6c204008541760e39cc Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Thu, 9 Feb 2017 19:08:08 +0100
Subject: [PATCH 3/7] properly extract the inner type in a drop impl

---
 src/terminator/drop.rs          |  6 ++++--
 tests/run-pass/box_box_trait.rs | 29 +++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 2 deletions(-)
 create mode 100644 tests/run-pass/box_box_trait.rs

diff --git a/src/terminator/drop.rs b/src/terminator/drop.rs
index 32c5073421..3b0ca371bb 100644
--- a/src/terminator/drop.rs
+++ b/src/terminator/drop.rs
@@ -165,9 +165,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 // some values don't need to call a drop impl, so the value is null
                 if drop_fn != Pointer::from_int(0) {
                     let FunctionDefinition {def_id, substs, sig, ..} = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue()?;
-                    let real_ty = sig.inputs()[0];
+                    let real_ty = match sig.inputs()[0].sty {
+                        ty::TyRef(_, mt) => self.monomorphize(mt.ty, substs),
+                        _ => bug!("first argument of Drop::drop must be &mut T"),
+                    };
                     self.drop(Lvalue::from_ptr(ptr), real_ty, drop)?;
-                    drop.push((def_id, Value::ByVal(PrimVal::Ptr(ptr)), substs));
                 } else {
                     // just a sanity check
                     assert_eq!(drop_fn.offset, 0);
diff --git a/tests/run-pass/box_box_trait.rs b/tests/run-pass/box_box_trait.rs
new file mode 100644
index 0000000000..57eef52d57
--- /dev/null
+++ b/tests/run-pass/box_box_trait.rs
@@ -0,0 +1,29 @@
+#![feature(box_syntax)]
+
+struct DroppableStruct;
+
+static mut DROPPED: bool = false;
+
+impl Drop for DroppableStruct {
+    fn drop(&mut self) {
+        unsafe { DROPPED = true; }
+    }
+}
+
+trait MyTrait { fn dummy(&self) { } }
+impl MyTrait for Box<DroppableStruct> {}
+
+struct Whatever { w: Box<MyTrait> }
+impl Whatever {
+    fn new(w: Box<MyTrait>) -> Whatever {
+        Whatever { w: w }
+    }
+}
+
+fn main() {
+    {
+        let f: Box<_> = box DroppableStruct;
+        let _a = Whatever::new(box f as Box<MyTrait>);
+    }
+    assert!(unsafe { DROPPED });
+}

From c06251b0d2765f31c7e1b3f94490c5e7f454a3b6 Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Thu, 9 Feb 2017 19:08:24 +0100
Subject: [PATCH 4/7] double space

---
 src/vtable.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/vtable.rs b/src/vtable.rs
index bfae608ecb..602edba0f5 100644
--- a/src/vtable.rs
+++ b/src/vtable.rs
@@ -94,7 +94,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         self.memory.write_usize(vtable, 0)?;
         if let ty::TyAdt(adt_def, substs) = trait_ref.self_ty().sty {
             if let Some(drop_def_id) = adt_def.destructor() {
-                let fn_ty = match  self.tcx.item_type(drop_def_id).sty {
+                let fn_ty = match self.tcx.item_type(drop_def_id).sty {
                     ty::TyFnDef(_, _, fn_ty) => self.tcx.erase_regions(&fn_ty),
                     _ => bug!("drop method is not a TyFnDef"),
                 };

From 023ec3e39599993bb64e724ae14c797bd3ebb96e Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Thu, 9 Feb 2017 19:15:40 +0100
Subject: [PATCH 5/7] add some comments for clarification

---
 src/terminator/drop.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/terminator/drop.rs b/src/terminator/drop.rs
index 3b0ca371bb..4ee87c34fc 100644
--- a/src/terminator/drop.rs
+++ b/src/terminator/drop.rs
@@ -79,10 +79,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                         self.drop(Lvalue::Ptr { ptr, extra }, contents_ty, drop)?;
                     },
                 }
+                // We cannot use Box's destructor, because it is a no-op and only exists to reduce
+                // the number of hacks required in the compiler around the Box type.
                 let box_free_fn = self.tcx.lang_items.box_free_fn().expect("no box_free lang item");
                 let substs = self.tcx.intern_substs(&[Kind::from(contents_ty)]);
                 // this is somewhat hacky, but hey, there's no representation difference between
-                // pointers and references, so
+                // pointers, `Box`es and references, so
                 // #[lang = "box_free"] unsafe fn box_free<T>(ptr: *mut T)
                 // is the same as
                 // fn drop(&mut self) if Self is Box<T>
@@ -164,7 +166,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let drop_fn = self.memory.read_ptr(vtable)?;
                 // some values don't need to call a drop impl, so the value is null
                 if drop_fn != Pointer::from_int(0) {
-                    let FunctionDefinition {def_id, substs, sig, ..} = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue()?;
+                    // FIXME: change the `DropGlue` variant of `Function` to only contain `real_ty`
+                    let FunctionDefinition {substs, sig, ..} = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue()?;
+                    // The real type is taken from the self argument in `fn drop(&mut self)`
                     let real_ty = match sig.inputs()[0].sty {
                         ty::TyRef(_, mt) => self.monomorphize(mt.ty, substs),
                         _ => bug!("first argument of Drop::drop must be &mut T"),

From e58f750a49ce1a160a4de36a01bddcb1f1b38d42 Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Thu, 9 Feb 2017 19:27:07 +0100
Subject: [PATCH 6/7] refactor drop glue

---
 src/memory.rs          | 25 +++++++++----------------
 src/terminator/drop.rs | 10 ++--------
 src/vtable.rs          |  7 ++++++-
 3 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/src/memory.rs b/src/memory.rs
index cbef71c68f..0f3094522e 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -120,9 +120,9 @@ pub enum Function<'tcx> {
     Concrete(FunctionDefinition<'tcx>),
     /// Glue required to call a regular function through a Fn(Mut|Once) trait object
     FnDefAsTraitObject(FunctionDefinition<'tcx>),
-    /// Glue required to call the actual drop impl's `drop` method.
-    /// Drop glue takes the `self` by value, while `Drop::drop` take `&mut self`
-    DropGlue(FunctionDefinition<'tcx>),
+    /// A drop glue function only needs to know the real type, and then miri can extract the
+    /// actual type at runtime.
+    DropGlue(ty::Ty<'tcx>),
     /// Glue required to treat the ptr part of a fat pointer
     /// as a function pointer
     FnPtrAsTraitObject(&'tcx ty::FnSig<'tcx>),
@@ -137,9 +137,9 @@ impl<'tcx> Function<'tcx> {
             other => Err(EvalError::ExpectedConcreteFunction(other)),
         }
     }
-    pub fn expect_drop_glue(self) -> EvalResult<'tcx, FunctionDefinition<'tcx>> {
+    pub fn expect_drop_glue_real_ty(self) -> EvalResult<'tcx, ty::Ty<'tcx>> {
         match self {
-            Function::DropGlue(fn_def) => Ok(fn_def),
+            Function::DropGlue(real_ty) => Ok(real_ty),
             other => Err(EvalError::ExpectedDropGlue(other)),
         }
     }
@@ -234,15 +234,8 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
         self.create_fn_alloc(Function::FnPtrAsTraitObject(fn_ty.sig.skip_binder()))
     }

-    pub fn create_drop_glue(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, fn_ty: &'tcx BareFnTy<'tcx>) -> Pointer {
-        self.create_fn_alloc(Function::DropGlue(FunctionDefinition {
-            def_id,
-            substs,
-            abi: fn_ty.abi,
-            // FIXME: why doesn't this compile?
-            //sig: tcx.erase_late_bound_regions(&fn_ty.sig),
-            sig: fn_ty.sig.skip_binder(),
-        }))
+    pub fn create_drop_glue(&mut self, ty: ty::Ty<'tcx>) -> Pointer {
+        self.create_fn_alloc(Function::DropGlue(ty))
     }

     pub fn create_fn_ptr(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, fn_ty: &'tcx BareFnTy<'tcx>) -> Pointer {
@@ -495,8 +488,8 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
                     trace!("{} {}", msg, dump_fn_def(fn_def));
                     continue;
                 },
-                (None, Some(&Function::DropGlue(fn_def))) => {
-                    trace!("{} drop glue for {}", msg, dump_fn_def(fn_def));
+                (None, Some(&Function::DropGlue(real_ty))) => {
+                    trace!("{} drop glue for {}", msg, real_ty);
                     continue;
                 },
                 (None, Some(&Function::FnDefAsTraitObject(fn_def))) => {
diff --git a/src/terminator/drop.rs b/src/terminator/drop.rs
index 4ee87c34fc..cb971eca29 100644
--- a/src/terminator/drop.rs
+++ b/src/terminator/drop.rs
@@ -8,7 +8,7 @@ use syntax::codemap::Span;
 use error::{EvalError, EvalResult};
 use eval_context::{EvalContext, monomorphize_field_ty, StackPopCleanup};
 use lvalue::{Lvalue, LvalueExtra};
-use memory::{Pointer, FunctionDefinition};
+use memory::Pointer;
 use value::PrimVal;
 use value::Value;

@@ -166,13 +166,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let drop_fn = self.memory.read_ptr(vtable)?;
                 // some values don't need to call a drop impl, so the value is null
                 if drop_fn != Pointer::from_int(0) {
-                    // FIXME: change the `DropGlue` variant of `Function` to only contain `real_ty`
-                    let FunctionDefinition {substs, sig, ..} = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue()?;
-                    // The real type is taken from the self argument in `fn drop(&mut self)`
-                    let real_ty = match sig.inputs()[0].sty {
-                        ty::TyRef(_, mt) => self.monomorphize(mt.ty, substs),
-                        _ => bug!("first argument of Drop::drop must be &mut T"),
-                    };
+                    let real_ty = self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue_real_ty()?;
                     self.drop(Lvalue::from_ptr(ptr), real_ty, drop)?;
                 } else {
                     // just a sanity check
diff --git a/src/vtable.rs b/src/vtable.rs
index 602edba0f5..78acfd9dde 100644
--- a/src/vtable.rs
+++ b/src/vtable.rs
@@ -98,7 +98,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     ty::TyFnDef(_, _, fn_ty) => self.tcx.erase_regions(&fn_ty),
                     _ => bug!("drop method is not a TyFnDef"),
                 };
-                let fn_ptr = self.memory.create_drop_glue(self.tcx, drop_def_id, substs, fn_ty);
+                // The real type is taken from the self argument in `fn drop(&mut self)`
+                let real_ty = match fn_ty.sig.skip_binder().inputs()[0].sty {
+                    ty::TyRef(_, mt) => self.monomorphize(mt.ty, substs),
+                    _ => bug!("first argument of Drop::drop must be &mut T"),
+                };
+                let fn_ptr = self.memory.create_drop_glue(real_ty);
                 self.memory.write_ptr(vtable, fn_ptr)?;
             }
         }

From 333264c956cc4d2d821b8707abc5657a0f1e1369 Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Fri, 10 Feb 2017 08:13:18 +0100
Subject: [PATCH 7/7] clarify comment on drop glue

---
 src/memory.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/memory.rs b/src/memory.rs
index 0f3094522e..835c7bca7f 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -120,8 +120,10 @@ pub enum Function<'tcx> {
     Concrete(FunctionDefinition<'tcx>),
     /// Glue required to call a regular function through a Fn(Mut|Once) trait object
     FnDefAsTraitObject(FunctionDefinition<'tcx>),
-    /// A drop glue function only needs to know the real type, and then miri can extract the
-    /// actual type at runtime.
+    /// A drop glue function only needs to know the real type, and then miri can extract
+    /// that type from a vtable's drop pointer.
+    /// Instead of storing some drop function, we act as if there are no trait objects, by
+    /// mapping trait objects to their real types before acting on them.
     DropGlue(ty::Ty<'tcx>),
     /// Glue required to treat the ptr part of a fat pointer
     /// as a function pointer
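
The doc comment on `DropGlue` above sums up the refactor from the last two patches: a vtable's drop slot no longer stores a callable function, only the concrete type it erases, and the interpreter maps a trait object back to that type before dropping it. Below is a rough, self-contained sketch of that idea in plain Rust, not part of the patch series; `RealTy`, `Function`, and `Vtable` are illustrative stand-ins for miri's interned `ty::Ty<'tcx>`, its `Function<'tcx>` enum, and its vtable allocations.

// Illustrative sketch only -- not part of the patches above.
// Stand-in for miri's interned `ty::Ty<'tcx>`; here just a type name.
#[derive(Clone, Copy, Debug)]
struct RealTy(&'static str);

// Stand-in for `Function<'tcx>`: the drop-glue entry carries no code,
// only the real type that sits behind the trait object.
enum Function {
    // miri has more variants (Concrete, FnPtrAsTraitObject, ...);
    // only the drop-glue case matters for this sketch.
    DropGlue(RealTy),
}

// A toy vtable whose first slot is the drop entry; `None` models the
// null drop pointer written for types that need no drop.
struct Vtable {
    drop_entry: Option<Function>,
}

// "Dropping" through the vtable: read the recorded real type and then
// continue as if there were no trait object at all.
fn drop_dyn(vtable: &Vtable) {
    match vtable.drop_entry {
        Some(Function::DropGlue(real_ty)) => {
            println!("drop a value of concrete type `{}`", real_ty.0);
        }
        None => println!("no drop needed"),
    }
}

fn main() {
    let needs_drop = Vtable { drop_entry: Some(Function::DropGlue(RealTy("DroppableStruct"))) };
    let no_drop = Vtable { drop_entry: None };
    drop_dyn(&needs_drop);
    drop_dyn(&no_drop);
}

The point of the design is visible in `drop_dyn`: once the real type is known, the trait object can be handled exactly like a value of that type, which is why `expect_drop_glue_real_ty` in the patches only needs to return a `ty::Ty` rather than a function definition.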